python_code stringlengths 0–187k | repo_name stringlengths 8–46 | file_path stringlengths 6–135 |
---|---|---|
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""The base :class:`~composer.devices.device.Device` class."""
from abc import ABC, abstractmethod
from collections.abc import Mapping, Sequence
from typing import Any, Callable, TypeVar
import torch
import torch.nn
from torch.optim import Optimizer
from composer.core.serializable import Serializable
__all__ = ['Device', 'T_nnModule']
T_nnModule = TypeVar('T_nnModule', bound=torch.nn.Module)
T_Batch = TypeVar('T_Batch')
class Device(Serializable, ABC):
"""Abstract class for a device on which a model runs.
Attributes:
dist_backend (str): Distributed backend to use.
Should be ``gloo``, ``mpi``, or ``nccl``.
See `the pytorch docs <https://pytorch.org/docs/stable/distributed.html>`_
for details.
"""
dist_backend: str = ''
name: str = ''
_device = None
@abstractmethod
def module_to_device(self, module: T_nnModule) -> T_nnModule:
"""Invoked by the :class:`.Trainer` to move a ``module`` onto the device.
Args:
module (torch.nn.Module): The module to move to the device.
Returns:
torch.nn.Module: The module on the device.
"""
pass
@abstractmethod
def tensor_to_device(self, tensor: torch.Tensor) -> torch.Tensor:
"""Invoked by the :class:`.Trainer` to move a tensor onto a device.
Args:
tensor (Tensor): The tensor to move to the device.
Returns:
Tensor: The tensor on the device.
"""
pass
def batch_to_device(self, batch: T_Batch) -> T_Batch:
"""Invoked by the :class:`.Trainer` move all tensors items in a batch to device.
Supports nested sequences and mappings of tensors. Ignores non-tensor items. Preserves sequence and mapping types
when possible; otherwise, sequences are converted to lists, and mappings are converted to dictionaries.
Args:
batch (Any): The batch to move to the device.
Returns:
Batch: The batch on the device.
"""
def _to_device(x):
if isinstance(x, torch.Tensor):
return self.tensor_to_device(x)
return x
return _map_batch(batch, _to_device)
def optimizer_to_device(self, optimizer: Optimizer) -> Optimizer:
"""Invoked by the :class:`.Trainer` to move the optimizer's state onto the device.
Args:
optimizer (Optimizer): The optimizer to move to the device.
Returns:
Optimizer: The optimizer on the device.
"""
for state in optimizer.state.values():
for k, v in state.items():
if isinstance(v, torch.Tensor):
state[k] = self.tensor_to_device(v)
return optimizer
def _map_batch(batch: Any, map_fn: Callable) -> Any:
"""Recursively maps a function to all items in a batch.
Args:
batch: Nested lists and dictionaries.
map_fn: A function to invoke on each element.
Returns:
Collections: The result of applying ``map_fn`` to each element of ``batch``.
The type of ``batch`` is preserved.
"""
if isinstance(batch, Mapping):
return {k: _map_batch(v, map_fn) for k, v in batch.items()}
if isinstance(batch, Sequence) and not isinstance(batch, (str, bytes)):
try:
return type(batch)(_map_batch(x, map_fn) for x in batch) # type: ignore
except TypeError:
return [_map_batch(x, map_fn) for x in batch]
return map_fn(batch)
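if __name__ == '__main__':
    # Illustrative sketch only: shows how ``batch_to_device`` recurses through
    # nested containers and moves just the tensors. ``DeviceCPU`` is used here
    # purely so the example runs anywhere.
    from composer.devices import DeviceCPU

    device = DeviceCPU()
    batch = {'inputs': torch.ones(2, 3), 'meta': [torch.zeros(2), 'id-0']}
    moved = device.batch_to_device(batch)
    assert moved['inputs'].device.type == 'cpu'  # tensors are moved to the device
    assert moved['meta'][1] == 'id-0'  # non-tensor items pass through unchanged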
| composer-dev | composer/devices/device.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Module for devices on which models run."""
from composer.devices.device import Device
from composer.devices.device_cpu import DeviceCPU
from composer.devices.device_gpu import DeviceGPU
from composer.devices.device_mps import DeviceMPS
from composer.devices.device_tpu import DeviceTPU
__all__ = ['Device', 'DeviceCPU', 'DeviceGPU', 'DeviceMPS', 'DeviceTPU']
| composer-dev | composer/devices/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""The CPU device used for training."""
from __future__ import annotations
import logging
from typing import Any, Dict, TypeVar
import torch
from composer.devices.device import Device
logger = logging.getLogger(__name__)
__all__ = ['DeviceCPU']
T_nnModule = TypeVar('T_nnModule', bound=torch.nn.Module)
class DeviceCPU(Device):
"""An extension of :class:`~composer.devices.device.Device` for CPUs.
This class takes no arguments.
"""
dist_backend = 'gloo'
name = 'cpu'
_device = torch.device('cpu')
def module_to_device(self, module: T_nnModule) -> T_nnModule:
return module.to(self._device)
def tensor_to_device(self, tensor: torch.Tensor) -> torch.Tensor:
return tensor.to(self._device)
def state_dict(self) -> Dict[str, Any]:
# CPU device has no RNG state
return {}
def load_state_dict(self, state: Dict[str, Any]) -> None:
if len(state) != 0:
raise ValueError('CPU device has no state.')
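if __name__ == '__main__':
    # Illustrative sketch only: DeviceCPU carries no RNG state, so its
    # state_dict round-trips as an empty dict, and modules/tensors are simply
    # moved to ``torch.device('cpu')``.
    device = DeviceCPU()
    assert device.state_dict() == {}
    device.load_state_dict({})  # only an empty dict is accepted
    print(device.module_to_device(torch.nn.Linear(2, 2)))
    print(device.tensor_to_device(torch.zeros(2)).device)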
| composer-dev | composer/devices/device_cpu.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""The Apple M-series device used for training."""
from __future__ import annotations
from typing import Any, Dict, TypeVar
import torch
import torch.cuda.amp
import torch.utils.data
from packaging import version
from composer.devices.device import Device
__all__ = ['DeviceMPS']
T_nnModule = TypeVar('T_nnModule', bound=torch.nn.Module)
class DeviceMPS(Device):
"""Device to support MPS, for training on Apple's M-series chips.
This class takes no arguments.
"""
dist_backend = ''
name = 'mps'
def __init__(self):
if version.parse(torch.__version__) < version.parse('1.12.0'):
raise RuntimeError('Support for MPS device requires torch >= 1.12.')
if not torch.backends.mps.is_available(): # type: ignore (version guarded)
raise RuntimeError('MPS requires macOS >= 12.3')
if not torch.backends.mps.is_built(): # type: ignore (version guarded)
raise RuntimeError('torch was not built with MPS support.')
self._device = torch.device('mps')
def module_to_device(self, module: T_nnModule) -> T_nnModule:
return module.to(self._device)
def tensor_to_device(self, tensor: torch.Tensor) -> torch.Tensor:
return tensor.to(self._device)
def state_dict(self) -> Dict[str, Any]:
return {}
def load_state_dict(self, state: Dict[str, Any]) -> None:
if len(state) != 0:
raise ValueError('MPS device has no state.')
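if __name__ == '__main__':
    # Illustrative sketch only: constructing DeviceMPS re-runs the torch
    # version and MPS availability checks above, so guard accordingly before
    # instantiating it on non-Apple hardware.
    if version.parse(torch.__version__) >= version.parse('1.12.0') and torch.backends.mps.is_available():
        device = DeviceMPS()
        print(device.tensor_to_device(torch.zeros(2)).device)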
| composer-dev | composer/devices/device_mps.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""The TPU device used for training."""
from __future__ import annotations
import logging
from typing import Any, Dict, TypeVar
import torch
from composer.devices.device import Device
logger = logging.getLogger(__name__)
__all__ = ['DeviceTPU']
T_nnModule = TypeVar('T_nnModule', bound=torch.nn.Module)
class DeviceTPU(Device):
"""An extension of :class:`~composer.devices.device.Device` for TPUs.
When running on TPU VMs, you need to ``export PJRT_DEVICE=TPU``.
"""
name = 'tpu'
def __init__(self):
import torch_xla.core.xla_model as xm
self._device = xm.xla_device()
def module_to_device(self, module: T_nnModule) -> T_nnModule:
return module.to(self._device)
def tensor_to_device(self, tensor: torch.Tensor) -> torch.Tensor:
return tensor.to(self._device)
def state_dict(self) -> Dict[str, Any]:
return {}
def load_state_dict(self, state: Dict[str, Any]) -> None:
if len(state) != 0:
raise ValueError('TPU device has no state.')
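if __name__ == '__main__':
    # Illustrative sketch only: requires ``torch_xla`` to be installed; on a
    # TPU VM the ``PJRT_DEVICE=TPU`` environment variable (noted in the class
    # docstring) must be set before the device is constructed.
    import os
    os.environ.setdefault('PJRT_DEVICE', 'TPU')
    device = DeviceTPU()
    print(device.tensor_to_device(torch.zeros(2)).device)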
| composer-dev | composer/devices/device_tpu.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""The GPU device used for training."""
from __future__ import annotations
from typing import Any, Dict, Optional, TypeVar
import torch
import torch.backends.cuda
import torch.backends.cudnn
import torch.cuda
import torch.cuda.amp
import torch.utils.data
from composer.devices.device import Device
from composer.utils import dist
__all__ = ['DeviceGPU']
T_nnModule = TypeVar('T_nnModule', bound=torch.nn.Module)
class DeviceGPU(Device):
"""An extension of :class:`~composer.devices.device.Device` for GPUs.
Args:
device_id (int, optional): Integer ID of a GPU device to train with. If not specified, the local rank
of the current process is used. Default: None.
allow_tf32 (bool, optional): Whether to allow TF32 matrix multiplications. Defaults to True.
For more information, see :ref:`torch:tf32_on_ampere`.
"""
dist_backend = 'nccl'
name = 'gpu'
def __init__(
self,
device_id: Optional[int] = None,
*,
allow_tf32: bool = True,
):
if not torch.cuda.is_available():
raise ValueError('DeviceGPU cannot be created as torch.cuda is not available.')
if device_id is None:
device_id = dist.get_local_rank()
self._device = torch.device(f'cuda:{device_id}')
torch.cuda.set_device(self._device)
assert torch.cuda.current_device() == device_id
torch.backends.cuda.matmul.allow_tf32 = allow_tf32
# pyright error: "allow_tf32" is not a known member of module
# however, this flag exists on pytorch 1.9+: https://pytorch.org/docs/1.9.0/backends.html
torch.backends.cudnn.allow_tf32 = allow_tf32 # type: ignore
def module_to_device(self, module: T_nnModule) -> T_nnModule:
return module.to(self._device)
def tensor_to_device(self, tensor: torch.Tensor) -> torch.Tensor:
return tensor.to(self._device, non_blocking=True)
def state_dict(self) -> Dict[str, Any]:
return {
'rng': torch.cuda.get_rng_state(),
}
def load_state_dict(self, state: Dict[str, Any]) -> None:
torch.cuda.set_rng_state(state['rng'])
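if __name__ == '__main__':
    # Illustrative sketch only: round-trips the CUDA RNG state through the
    # device's state_dict/load_state_dict hooks. ``device_id=0`` is passed
    # explicitly so the sketch does not depend on distributed environment
    # variables being set.
    if torch.cuda.is_available():
        device = DeviceGPU(device_id=0)
        saved = device.state_dict()  # {'rng': <CUDA RNG state>}
        _ = torch.randn(4, device='cuda')  # advances the CUDA RNG
        device.load_state_dict(saved)  # restores the saved RNG state
        print(device.tensor_to_device(torch.zeros(2)).device)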
| composer-dev | composer/devices/device_gpu.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import logging
import os
import subprocess
import sys
import textwrap
from pathlib import Path
from typing import List
from unittest.mock import Mock
import pytest
import composer
from composer.core import Engine, Event
from composer.core.algorithm import Algorithm
from composer.core.callback import Callback
from composer.core.state import State
from composer.loggers import Logger, LoggerDestination
from tests.common.events import EventCounterCallback
@pytest.fixture
def always_match_algorithms():
return [
Mock(**{
'match.return_value': True,
'apply.return_value': n, # return encodes order
'interpolate_loss': False,
}) for n in range(5)
]
@pytest.fixture()
def dummy_logger(dummy_state: State):
return Logger(dummy_state)
@pytest.fixture
def never_match_algorithms():
attrs = {'match.return_value': False}
return [Mock(**attrs) for _ in range(5)]
def run_event(event: Event, state: State, logger: Logger):
runner = Engine(state, logger)
return runner.run_event(event)
class DummyCallback(Callback):
def __init__(self, file_path):
self.file_path = file_path
def init(self, state: State, logger: Logger):
with open(self.file_path, 'a') as f:
f.write('init callback, ')
def batch_end(self, state: State, logger: Logger):
with open(self.file_path, 'a') as f:
f.write('on_batch_end callback, ')
class DummyLoggerDestination(LoggerDestination):
def __init__(self, file_path):
self.file_path = file_path
def init(self, state: State, logger: Logger):
with open(self.file_path, 'a') as f:
f.write('init logger, ')
def batch_end(self, state: State, logger: Logger):
with open(self.file_path, 'a') as f:
f.write('on_batch_end logger, ')
def test_engine_runs_callbacks_in_correct_order(dummy_state, tmp_path):
file_path = tmp_path / Path('event_check.txt')
dummy_state.callbacks = [DummyCallback(file_path), DummyLoggerDestination(file_path)]
logger = Logger(dummy_state)
engine = Engine(dummy_state, logger)
engine.run_event(Event.INIT)
engine.run_event(Event.BATCH_END)
engine.run_event(Event.EPOCH_END)
engine.close()
expected_lines = ['init logger, init callback, on_batch_end callback, on_batch_end logger, ']
with open(file_path, 'r') as f:
actual_lines = f.readlines()
assert expected_lines == actual_lines
@pytest.mark.parametrize('event', list(Event))
class TestAlgorithms:
def test_algorithms_always_called(self, event: Event, dummy_state: State, always_match_algorithms: List[Algorithm],
dummy_logger: Logger):
dummy_state.algorithms = always_match_algorithms
_ = run_event(event, dummy_state, dummy_logger)
for algo in always_match_algorithms:
algo.apply.assert_called_once()
algo.match.assert_called_once()
def test_algorithms_never_called(self, event: Event, dummy_state: State, never_match_algorithms: List[Algorithm],
dummy_logger: Logger):
dummy_state.algorithms = never_match_algorithms
_ = run_event(event, dummy_state, dummy_logger)
for algo in never_match_algorithms:
algo.apply.assert_not_called()
algo.match.assert_called_once()
def test_engine_trace_all(self, event: Event, dummy_state: State, always_match_algorithms: List[Algorithm],
dummy_logger: Logger):
dummy_state.algorithms = always_match_algorithms
trace = run_event(event, dummy_state, dummy_logger)
assert all([tr.run for tr in trace.values()])
def test_engine_trace_never(self, event: Event, dummy_state: State, never_match_algorithms: List[Algorithm],
dummy_logger: Logger):
dummy_state.algorithms = never_match_algorithms
trace = run_event(event, dummy_state, dummy_logger)
assert all([tr.run is False for tr in trace.values()])
def test_engine_is_dead_after_close(dummy_state: State, dummy_logger: Logger):
# Create the trainer and run an event
engine = Engine(dummy_state, dummy_logger)
engine.run_event(Event.INIT)
# Close it
engine.close()
# Assert it complains if you try to run another event
with pytest.raises(RuntimeError):
engine.run_event(Event.FIT_START)
class IsClosedCallback(Callback):
def __init__(self) -> None:
self.is_closed = True
def init(self, state: State, logger: Logger) -> None:
assert self.is_closed
self.is_closed = False
def close(self, state: State, logger: Logger) -> None:
self.is_closed = True
def test_engine_closes_on_del(dummy_state: State, dummy_logger: Logger):
# Create the trainer and run an event
is_closed_callback = IsClosedCallback()
dummy_state.callbacks.append(is_closed_callback)
engine = Engine(dummy_state, dummy_logger)
engine.run_event(Event.INIT)
# Assert that there are exactly 2 references -- the `engine` variable above, and the temporary argument reference
assert sys.getrefcount(engine) == 2
# Implicitly close the engine
del engine
# Assert it is closed
assert is_closed_callback.is_closed
class DummyTrainer:
"""Helper to simulate what the trainer does w.r.t. events"""
def __init__(self, state: State, logger: Logger) -> None:
self.engine = Engine(state, logger)
self.engine.run_event(Event.INIT)
def close(self):
self.engine.close()
def test_engine_triggers_close_only_once(dummy_state: State, dummy_logger: Logger):
# Create the trainer and run an event
is_closed_callback = IsClosedCallback()
dummy_state.callbacks.append(is_closed_callback)
# Create the trainer
trainer = DummyTrainer(dummy_state, dummy_logger)
# Close the trainer
trainer.close()
# Assert it is closed
assert is_closed_callback.is_closed
# Create a new trainer with the same callback. This should implicitly trigger __del__ of the
# previous trainer AFTER the new DummyTrainer was constructed
trainer = DummyTrainer(dummy_state, dummy_logger)
# Assert it is open
assert not is_closed_callback.is_closed
def test_engine_errors_if_previous_trainer_was_not_closed(dummy_state: State, dummy_logger: Logger):
# Create the trainer and run an event
is_closed_callback = IsClosedCallback()
dummy_state.callbacks.append(is_closed_callback)
# Create the trainer
_ = DummyTrainer(dummy_state, dummy_logger)
# Assert the callback is open
assert not is_closed_callback.is_closed
# Create a new trainer with the same callback. Should raise an exception
# because trainer.close() was not called before
with pytest.raises(RuntimeError,
match=r'Cannot create a new trainer with an open callback or logger from a previous trainer'):
DummyTrainer(dummy_state, dummy_logger)
def check_output(proc: subprocess.CompletedProcess):
# Check the subprocess output, and raise an exception with the stdout/stderr dump if there was a non-zero exit
# The `check=True` flag available in `subprocess.run` does not print stdout/stderr
if proc.returncode == 0:
return
error_msg = textwrap.dedent(f"""\
Command {proc.args} failed with exit code {proc.returncode}.
----Begin stdout----
{proc.stdout}
----End stdout------
----Begin stderr----
{proc.stderr}
----End stderr------""")
raise RuntimeError(error_msg)
@pytest.mark.parametrize('exception', [True, False])
def test_engine_closes_on_atexit(exception: bool):
# Running this test via a subprocess, as atexit() must trigger
code = textwrap.dedent("""\
from composer import Trainer, Callback
from tests.common import SimpleModel
class CallbackWithConditionalCloseImport(Callback):
def post_close(self):
import requests
model = SimpleModel(3, 10)
cb = CallbackWithConditionalCloseImport()
trainer = Trainer(
model=model,
callbacks=[cb],
max_duration="1ep",
train_dataloader=None,
)
""")
if exception:
# Should raise an exception, since no dataloader was provided
code += 'trainer.fit()'
git_root_dir = os.path.join(os.path.dirname(composer.__file__), '..')
proc = subprocess.run(['python', '-c', code], cwd=git_root_dir, text=True, capture_output=True)
if exception:
# manually validate that there was no conditional import exception
assert 'ImportError: sys.meta_path is None, Python is likely shutting down' not in proc.stderr
else:
check_output(proc)
def test_logging(
caplog: pytest.LogCaptureFixture,
dummy_state: State,
dummy_logger: Logger,
monkeypatch: pytest.MonkeyPatch,
):
"""Test that engine logs statements as expected"""
caplog.set_level(logging.DEBUG, logger=Engine.__module__)
# Include a callback, since most logging happens around callback events
dummy_state.callbacks = [EventCounterCallback()]
monkeypatch.setenv('ENGINE_DEBUG', '1')
engine = Engine(dummy_state, dummy_logger)
engine.run_event('INIT')
engine.close()
# Validate that we have the expected log entries
assert caplog.record_tuples == [
('composer.core.engine', 10, '[ep=0][ba=0][event=INIT]: Running event'),
('composer.core.engine', 10, '[ep=0][ba=0][event=INIT]: Running callback EventCounterCallback'),
('composer.core.engine', 10, 'Closing the engine'),
('composer.core.engine', 10, 'Closing callback EventCounterCallback'),
('composer.core.engine', 10, 'Post-closing callback EventCounterCallback'),
]
| composer-dev | tests/test_engine.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import os
from typing import List, Optional
import pytest
from composer.utils import reproducibility
# Allowed options for pytest.mark.world_size()
# Important: when updating this list, make sure to also update ./.ci/test.sh
# (so tests of all world sizes will be executed) and tests/README.md
# (so the documentation is correct)
WORLD_SIZE_OPTIONS = (1, 2)
# Enforce deterministic mode before any tests start.
reproducibility.configure_deterministic_mode()
# Add the path of any pytest fixture files you want to make global
pytest_plugins = [
'tests.fixtures.autouse_fixtures',
'tests.fixtures.fixtures',
]
def _add_option(parser: pytest.Parser, name: str, help: str, choices: Optional[List[str]] = None):
parser.addoption(
f'--{name}',
default=None,
type=str,
choices=choices,
help=help,
)
parser.addini(
name=name,
help=help,
type='string',
default=None,
)
def _get_option(config: pytest.Config, name: str, default: Optional[str] = None) -> str: # type: ignore
val = config.getoption(name)
if val is not None:
assert isinstance(val, str)
return val
val = config.getini(name)
if val == []:
val = None
if val is None:
if default is None:
pytest.fail(f'Config option {name} is not specified but is required')
val = default
assert isinstance(val, str)
return val
def pytest_addoption(parser: pytest.Parser) -> None:
_add_option(parser,
'seed',
help="""\
Rank zero seed to use. `reproducibility.seed_all(seed + dist.get_global_rank())` will be invoked
before each test.""")
_add_option(parser, 's3_bucket', help='S3 Bucket for integration tests')
def _get_world_size(item: pytest.Item):
"""Returns the world_size of a test, defaults to 1."""
_default = pytest.mark.world_size(1).mark
return item.get_closest_marker('world_size', default=_default).args[0]
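# For example, a test opts into a multi-process run via the custom
# ``world_size`` marker read by ``_get_world_size`` above:
#
#   @pytest.mark.world_size(2)
#   def test_something_distributed():
#       ...
#
# Unmarked tests default to world_size == 1 and are deselected below whenever
# the WORLD_SIZE environment variable requests a different world size.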
def pytest_collection_modifyitems(config: pytest.Config, items: List[pytest.Item]) -> None:
"""Filter tests by world_size (for multi-GPU tests) and duration (short, long, or all)"""
world_size = int(os.environ.get('WORLD_SIZE', '1'))
conditions = [
lambda item: _get_world_size(item) == world_size,
]
# keep items that satisfy all conditions
remaining = []
deselected = []
for item in items:
if all([condition(item) for condition in conditions]):
remaining.append(item)
else:
deselected.append(item)
if deselected:
config.hook.pytest_deselected(items=deselected)
items[:] = remaining
# Note: These methods are an alternative to the tiny_bert fixtures in fixtures.py.
# Fixtures cannot be used natively as parametrized inputs, which we require when
# we wish to run a test across multiple models, one of which is a HuggingFace BERT Tiny.
# As a workaround, we inject objects into the PyTest namespace. Tests should not directly
# use pytest.{var}, but instead should import and use the helper copy methods configure_{var}
# (in tests.common.models) so the objects in the PyTest namespace do not change.
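# For example (hypothetical helper name, following the ``configure_{var}``
# convention described above), a test would do:
#
#   from tests.common.models import configure_tiny_bert_model
#   model = configure_tiny_bert_model()  # copies pytest.tiny_bert_model instead of mutating it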
def pytest_configure():
try:
import transformers
del transformers
TRANSFORMERS_INSTALLED = True
except ImportError:
TRANSFORMERS_INSTALLED = False
if TRANSFORMERS_INSTALLED:
from tests.fixtures.fixtures import (tiny_bert_config_helper, tiny_bert_model_helper,
tiny_bert_tokenizer_helper, tiny_gpt2_config_helper,
tiny_gpt2_model_helper, tiny_gpt2_tokenizer_helper, tiny_t5_config_helper,
tiny_t5_model_helper, tiny_t5_tokenizer_helper)
pytest.tiny_bert_config = tiny_bert_config_helper() # type: ignore
pytest.tiny_bert_model = tiny_bert_model_helper(pytest.tiny_bert_config) # type: ignore
pytest.tiny_bert_tokenizer = tiny_bert_tokenizer_helper() # type: ignore
pytest.tiny_gpt2_config = tiny_gpt2_config_helper() # type: ignore
pytest.tiny_gpt2_model = tiny_gpt2_model_helper(pytest.tiny_gpt2_config) # type: ignore
pytest.tiny_gpt2_tokenizer = tiny_gpt2_tokenizer_helper() # type: ignore
pytest.tiny_t5_config = tiny_t5_config_helper() # type: ignore
pytest.tiny_t5_model = tiny_t5_model_helper(pytest.tiny_t5_config) # type: ignore
pytest.tiny_t5_tokenizer = tiny_t5_tokenizer_helper() # type: ignore
def pytest_sessionfinish(session: pytest.Session, exitstatus: int):
if exitstatus == 5:
session.exitstatus = 0 # Ignore no-test-ran errors
| composer-dev | tests/conftest.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import datetime
import pytest
from composer.core.time import Time, Timestamp, TimeUnit
@pytest.mark.parametrize('time_string,expected_value,expected_unit', [
['1ep', 1, TimeUnit.EPOCH],
['2ba', 2, TimeUnit.BATCH],
['3e10sp', 3 * 10**10, TimeUnit.SAMPLE],
['4tok', 4, TimeUnit.TOKEN],
['0.5dur', 0.5, TimeUnit.DURATION],
])
def test_time_parse(time_string: str, expected_value: int, expected_unit: TimeUnit):
time = Time.from_timestring(time_string)
assert time.value == expected_value
assert time.unit == expected_unit
@pytest.mark.parametrize('expected_timestring,time', [
['1ep', Time(1, TimeUnit.EPOCH)],
['2ba', Time(2, TimeUnit.BATCH)],
['3sp', Time(3, TimeUnit.SAMPLE)],
['4tok', Time(4, TimeUnit.TOKEN)],
['0.5dur', Time(0.5, TimeUnit.DURATION)],
])
def test_to_timestring(expected_timestring: str, time: Time):
assert time.to_timestring() == expected_timestring
def test_time_math():
t1 = Time.from_timestring('1ep')
t2 = Time.from_timestring('2ep')
t3 = Time.from_timestring('3ep')
t4 = Time.from_timestring('0.5dur')
assert t1 + t2 == t3
assert t2 - t1 == t1
assert t1 - t2 == -t1
assert t1 < t2
assert t1 <= t2
assert t2 > t1
assert t2 >= t1
assert t3 >= t3
assert t3 <= t3
assert t4 * t2 == t1
assert 0.5 * t2 == t1
assert t4 * 2 == Time.from_timestring('1dur')
assert t1 / t2 == t4
assert t2 / 2 == t1
def test_time_repr():
time = Time(1, 'tok')
assert repr(time) == 'Time(1, TimeUnit.TOKEN)'
assert eval(repr(time)) == time
def test_timestamp():
timestamp = Timestamp()
time = Time(10, 'ep')
assert timestamp < time
assert timestamp.get(time.unit) == Time.from_epoch(0)
def test_timestamp_update():
timestamp = Timestamp(epoch=1)
timestamp_2 = timestamp.copy(batch=2)
assert timestamp_2.epoch == 1
assert timestamp_2.batch == 2
assert timestamp_2.sample == 0
assert timestamp is not timestamp_2
def test_timestamp_to_next_batch_epoch():
timestamp = Timestamp()
# Step batch 0, epoch 0
timestamp = timestamp.to_next_batch(10, 20, datetime.timedelta(seconds=5))
assert timestamp.batch == 1
assert timestamp.batch_in_epoch == 1
assert timestamp.sample == 10
assert timestamp.sample_in_epoch == 10
assert timestamp.token == 20
assert timestamp.token_in_epoch == 20
assert timestamp.total_wct == datetime.timedelta(seconds=5)
assert timestamp.epoch_wct == datetime.timedelta(seconds=5)
assert timestamp.batch_wct == datetime.timedelta(seconds=5)
# Finish epoch 0
timestamp = timestamp.to_next_epoch()
assert timestamp.epoch == 1
assert timestamp.batch == 1
assert timestamp.batch_in_epoch == 0
assert timestamp.sample == 10
assert timestamp.sample_in_epoch == 0
assert timestamp.token == 20
assert timestamp.token_in_epoch == 0
assert timestamp.total_wct == datetime.timedelta(seconds=5)
assert timestamp.epoch_wct == datetime.timedelta(seconds=0)
assert timestamp.batch_wct == datetime.timedelta(seconds=0)
# Step batch 0 in epoch 1
timestamp = timestamp.to_next_batch(5, 0, datetime.timedelta(seconds=10))
assert timestamp.epoch == 1
assert timestamp.batch == 2
assert timestamp.batch_in_epoch == 1
assert timestamp.sample == 15
assert timestamp.sample_in_epoch == 5
assert timestamp.token == 20
assert timestamp.token_in_epoch == 0
assert timestamp.total_wct == datetime.timedelta(seconds=15)
assert timestamp.epoch_wct == datetime.timedelta(seconds=10)
assert timestamp.batch_wct == datetime.timedelta(seconds=10)
# Step batch 1 in epoch 1
timestamp = timestamp.to_next_batch(5, 1, datetime.timedelta(seconds=10))
assert timestamp.epoch == 1
assert timestamp.batch == 3
assert timestamp.batch_in_epoch == 2
assert timestamp.sample == 20
assert timestamp.sample_in_epoch == 10
assert timestamp.token == 21
assert timestamp.token_in_epoch == 1
assert timestamp.total_wct == datetime.timedelta(seconds=25)
assert timestamp.epoch_wct == datetime.timedelta(seconds=20)
assert timestamp.batch_wct == datetime.timedelta(seconds=10)
def test_timestamp_repr():
timestamp = Timestamp()
assert timestamp == eval(repr(timestamp))
@pytest.mark.parametrize('time_string', ['1.5ep', '2.1ba', '3.2sp', '3.4tok'])
def test_timestep_bad_strings(time_string: str):
with pytest.raises(TypeError):
Time.from_timestring(time_string)
@pytest.mark.parametrize('time_string', ['0.5dur', '2.0ep', '3.000ba', '030.0sp'])
def test_timestep_valid_strings(time_string: str):
Time.from_timestring(time_string)
| composer-dev | tests/test_time.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
import torch.distributed
from torch.utils.data import DataLoader
from composer import Trainer
from composer.core import Precision
from composer.models import composer_resnet_cifar
from tests.common import RandomImageDataset
try:
import transformer_engine.pytorch as te
del te
te_installed = True
except ImportError:
te_installed = False
def get_trainer(precision: Precision) -> Trainer:
return Trainer(
model=composer_resnet_cifar('resnet_9'),
train_dataloader=DataLoader(
dataset=RandomImageDataset(size=1024),
batch_size=512,
persistent_workers=False,
num_workers=0,
),
eval_dataloader=DataLoader(
dataset=RandomImageDataset(size=1024),
batch_size=512,
persistent_workers=False,
num_workers=0,
),
precision=precision,
max_duration='1ep',
eval_interval='1ep',
train_subset_num_batches=1,
)
def fit_and_measure_memory(precision) -> int:
trainer = get_trainer(precision)
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
trainer.fit()
return torch.cuda.max_memory_allocated()
def eval_and_measure_memory(precision) -> int:
trainer = get_trainer(precision)
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
trainer.eval()
return torch.cuda.max_memory_allocated()
def predict_and_measure_memory(precision) -> int:
trainer = get_trainer(precision)
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
trainer.predict(dataloader=trainer.state.evaluators[0].dataloader)
return torch.cuda.max_memory_allocated()
@pytest.mark.gpu
@pytest.mark.parametrize('precision', [Precision.AMP_FP16, Precision.AMP_BF16])
def test_train_precision_memory(precision: Precision):
memory_fp32 = fit_and_measure_memory(Precision.FP32)
memory_half = fit_and_measure_memory(precision)
assert memory_half < 0.7 * memory_fp32
@pytest.mark.gpu
@pytest.mark.parametrize('precision', [Precision.AMP_FP16, Precision.AMP_BF16])
def test_eval_precision_memory(precision: Precision):
memory_fp32 = eval_and_measure_memory(Precision.FP32)
memory_half = eval_and_measure_memory(precision)
assert memory_half < 0.95 * memory_fp32
@pytest.mark.gpu
@pytest.mark.parametrize('precision', [Precision.AMP_FP16, Precision.AMP_BF16])
def test_predict_precision_memory(precision: Precision):
memory_fp32 = predict_and_measure_memory(Precision.FP32)
memory_half = predict_and_measure_memory(precision)
assert memory_half < 0.95 * memory_fp32
@pytest.mark.gpu
def test_amp_fp8_path():
trainer = get_trainer(Precision.AMP_FP8)
if te_installed:
if torch.cuda.get_device_capability()[0] < 9:
with pytest.raises(RuntimeError, match='AMP_FP8 precision is used but current device does not support it'):
trainer.fit()
else:
trainer.fit()
else:
with pytest.raises(ImportError, match='AMP_FP8 precision is used but TransformerEngine is not installed'):
trainer.fit()
| composer-dev | tests/test_precision.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pytest
from torch.utils.data import DataLoader
from composer.trainer import Trainer
from composer.utils import dist
from tests.common.datasets import RandomTextClassificationDataset, RandomTextLMDataset
from tests.common.models import SimpleTransformerClassifier, SimpleTransformerMaskedLM
def test_simple_nlp_classification():
vocab_size = 100
sequence_length = 32
num_classes = 2
size = 96
batch_size = 8
train_dataset = RandomTextClassificationDataset(size=size,
vocab_size=vocab_size,
sequence_length=sequence_length,
num_classes=num_classes)
eval_dataset = RandomTextClassificationDataset(size=size,
vocab_size=vocab_size,
sequence_length=sequence_length,
num_classes=num_classes)
predict_dataset = RandomTextClassificationDataset(size=size,
vocab_size=vocab_size,
sequence_length=sequence_length,
num_classes=num_classes)
model = SimpleTransformerClassifier(vocab_size=vocab_size, num_classes=num_classes)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, sampler=dist.get_sampler(train_dataset))
eval_dataloader = DataLoader(eval_dataset, batch_size=batch_size, sampler=dist.get_sampler(eval_dataset))
predict_dataloader = DataLoader(predict_dataset, batch_size=8)
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
max_duration='2ep',
eval_dataloader=eval_dataloader,
)
trainer.fit()
trainer.eval()
# Check that there is some train/eval accuracy
assert trainer.state.train_metrics['MulticlassAccuracy'].compute() != 0.0
assert trainer.state.eval_metrics['eval']['MulticlassAccuracy'].compute() != 0.0
predictions = trainer.predict(predict_dataloader)
# Check that the output predictions are the expected shape
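# ((size - 1) // batch_size) + 1 is ceiling division: 96 samples at batch size 8 -> 12 predict batches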
num_predict_batches_expected = ((size - 1) // batch_size) + 1
assert len(predictions) == num_predict_batches_expected
assert predictions[0].shape == (batch_size, 2)
def test_simple_nlp_mlm(tiny_bert_tokenizer, tiny_bert_model):
transformers = pytest.importorskip('transformers')
vocab_size = tiny_bert_tokenizer.vocab_size
sequence_length = 32
size = 96
batch_size = 8
train_dataset = RandomTextLMDataset(size=size,
vocab_size=vocab_size,
sequence_length=sequence_length,
use_keys=True)
eval_dataset = RandomTextLMDataset(size=size, vocab_size=vocab_size, sequence_length=sequence_length, use_keys=True)
predict_dataset = RandomTextLMDataset(size=size,
vocab_size=vocab_size,
sequence_length=sequence_length,
use_keys=True)
collator = transformers.DataCollatorForLanguageModeling(tokenizer=tiny_bert_tokenizer, mlm_probability=0.15)
model = SimpleTransformerMaskedLM(vocab_size=vocab_size)
train_dataloader = DataLoader(train_dataset,
batch_size=batch_size,
sampler=dist.get_sampler(train_dataset),
collate_fn=collator)
eval_dataloader = DataLoader(eval_dataset,
batch_size=batch_size,
sampler=dist.get_sampler(eval_dataset),
collate_fn=collator)
predict_dataloader = DataLoader(predict_dataset, batch_size=8)
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
max_duration='2ep',
eval_dataloader=eval_dataloader,
)
trainer.fit()
trainer.eval()
# Check that there is some train/eval cross entropy
assert trainer.state.train_metrics['LanguageCrossEntropy'].compute() != 0.0
assert trainer.state.eval_metrics['eval']['LanguageCrossEntropy'].compute() != 0.0
predictions = trainer.predict(predict_dataloader)
# Check that the output predictions are the expected shape
num_predict_batches_expected = ((size - 1) // batch_size) + 1
assert len(predictions) == num_predict_batches_expected
assert predictions[0].shape == (batch_size, sequence_length, vocab_size)
| composer-dev | tests/test_simple_nlp.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import copy
import os
import pytest
import torch
from torch.utils.data import DataLoader
from torchmetrics.classification import MulticlassAccuracy
from composer.algorithms import GatedLinearUnits
from composer.loggers import RemoteUploaderDownloader
from composer.metrics.nlp import LanguageCrossEntropy, MaskedAccuracy
from composer.models import HuggingFaceModel
from composer.trainer import Trainer
from composer.utils import dist, get_device, inference, reproducibility
from tests.common import device
from tests.common.datasets import RandomTextClassificationDataset, RandomTextLMDataset
from tests.common.models import SimpleTransformerClassifier, SimpleTransformerMaskedLM
def get_model_embeddings(model):
if isinstance(model, HuggingFaceModel):
return model.model.bert.embeddings.word_embeddings.weight
elif isinstance(model, SimpleTransformerClassifier) or isinstance(model, SimpleTransformerMaskedLM):
return model.transformer_base.embedding.weight
else:
raise ValueError('Unsure how to get embeddings layer from model.')
def pretraining_test_helper(tokenizer, model, algorithms, tmp_path, device):
transformers = pytest.importorskip('transformers')
pretraining_model_copy = copy.deepcopy(model)
pretraining_train_dataset = RandomTextLMDataset(size=8,
vocab_size=tokenizer.vocab_size,
sequence_length=4,
use_keys=True)
collator = transformers.DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)
pretraining_train_dataloader = DataLoader(pretraining_train_dataset,
batch_size=4,
sampler=dist.get_sampler(pretraining_train_dataset),
collate_fn=collator)
pretraining_eval_dataloader = DataLoader(pretraining_train_dataset,
batch_size=4,
sampler=dist.get_sampler(pretraining_train_dataset),
collate_fn=collator)
pretraining_trainer = Trainer(model=pretraining_model_copy,
train_dataloader=pretraining_train_dataloader,
save_folder=str(tmp_path / 'pretraining_checkpoints'),
max_duration='1ep',
seed=17,
algorithms=algorithms,
device=device)
pretraining_trainer.fit()
reproducibility.seed_all(17) # seed so that the masking is the same
pretraining_trainer.eval(pretraining_eval_dataloader)
loaded_pretraining_trainer = Trainer(model=model,
load_path=str(tmp_path / 'pretraining_checkpoints' / 'latest-rank0.pt'),
seed=17,
algorithms=algorithms,
device=device)
reproducibility.seed_all(17) # seed so that the masking is the same
loaded_pretraining_trainer.eval(pretraining_eval_dataloader)
original_ce = pretraining_trainer.state.eval_metrics['eval']['LanguageCrossEntropy']
loaded_ce = loaded_pretraining_trainer.state.eval_metrics['eval']['LanguageCrossEntropy']
assert original_ce.compute() > 0.0
assert original_ce.compute() == loaded_ce.compute()
return str(tmp_path / 'pretraining_checkpoints' / 'latest-rank0.pt')
def finetuning_test_helper(tokenizer, model, algorithms, checkpoint_path, pretraining_model, tmp_path, device):
finetuning_model_copy = copy.deepcopy(model)
finetuning_train_dataset = RandomTextClassificationDataset(size=8,
vocab_size=tokenizer.vocab_size,
sequence_length=4,
num_classes=3,
use_keys=isinstance(model, HuggingFaceModel))
finetuning_train_dataloader = DataLoader(finetuning_train_dataset,
batch_size=4,
sampler=dist.get_sampler(finetuning_train_dataset))
finetuning_eval_dataloader = DataLoader(finetuning_train_dataset,
batch_size=4,
sampler=dist.get_sampler(finetuning_train_dataset))
remote_dir = str(tmp_path / 'object_store')
os.makedirs(remote_dir, exist_ok=True)
rud = RemoteUploaderDownloader(
bucket_uri='libcloud://.',
backend_kwargs={
'provider': 'local',
'container': '.',
'provider_kwargs': {
'key': remote_dir,
},
},
num_concurrent_uploads=1,
use_procs=False,
upload_staging_folder=str(tmp_path / 'staging_folder'),
)
finetuning_embedding_layer = get_model_embeddings(model)
pretraining_embedding_layer = get_model_embeddings(pretraining_model)
# The pretraining weights have not yet been loaded into the finetuning model
assert not torch.equal(finetuning_embedding_layer.cpu(), pretraining_embedding_layer.cpu())
finetuning_trainer = Trainer(model=model,
train_dataloader=finetuning_train_dataloader,
save_folder='finetuning_checkpoints',
load_path=checkpoint_path,
load_weights_only=True,
loggers=[rud],
max_duration='1ep',
seed=17,
algorithms=algorithms,
device=device)
# Now they have been loaded
assert torch.equal(finetuning_embedding_layer.cpu(), pretraining_embedding_layer.cpu())
finetuning_trainer.fit()
finetuning_trainer.eval(finetuning_eval_dataloader)
loaded_finetuning_trainer = Trainer(model=finetuning_model_copy,
load_path='finetuning_checkpoints/latest-rank0.pt',
load_object_store=rud,
seed=17,
algorithms=algorithms,
device=device)
loaded_finetuning_trainer.eval(finetuning_eval_dataloader)
original_acc = finetuning_trainer.state.eval_metrics['eval']['MulticlassAccuracy']
loaded_acc = loaded_finetuning_trainer.state.eval_metrics['eval']['MulticlassAccuracy']
assert original_acc.compute() > 0.0
assert original_acc.compute() == loaded_acc.compute()
return loaded_finetuning_trainer, finetuning_eval_dataloader, rud, 'finetuning_checkpoints/latest-rank0.pt'
def inference_test_helper(finetuning_output_path, rud, finetuning_model, algorithms, original_input, original_output,
tmp_path, save_format, device):
inference_trainer = Trainer(model=finetuning_model,
load_path=finetuning_output_path,
load_weights_only=True,
loggers=[rud],
seed=17,
algorithms=algorithms,
device=device)
os.mkdir(tmp_path / 'inference_checkpoints')
sample_input = (original_input, {})
inference.export_for_inference(model=inference_trainer.state.model,
save_format=save_format,
save_path=str(tmp_path / 'inference_checkpoints' / f'exported_model.{save_format}'),
sample_input=sample_input)
copied_batch = copy.deepcopy(original_input)
if save_format == 'onnx':
onnx = pytest.importorskip('onnx')
ort = pytest.importorskip('onnxruntime')
loaded_inference_model = onnx.load(str(tmp_path / 'inference_checkpoints' / 'exported_model.onnx'))
onnx.checker.check_model(loaded_inference_model)
ort_session = ort.InferenceSession(str(tmp_path / 'inference_checkpoints' / 'exported_model.onnx'))
for key, value in copied_batch.items():
copied_batch[key] = value.numpy()
loaded_model_out = ort_session.run(None, copied_batch)
elif save_format == 'torchscript':
loaded_inference_model = torch.jit.load(str(tmp_path / 'inference_checkpoints' / 'exported_model.torchscript'))
loaded_inference_model.eval()
loaded_model_out = loaded_inference_model(copied_batch)
else:
raise ValueError('Unsupported save format')
torch.testing.assert_close(
loaded_model_out[1] if isinstance(loaded_model_out, list) else loaded_model_out.detach().numpy(),
original_output.detach().numpy()
if isinstance(original_output, torch.Tensor) else original_output.logits.detach().numpy())
@device('cpu', 'gpu')
# Note: the specificity of these settings is due to incompatibilities (e.g., the simpletransformer model is not traceable)
@pytest.mark.parametrize('model_type,algorithms,save_format', [('tinybert_hf', [GatedLinearUnits], 'onnx'),
('simpletransformer', [], 'torchscript')])
def test_full_nlp_pipeline(model_type, algorithms, save_format, tiny_bert_tokenizer, tmp_path, request, device):
"""This test is intended to exercise our full pipeline for NLP.
To this end, it performs pretraining, loads the pretrained model with a classification head for finetuning
and finetunes it, exports the model for inference, and loads it back in to make predictions.
"""
pytest.importorskip('libcloud')
pytest.importorskip('transformers')
algorithms = [algorithm() for algorithm in algorithms]
device = get_device(device)
tiny_bert_model = None
if model_type == 'tinybert_hf':
tiny_bert_model = request.getfixturevalue('tiny_bert_model')
# pretraining
if model_type == 'tinybert_hf':
assert tiny_bert_model is not None
pretraining_metrics = [LanguageCrossEntropy(ignore_index=-100), MaskedAccuracy(ignore_index=-100)]
pretraining_model = HuggingFaceModel(tiny_bert_model,
tiny_bert_tokenizer,
use_logits=True,
metrics=pretraining_metrics)
elif model_type == 'simpletransformer':
pretraining_model = SimpleTransformerMaskedLM(vocab_size=tiny_bert_tokenizer.vocab_size)
else:
raise ValueError('Unsupported model type')
pretraining_output_path = pretraining_test_helper(tiny_bert_tokenizer, pretraining_model, algorithms, tmp_path,
device)
# finetuning
if model_type == 'tinybert_hf':
finetuning_metric = MulticlassAccuracy(num_classes=3, average='micro')
hf_finetuning_model, _ = HuggingFaceModel.hf_from_composer_checkpoint(
pretraining_output_path,
model_instantiation_class='transformers.AutoModelForSequenceClassification',
model_config_kwargs={'num_labels': 3})
finetuning_model = HuggingFaceModel(model=hf_finetuning_model,
tokenizer=tiny_bert_tokenizer,
use_logits=True,
metrics=[finetuning_metric])
elif model_type == 'simpletransformer':
finetuning_model = SimpleTransformerClassifier(vocab_size=tiny_bert_tokenizer.vocab_size, num_classes=3)
else:
raise ValueError('Unsupported model type.')
finetuning_model_copy = copy.deepcopy(finetuning_model)
finetuning_trainer, finetuning_dataloader, rud, finetuning_output_path = finetuning_test_helper(
tiny_bert_tokenizer, finetuning_model, algorithms, pretraining_output_path, pretraining_model, tmp_path, device)
# inference
batch = next(iter(finetuning_dataloader))
finetuning_trainer.state.model.to('cpu')
finetuning_trainer.state.model.eval()
original_output = finetuning_trainer.state.model(batch)
inference_test_helper(finetuning_output_path, rud, finetuning_model_copy, algorithms, batch, original_output,
tmp_path, save_format, device)
| composer-dev | tests/test_full_nlp.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
# Pytest stub for running lint tests and doctests
# Running these checks through pytest allows us to report any errors in JUnit format,
# which is posted directly on the PR
import os
import subprocess
import textwrap
import pytest
def check_output(proc: subprocess.CompletedProcess):
# Check the subprocess output, and raise an exception with the stdout/stderr dump if there was a non-zero exit
# The `check=True` flag available in `subprocess.run` does not print stdout/stderr
if proc.returncode == 0:
return
error_msg = textwrap.dedent(f"""\
Command {proc.args} failed with exit code {proc.returncode}.
----Begin stdout----
{proc.stdout}
----End stdout------
----Begin stderr----
{proc.stderr}
----End stderr------""")
raise RuntimeError(error_msg)
@pytest.mark.doctest
def test_run_doctests():
docs_folder = os.path.join(os.path.dirname(__file__), '..', 'docs')
check_output(subprocess.run(['make', 'clean'], cwd=docs_folder, capture_output=True, text=True))
# Must build the html first to ensure that doctests in .. autosummary:: generated pages are included
check_output(subprocess.run(['make', 'html'], cwd=docs_folder, capture_output=True, text=True))
check_output(subprocess.run(['make', 'doctest'], cwd=docs_folder, capture_output=True, text=True))
@pytest.mark.doctest
def test_docker_build_matrix():
"""Test that the docker build matrix is up to date."""
docker_folder = os.path.join(os.path.dirname(__file__), '..', 'docker')
# Capture the existing readme and build matrix contents
with open(os.path.join(docker_folder, 'README.md'), 'r') as f:
existing_readme = f.read()
with open(os.path.join(docker_folder, 'build_matrix.yaml'), 'r') as f:
existing_build_matrix = f.read()
# Run the script
check_output(
subprocess.run(['python', os.path.join(docker_folder, 'generate_build_matrix.py')],
cwd=docker_folder,
capture_output=True,
text=True))
# Assert that the files did not change
with open(os.path.join(docker_folder, 'README.md'), 'r') as f:
assert existing_readme == f.read()
with open(os.path.join(docker_folder, 'build_matrix.yaml'), 'r') as f:
assert existing_build_matrix == f.read()
@pytest.mark.doctest
@pytest.mark.parametrize('example', [1, 2])
def test_release_tests_reflect_readme(example: int):
"""Test that example_1.py and example_2.py in release_tests reflect the README.md."""
with open(os.path.join(os.path.dirname(__file__), '..', 'README.md'), 'r') as f:
readme_lines = f.readlines()
example_code_lines = []
found_begin = False
started = False
for i, line in enumerate(readme_lines):
if f'begin_example_{example}' in line:
found_begin = True
continue
# Wait until we get the ```python for start of code snippet
if found_begin and not started:
if line == '```python\n':
started = True
# Reached end of code snippet
elif started and line == '```\n':
# Code snippet continues
if i + 2 < len(readme_lines) and '-->\n' == readme_lines[
i + 1] and '<!--pytest-codeblocks:cont-->\n' == readme_lines[i + 2]:
started = False
# Code snippet ends
else:
break
# Add line
elif started:
example_code_lines.append(line)
example_file = os.path.join(os.path.dirname(__file__), '..', '.ci', 'release_tests', f'example_{example}.py')
with open(example_file, 'r') as f:
assert f.readlines() == example_code_lines
| composer-dev | tests/test_docs.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
| composer-dev | tests/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from typing import List, Sequence, Type
from unittest.mock import Mock
import pytest
from composer import Algorithm, Engine, Event, Logger, State, Trainer
from composer.algorithms import FusedLayerNorm, LowPrecisionLayerNorm, SelectiveBackprop
from composer.algorithms.low_precision_layernorm.low_precision_layernorm import LowPrecisionLayerNorm
from composer.core.passes import sort_to_back, sort_to_front
from composer.devices import DeviceCPU
from tests.common import SimpleModel
from .test_engine import run_event
@pytest.fixture
def always_match_algorithms():
return [
Mock(**{
'match.return_value': True,
'apply.return_value': n, # return encodes order
'interpolate_loss': False,
}) for n in range(5)
]
@pytest.fixture()
def dummy_logger(dummy_state: State):
return Logger(dummy_state)
def test_register_pass(dummy_state, dummy_logger):
dummy_algorithm = Mock()
dummy_algorithm.match.return_value = True
dummy_algorithm.apply.return_value = 'dummy'
def insert_dummy_algorithm(algorithms, event):
algorithms.append(dummy_algorithm)
return algorithms
engine = Engine(dummy_state, dummy_logger)
engine.register_pass(insert_dummy_algorithm)
trace = engine.run_event(Event.INIT)
assert 'dummy' in [tr.exit_code for tr in trace.values()]
class TestLIFOPass:
@pytest.mark.parametrize('event', [
Event.BEFORE_LOSS,
Event.BEFORE_BACKWARD,
])
def test_lifo_first_in(self, event: Event, dummy_state: State, dummy_logger: Logger,
always_match_algorithms: List[Algorithm]):
dummy_state.algorithms = always_match_algorithms
trace = run_event(event, dummy_state, dummy_logger)
order = [tr.order for tr in trace.values()]
expected_order = [tr.exit_code for tr in trace.values()] # use exit_code to uniquely label algos
assert order == expected_order
@pytest.mark.parametrize('event', [
Event.AFTER_LOSS,
Event.AFTER_BACKWARD,
])
def test_lifo_last_out(self, event: Event, dummy_state: State, always_match_algorithms: List[Algorithm],
dummy_logger: Logger):
dummy_state.algorithms = always_match_algorithms
trace = run_event(event, dummy_state, dummy_logger)
order = [tr.order for tr in trace.values()]
expected_order = list(reversed([tr.exit_code for tr in trace.values()]))
assert order == expected_order
class TestAlgorithmOrderingPasses:
@pytest.mark.parametrize('algorithm_cls', [FusedLayerNorm, LowPrecisionLayerNorm])
def test_algorithm_last(self, algorithm_cls: Type[Algorithm], always_match_algorithms: List[Algorithm],
dummy_logger: Logger, dummy_state: State):
if algorithm_cls in (FusedLayerNorm, LowPrecisionLayerNorm):
pytest.importorskip('apex')
algorithm = algorithm_cls()
algorithm.apply = Mock(return_value='algo')
algorithm.match = Mock(return_value=True)
algorithms = always_match_algorithms[0:2] + [algorithm] + always_match_algorithms[2:]
dummy_state._algorithms = algorithms
trace = run_event(Event.INIT, dummy_state, dummy_logger)
expected = [0, 1, 2, 3, 4, 'algo']
actual = [tr.exit_code for tr in trace.values()]
assert actual == expected
@pytest.mark.parametrize('algorithm_cls', [SelectiveBackprop])
def test_algorithm_first(self, algorithm_cls: Type[Algorithm], always_match_algorithms: List[Algorithm],
dummy_logger: Logger, dummy_state: State):
algorithm = algorithm_cls()
algorithm.apply = Mock(return_value='algo')
algorithm.match = Mock(return_value=True)
algorithms = always_match_algorithms[0:2] + [algorithm] + always_match_algorithms[2:]
dummy_state._algorithms = algorithms
trace = run_event(Event.INIT, dummy_state, dummy_logger)
expected = ['algo', 0, 1, 2, 3, 4]
actual = [tr.exit_code for tr in trace.values()]
assert actual == expected
class TestSortHelpers:
def test_sort_to_back(self):
lst = [1, 'a', 'c', 2, 3.0]
assert sort_to_back(lst, int) == ['a', 'c', 3.0, 1, 2]
def test_sort_to_front(self):
lst = [1, 'a', 'c', 2, 3.0]
assert sort_to_front(lst, int) == [1, 2, 'a', 'c', 3.0]
def get_default_passes():
state = State(model=SimpleModel(), device=DeviceCPU(), rank_zero_seed=42, run_name='test_chungoose')
engine = Engine(state, Logger(state))
return engine.algorithm_passes
def get_custom_pass():
def sort_by_name(algorithms: Sequence[Algorithm], event: Event) -> Sequence[Algorithm]:
return sorted(algorithms, key=lambda x: type(x).__name__)
return sort_by_name
sort_by_name = get_custom_pass() # Generate pass object so we can use same ref in tests
class TestTrainerArg:
@pytest.mark.parametrize(
'algorithm_passes,expected_passes',
[
[None, get_default_passes()],
[sort_by_name, get_default_passes() + [sort_by_name]],
[[sort_by_name], get_default_passes() + [sort_by_name]],
[[sort_by_name, 0], [sort_by_name] + get_default_passes()], # type: ignore
[(sort_by_name, 0), [sort_by_name] + get_default_passes()], # type: ignore
[[(sort_by_name, 0)], [sort_by_name] + get_default_passes()], # type: ignore
])
def test_add_pass(self, algorithm_passes, expected_passes):
trainer = Trainer(model=SimpleModel(), algorithm_passes=algorithm_passes)
assert trainer.engine.algorithm_passes == expected_passes
| composer-dev | tests/test_passes.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import glob
import inspect
import os
import pytest
import testbook
from testbook.client import TestbookNotebookClient
import composer
from tests.common import device
nb_root = os.path.join(os.path.dirname(composer.__file__), '..', 'examples')
NOTEBOOKS = [
os.path.join(nb_root, nb) \
for nb in glob.glob(os.path.join(nb_root, '*.ipynb')) \
]
def _to_pytest_param(filepath: str):
notebook_name = os.path.split(filepath)[-1][:-len('.ipynb')]
marks = []
if notebook_name == 'ffcv_dataloaders':
marks.append(pytest.mark.vision)
return pytest.param(filepath, marks=marks)
def patch_notebooks():
import itertools
import multiprocessing
from torch.utils.data import DataLoader
from composer import Trainer
multiprocessing.cpu_count = lambda: 2
original_fit = Trainer.fit
def new_fit(self: Trainer, *args, **kwargs):
if 'duration' not in kwargs:
kwargs['duration'] = '2ep'
if 'train_subset_num_batches' not in kwargs:
kwargs['train_subset_num_batches'] = 2
if 'eval_dataloader' in kwargs and 'eval_subset_num_batches' not in kwargs:
kwargs['eval_subset_num_batches'] = 1
original_fit(self, *args, **kwargs)
Trainer.fit = new_fit
original_iter = DataLoader.__iter__
def new_iter(self: DataLoader):
return itertools.islice(original_iter(self), 2)
DataLoader.__iter__ = new_iter # type: ignore # error: DataLoader has a stricter return type than islice
def modify_cell_source(tb: TestbookNotebookClient, notebook_name: str, cell_source: str, s3_bucket: str) -> str:
# This function is called before each cell is executed
if notebook_name == 'functional_api':
# avoid div by 0 errors with batch size of 1
cell_source = cell_source.replace('max_epochs = 5', 'max_epochs = 1')
cell_source = cell_source.replace('acc_percent = 100 * num_right / eval_size', 'acc_percent = 1')
cell_source = cell_source.replace('batch_size = 1024', 'batch_size = 64')
if notebook_name == 'custom_speedup_methods':
cell_source = cell_source.replace('resnet_56', 'resnet_9')
cell_source = cell_source.replace('batch_size=1024', 'batch_size=64')
if notebook_name == 'finetune_huggingface':
cell_source = cell_source.replace(
'sst2_dataset = datasets.load_dataset("glue", "sst2")',
'sst2_dataset = datasets.load_dataset("glue", "sst2", download_mode="force_redownload")')
cell_source = cell_source.replace('batch_size=16', 'batch_size=2')
if notebook_name == 'pretrain_finetune_huggingface':
cell_source = cell_source.replace('batch_size=64', 'batch_size=1')
cell_source = cell_source.replace('batch_size=32', 'batch_size=1')
if notebook_name == 'early_stopping':
cell_source = cell_source.replace('batch_size = 1024', 'batch_size = 64')
if notebook_name == 'getting_started':
cell_source = cell_source.replace('batch_size = 1024', 'batch_size = 64')
if notebook_name == 'migrate_from_ptl':
cell_source = cell_source.replace('batch_size=256', 'batch_size=64')
return cell_source
@pytest.mark.parametrize('notebook', [_to_pytest_param(notebook) for notebook in NOTEBOOKS])
@device('cpu', 'gpu')
@pytest.mark.daily
def test_notebook(notebook: str, device: str, s3_bucket: str):
trainer_monkeypatch_code = inspect.getsource(patch_notebooks)
notebook_name = os.path.split(notebook)[-1][:-len('.ipynb')]
if notebook_name == 'medical_image_segmentation':
pytest.skip('Dataset is only available via kaggle; need to authenticate on ci/cd')
if notebook_name == 'training_with_submitit':
pytest.skip('The CI does not support SLURM and submitit')
if notebook_name == 'auto_microbatching' and device == 'cpu':
pytest.skip('auto_microbatching notebook only runs with a gpu')
if notebook_name == 'TPU_Training_in_composer':
pytest.skip('The CI does not support tpus')
if notebook_name == 'ffcv_dataloaders' and device == 'cpu':
pytest.skip('The FFCV notebook requires CUDA')
with testbook.testbook(notebook) as tb:
tb.inject(trainer_monkeypatch_code)
tb.inject('patch_notebooks()')
for i, cell in enumerate(tb.cells):
if cell['cell_type'] != 'code':
continue
cell['source'] = modify_cell_source(tb,
notebook_name=notebook_name,
cell_source=cell['source'],
s3_bucket=s3_bucket)
tb.execute_cell(i)
| composer-dev | tests/test_notebooks.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pathlib
import random
import pytest
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import composer
from composer.core import Batch, Precision, State
from composer.devices import DeviceCPU, DeviceGPU
from composer.loggers import Logger
from tests.common import SimpleModel, assert_state_equivalent
from tests.common.datasets import RandomClassificationDataset
def random_tensor(size=(4, 10)):
return torch.rand(*size)
def get_dummy_state(request: pytest.FixtureRequest):
model = SimpleModel()
dataset = RandomClassificationDataset()
dataloader = DataLoader(dataset, batch_size=4)
optimizers = torch.optim.Adadelta(model.parameters())
device = None
for item in request.session.items:
device = DeviceCPU() if item.get_closest_marker('gpu') is None else DeviceGPU()
break
assert device is not None
state = State(model=model,
device=device,
train_dataloader=dataloader,
run_name=f'{random.randint(0, 100)}',
rank_zero_seed=random.randint(0, 100),
precision=Precision.AMP_FP16,
max_duration=f'{random.randint(0, 100)}ep',
optimizers=optimizers,
device_train_microbatch_size=2)
state.schedulers = torch.optim.lr_scheduler.StepLR(optimizers, step_size=3)
state.loss = random_tensor()
state.batch = (random_tensor(), random_tensor())
state.outputs = random_tensor()
return state
def train_one_step(state: State, batch: Batch) -> None:
_, y = batch
state.batch = batch
for optimizer in state.optimizers:
optimizer.zero_grad()
state.outputs = state.model(state.batch)
assert isinstance(y, torch.Tensor)
state.loss = F.cross_entropy(state.outputs, y)
state.loss.backward()
for optimizer in state.optimizers:
optimizer.step()
for scheduler in state.schedulers:
scheduler.step()
state.timestamp = state.timestamp.to_next_batch(len(batch))
def test_state_serialize(tmp_path: pathlib.Path, empty_logger: Logger, request: pytest.FixtureRequest):
state1 = get_dummy_state(request)
state2 = get_dummy_state(request)
dataloader = DataLoader(
dataset=RandomClassificationDataset(),
batch_size=2,
)
dataloader_iter = iter(dataloader)
# train one step to set the optimizer states
batch = next(dataloader_iter)
train_one_step(state1, batch)
# load from state1 to state2
state_dict = state1.state_dict()
filepath = str(tmp_path / 'state.pt')
torch.save(state_dict, filepath)
state_dict_2 = torch.load(filepath, map_location='cpu')
state2.load_state_dict(state_dict_2, empty_logger)
# serialization/deserialization should be exact
assert_state_equivalent(state1, state2)
# train both for one step on another sample
batch = next(dataloader_iter)
train_one_step(state1, batch)
train_one_step(state2, batch)
# both states should have equivalent
# state, model parameters, loss, and outputs
assert_state_equivalent(state1, state2)
# yapf: disable
@pytest.mark.parametrize('batch,key,val', [
([1234, 5678], 0, 1234),
([1234, 5678], 1, 5678),
({'a': 1, 'b': 2}, 'a', 1),
({'a': 1, 'b': 2}, 'b', 2),
(({'a': 1, 'b': 7}, {'c': 5}), lambda x: x[1]['c'], 5),
])
# yapf: enable
def test_state_batch_get_item(batch, key, val, request: pytest.FixtureRequest):
state = get_dummy_state(request)
state.batch = batch
assert state.batch_get_item(key) == val
# yapf: disable
@pytest.mark.parametrize('batch,key,val', [
([1234, 5678], 0, 1111),
([1234, 5678], 1, 1111),
({'a': 1, 'b': 2}, 'a', 9),
({'a': 1, 'b': 2}, 'b', 9),
])
# yapf: enable
def test_state_batch_set_item(batch, key, val, request: pytest.FixtureRequest):
state = get_dummy_state(request)
state.batch = batch
state.batch_set_item(key=key, value=val)
assert state.batch_get_item(key) == val
def test_composer_metadata_in_state_dict(tmp_path, request: pytest.FixtureRequest):
state = get_dummy_state(request)
save_path = pathlib.Path(tmp_path) / 'state_dict.pt'
with open(save_path, 'wb') as _tmp_file:
torch.save(state.state_dict(), _tmp_file)
loaded_state_dict = torch.load(save_path)
expected_env_info_keys = set([
'composer_version', 'composer_commit_hash', 'node_world_size', 'host_processor_model_name',
'host_processor_core_count', 'local_world_size', 'accelerator_model_name', 'cuda_device_count'
])
actual_env_info_keys = set(loaded_state_dict['metadata']['composer_env_info'].keys())
assert expected_env_info_keys == actual_env_info_keys
assert loaded_state_dict['metadata']['composer_env_info']['composer_version'] == composer.__version__
assert loaded_state_dict['metadata']['device'] == 'cpu'
assert loaded_state_dict['metadata']['precision'] == 'amp_fp16'
assert loaded_state_dict['metadata']['world_size'] == 1
assert loaded_state_dict['metadata']['device_train_microbatch_size'] == 2
assert loaded_state_dict['metadata']['train_dataloader_batch_size'] == 4
| composer-dev | tests/test_state.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from typing import Dict, List, Mapping, Tuple, Union
import pytest
import torch
from composer.core.data_spec import _default_split_batch, _split_list, _split_tensor
def dummy_tensor_batch(batch_size=12) -> torch.Tensor:
return torch.randn(size=(batch_size, 3, 32, 32))
def dummy_list_str(batch_size=12) -> List[str]:
return [str(x) for x in range(batch_size)]
def dummy_tuple_batch(batch_size=12) -> List[torch.Tensor]:
# pytorch default collate converts tuples to lists
# https://github.com/pytorch/pytorch/blob/e451259a609acdcd83105177ddba73fc41cfa9b4/torch/utils/data/_utils/collate.py#L67
image = torch.randn(size=(batch_size, 3, 32, 32))
target = torch.randint(size=(batch_size,), high=10)
return [image, target]
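# Illustrative sketch (not part of the original tests): torch's default_collate turns a
# list of (image, target) tuples into a list of stacked tensors, which is why the tuple
# batch helpers here return lists. Assumes torch>=1.11, where default_collate is public.
def _example_default_collate_tuples():
    from torch.utils.data import default_collate
    samples = [(torch.randn(3, 32, 32), torch.tensor(label)) for label in (0, 1, 2)]
    collated = default_collate(samples)
    assert isinstance(collated, list)  # the tuples are converted to a list of two tensors
    assert collated[0].shape == (3, 3, 32, 32) and collated[1].shape == (3,)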
def dummy_tuple_batch_long(batch_size=12) -> List[torch.Tensor]:
image_1 = torch.randn(size=(batch_size, 3, 32, 32))
image_2 = torch.randn(size=(batch_size, 3, 32, 32))
image_3 = torch.randn(size=(batch_size, 3, 32, 32))
target = torch.randint(size=(batch_size,), high=10)
return [image_1, image_2, image_3, target]
def dummy_dict_batch(batch_size=12) -> Dict[str, torch.Tensor]:
image = torch.randn(size=(batch_size, 3, 32, 32))
target = torch.randint(size=(batch_size,), high=10)
return {'image': image, 'target': target}
def dummy_dict_batch_with_metadata(batch_size=12) -> Dict[str, Union[List, torch.Tensor, str]]:
    # sometimes a batch includes metadata that isn't consumed by the model.
image = torch.randn(size=(batch_size, 3, 32, 32))
target = torch.randint(size=(batch_size,), high=10)
meta = ['hi im a tag' for _ in range(batch_size)]
index = [[1, 2, 3] for _ in range(batch_size)]
return {'image': image, 'target': target, 'meta': meta, 'index': index}
def dummy_dict_batch_with_common_metadata(batch_size=12) -> Dict[str, Union[List, torch.Tensor, str]]:
    # sometimes a batch includes metadata that isn't consumed by the model.
image = torch.randn(size=(batch_size, 3, 32, 32))
target = torch.randint(size=(batch_size,), high=10)
meta = 'this is a string'
index = [[1, 2, 3] for _ in range(batch_size)]
return {'image': image, 'target': target, 'meta': meta, 'index': index}
def dummy_maskrcnn_batch(batch_size=12,
image_height=12,
image_width=12,
num_classes=80,
max_detections=5) -> List[Tuple[torch.Tensor, Dict[str, torch.Tensor]]]:
def generate_maskrcnn_sample(num_detections,
image_height=image_height,
image_width=image_width,
num_classes=num_classes):
"""Generates a maskrcnn style sample: (Tensor, Dict[Tensor])."""
image = torch.randn(size=(3, image_height, image_width)).type(torch.float)
target = {
'boxes':
torch.randint(size=(num_detections, 4), low=0, high=min(image_height, image_width)).type(torch.float),
'labels':
torch.randint(size=(num_detections,), low=0, high=num_classes + 1),
'masks':
torch.randint(size=(num_detections, image_height, image_width), low=0, high=2).type(torch.uint8)
}
return image, target
return [
generate_maskrcnn_sample(num_detections=n)
for n in torch.randint(size=(batch_size,), low=1, high=max_detections + 1)
]
def dummy_batches(batch_size=12):
return [
dummy_tensor_batch(batch_size=batch_size),
dummy_list_str(batch_size=batch_size),
dummy_tuple_batch(batch_size=batch_size),
dummy_tuple_batch_long(batch_size=batch_size),
dummy_dict_batch(batch_size=batch_size),
dummy_dict_batch_with_metadata(batch_size=batch_size),
dummy_dict_batch_with_common_metadata(batch_size=batch_size),
]
@pytest.mark.parametrize('batch', dummy_batches(12))
def test_split_without_error(batch):
microbatches = _default_split_batch(batch, microbatch_size=3)
assert len(microbatches) == 4
@pytest.mark.parametrize('batch', [dummy_tensor_batch(i) for i in [12, 13, 14, 15]])
def test_tensor_vs_list_chunking(batch):
tensor_microbatches = _split_tensor(batch, microbatch_size=4)
list_microbatches = _split_list([t for t in batch], microbatch_size=4)
assert len(tensor_microbatches) == len(list_microbatches)
assert all(torch.equal(t1, torch.stack(t2, dim=0)) for t1, t2 in zip(tensor_microbatches, list_microbatches))
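# Minimal sketch (an assumption about equivalent behavior, not the library implementation):
# the chunking these tests expect from _split_tensor matches torch.split along dim 0,
# i.e. fixed-size microbatches with a smaller final remainder.
def _example_torch_split_equivalence():
    batch = dummy_tensor_batch(batch_size=10)
    microbatches = list(torch.split(batch, 4, dim=0))
    assert [mb.shape[0] for mb in microbatches] == [4, 4, 2]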
@pytest.mark.parametrize('batch', [dummy_tuple_batch(12)])
def test_split_tuple(batch):
microbatches = _default_split_batch(batch, microbatch_size=4)
    # should be 3 microbatches, each a pair of size-4 tensors,
    # i.e. the batch should split into [(x, y), (x, y), (x, y)]
assert len(microbatches[0]) == 2
@pytest.mark.parametrize('batch', [dummy_tuple_batch_long(12)])
def test_split_tuple_long(batch):
microbatches = _default_split_batch(batch, microbatch_size=4)
assert len(microbatches[0]) == 4
@pytest.mark.parametrize('batch', dummy_batches(6))
def test_batch_sizes(batch):
microbatches = _default_split_batch(batch, microbatch_size=2)
    # should split into [len(2), len(2), len(2)]
assert len(microbatches) == 3
for microbatch in microbatches:
if isinstance(microbatch, Mapping):
assert len(microbatch['image']) == 2
assert len(microbatch['target']) == 2
if isinstance(microbatch, tuple):
assert len(microbatch[0]) == 2
if isinstance(microbatch, list):
assert len(microbatch) == 2
@pytest.mark.parametrize('batch', dummy_batches(5))
def test_odd_batch_sizes(batch):
microbatches = _default_split_batch(batch, microbatch_size=2)
# should split into [len(2), len(2), len(1)]
assert len(microbatches) == 3
last_microbatch = microbatches[-1]
if isinstance(last_microbatch, Mapping):
assert len(last_microbatch['image']) == 1
assert len(last_microbatch['target']) == 1
if isinstance(last_microbatch, tuple):
assert len(last_microbatch[0]) == 1
if isinstance(last_microbatch, list):
assert len(last_microbatch) == 1
@pytest.mark.parametrize('batch', dummy_batches(2))
def test_microbatch_size_greater_than_batch_size(batch):
with pytest.warns(UserWarning):
microbatches = _default_split_batch(batch, microbatch_size=3)
assert len(microbatches) == 1
@pytest.mark.parametrize('batch', [dummy_maskrcnn_batch(12)])
def test_microbatch_size_split_maskrcnn(batch):
microbatches = _split_list(batch, microbatch_size=4)
assert len(microbatches) == 3
@pytest.mark.parametrize('batch', [dummy_dict_batch_with_common_metadata(12)])
def test_primitive_broadcast(batch):
microbatches = _default_split_batch(batch, microbatch_size=3)
assert len(microbatches) == 4
for mb in microbatches:
assert mb['meta'] == 'this is a string'
| composer-dev | tests/test_split_batch.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
import torch
from torch.nn import functional as F
from composer.loss import DiceLoss, soft_cross_entropy
from composer.loss.utils import _one_hot, ensure_targets_one_hot, infer_target_type
def generate_targets():
return [
# Binary classification
torch.randint(low=0, high=2, size=(8,), dtype=torch.long),
# Classification
torch.randint(low=0, high=10, size=(8,), dtype=torch.long),
# Segmentation
torch.randint(low=0, high=2, size=(8, 5, 5), dtype=torch.long),
torch.randint(low=0, high=10, size=(8, 5, 5), dtype=torch.long),
# 3D inputs
torch.randint(low=0, high=10, size=(8, 5, 7, 11), dtype=torch.long),
torch.randint(low=0, high=10, size=(8, 5, 8, 11), dtype=torch.long)
]
@pytest.mark.parametrize('targets', generate_targets())
class TestOneHot():
def test_one_hot(self, targets):
composer_one_hot = _one_hot(targets)
pytorch_one_hot = F.one_hot(targets)
torch.testing.assert_close(composer_one_hot, pytorch_one_hot)
def test_one_hot_num_classes(self, targets):
num_classes = targets.max() + 1
composer_one_hot = _one_hot(targets, num_classes=num_classes)
pytorch_one_hot = F.one_hot(targets, num_classes=num_classes)
torch.testing.assert_close(composer_one_hot, pytorch_one_hot)
@pytest.mark.parametrize('dim', [1])
def test_one_hot_dim(self, targets, dim):
composer_one_hot = _one_hot(targets, dim=dim)
pytorch_one_hot = F.one_hot(targets)
# Move class dim to specified dim
pytorch_one_hot = torch.movedim(pytorch_one_hot, source=-1, destination=dim).contiguous()
torch.testing.assert_close(composer_one_hot, pytorch_one_hot)
@pytest.mark.xfail(raises=ValueError)
def test_one_hot_wrong_type(self, targets):
targets = _one_hot(targets.float())
@pytest.mark.xfail(raises=ValueError)
def test_one_hot_wrong_classes(self, targets):
targets = _one_hot(targets, num_classes=1)
def fake_input_target_pairs(input_shape):
num_classes = input_shape[1]
reduced_input_shape = list(input_shape)
reduced_input_shape.pop(1)
input = torch.randn(input_shape)
targets_idx = torch.randint(low=-1, high=num_classes, size=reduced_input_shape)
targets_one_hot = torch.zeros_like(input)
for i, value in np.ndenumerate(targets_idx):
i_expanded = list(i)
if value >= 0:
i_expanded.insert(1, value)
targets_one_hot[tuple(i_expanded)] = 1.0
return input, targets_idx, targets_one_hot
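# Illustrative check (not exercised by the tests): the hand construction above is needed
# because F.one_hot cannot encode the -1 "ignore" label; for non-negative labels the
# result matches F.one_hot.
def _example_one_hot_equivalence():
    input, targets_idx, targets_one_hot = fake_input_target_pairs((8, 5))
    valid = targets_idx >= 0
    reference = F.one_hot(targets_idx.clamp(min=0), num_classes=5).float()
    assert torch.equal(targets_one_hot[valid], reference[valid])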
def xfail(val):
"""shorthand to mark xfail parameters."""
return pytest.param(val, marks=pytest.mark.xfail)
def generate_tensor_pairs():
return [
# Binary classification
fake_input_target_pairs((64, 2)),
# Classification
fake_input_target_pairs((64, 10)),
# Segmentation
fake_input_target_pairs((64, 2, 5, 5)),
fake_input_target_pairs((64, 10, 5, 5)),
# 3D inputs
fake_input_target_pairs((64, 2, 5, 7, 11)),
fake_input_target_pairs((64, 10, 5, 7, 11))
]
@pytest.mark.filterwarnings(
r'ignore:Negative label indices are being ignored in conversion to one-hot labels:UserWarning')
@pytest.mark.parametrize('tensors', generate_tensor_pairs())
def test_ensure_targets_one_hot(tensors):
input, targets_idx, targets_one_hot = tensors
targets_one_hot_test = ensure_targets_one_hot(input, targets_idx)
torch.testing.assert_close(targets_one_hot, targets_one_hot_test, check_stride=False)
@pytest.mark.parametrize('tensors', generate_tensor_pairs())
class TestSoftCrossEntropy:
def test_infer_target_type(self, tensors):
input, target_indices, target_onehot = tensors
assert infer_target_type(input, target_indices) == 'indices'
assert infer_target_type(input, target_onehot) == 'one_hot'
@pytest.mark.parametrize('reduction', ['mean', 'sum'])
@pytest.mark.parametrize('use_weights', [xfail(True), False])
# TODO(Cory): Remove this filterwarning
@pytest.mark.filterwarnings(r'ignore:Some targets have less than 1 total probability:UserWarning')
def test_soft_cross_entropy(self, tensors, use_weights, reduction):
input, target_indices, target_onehot = tensors
if use_weights:
num_classes = target_onehot.shape[1]
weights = torch.rand(size=[num_classes])
else:
weights = None
loss_indices = soft_cross_entropy(input, target_indices, weight=weights, reduction=reduction, ignore_index=-1)
loss_onehot = soft_cross_entropy(input, target_onehot, weight=weights, reduction=reduction)
loss_reference = F.cross_entropy(input, target_indices, weight=weights, reduction=reduction, ignore_index=-1)
torch.testing.assert_close(loss_indices, loss_onehot)
torch.testing.assert_close(loss_indices, loss_reference)
@pytest.mark.filterwarnings('ignore::UserWarning')
@pytest.mark.parametrize('squared_pred', [True, False])
@pytest.mark.parametrize('jaccard', [True, False])
@pytest.mark.parametrize('batch', [True, False])
@pytest.mark.parametrize('ignore_absent_classes', [True, False])
class TestDiceLoss:
@pytest.fixture()
def target(self):
target = torch.tensor([[[-1], [0], [1], [2]]]).repeat(1, 1, 4)
target = torch.cat([target, target[:, [1, 2, 3, 0]], target[:, [2, 3, 0, 1]], target[:, [3, 0, 1, 2]]], dim=0)
return target
@pytest.fixture()
def correct_input(self, target):
input = target.clone()
input[input == -1] = 1 # replace negative label with class prediction
input = F.one_hot(input)
input = torch.movedim(input, 3, 1)
return input
@pytest.fixture()
def incorrect_input(self, correct_input):
return correct_input[[3, 2, 1, 0]]
@pytest.mark.parametrize('reduction', ['mean', 'sum'])
def test_correct_prediction(self, correct_input: torch.Tensor, target: torch.Tensor, squared_pred: bool,
jaccard: bool, batch: bool, ignore_absent_classes: bool, reduction: str):
dice_loss = DiceLoss(squared_pred=squared_pred,
jaccard=jaccard,
batch=batch,
ignore_absent_classes=ignore_absent_classes,
reduction=reduction)
assert dice_loss(correct_input, target) == 0.0
def test_incorrect_prediction(self, incorrect_input: torch.Tensor, target: torch.Tensor, squared_pred: bool,
jaccard: bool, batch: bool, ignore_absent_classes: bool):
dice_loss = DiceLoss(squared_pred=squared_pred,
jaccard=jaccard,
batch=batch,
ignore_absent_classes=ignore_absent_classes)
loss = dice_loss(incorrect_input, target)
torch.testing.assert_close(loss, torch.tensor(1.0))
| composer-dev | tests/test_loss.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from typing import Dict, List, Tuple, Union
import pytest
import torch
from composer.devices import DeviceCPU, DeviceGPU
from composer.devices.device import _map_batch
from tests.common import device, world_size
def dummy_tensor_batch() -> torch.Tensor:
return torch.randn(size=(1, 1, 1, 1))
def dummy_tuple_batch() -> Tuple[torch.Tensor, torch.Tensor]:
image = torch.randn(size=(1, 1, 1, 1))
target = torch.randint(size=(1,), high=10)
return image, target
def dummy_tuple_batch_long() -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
image_1 = torch.randn(size=(1, 1, 1, 1))
image_2 = torch.randn(size=(1, 1, 1, 1))
image_3 = torch.randn(size=(1, 1, 1, 1))
target = torch.randint(size=(1,), high=10)
return image_1, image_2, image_3, target
def dummy_dict_batch() -> Dict[str, torch.Tensor]:
image = torch.randn(size=(1, 1, 1, 1))
target = torch.randint(size=(1,), high=10)
return {'image': image, 'target': target}
def dummy_dict_batch_with_metadata(batch_size=1) -> Dict[str, Union[List, torch.Tensor, str]]:
    # sometimes a batch includes metadata that isn't consumed by the model.
image = torch.randn(size=(batch_size, 1, 1, 1))
target = torch.randint(size=(batch_size,), high=10)
meta = ['hi im a tag' for _ in range(batch_size)]
index = [[1, 2, 3] for _ in range(batch_size)]
return {'image': image, 'target': target, 'meta': meta, 'index': index}
def dummy_maskrcnn_batch() -> List[Tuple[torch.Tensor, Dict[str, torch.Tensor]]]:
def generate_maskrcnn_sample(num_detections, image_height=1, image_width=1, num_classes=1):
"""Generates a maskrcnn style sample: (Tensor, Dict[Tensor])."""
image = torch.randn(size=(3, image_height, image_width)).type(torch.float)
target = {
'boxes':
torch.randint(size=(num_detections, 4), low=0, high=min(image_height, image_width)).type(torch.float),
'labels':
torch.randint(size=(num_detections,), low=0, high=num_classes),
'masks':
torch.randint(size=(num_detections, image_height, image_width), low=0, high=2).type(torch.uint8)
}
return image, target
def generate_maskrcnn_batch(batch_size, max_detections):
return [generate_maskrcnn_sample(n) for n in torch.randint(size=(batch_size,), low=1, high=max_detections)]
return generate_maskrcnn_batch(batch_size=1, max_detections=2)
@device('cpu', 'gpu')
@pytest.mark.parametrize('batch', [
dummy_tensor_batch(),
dummy_tuple_batch(),
dummy_tuple_batch_long(),
dummy_dict_batch(),
dummy_dict_batch_with_metadata(),
dummy_maskrcnn_batch()
])
def test_to_device(device, batch):
device_handler = DeviceCPU() if device == 'cpu' else DeviceGPU()
def assert_device(x):
if isinstance(x, torch.Tensor):
assert x.device.type == device_handler._device.type
new_batch = device_handler.batch_to_device(batch)
_map_batch(new_batch, assert_device)
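# Illustrative sketch of the behavior exercised above: batch_to_device recurses through
# nested containers, moves only the tensors, and passes non-tensor metadata through
# unchanged. DeviceCPU is used so the sketch runs without a GPU.
def _example_nested_batch_to_device():
    batch = {'image': torch.randn(2, 1, 1, 1), 'meta': ['tag-a', 'tag-b'], 'index': [[1, 2], [3, 4]]}
    moved = DeviceCPU().batch_to_device(batch)
    assert moved['image'].device.type == 'cpu'
    assert moved['meta'] == ['tag-a', 'tag-b'] and moved['index'] == [[1, 2], [3, 4]]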
@world_size(2)
@device('gpu')
def test_gpu_device_id(device, world_size):
device_gpu = DeviceGPU(device_id=0)
assert device_gpu._device.index == 0
| composer-dev | tests/test_device.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import math
import pytest
import torch
from packaging import version
from torch.utils.data import DataLoader
from composer import Trainer
from composer.core import Event, Time
from composer.core.time import TimeUnit
from composer.utils import dist
from tests.common import RandomClassificationDataset, SimpleModel
from tests.common.events import EventCounterCallback
@pytest.mark.parametrize('event', list(Event))
def test_event_values(event: Event):
assert event.name.lower() == event.value
class TestEventCalls:
eval_subset_num_batches = 5
train_subset_num_batches = 5
def get_trainer(self, precision='fp32', **kwargs):
model = SimpleModel()
optimizer = torch.optim.Adam(model.parameters())
train_dataset = RandomClassificationDataset()
eval_dataset = RandomClassificationDataset()
train_batch_size = 4
return Trainer(
model=model,
train_dataloader=DataLoader(
dataset=train_dataset,
batch_size=train_batch_size,
sampler=dist.get_sampler(train_dataset),
),
eval_dataloader=DataLoader(
dataset=eval_dataset,
batch_size=8,
sampler=dist.get_sampler(eval_dataset),
),
device_train_microbatch_size=train_batch_size // 2,
precision=precision,
train_subset_num_batches=self.train_subset_num_batches,
eval_subset_num_batches=self.eval_subset_num_batches,
max_duration='2ep',
optimizers=optimizer,
callbacks=[EventCounterCallback()],
**kwargs,
)
@pytest.mark.parametrize('world_size', [
pytest.param(1),
pytest.param(2, marks=pytest.mark.world_size(2)),
])
@pytest.mark.parametrize(
'device,deepspeed_zero_stage,use_fsdp,precision',
[
pytest.param('cpu', None, False, 'fp32', id='cpu-ddp'),
            # TODO: Remove filterwarnings after FSDP removes deprecated code
pytest.param('gpu', True, False, 'fp32', id='gpu-ddp', marks=pytest.mark.gpu),
pytest.param('gpu',
None,
True,
'amp_fp16',
id='gpu-fsdp',
marks=[
pytest.mark.gpu,
pytest.mark.skipif(version.parse(torch.__version__) < version.parse('1.13.0'),
reason='requires PyTorch 1.13 or higher'),
pytest.mark.filterwarnings('ignore::UserWarning'),
]),
])
@pytest.mark.parametrize('save_interval', ['1ep', '1ba'])
def test_event_calls(self, world_size, device, deepspeed_zero_stage, use_fsdp, precision, save_interval):
save_interval = Time.from_timestring(save_interval)
deepspeed_config = None
if deepspeed_zero_stage:
deepspeed_config = {'zero_optimization': {'stage': deepspeed_zero_stage}}
fsdp_config = None
if use_fsdp:
fsdp_config = {
'sharding_strategy': 'FULL_SHARD',
'min_params': 1e8,
'cpu_offload': False,
'mixed_precision': 'PURE',
'backward_prefetch': 'BACKWARD_PRE',
'activation_checkpointing': False,
                'activation_cpu_offload': False,
'verbose': False
}
trainer = self.get_trainer(
precision=precision,
device=device,
deepspeed_config=deepspeed_config,
fsdp_config=fsdp_config,
save_interval=save_interval,
eval_interval=save_interval,
)
trainer.fit()
self._assert_expected_event_calls(trainer, save_interval, num_epochs=2)
def _assert_expected_event_calls(self, trainer: Trainer, eval_interval: Time, num_epochs: int):
state = trainer.state
assert state.dataloader_len is not None
total_steps = num_epochs * int(state.dataloader_len)
batch_size = state.train_dataloader.batch_size # type: ignore
assert batch_size is not None
assert state.device_train_microbatch_size is not None
total_microbatches = total_steps * math.ceil(batch_size / state.device_train_microbatch_size)
if eval_interval.unit == TimeUnit.BATCH:
total_evals = total_steps // int(eval_interval)
elif eval_interval.unit == TimeUnit.EPOCH:
total_evals = num_epochs // int(eval_interval)
else:
total_evals = 0
if trainer.state.evaluators:
steps_per_eval = self.eval_subset_num_batches
total_eval_steps = total_evals * steps_per_eval * len(trainer.state.evaluators)
else:
total_eval_steps = 0
expected_num_calls = {
Event.INIT: 1,
Event.AFTER_LOAD: 1,
Event.EPOCH_START: num_epochs,
Event.BATCH_START: total_steps,
Event.BEFORE_DATALOADER: total_steps + num_epochs, # extra call per epoch when dataloader is exhausted
Event.AFTER_DATALOADER: total_steps,
Event.BEFORE_FORWARD: total_microbatches,
Event.AFTER_FORWARD: total_microbatches,
Event.BEFORE_LOSS: total_microbatches,
Event.AFTER_LOSS: total_microbatches,
Event.BEFORE_BACKWARD: total_microbatches,
Event.AFTER_BACKWARD: total_microbatches,
Event.BEFORE_TRAIN_BATCH: total_steps,
Event.AFTER_TRAIN_BATCH: total_steps,
Event.BATCH_END: total_steps,
Event.BATCH_CHECKPOINT: total_steps,
Event.EPOCH_END: num_epochs,
Event.EPOCH_CHECKPOINT: num_epochs,
Event.EVAL_START: total_evals,
Event.EVAL_BATCH_START: total_eval_steps,
Event.EVAL_BEFORE_FORWARD: total_eval_steps,
Event.EVAL_AFTER_FORWARD: total_eval_steps,
Event.EVAL_BATCH_END: total_eval_steps,
Event.EVAL_END: total_evals,
}
counter_callback = (cb for cb in trainer.state.callbacks if isinstance(cb, EventCounterCallback))
counter_callback = next(counter_callback)
for event, expected in expected_num_calls.items():
actual = counter_callback.event_to_num_calls[event]
assert expected == actual, f'{event} call mismatch: {expected} != {actual}'
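# Minimal sketch (an assumption about how EventCounterCallback might work, not its actual
# implementation): a Callback can count every Event by overriding run_event, which the
# trainer invokes once per event occurrence.
from composer.core import Callback, State
from composer.loggers import Logger
class _ExampleEventCounter(Callback):
    def __init__(self):
        self.event_to_num_calls = {event: 0 for event in Event}
    def run_event(self, event: Event, state: State, logger: Logger) -> None:
        self.event_to_num_calls[event] += 1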
| composer-dev | tests/test_events.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from composer import (algorithms, callbacks, core, datasets, devices, functional, loggers, loss, metrics, models, optim,
profiler, trainer, utils)
# This very simple test just exercises the imports above, checking that all of composer's
# top-level modules can be imported. It is mainly useful for verifying that optional
# dependencies are imported conditionally and correctly.
def test_smoketest():
assert callbacks
assert algorithms
assert core
assert datasets
assert devices
assert functional
assert loggers
assert loss
assert metrics
assert models
assert optim
assert profiler
assert trainer
assert utils
| composer-dev | tests/test_smoketest.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import os
import platform
import PIL
import pytest
from torch.utils.data import DataLoader
from composer.trainer import Trainer
from tests.common import RandomClassificationDataset, SimpleModel
@pytest.mark.skipif('composer-python' not in os.environ['PATH'] or 'Linux' not in platform.system(),
reason='Pillow-simd test only checks if using the composer docker')
class TestDocker:
def test_pillow_simd(self):
assert 'post' in PIL.__version__, 'pillow-simd is not installed'
@pytest.mark.gpu
def test_apex(self):
"""Test that apex is installed and works in the GPU image."""
import apex
model = SimpleModel()
opt = apex.optimizers.FusedAdam(model.parameters(), lr=0.01)
trainer = Trainer(
model=model,
train_dataloader=DataLoader(RandomClassificationDataset()),
optimizers=opt,
max_duration='2ba',
)
trainer.fit()
| composer-dev | tests/test_docker.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from unittest.mock import MagicMock
import pytest
from torch.utils.data import DataLoader
from composer.core import Callback, State
from composer.loggers import Logger
from composer.trainer import Trainer
from tests.common import SimpleModel
from tests.common.datasets import RandomClassificationDataset
class MetricsCallback(Callback):
def __init__(self, compute_val_metrics: bool) -> None:
self.compute_val_metrics = compute_val_metrics
self._train_batch_end_train_accuracy = None
def init(self, state: State, logger: Logger) -> None:
# on init, the current metrics should be empty
del logger # unused
assert state.train_metrics == {}, 'no train metrics should be defined on init()'
assert state.eval_metrics == {}, 'no eval metrics should be defined on init()'
def batch_end(self, state: State, logger: Logger) -> None:
# The metric should be computed and updated on state every batch.
del logger # unused
# assuming that at least one sample was correctly classified
assert state.train_metrics['MulticlassAccuracy'].compute() != 0.0
self._train_batch_end_train_accuracy = state.train_metrics['MulticlassAccuracy']
def epoch_end(self, state: State, logger: Logger) -> None:
# The metric at epoch end should be the same as on batch end.
del logger # unused
assert state.train_metrics['MulticlassAccuracy'].compute() == self._train_batch_end_train_accuracy
def eval_end(self, state: State, logger: Logger) -> None:
if self.compute_val_metrics:
# assuming that at least one sample was correctly classified
assert state.eval_metrics['eval']['MulticlassAccuracy'].compute() != 0.0
@pytest.mark.parametrize('eval_interval', ['1ba', '1ep', '0ep'])
def test_current_metrics(eval_interval: str,):
# Configure the trainer
mock_logger_destination = MagicMock()
mock_logger_destination.log_metrics = MagicMock()
model = SimpleModel(num_features=1, num_classes=2)
compute_val_metrics = eval_interval != '0ep'
train_subset_num_batches = 2
eval_subset_num_batches = 2
num_epochs = 2
metrics_callback = MetricsCallback(compute_val_metrics=compute_val_metrics,)
dataset_kwargs = {
'num_classes': 2,
'shape': (1, 5, 5),
}
# Create the trainer
trainer = Trainer(
model=model,
train_dataloader=DataLoader(
RandomClassificationDataset(**dataset_kwargs),
batch_size=16,
),
eval_dataloader=DataLoader(
RandomClassificationDataset(**dataset_kwargs),
batch_size=8,
),
max_duration=num_epochs,
train_subset_num_batches=train_subset_num_batches,
eval_subset_num_batches=eval_subset_num_batches,
loggers=[mock_logger_destination],
callbacks=[metrics_callback],
eval_interval=eval_interval,
)
# Train the model
trainer.fit()
if not compute_val_metrics:
return
# Validate the metrics
assert trainer.state.train_metrics['MulticlassAccuracy'].compute() != 0.0
if compute_val_metrics:
assert trainer.state.eval_metrics['eval']['MulticlassAccuracy'].compute() != 0.0
else:
assert 'eval' not in trainer.state.eval_metrics
num_step_and_index_calls = 2 # global_step and batch_idx calls
num_loss_calls_per_epoch = 1
# Validate that the logger was called the correct number of times for metric calls
num_expected_calls = 0
# Every epoch is logged.
num_expected_calls += num_epochs
num_expected_calls += num_epochs * train_subset_num_batches * num_step_and_index_calls
num_expected_calls += num_epochs * train_subset_num_batches * num_loss_calls_per_epoch
# computed once per batch
# and again at epoch end
num_expected_calls += (train_subset_num_batches + 1) * num_epochs
# computed at eval end
if compute_val_metrics:
num_evals = 0
if eval_interval == '1ba':
num_evals += train_subset_num_batches * num_epochs
if eval_interval == '1ep':
num_evals += num_epochs
num_expected_calls += num_evals
num_actual_calls = len(mock_logger_destination.log_metrics.mock_calls)
assert num_actual_calls == num_expected_calls
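# Worked instance of the counting above (illustrative): with eval_interval='1ep',
# num_epochs=2 and train_subset_num_batches=2, the expected number of log_metrics calls
# is 2 + 2*2*2 + 2*2*1 + (2 + 1)*2 + 2 == 22.
def _example_expected_log_calls_for_1ep():
    num_epochs, train_subset_num_batches = 2, 2
    expected = num_epochs  # one log per epoch
    expected += num_epochs * train_subset_num_batches * 2  # global_step and batch_idx
    expected += num_epochs * train_subset_num_batches * 1  # loss
    expected += (train_subset_num_batches + 1) * num_epochs  # train metrics per batch plus epoch end
    expected += num_epochs  # one eval logging event per epoch at '1ep'
    return expected  # == 22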
| composer-dev | tests/metrics/test_current_metrics.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from tests.metrics.metric_setter_callback import MetricSetterCallback
__all__ = ['MetricSetterCallback']
| composer-dev | tests/metrics/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
import torch.nn.functional as F
from composer.metrics import MIoU
@pytest.fixture
def block_2D_targets():
base = torch.arange(4)
targets = []
for i in range(4):
targets.append(torch.roll(base, i).repeat_interleave(2).view(2, 4).repeat_interleave(2, dim=0))
targets = torch.stack(targets)
return targets
def test_miou(block_2D_targets):
miou = MIoU(num_classes=4)
# Test if predictions identical to target equal 1.0
    # TODO: convert the prediction to one-hot
accurate_prediction = F.one_hot(block_2D_targets, num_classes=4).permute(0, 3, 1, 2)
miou.update(accurate_prediction, block_2D_targets)
assert miou.compute() == 100.
miou.reset()
# Test if completely incorrect predictions equal 0.0
inaccurate_prediction = torch.flip(accurate_prediction, dims=(0,))
miou.update(inaccurate_prediction, block_2D_targets)
assert miou.compute() == 0.0
miou.reset()
# Test if halfway correct predictions is close to 33.3333
accurateish_prediction = torch.roll(accurate_prediction, shifts=1, dims=2)
miou.update(accurateish_prediction, block_2D_targets)
assert torch.isclose(miou.compute(), torch.tensor(33.3333, dtype=torch.double))
miou.reset()
# Test if all zeros prediction is equal to 6.25
all_zeros = torch.zeros(4, 1, 4, 4)
miou.update(all_zeros, block_2D_targets)
assert miou.compute() == 6.25
miou.reset()
# Test if only one correct sample is equal to 100 * (1/7)
one_accurate_prediction = inaccurate_prediction.clone()
one_accurate_prediction[0] = accurate_prediction[0]
miou.update(one_accurate_prediction, block_2D_targets)
assert torch.isclose(miou.compute(), torch.tensor(100 / 7, dtype=torch.double))
| composer-dev | tests/metrics/test_miou.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import math
import pytest
import torch
from torch.nn.functional import cross_entropy
from composer.metrics.nlp import (BinaryF1Score, HFCrossEntropy, InContextLearningLMAccuracy,
InContextLearningMultipleChoiceAccuracy, InContextLearningQAAccuracy,
LanguageCrossEntropy, LanguagePerplexity, MaskedAccuracy, Perplexity)
@pytest.mark.parametrize('ignore_index', [-100])
@pytest.mark.parametrize('num_classes', [2, 3, 4, 5])
def test_masked_accuracy(ignore_index, num_classes):
"""Sanity check to make sure that masked accuracy has reasonable performance.
Generates random targets and labels, and then ensures that the random targets and labels
must hit at-chance accuracy.
    Args:
        ignore_index (Optional[int]): if present, the class index to ignore in accuracy calculations.
        num_classes (int): the number of classes in the classification task
"""
batch_size = int(1e4)
torchmetrics_masked_acc = MaskedAccuracy(ignore_index=ignore_index)
    # random predictions against random labels should land at chance accuracy, i.e. 1 / num_classes
generated_preds = torch.rand((batch_size, num_classes))
true_labels = torch.randint(low=0, high=num_classes - 1, size=(batch_size,))
if ignore_index is not None:
labels_mask = torch.rand((batch_size,))
labels_mask[labels_mask > 0.8] = 1
labels_mask[labels_mask <= 0.8] = 0
labels_mask = labels_mask.bool()
true_labels[labels_mask] = ignore_index
true_labels = true_labels.float()
generated_preds = generated_preds.float()
torchmetrics_masked_acc.update(generated_preds, true_labels)
final_acc = torchmetrics_masked_acc.compute()
assert abs(final_acc - (1.0 / num_classes)) < 0.02
@pytest.mark.parametrize('ignore_index', [-100])
@pytest.mark.parametrize('batch_size', [1e2, 1e3])
@pytest.mark.parametrize('sequence_length', [128])
@pytest.mark.parametrize('num_classes', [2, 10])
@pytest.mark.parametrize('minibatch_size', [56, 256, 768])
def test_cross_entropy(batch_size: float, ignore_index: int, sequence_length: int, num_classes: int,
minibatch_size: int):
"""Sanity check to make sure that batched CrossEntropyLoss matches the expected performance.
Generates a predicted distribution from a normal distribution, and a ground truth from a normal distribution.
Verifies Cross Entropy Loss against the baseline performance.
Args:
batch_size (int): how many samples are in each batch
ignore_index (Optional[int]): if present, the class index to ignore in accuracy calculations.
sequence_length (int): the length of the generated sequence
num_classes (int): the number of classes in the classification task
minibatch_size (int): the minibatch size to simulate for model predictions
"""
batch_size = int(batch_size)
generated_preds = torch.randn((batch_size, sequence_length, num_classes))
generated_true = torch.randint(low=0, high=num_classes, size=(batch_size, sequence_length))
torchmetrics_xent = LanguageCrossEntropy(dist_sync_on_step=False, ignore_index=ignore_index)
ce_with_keys_metric = LanguageCrossEntropy(dist_sync_on_step=False, ignore_index=ignore_index)
if ignore_index is not None:
labels_mask = torch.rand((batch_size, sequence_length))
labels_mask[labels_mask > 0.8] = 1
labels_mask[labels_mask <= 0.8] = 0
labels_mask = labels_mask.bool()
generated_true[labels_mask] = ignore_index
num_batches = math.ceil(batch_size / minibatch_size)
for batch_idx in range(num_batches):
begin_idx = (batch_idx * minibatch_size)
end_idx = ((batch_idx + 1) * minibatch_size)
preds_subset = generated_preds[begin_idx:end_idx]
true_subset = generated_true[begin_idx:end_idx]
torchmetrics_xent.update(preds_subset, true_subset)
ce_with_keys_metric.update(
{
'logits': preds_subset.view(-1, num_classes),
'loss': cross_entropy(preds_subset.view(-1, num_classes), true_subset.view(-1))
}, true_subset.view(-1))
torchmetrics_loss = torchmetrics_xent.compute()
ce_with_keys_loss = ce_with_keys_metric.compute()
correct_loss = cross_entropy(generated_preds.view(-1, num_classes), generated_true.view(-1))
assert torchmetrics_loss == ce_with_keys_loss
assert torch.isclose(correct_loss, torchmetrics_loss)
@pytest.mark.parametrize('batch_size', [1e2, 1e3, 1e4])
@pytest.mark.parametrize('minibatch_size', [256, 768])
def test_binary_f1(batch_size, minibatch_size):
"""Sanity check to make sure that BinaryF1 TorchMetrics implementation matches the sklearn implementation.
Generates a predicted set of labels, and a random set, and compares the resultant Binary F1 score.
Args:
batch_size (int): how many samples are in each batch
minibatch_size (int): the minibatch size to simulate for model predictions
"""
pytest.importorskip('sklearn', reason='sklearn is an optional dependency')
from sklearn.metrics import f1_score
batch_size = int(batch_size)
generated_preds = torch.randn(size=(batch_size, 2))
generated_true = torch.randint(low=0, high=2, size=(batch_size,))
binary_f1 = BinaryF1Score()
num_batches = math.ceil(batch_size / minibatch_size)
for batch_idx in range(num_batches):
begin_idx = (batch_idx * minibatch_size)
end_idx = ((batch_idx + 1) * minibatch_size)
preds_subset = generated_preds[begin_idx:end_idx]
true_subset = generated_true[begin_idx:end_idx]
binary_f1.update(preds_subset, true_subset)
torchmetrics_f1 = binary_f1.compute()
generated_preds = torch.argmax(generated_preds, dim=1)
correct_f1 = f1_score(y_true=generated_true, y_pred=generated_preds)
assert correct_f1 == torchmetrics_f1
def test_hf_cross_entropy_equivalence():
batch_size = 1024
sequence_length = 64
num_classes = 10
ignore_index = -100
minibatch_size = 128
generated_preds = torch.randn((batch_size, sequence_length, num_classes))
generated_true = torch.randint(low=0, high=num_classes, size=(batch_size, sequence_length))
ce_with_keys_metric = HFCrossEntropy(dist_sync_on_step=False)
ce_direct_loss_metric = HFCrossEntropy(dist_sync_on_step=False)
ce_tensors_metric = HFCrossEntropy(dist_sync_on_step=False)
labels_mask = torch.rand((batch_size, sequence_length))
labels_mask[labels_mask > 0.8] = 1
labels_mask[labels_mask <= 0.8] = 0
labels_mask = labels_mask.bool()
generated_true[labels_mask] = ignore_index
num_batches = math.ceil(batch_size / minibatch_size)
for batch_idx in range(num_batches):
begin_idx = (batch_idx * minibatch_size)
end_idx = ((batch_idx + 1) * minibatch_size)
preds_subset = generated_preds[begin_idx:end_idx]
true_subset = generated_true[begin_idx:end_idx]
ce_tensors_metric.update(preds_subset.view(-1, num_classes), true_subset.view(-1))
ce_with_keys_metric.update({'logits': preds_subset.view(-1, num_classes)}, true_subset.view(-1))
ce_direct_loss_metric.update({'loss': cross_entropy(preds_subset.view(-1, num_classes), true_subset.view(-1))},
true_subset)
ce_tensors = ce_tensors_metric.compute()
ce_with_keys = ce_with_keys_metric.compute()
ce_direct_loss = ce_direct_loss_metric.compute()
correct_loss = cross_entropy(generated_preds.view(-1, num_classes), generated_true.view(-1))
assert ce_tensors == ce_with_keys
assert ce_tensors == ce_direct_loss
assert all(torch.isclose(metric, correct_loss) for metric in [ce_tensors, ce_with_keys, ce_direct_loss])
def test_hf_perplexity():
batch_size = 1024
sequence_length = 64
num_classes = 10
ignore_index = -100
minibatch_size = 128
generated_preds = torch.randn((batch_size, sequence_length, num_classes))
generated_true = torch.randint(low=0, high=num_classes, size=(batch_size, sequence_length))
ce_metric = HFCrossEntropy(dist_sync_on_step=False)
perplexity_metric = Perplexity(dist_sync_on_step=False)
labels_mask = torch.rand((batch_size, sequence_length))
labels_mask[labels_mask > 0.8] = 1
labels_mask[labels_mask <= 0.8] = 0
labels_mask = labels_mask.bool()
generated_true[labels_mask] = ignore_index
num_batches = math.ceil(batch_size / minibatch_size)
for batch_idx in range(num_batches):
begin_idx = (batch_idx * minibatch_size)
end_idx = ((batch_idx + 1) * minibatch_size)
preds_subset = generated_preds[begin_idx:end_idx]
true_subset = generated_true[begin_idx:end_idx]
ce_metric.update(preds_subset.view(-1, num_classes), true_subset.view(-1))
perplexity_metric.update(preds_subset.view(-1, num_classes), true_subset.view(-1))
ce = ce_metric.compute()
perplexity = perplexity_metric.compute()
assert torch.equal(torch.exp(ce), perplexity)
def test_language_perplexity():
batch_size = 1024
sequence_length = 64
num_classes = 10
ignore_index = -100
minibatch_size = 128
generated_preds = torch.randn((batch_size, sequence_length, num_classes))
generated_true = torch.randint(low=0, high=num_classes, size=(batch_size, sequence_length))
ce_metric = LanguageCrossEntropy(dist_sync_on_step=False)
perplexity_metric = LanguagePerplexity(dist_sync_on_step=False)
labels_mask = torch.rand((batch_size, sequence_length))
labels_mask[labels_mask > 0.8] = 1
labels_mask[labels_mask <= 0.8] = 0
labels_mask = labels_mask.bool()
generated_true[labels_mask] = ignore_index
num_batches = math.ceil(batch_size / minibatch_size)
for batch_idx in range(num_batches):
begin_idx = (batch_idx * minibatch_size)
end_idx = ((batch_idx + 1) * minibatch_size)
preds_subset = generated_preds[begin_idx:end_idx]
true_subset = generated_true[begin_idx:end_idx]
ce_metric.update(preds_subset, true_subset)
perplexity_metric.update(preds_subset, true_subset)
ce = ce_metric.compute()
perplexity = perplexity_metric.compute()
assert torch.equal(torch.exp(ce), perplexity)
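# Worked sanity check (illustrative, not part of the original tests): for uniform logits
# over C classes the cross-entropy is log(C), so the perplexity exp(CE) equals C, which is
# the relationship the two perplexity tests above assert.
def _example_uniform_perplexity():
    num_classes = 10
    logits = torch.zeros(8, num_classes)  # softmax of all-zero logits is uniform
    targets = torch.randint(low=0, high=num_classes, size=(8,))
    ce = cross_entropy(logits, targets)
    assert torch.isclose(torch.exp(ce), torch.tensor(float(num_classes)))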
def test_in_context_learning_lm_accuracy(tiny_gpt2_tokenizer):
contexts = ['The dog is', 'I love to eat', 'I hate', 'The weather is']
continuations = [' furry', ' pie', ' long lines', ' snowy']
pad = tiny_gpt2_tokenizer.pad_token_id
inputs = [
tiny_gpt2_tokenizer(context)['input_ids'] + tiny_gpt2_tokenizer(continuation)['input_ids']
for context, continuation in zip(contexts, continuations)
]
inputs = torch.tensor([input + [pad] * (2048 - len(input)) for input in inputs])
cont_idxs = []
for context, continuation in zip(contexts, continuations):
start = len(tiny_gpt2_tokenizer(context)['input_ids'])
end = start + len(tiny_gpt2_tokenizer(continuation)['input_ids'])
cont_idxs.append(torch.tensor(list(range(start, end))))
batch = {'continuation_indices': cont_idxs, 'labels': inputs, 'input_ids': inputs}
logits = torch.nn.functional.one_hot(inputs, num_classes=pad + 1)
logits[2] = logits[1].clone() # make one of the answers incorrect
metric = InContextLearningLMAccuracy()
metric.update(batch, logits, batch['labels'])
assert metric.compute() == 0.75
def test_in_context_learning_qa_accuracy():
outputs = ['Correct but then some more text', 'Incorrect', ' the CORREct with weird casing and spacing']
labels = [['Correct'], ['blah', 'blah2'], ['blah', 'correct']]
metric = InContextLearningQAAccuracy()
metric.update(outputs, labels)
assert metric.compute() == (2 / 3)
def test_in_context_learning_mc_accuracy(tiny_gpt2_tokenizer):
contexts = [
'Q: How do you cook a cake?', 'Q: How do you cook a cake?', 'Q: How old is the earth?',
'Q: How old is the earth?'
]
continuations = [' A: turn on the oven', ' A: do a backflip', ' A: 2 minutes', ' A: 4.5 billion years']
gold_indices = [0, 1]
choice_groupings = [(0, 2), (2, 4)]
pad = tiny_gpt2_tokenizer.pad_token_id
inputs = [
tiny_gpt2_tokenizer(context)['input_ids'] + tiny_gpt2_tokenizer(continuation)['input_ids']
for context, continuation in zip(contexts, continuations)
]
inputs = torch.tensor([input + [pad] * (2048 - len(input)) for input in inputs])
cont_idxs = []
for context, continuation in zip(contexts, continuations):
start = len(tiny_gpt2_tokenizer(context)['input_ids'])
end = start + len(tiny_gpt2_tokenizer(continuation)['input_ids'])
cont_idxs.append(torch.tensor(list(range(start, end))))
batch = {
'continuation_indices': cont_idxs,
'labels': inputs,
'input_ids': inputs,
'gold_indices': gold_indices,
'choice_groupings': choice_groupings
}
logits = torch.nn.functional.one_hot(inputs, num_classes=pad + 1).float()
# for the first two, the correct answer is continuation 0
# make the answer correct by making continuation 0 more likely for both answers
start, end = cont_idxs[1].tolist()[0], cont_idxs[1].tolist()[-1]
logits[1][start:end] = logits[0][start:end].clone()
# for the last two, the correct answer is continuation 3
# make the answer incorrect by making continuation 2 more likely for both answers
start, end = cont_idxs[3].tolist()[0], cont_idxs[3].tolist()[-1]
logits[3][start:end] = logits[2][start:end].clone()
metric = InContextLearningMultipleChoiceAccuracy()
metric.update(batch, logits, batch['labels'])
assert metric.compute() == 0.5
| composer-dev | tests/metrics/test_nlp_metrics.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from typing import Callable, Dict, Optional, Sequence, Tuple
import torch
from composer.core import Callback, State, TimeUnit
from composer.devices import Device
from composer.loggers import Logger
class MetricSetterCallback(Callback):
def __init__(
self,
monitor: str,
dataloader_label: str,
metric_cls: Callable, # metric function
metric_sequence: Sequence,
unit: TimeUnit,
device: Optional[Device] = None,
metric_args: Optional[Dict] = None):
self.monitor = monitor
self.dataloader_label = dataloader_label
self.metric_cls = metric_cls
self.metric_sequence = metric_sequence
self.unit = unit
self.device = device
self.metric_args = metric_args
if self.metric_args is None:
self.metric_args = {}
def _generate_dummy_metric_inputs(self, target_val) -> Tuple[torch.Tensor, torch.Tensor]:
"""Generate fake set of predictions and target values to satisfy the given target accuracy value."""
# predictions is a tensor with a ratio of target_val 1s to sub_target 0s
preds_ones = torch.ones(int(target_val * 10), dtype=torch.uint8)
sub_target = float('{0:.2f}'.format((1 - target_val) * 10))
preds_zeros = torch.zeros(int(sub_target), dtype=torch.uint8)
preds = torch.cat((preds_ones, preds_zeros))
# targets is a tensor full of ones
targets = torch.ones(10, dtype=torch.uint8)
return (preds, targets)
def _update_metrics(self, state: State):
idx = min(len(self.metric_sequence) - 1, state.timestamp.get(self.unit).value)
metric_val = self.metric_sequence[idx]
if self.dataloader_label == 'train':
state.train_metrics = state.train_metrics if state.train_metrics else {}
else:
state.eval_metrics[self.dataloader_label] = state.eval_metrics.get(self.dataloader_label, dict())
metric_tensor = torch.tensor(metric_val)
if self.device is not None:
self.device.tensor_to_device(metric_tensor)
raw_metric = self.metric_cls(**self.metric_args) # type: ignore
preds, targets = self._generate_dummy_metric_inputs(metric_val)
raw_metric.update(preds=preds, target=targets)
# assert for pyright error: "module_to_device" is not a known member of "None"
assert self.device is not None
self.device.module_to_device(raw_metric)
if self.dataloader_label == 'train':
state.train_metrics[self.monitor] = raw_metric
else:
state.eval_metrics[self.dataloader_label][self.monitor] = raw_metric
def eval_end(self, state: State, logger: Logger) -> None:
if self.dataloader_label == state.dataloader_label:
self._update_metrics(state)
def epoch_end(self, state: State, logger: Logger) -> None:
if self.dataloader_label == state.dataloader_label:
self._update_metrics(state)
def batch_end(self, state: State, logger: Logger) -> None:
if self.unit == TimeUnit.BATCH and self.dataloader_label == state.dataloader_label:
self._update_metrics(state)
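# Illustrative check (not used by the tests): with a target value of 0.5 the helper above
# yields 5 correct predictions out of 10 against an all-ones target, so an accuracy-style
# metric fed these inputs computes 0.5. The constructor arguments here are placeholders
# chosen only to instantiate the callback.
def _example_dummy_metric_inputs():
    callback = MetricSetterCallback(monitor='Accuracy', dataloader_label='eval',
                                    metric_cls=lambda: None, metric_sequence=[0.5],
                                    unit=TimeUnit.EPOCH)
    preds, targets = callback._generate_dummy_metric_inputs(0.5)
    assert int(preds.sum()) == 5 and int(targets.sum()) == 10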
| composer-dev | tests/metrics/metric_setter_callback.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import csv
from pathlib import Path
from unittest.mock import MagicMock
import pytest
import yaml
from torch.utils.data import DataLoader
from composer.loggers import MLFlowLogger
from composer.trainer import Trainer
from tests.common.datasets import RandomImageDataset
from tests.common.models import SimpleConvModel
def test_mlflow_experiment_set_up_correctly(tmp_path):
mlflow = pytest.importorskip('mlflow')
mlflow_uri = tmp_path / Path('my-test-mlflow-uri')
mlflow_exp_name = 'my-test-mlflow-exp'
mlflow_run_name = 'my-test-mlflow-run'
test_mlflow_logger = MLFlowLogger(experiment_name=mlflow_exp_name,
run_name=mlflow_run_name,
tracking_uri=mlflow_uri)
mock_state = MagicMock()
mock_state.run_name = 'dummy-run-name' # this run name should be unused.
mock_logger = MagicMock()
test_mlflow_logger.init(state=mock_state, logger=mock_logger)
run_info = mlflow.active_run().info
run_id = run_info.run_id
experiment_id = run_info.experiment_id
# Check uri set correctly.
assert mlflow_uri.exists()
# Check experiment name set correctly.
exp_cfg_file_path = mlflow_uri / Path(experiment_id) / Path('meta.yaml')
exp_cfg = yaml.safe_load(open(str(exp_cfg_file_path), 'r'))
expected_exp_name = mlflow_exp_name
actual_exp_name = exp_cfg['name']
assert actual_exp_name == expected_exp_name
# Check run_name set correctly.
run_cfg_file_path = mlflow_uri / Path(experiment_id) / Path(run_id) / Path('meta.yaml')
run_cfg = yaml.safe_load(open(str(run_cfg_file_path), 'r'))
expected_run_name = mlflow_run_name
actual_run_name = run_cfg['run_name']
assert actual_run_name == expected_run_name
# Check run ended.
test_mlflow_logger.post_close()
assert mlflow.active_run() is None
# Check new run can be created.
del test_mlflow_logger
test_mlflow_logger = MLFlowLogger(experiment_name=mlflow_exp_name, run_name=mlflow_run_name + '_new')
test_mlflow_logger.init(state=mock_state, logger=mock_logger)
test_mlflow_logger.post_close()
def test_mlflow_logging_works(tmp_path):
mlflow = pytest.importorskip('mlflow')
mlflow_uri = tmp_path / Path('my-test-mlflow-uri')
test_mlflow_logger = MLFlowLogger(tracking_uri=mlflow_uri)
dataset_size = 64
batch_size = 4
num_batches = 4
eval_interval = '1ba'
trainer = Trainer(model=SimpleConvModel(),
loggers=test_mlflow_logger,
train_dataloader=DataLoader(RandomImageDataset(size=dataset_size), batch_size),
eval_dataloader=DataLoader(RandomImageDataset(size=dataset_size), batch_size),
max_duration=f'{num_batches}ba',
eval_interval=eval_interval)
trainer.fit()
run_info = mlflow.active_run().info
run_id = run_info.run_id
experiment_id = run_info.experiment_id
run_file_path = mlflow_uri / Path(experiment_id) / Path(run_id)
# Test metrics logged.
for metric_name in [
'metrics/train/MulticlassAccuracy', 'metrics/eval/MulticlassAccuracy', 'metrics/eval/CrossEntropy',
'loss/train/total'
]:
metric_file = run_file_path / Path('metrics') / Path(metric_name)
with open(metric_file) as f:
csv_reader = csv.reader(f, delimiter=' ')
lines = [line for line in csv_reader]
assert len(lines) == num_batches
# Test params logged.
param_path = run_file_path / Path('params')
actual_params_list = [param_filepath.stem for param_filepath in param_path.iterdir()]
expected_params_list = ['num_cpus_per_node', 'node_name', 'num_nodes', 'rank_zero_seed']
assert set(expected_params_list) == set(actual_params_list)
| composer-dev | tests/loggers/test_mlflow_logger.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from unittest.mock import MagicMock
import pytest
import torch.utils.data
from _pytest.monkeypatch import MonkeyPatch
from tqdm import auto
from composer.core.time import Time, TimeUnit
from composer.loggers import ProgressBarLogger
from composer.trainer.trainer import Trainer
from composer.utils import dist
from tests.common import RandomClassificationDataset, SimpleModel
@pytest.mark.parametrize('world_size', [
pytest.param(1),
pytest.param(2, marks=pytest.mark.world_size(2)),
])
@pytest.mark.parametrize(
'max_duration',
[Time.from_timestring('2ep'),
Time.from_timestring('100sp'),
Time.from_timestring('5ba')],
)
def test_progress_bar_logger(max_duration: Time[int], monkeypatch: MonkeyPatch, world_size: int):
mock_tqdms_train = []
mock_tqdms_eval = []
def get_mock_tqdm(bar_format: str, *args: object, **kwargs: object):
del args, kwargs # unused
mock_tqdm = MagicMock()
mock_tqdm.n = 0
# store for testing later
if 'train' in bar_format:
mock_tqdms_train.append(mock_tqdm)
if 'eval' in bar_format:
mock_tqdms_eval.append(mock_tqdm)
return mock_tqdm
model = SimpleModel()
monkeypatch.setattr(auto, 'tqdm', get_mock_tqdm)
eval_interval = 1
eval_subset_num_batches = 2
batch_size = 10
train_dataset = RandomClassificationDataset()
eval_dataset = RandomClassificationDataset()
trainer = Trainer(
model=model,
max_duration=max_duration,
eval_interval=eval_interval,
progress_bar=True,
train_dataloader=torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size,
sampler=dist.get_sampler(train_dataset),
),
eval_dataloader=torch.utils.data.DataLoader(
eval_dataset,
batch_size=batch_size,
            sampler=dist.get_sampler(eval_dataset),
),
eval_subset_num_batches=eval_subset_num_batches,
)
trainer.fit()
if dist.get_local_rank() != 0:
return
# either have #epoch pbars, or only have 1 train pbar
if max_duration.unit == TimeUnit.EPOCH:
assert len(mock_tqdms_train) == max_duration.value
else:
assert len(mock_tqdms_train) == 1
# test train pbar
if max_duration.unit == TimeUnit.EPOCH:
for mt in mock_tqdms_train:
assert trainer.state.dataloader_len is not None
assert mt.update.call_count == int(trainer.state.dataloader_len)
elif max_duration.unit == TimeUnit.BATCH:
for mt in mock_tqdms_train:
assert mt.update.call_count == max_duration.value
elif max_duration.unit == TimeUnit.SAMPLE:
for mt in mock_tqdms_train:
assert mt.update.call_count == max_duration.value // batch_size / world_size
# test eval pbar
for mt in mock_tqdms_eval:
assert mt.update.call_count == eval_subset_num_batches
def test_progress_bar_warning():
with pytest.warns(Warning):
Trainer(model=SimpleModel(), loggers=ProgressBarLogger())
| composer-dev | tests/loggers/test_progress_bar_logger.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import math
import re
from pathlib import Path
import pytest
from torch.utils.data import DataLoader
from torchmetrics import MetricCollection
from composer.callbacks import SpeedMonitor
from composer.core import Evaluator
from composer.loggers import ConsoleLogger
from composer.loggers.console_logger import NUM_EVAL_LOGGING_EVENTS
from composer.trainer import Trainer
from tests.common import RandomClassificationDataset, SimpleModel
@pytest.fixture
def console_logger_test_file_path(tmp_path) -> str:
return str(Path(tmp_path) / Path('console_test'))
@pytest.fixture
def console_logger_test_stream(console_logger_test_file_path):
return open(console_logger_test_file_path, 'w')
@pytest.mark.filterwarnings('ignore:Cannot split tensor of length .* into batches of size .*:UserWarning')
@pytest.mark.parametrize('log_interval_unit', ['ba', 'ep'])
@pytest.mark.parametrize('max_duration_unit', ['ba', 'ep'])
@pytest.mark.parametrize('log_interval', [1, 2, 3])
@pytest.mark.parametrize('max_duration', [8, 9, 10, 11])
def test_console_logger_interval(console_logger_test_stream, console_logger_test_file_path, log_interval, max_duration,
log_interval_unit, max_duration_unit):
batch_size = 4
dataset_size = 17
batches_per_epoch = math.ceil(dataset_size / batch_size)
model = SimpleModel()
trainer = Trainer(
model=model,
console_stream=console_logger_test_stream,
console_log_interval=f'{log_interval}{log_interval_unit}',
log_to_console=True,
progress_bar=False,
train_dataloader=DataLoader(RandomClassificationDataset(size=dataset_size), batch_size=batch_size),
max_duration=f'{max_duration}{max_duration_unit}',
)
trainer.fit()
console_logger_test_stream.flush()
console_logger_test_stream.close()
with open(console_logger_test_file_path, 'r') as f:
lines = f.readlines()
    # Build a regular expression that matches any line containing "Train",
    # optionally followed by spaces and a colon.
reg_exp = re.compile('Train *:*')
actual_num_log_lines = sum(
[1 if bool(reg_exp.search(line)) and ('trainer/' not in line and 'epoch' not in line) else 0 for line in lines])
assert model.train_metrics is not None
num_metrics = len(list(model.train_metrics.keys())) if isinstance(model.train_metrics, MetricCollection) else 1
num_losses = 1
num_metrics_and_losses_per_logging_event = num_metrics + num_losses
if log_interval_unit == max_duration_unit:
expected_num_logging_events = max_duration // log_interval
elif log_interval_unit == 'ba' and max_duration_unit == 'ep':
expected_num_logging_events = (batches_per_epoch * max_duration) // log_interval
    else:  # for the case where log_interval_unit == 'ep' and max_duration_unit == 'ba'.
total_epochs = max_duration // batches_per_epoch
expected_num_logging_events = total_epochs // log_interval
if log_interval != 1:
expected_num_logging_events += 1 # Because we automatically log the first batch or epoch.
expected_num_lines = expected_num_logging_events * num_metrics_and_losses_per_logging_event
assert actual_num_log_lines == expected_num_lines
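# Worked instance of the interval arithmetic above (illustrative): logging every 2 epochs
# while training for 9 batches over a 17-sample dataset (5 batches per epoch) schedules
# (9 // 5) // 2 == 0 events, and the automatic first log brings the expected total to 1.
def _example_expected_logging_events():
    batches_per_epoch, max_duration_ba, log_interval_ep = 5, 9, 2
    scheduled = (max_duration_ba // batches_per_epoch) // log_interval_ep  # 0
    return scheduled + 1  # the first batch/epoch is always logged when the interval != 1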
@pytest.mark.parametrize('eval_interval_unit', ['ba', 'ep'])
@pytest.mark.parametrize('max_duration_unit', ['ba', 'ep'])
@pytest.mark.parametrize('eval_interval', [2, 3])
@pytest.mark.parametrize('max_duration', [8, 9])
@pytest.mark.parametrize('pass_in_fit', [True, False])
def test_console_logger_fit(
console_logger_test_stream,
console_logger_test_file_path,
eval_interval,
max_duration,
eval_interval_unit,
max_duration_unit,
pass_in_fit,
):
batch_size = 4
dataset_size = 16
eval_batch_size = 2
eval_dataset_size = 24
batches_per_epoch = math.ceil(dataset_size / batch_size)
model = SimpleModel()
trainer = Trainer(
model=model,
console_stream=console_logger_test_stream,
eval_interval=f'{eval_interval}{eval_interval_unit}',
log_to_console=True,
progress_bar=False,
train_dataloader=DataLoader(RandomClassificationDataset(size=dataset_size), batch_size=batch_size),
eval_dataloader=DataLoader(RandomClassificationDataset(size=eval_dataset_size), batch_size=eval_batch_size),
max_duration=f'{max_duration}{max_duration_unit}',
)
if pass_in_fit:
eval_dataloader = DataLoader(RandomClassificationDataset(size=eval_dataset_size), batch_size=eval_batch_size)
trainer.fit(
eval_dataloader=eval_dataloader,
reset_time=True,
eval_interval=f'{eval_interval}{eval_interval_unit}',
)
else:
trainer.fit()
console_logger_test_stream.flush()
console_logger_test_stream.close()
with open(console_logger_test_file_path, 'r') as f:
lines = f.readlines()
    # Make a regular expression that matches any line containing "Eval" followed by a colon.
eval_reg_exp = re.compile('Eval *:*')
actual_num_eval_log_lines = sum([1 if bool(eval_reg_exp.search(line)) else 0 for line in lines])
assert model.val_metrics is not None
num_eval_metrics_per_event = len(list(model.val_metrics.keys())) if isinstance(model.val_metrics,
MetricCollection) else 1
num_eval_losses = 0
num_eval_metrics_and_losses_per_logging_event = num_eval_metrics_per_event + num_eval_losses
if eval_interval_unit == max_duration_unit:
expected_num_eval_logging_events, remainder = divmod(max_duration, eval_interval)
elif eval_interval_unit == 'ba' and max_duration_unit == 'ep':
expected_num_eval_logging_events, remainder = divmod((batches_per_epoch * max_duration), eval_interval)
    else:  # for the case where eval_interval_unit == 'ep' and max_duration_unit == 'ba'.
batches_per_logging_event = batches_per_epoch * eval_interval
expected_num_eval_logging_events, remainder = divmod(max_duration, batches_per_logging_event)
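    # For example, with eval_interval=3ba and max_duration=8ba, divmod(8, 3) gives 2 full eval events
    # with a remainder of 2; the remainder adds one more event at fit_end below, for 3 in total.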
num_progress_events_due_to_eval_interval = NUM_EVAL_LOGGING_EVENTS
num_eval_progress_lines_per_eval_event = num_progress_events_due_to_eval_interval
    # An eval logging event always happens at fit_end, so if one would not normally fall on the
    # last batch or epoch, add an extra event to the expected count.
if remainder:
expected_num_eval_logging_events += 1
expected_num_eval_lines = expected_num_eval_logging_events * (num_eval_metrics_and_losses_per_logging_event +
num_eval_progress_lines_per_eval_event)
assert actual_num_eval_log_lines == expected_num_eval_lines
@pytest.mark.parametrize('eval_interval_unit', ['ba', 'ep'])
@pytest.mark.parametrize('max_duration_unit', ['ba', 'ep'])
@pytest.mark.parametrize('eval_interval', [2, 3])
@pytest.mark.parametrize('max_duration', [8, 9])
def test_console_logger_eval(
console_logger_test_stream,
console_logger_test_file_path,
eval_interval,
max_duration,
eval_interval_unit,
max_duration_unit,
):
batch_size = 4
dataset_size = 16
eval_batch_size = 2
eval_dataset_size = 24
batches_per_epoch = math.ceil(dataset_size / batch_size)
model = SimpleModel()
trainer = Trainer(
model=model,
console_stream=console_logger_test_stream,
eval_interval=f'{eval_interval}{eval_interval_unit}',
log_to_console=True,
progress_bar=False,
train_dataloader=DataLoader(RandomClassificationDataset(size=dataset_size), batch_size=batch_size),
eval_dataloader=DataLoader(RandomClassificationDataset(size=eval_dataset_size), batch_size=eval_batch_size),
max_duration=f'{max_duration}{max_duration_unit}',
)
trainer.eval(eval_dataloader=Evaluator(label='trainer.eval_dataloader',
dataloader=DataLoader(RandomClassificationDataset(size=eval_dataset_size),
batch_size=eval_batch_size)),)
console_logger_test_stream.flush()
console_logger_test_stream.close()
with open(console_logger_test_file_path, 'r') as f:
lines = f.readlines()
    # Make a regular expression that matches any line containing "Eval" followed by a colon.
eval_reg_exp = re.compile('Eval *:*')
actual_num_eval_log_lines = sum([1 if bool(eval_reg_exp.search(line)) else 0 for line in lines])
assert model.val_metrics is not None
num_eval_metrics_per_event = len(list(model.val_metrics.keys())) if isinstance(model.val_metrics,
MetricCollection) else 1
if eval_interval_unit == max_duration_unit:
expected_num_eval_logging_events, remainder = divmod(max_duration, eval_interval)
elif eval_interval_unit == 'ba' and max_duration_unit == 'ep':
expected_num_eval_logging_events, remainder = divmod((batches_per_epoch * max_duration), eval_interval)
    else:  # for the case where eval_interval_unit == 'ep' and max_duration_unit == 'ba'.
batches_per_logging_event = batches_per_epoch * eval_interval
expected_num_eval_logging_events, remainder = divmod(max_duration, batches_per_logging_event)
num_progress_events_due_to_eval_interval = NUM_EVAL_LOGGING_EVENTS
num_eval_progress_lines_per_eval_event = num_progress_events_due_to_eval_interval
    # An eval logging event always happens at fit_end, so if one would not normally fall on the
    # last batch or epoch, add an extra event to the expected count.
if remainder:
expected_num_eval_logging_events += 1
expected_num_eval_logging_events_for_trainer_eval_call = 1
expected_num_eval_lines = expected_num_eval_logging_events_for_trainer_eval_call * (
num_eval_progress_lines_per_eval_event + num_eval_metrics_per_event)
assert actual_num_eval_log_lines == expected_num_eval_lines
def test_log_to_console_and_progress_bar_warning():
with pytest.warns(Warning):
Trainer(model=SimpleModel(), log_to_console=True, progress_bar=True)
with pytest.warns(Warning):
Trainer(model=SimpleModel(), loggers=ConsoleLogger())
@pytest.mark.parametrize('log_interval_unit', ['ba', 'ep'])
@pytest.mark.parametrize('max_duration_unit', ['ba', 'ep'])
@pytest.mark.parametrize('log_interval', [1])
@pytest.mark.parametrize('max_duration', [8])
def test_console_logger_with_a_callback(console_logger_test_stream, console_logger_test_file_path, log_interval,
max_duration, log_interval_unit, max_duration_unit):
batch_size = 4
dataset_size = 16
batches_per_epoch = math.ceil(dataset_size / batch_size)
model = SimpleModel()
trainer = Trainer(model=model,
console_stream=console_logger_test_stream,
console_log_interval=f'{log_interval}{log_interval_unit}',
log_to_console=True,
progress_bar=False,
callbacks=SpeedMonitor(),
train_dataloader=DataLoader(RandomClassificationDataset(size=dataset_size),
batch_size=batch_size),
max_duration=f'{max_duration}{max_duration_unit}')
trainer.fit()
console_logger_test_stream.flush()
console_logger_test_stream.close()
if log_interval_unit == max_duration_unit:
expected_num_logging_events = max_duration // log_interval
elif log_interval_unit == 'ba' and max_duration_unit == 'ep':
expected_num_logging_events = (batches_per_epoch * max_duration) // log_interval
    else:  # for the case where log_interval_unit == 'ep' and max_duration_unit == 'ba'.
total_epochs = max_duration // batches_per_epoch
expected_num_logging_events = total_epochs // log_interval
if log_interval != 1:
expected_num_logging_events += 1 # Because we automatically log the first batch or epoch.
with open(console_logger_test_file_path, 'r') as f:
lines = f.readlines()
    # Make a regular expression that matches any line containing "Train wall_clock".
wallclock_reg_exp = re.compile('Train wall_clock*')
actual_num_wallclock_lines = sum([1 if bool(wallclock_reg_exp.search(line)) else 0 for line in lines])
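    # SpeedMonitor is assumed to log three wall_clock series per logging event
    # (e.g. wall_clock/train, wall_clock/val, wall_clock/total), hence 3 expected lines per event.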
num_wallclock_lines_per_log_event = 3
expected_wallclock_lines = num_wallclock_lines_per_log_event * expected_num_logging_events
assert actual_num_wallclock_lines == expected_wallclock_lines
| composer-dev | tests/loggers/test_console_logger.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import imghdr
import os
import zipfile
from collections import defaultdict
from json import JSONDecoder
from pathlib import Path
from typing import Sequence
import pytest
import torch
from torch.utils.data import DataLoader
from composer.loggers import CometMLLogger
from composer.trainer import Trainer
from tests.common import RandomClassificationDataset, SimpleModel
@pytest.fixture
def comet_offline_directory(tmp_path):
return str(tmp_path / Path('my_cometml_runs'))
@pytest.fixture
def comet_logger(monkeypatch, comet_offline_directory):
comet_ml = pytest.importorskip('comet_ml', reason='comet_ml is optional')
monkeypatch.setattr(comet_ml, 'Experiment', comet_ml.OfflineExperiment)
from composer.loggers import CometMLLogger
# Set offline directory.
os.environ['COMET_OFFLINE_DIRECTORY'] = comet_offline_directory
comet_logger = CometMLLogger()
return comet_logger
def test_comet_ml_log_image_saves_images(comet_logger: CometMLLogger, comet_offline_directory: str):
assert isinstance(comet_offline_directory, str)
# We group all the image size variants into one test because calling comet_experiment.end() is slow
image_variants = [
(torch.rand(4), False), # 1D image
(torch.rand(4, 4), False), # 2D image
(torch.rand(4, 4, 3), True), # with channels, channels last
(torch.rand(3, 4, 4), False), # with channels, not channels last
(torch.rand(2, 4, 4, 3), True), # multiple images in tensor
([torch.rand(4, 4, 3), torch.rand(4, 4, 3)], True) # multiple images in list
]
expected_num_images_total = 0
for (images, channels_last) in image_variants:
# Count expected images and generate numpy arrays from torch tensors.
if isinstance(images, Sequence):
expected_num_images = len(images)
np_images = [image.numpy() for image in images]
else:
expected_num_images = 1 if images.ndim < 4 else images.shape[0]
np_images = images.numpy()
# Log images from torch tensors and numpy arrays.
comet_logger.log_images(images, channels_last=channels_last)
comet_logger.log_images(np_images, channels_last=channels_last)
expected_num_images *= 2 # One set of torch tensors, one set of numpy arrays
expected_num_images_total += expected_num_images
comet_logger.post_close()
# Extract all files saved to comet offline directory.
assert comet_logger.experiment is not None
comet_exp_dump_filepath = str(Path(comet_offline_directory) / Path(comet_logger.experiment.id).with_suffix('.zip'))
zf = zipfile.ZipFile(comet_exp_dump_filepath)
zf.extractall(comet_offline_directory)
# Count the number of files that are images.
actual_num_images = 0
for filename in Path(comet_offline_directory).iterdir():
file_path = str(Path(comet_offline_directory) / Path(filename))
if imghdr.what(file_path) == 'png':
actual_num_images += 1
assert actual_num_images == expected_num_images_total
def test_comet_ml_log_image_saves_images_with_masks(comet_logger: CometMLLogger, comet_offline_directory: str):
assert isinstance(comet_offline_directory, str)
# We group all the image size variants into one test because calling comet_experiment.end() is slow
image_variants = [
# channels last
# single image, single mask
(torch.rand(4, 4, 3), {
'pred': torch.randint(0, 10, (4, 4))
}, True),
# multiple images, masks in tensor
(torch.rand(2, 4, 4, 3), {
'pred': torch.randint(0, 10, (2, 4, 4))
}, True),
        # multiple images, masks in a list
(torch.rand(2, 4, 4, 3), {
'pred': 2 * [torch.randint(0, 10, (4, 4))]
}, True),
# multiple images, multiple masks
(torch.rand(2, 4, 4, 3), {
'pred': torch.randint(0, 10, (2, 4, 4)),
'pred2': torch.randint(0, 10, (2, 4, 4))
}, True),
# not channels last
# single image, single mask
(torch.rand(3, 4, 4), {
'pred': torch.randint(0, 10, (4, 4))
}, False),
# multiple images, masks in tensor
(torch.rand(2, 3, 4, 4), {
'pred': torch.randint(0, 10, (2, 4, 4))
}, False)
]
expected_num_masks_and_images_total = 0
for (images, masks, channels_last) in image_variants:
# Count expected images and generate numpy arrays from torch tensors.
num_masks = len(masks.keys())
num_images = images.shape[0] if images.ndim == 4 else 1
num_additional_images_per_mask = 2 # Mask overlaid on image + mask by itself.
expected_num_masks = num_images * num_additional_images_per_mask * num_masks
expected_num_masks_and_images = num_images + expected_num_masks
expected_num_masks_and_images_total += expected_num_masks_and_images
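        # e.g. a (2, 4, 4, 3) channels-last tensor with a single mask gives num_images=2 and
        # expected_num_masks = 2 * 2 * 1 = 4, so 6 expected files for that variant.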
# Log images from torch tensors and numpy arrays.
comet_logger.log_images(images, masks=masks, channels_last=channels_last)
comet_logger.post_close()
# Extract all files saved to comet offline directory.
assert comet_logger.experiment is not None
comet_exp_dump_filepath = str(Path(comet_offline_directory) / Path(comet_logger.experiment.id).with_suffix('.zip'))
zf = zipfile.ZipFile(comet_exp_dump_filepath)
zf.extractall(comet_offline_directory)
# Count the number of files that are images.
actual_num_images = 0
for filename in Path(comet_offline_directory).iterdir():
file_path = str(Path(comet_offline_directory) / Path(filename))
if imghdr.what(file_path) == 'png':
actual_num_images += 1
assert actual_num_images == expected_num_masks_and_images_total
def test_comet_ml_logging_train_loop(monkeypatch, tmp_path):
comet_ml = pytest.importorskip('comet_ml', reason='comet_ml is optional')
monkeypatch.setattr(comet_ml, 'Experiment', comet_ml.OfflineExperiment)
from composer.loggers import CometMLLogger
# Set offline directory.
offline_directory = str(tmp_path / Path('.my_cometml_runs'))
os.environ['COMET_OFFLINE_DIRECTORY'] = offline_directory
comet_logger = CometMLLogger()
trainer = Trainer(
model=SimpleModel(),
train_dataloader=DataLoader(RandomClassificationDataset()),
train_subset_num_batches=2,
max_duration='2ep',
loggers=comet_logger,
)
trainer.fit()
run_name = trainer.state.run_name
del trainer
assert comet_logger.experiment is not None
assert comet_logger.experiment.ended
# Open, decompress, decode, and extract offline dump of metrics.
comet_exp_dump_filepath = str(Path(offline_directory) / Path(comet_logger.experiment.id).with_suffix('.zip'))
zf = zipfile.ZipFile(comet_exp_dump_filepath)
comet_logs_path = zf.extract('messages.json', path=offline_directory)
jd = JSONDecoder()
msg_type_to_msgs = defaultdict(list)
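    # messages.json in the offline dump contains one JSON message per line; each message carries a
    # 'type' and a 'payload', so bucket the payloads by type for the assertions below.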
with open(comet_logs_path) as f:
for line in f.readlines():
parsed_line = jd.decode(line)
msg_type_to_msgs[parsed_line['type']].append(parsed_line['payload'])
# Check that init set the run name
assert comet_logger.name == run_name
assert comet_logger.experiment.name == run_name
# Check that basic metrics appear in the comet logs
assert len([
metric_msg for metric_msg in msg_type_to_msgs['metric_msg']
if metric_msg['metric']['metricName'] == 'trainer/epoch'
]) == 2
# Check that basic params appear in the comet logs
assert len([
param_msg for param_msg in msg_type_to_msgs['parameter_msg']
if param_msg['param']['paramName'] == 'rank_zero_seed'
]) > 0
def test_comet_ml_log_metrics_and_hyperparameters(monkeypatch, tmp_path):
"""Check metrics logged with CometMLLogger are properly written to offline dump."""
pytest.importorskip('comet_ml', reason='comet_ml is optional')
import comet_ml
# Set some dummy log values.
steps = [0, 1, 2]
metric_values = [0.1, 0.4, 0.7]
metric_name = 'my_test_metric'
param_names = ['my_cool_parameter1', 'my_cool_parameter2']
param_values = [10, 3]
# Set offline directory.
offline_directory = str(tmp_path / Path('.my_cometml_runs'))
os.environ['COMET_OFFLINE_DIRECTORY'] = offline_directory
# Monkeypatch Experiment with OfflineExperiment to avoid uploading to CometML and
    # avoid needing an API key.
monkeypatch.setattr(comet_ml, 'Experiment', comet_ml.OfflineExperiment)
from composer.loggers import CometMLLogger
# Log dummy values with CometMLLogger.
comet_logger = CometMLLogger()
comet_logger.log_hyperparameters(dict(zip(param_names, param_values)))
for step, metric_value in zip(steps, metric_values):
comet_logger.log_metrics({'my_test_metric': metric_value}, step=step)
# Simulate the post_close call to end the CometML experiment
comet_logger.post_close()
assert comet_logger.experiment is not None
# Check that calling post_close ended the comet experiment
assert comet_logger.experiment.ended
# Open, decompress, decode, and extract offline dump of metrics.
comet_exp_dump_filepath = str(Path(offline_directory) / Path(comet_logger.experiment.id).with_suffix('.zip'))
zf = zipfile.ZipFile(comet_exp_dump_filepath)
comet_logs_path = zf.extract('messages.json', path=offline_directory)
jd = JSONDecoder()
created_from_found = False
expected_created_from_log = {'key': 'Created from', 'val': 'mosaicml-composer'}
metric_msgs = []
param_msgs = []
with open(comet_logs_path) as f:
for line in f.readlines():
comet_msg = jd.decode(line)
if comet_msg['type'] == 'ws_msg' and comet_msg['payload'].get('log_other', {}) == expected_created_from_log:
created_from_found = True
if (comet_msg['type'] == 'metric_msg') and (comet_msg['payload']['metric']['metricName']
== 'my_test_metric'):
metric_msgs.append(comet_msg['payload']['metric'])
if comet_msg['type'] == 'parameter_msg' and (
comet_msg['payload']['param']['paramName'].startswith('my_cool')):
param_msgs.append(comet_msg['payload']['param'])
    # Check that the "Created from" key was properly set.
assert created_from_found
# Assert dummy metrics input to log_metrics are the same as
# those written to offline dump.
assert [msg['metricValue'] for msg in metric_msgs] == metric_values
assert [msg['step'] for msg in metric_msgs] == steps
assert all([msg['metricName'] == metric_name for msg in metric_msgs])
# Assert dummy params input to log_hyperparameters are the same as
# those written to offline dump
assert [msg['paramValue'] for msg in param_msgs] == param_values
assert [msg['paramName'] for msg in param_msgs] == param_names
| composer-dev | tests/loggers/test_cometml_logger.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
| composer-dev | tests/loggers/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import contextlib
import multiprocessing
import os
import pathlib
import random
import shutil
import time
from typing import Any, Callable, Dict, Optional, Union
from unittest.mock import patch
import pytest
from composer.core import Event, State
from composer.loggers import Logger, RemoteUploaderDownloader
from composer.utils.object_store.object_store import ObjectStore
class DummyObjectStore(ObjectStore):
"""Dummy ObjectStore implementation that is backed by a local directory."""
def __init__(self, dir: Optional[pathlib.Path] = None, always_fail: bool = False, **kwargs: Dict[str, Any]) -> None:
self.dir = str(dir) if dir is not None else kwargs['bucket']
self.always_fail = always_fail
assert isinstance(self.dir, str)
os.makedirs(self.dir, exist_ok=True)
def get_uri(self, object_name: str) -> str:
return 'local://' + object_name
def _get_abs_path(self, object_name: str):
assert isinstance(self.dir, str)
return os.path.abspath(self.dir + '/' + object_name)
def upload_object(
self,
object_name: str,
filename: Union[str, pathlib.Path],
callback: Optional[Callable[[int, int], None]] = None,
) -> None:
if self.always_fail and object_name != '.credentials_validated_successfully':
raise RuntimeError('Crash because you set always_fail to true!')
time.sleep(random.random() * 0.5) # random sleep to simulate random network latency
shutil.copy2(filename, self._get_abs_path(object_name))
def download_object(self,
object_name: str,
filename: Union[str, pathlib.Path],
overwrite: bool = False,
callback: Optional[Callable[[int, int], None]] = None) -> None:
if self.always_fail:
raise RuntimeError('Crash because you set always_fail to true!')
if not overwrite and os.path.exists(filename):
raise FileExistsError
return shutil.copy2(self._get_abs_path(object_name), filename)
def get_object_size(self, object_name: str) -> int:
size = os.stat(self._get_abs_path(object_name)).st_size
return size
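# Shared helper for the RemoteUploaderDownloader tests below: it patches the S3 backend with
# DummyObjectStore, uploads a file (and optionally a second one under the same remote name to
# exercise the overwrite semantics), then verifies the remote URI, the downloaded contents, and
# the file written to the local "remote" directory.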
def object_store_test_helper(
tmp_path: pathlib.Path,
dummy_state: State,
use_procs: bool = False,
overwrite: bool = True,
overwrite_delay: bool = False,
event_to_test: Event = Event.BATCH_END,
):
remote_dir = str(tmp_path / 'object_store')
os.makedirs(remote_dir, exist_ok=True)
# Patching does not work when using multiprocessing with spawn, so we also
# patch to use fork
fork_context = multiprocessing.get_context('fork')
with patch('composer.loggers.remote_uploader_downloader.S3ObjectStore', DummyObjectStore):
with patch('composer.loggers.remote_uploader_downloader.multiprocessing.get_context', lambda _: fork_context):
remote_uploader_downloader = RemoteUploaderDownloader(
                bucket_uri=f's3://{remote_dir}',
backend_kwargs={
'dir': remote_dir,
},
num_concurrent_uploads=1,
use_procs=use_procs,
upload_staging_folder=str(tmp_path / 'staging_folder'),
num_attempts=1,
)
logger = Logger(dummy_state, destinations=[remote_uploader_downloader])
remote_file_name = 'remote_file_name'
remote_uploader_downloader.run_event(Event.INIT, dummy_state, logger)
            file_path = os.path.join(tmp_path, 'file')
with open(file_path, 'w+') as f:
f.write('1')
logger.upload_file(remote_file_name, file_path, overwrite=overwrite)
            file_path_2 = os.path.join(tmp_path, 'file_2')
with open(file_path_2, 'w+') as f:
f.write('2')
post_close_ctx = contextlib.nullcontext()
if not overwrite:
                # If not `overwrite_delay`, the second `logger.upload_file` call will raise a FileExistsError,
                # because the upload will not even be enqueued.
                # Otherwise, with a sufficient delay, the first file will be uploaded and cleared from the queue,
                # and the second upload causes a FileExistsError to be raised on Event.BATCH_END or Event.EPOCH_END.
                # A 2 second sleep should be enough here -- the DummyObjectStore will block for at most 0.5 seconds,
                # and the RemoteUploaderDownloader polls every 0.1 seconds.
if overwrite_delay:
post_close_ctx = pytest.warns(
RuntimeWarning,
match=
r'The following objects may not have been uploaded, likely due to a worker crash: remote_file_name'
)
# Wait for the first upload to go through
time.sleep(2)
# Do the second upload -- it should enqueue
logger.upload_file(remote_file_name, file_path_2, overwrite=overwrite)
# Give it some time to finish the second upload
# (Since the upload is really a file copy, it should be fast)
time.sleep(2)
                    # Then, the crash is detected on the next batch end / epoch end. The parent re-raises the
                    # fatal exception thrown by the worker, so this is a FileExistsError rather than a generic
                    # RuntimeError.
with pytest.raises(
FileExistsError,
match=
f'Object local://{remote_file_name} already exists, but allow_overwrite was set to False.'):
remote_uploader_downloader.run_event(event_to_test, dummy_state, logger)
else:
# Otherwise, if no delay, it should error when being enqueued
with pytest.raises(
FileExistsError,
match=f'Object {remote_file_name} was already enqueued to be uploaded, but overwrite=False.'
):
logger.upload_file(remote_file_name, file_path_2, overwrite=overwrite)
remote_uploader_downloader.close(dummy_state, logger)
with post_close_ctx:
remote_uploader_downloader.post_close()
# verify upload uri for file is correct
upload_uri = remote_uploader_downloader.get_uri_for_file(remote_file_name)
expected_upload_uri = f'local://{remote_file_name}'
assert upload_uri == expected_upload_uri
# Test downloading file
download_path = os.path.join(tmp_path, 'download')
remote_uploader_downloader.download_file(remote_file_name, download_path)
with open(download_path, 'r') as f:
                assert f.read() == ('1' if not overwrite else '2')
# now assert that we have a dummy file in the remote folder
remote_file = os.path.join(str(remote_dir), remote_file_name)
# Verify file contains the correct value
with open(remote_file, 'r') as f:
                assert f.read() == ('1' if not overwrite else '2')
def test_remote_uploader_downloader(tmp_path: pathlib.Path, dummy_state: State):
object_store_test_helper(tmp_path=tmp_path, dummy_state=dummy_state, use_procs=False)
def test_remote_uploader_downloader_use_procs(tmp_path: pathlib.Path, dummy_state: State):
object_store_test_helper(tmp_path=tmp_path, dummy_state=dummy_state, use_procs=True)
@pytest.mark.filterwarnings(r'ignore:((.|\n)*)FileExistsError((.|\n)*):pytest.PytestUnhandledThreadExceptionWarning')
@pytest.mark.parametrize('overwrite_delay', [True, False])
@pytest.mark.parametrize('event_to_test', [Event.BATCH_END, Event.EPOCH_END])
def test_remote_uploader_downloader_no_overwrite(tmp_path: pathlib.Path, dummy_state: State, overwrite_delay: bool,
event_to_test: Event):
if not overwrite_delay and event_to_test == Event.EPOCH_END:
pytest.skip('event_to_test does not affect the overwrite_delay=False part of the test')
object_store_test_helper(tmp_path=tmp_path,
dummy_state=dummy_state,
overwrite=False,
overwrite_delay=overwrite_delay,
event_to_test=event_to_test)
@pytest.mark.parametrize('use_procs', [True, False])
def test_race_with_overwrite(tmp_path: pathlib.Path, use_procs: bool, dummy_state: State):
# Test a race condition with the object store logger where multiple files with the same name are logged in rapid succession
# The latest version should be the one that is uploaded
# Setup: Prep files
num_files = 10
os.makedirs(tmp_path / 'samples')
for i in range(num_files):
with open(tmp_path / 'samples' / f'sample_{i}', 'w+') as f:
f.write(str(i))
# Patching does not work when using multiprocessing with spawn, so we also
# patch to use fork
fork_context = multiprocessing.get_context('fork')
with patch('composer.loggers.remote_uploader_downloader.S3ObjectStore', DummyObjectStore):
with patch('composer.loggers.remote_uploader_downloader.multiprocessing.get_context', lambda _: fork_context):
# Create the object store logger
remote_uploader_downloader = RemoteUploaderDownloader(
bucket_uri=f"s3://{tmp_path}/'object_store_backend",
backend_kwargs={
'dir': tmp_path / 'object_store_backend',
},
num_concurrent_uploads=4,
use_procs=use_procs,
upload_staging_folder=str(tmp_path / 'staging_folder'),
num_attempts=1,
)
logger = Logger(dummy_state, destinations=[remote_uploader_downloader])
remote_uploader_downloader.run_event(Event.INIT, dummy_state, logger)
# Queue the files for upload in rapid succession to the same remote_file_name
remote_file_name = 'remote_file_name'
for i in range(num_files):
file_path = tmp_path / 'samples' / f'sample_{i}'
remote_uploader_downloader.upload_file(dummy_state, remote_file_name, file_path, overwrite=True)
# Shutdown the logger. This should wait until all objects are uploaded
remote_uploader_downloader.close(dummy_state, logger=logger)
remote_uploader_downloader.post_close()
            # Assert that the file called "remote_file_name" has the content of the last uploaded file -- i.e. `num_files` - 1
destination = tmp_path / 'downloaded_file'
remote_uploader_downloader.download_file(remote_file_name,
str(destination),
overwrite=False,
progress_bar=False)
with open(destination, 'r') as f:
assert f.read() == str(num_files - 1)
@pytest.mark.filterwarnings(r'ignore:Exception in thread:pytest.PytestUnhandledThreadExceptionWarning')
def test_close_on_failure(tmp_path: pathlib.Path, dummy_state: State):
"""Test that .close() and .post_close() does not hang even when a worker crashes."""
with patch('composer.loggers.remote_uploader_downloader.S3ObjectStore', DummyObjectStore):
# Create the object store logger
remote_uploader_downloader = RemoteUploaderDownloader(
bucket_uri=f"s3://{tmp_path}/'object_store_backend",
backend_kwargs={
'dir': tmp_path / 'object_store_backend',
'always_fail': True,
},
num_concurrent_uploads=1,
use_procs=False,
upload_staging_folder=str(tmp_path / 'staging_folder'),
num_attempts=1,
)
# Enqueue a file. Because `always_fail` is True, it will cause the worker to crash
tmpfile_path = tmp_path / 'dummy_file'
with open(tmpfile_path, 'w+') as f:
f.write('hi')
logger = Logger(dummy_state, destinations=[remote_uploader_downloader])
remote_uploader_downloader.run_event(Event.INIT, dummy_state, logger)
logger.upload_file('dummy_remote_file_name', tmpfile_path)
# Wait enough time for the file to be enqueued
time.sleep(0.5)
# Assert that the worker crashed
with pytest.raises(RuntimeError):
remote_uploader_downloader.run_event(Event.EPOCH_END, dummy_state, logger)
# Enqueue the file again to ensure that the buffers are dirty
logger.upload_file('dummy_remote_file_name', tmpfile_path)
# Shutdown the logger. This should not hang or cause any exception
remote_uploader_downloader.close(dummy_state, logger=logger)
with pytest.warns(
RuntimeWarning,
match=
r'The following objects may not have been uploaded, likely due to a worker crash: dummy_remote_file_name'
):
remote_uploader_downloader.post_close()
def test_valid_backend_names():
valid_backend_names = ['s3', 'libcloud', 'sftp']
with patch('composer.loggers.remote_uploader_downloader.S3ObjectStore') as _, \
patch('composer.loggers.remote_uploader_downloader.SFTPObjectStore') as _, \
patch('composer.loggers.remote_uploader_downloader.LibcloudObjectStore') as _:
for name in valid_backend_names:
remote_uploader_downloader = RemoteUploaderDownloader(bucket_uri=f'{name}://not-a-real-bucket')
# Access the remote_backend property so that it is built
_ = remote_uploader_downloader.remote_backend
with pytest.raises(ValueError):
remote_uploader_downloader = RemoteUploaderDownloader(bucket_uri='magicloud://not-a-real-bucket')
# Access the remote_backend property so that it is built
_ = remote_uploader_downloader.remote_backend
# We put this filter here because when the worker raises an exception, pytest throws a warning which fails the test.
@pytest.mark.filterwarnings(r'ignore:Exception in thread:pytest.PytestUnhandledThreadExceptionWarning')
def test_exception_queue_works(tmp_path: pathlib.Path, dummy_state: State):
"""Test that exceptions get put on the exception queue and get thrown"""
with patch('composer.loggers.remote_uploader_downloader.S3ObjectStore', DummyObjectStore):
# Create the object store logger
remote_uploader_downloader = RemoteUploaderDownloader(
bucket_uri=f"s3://{tmp_path}/'object_store_backend",
backend_kwargs={
'dir': tmp_path / 'object_store_backend',
'always_fail': True,
},
num_concurrent_uploads=1,
use_procs=False,
upload_staging_folder=str(tmp_path / 'staging_folder'),
num_attempts=1,
)
# Enqueue a file. Because `always_fail` is True, it will cause the worker to crash
tmpfile_path = tmp_path / 'dummy_file'
with open(tmpfile_path, 'w+') as f:
f.write('hi')
logger = Logger(dummy_state, destinations=[remote_uploader_downloader])
remote_uploader_downloader.run_event(Event.INIT, dummy_state, logger)
logger.upload_file('dummy_remote_file_name', tmpfile_path)
# Wait enough time for the file to be enqueued and the exception to be enqueued
time.sleep(2.0)
# Make sure the exception got enqueued.
assert not remote_uploader_downloader._exception_queue.empty()
# Assert that the worker crashed with the worker's error not the general
# 'Upload worker crashed. Please check the logs.' error.
with pytest.raises(RuntimeError, match='Crash because you set*'):
remote_uploader_downloader.run_event(Event.EPOCH_END, dummy_state, logger)
| composer-dev | tests/loggers/test_remote_uploader_downloader.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import contextlib
import imghdr
import json
import os
import pathlib
import pickle
import uuid
from pathlib import Path
from typing import Sequence, Type
import pytest
import torch
from torch.utils.data import DataLoader
from composer.core import Engine, Event
from composer.core.callback import Callback
from composer.core.state import State
from composer.loggers import InMemoryLogger
from composer.loggers.logger import Logger
from composer.loggers.wandb_logger import WandBLogger
from composer.trainer import Trainer
from composer.utils import dist, retry
from tests.callbacks.callback_settings import get_cb_kwargs, get_cbs_and_marks
from tests.common import RandomClassificationDataset, SimpleModel
from tests.common.datasets import RandomImageDataset
from tests.common.models import SimpleConvModel
@pytest.fixture
def test_wandb_logger(tmp_path, dummy_state):
pytest.importorskip('wandb', reason='wandb is optional')
os.environ['WANDB_DIR'] = str(tmp_path)
os.environ['WANDB_MODE'] = 'offline'
dummy_state.run_name = 'wandb-test-log-image'
logger = Logger(dummy_state, [])
wandb_logger = WandBLogger()
wandb_logger.init(dummy_state, logger)
return wandb_logger
def test_wandb_log_image(test_wandb_logger):
pytest.importorskip('wandb', reason='wandb is optional')
# We group all the image size variants into one test because calling wandb.init() is slow
image_variants = [
(torch.rand(4, 4), False), # 2D image
(torch.rand(2, 3, 4, 4), False), # multiple images, not channels last
(torch.rand(3, 4, 4), False), # with channels, not channels last
([torch.rand(4, 4, 3)], True), # with channels, channels last
(torch.rand(2, 4, 4, 3), True), # multiple images, channels last
([torch.rand(4, 4, 3), torch.rand(4, 4, 3)], True) # multiple images in list
]
expected_num_images_total = 0
for (images, channels_last) in image_variants:
if isinstance(images, Sequence):
expected_num_images = len(images)
np_images = [image.numpy() for image in images]
else:
expected_num_images = 1 if images.ndim < 4 else images.shape[0]
np_images = images.numpy()
test_wandb_logger.log_images(images=images, channels_last=channels_last)
test_wandb_logger.log_images(images=np_images, channels_last=channels_last)
expected_num_images *= 2 # One set of torch tensors, one set of numpy arrays
expected_num_images_total += expected_num_images
test_wandb_logger.post_close()
img_dir = str(Path(test_wandb_logger.run_dir) / Path('media/images'))
imgs = [filename for filename in os.listdir(img_dir) if imghdr.what(img_dir + '/' + filename) == 'png']
actual_num_images = len(imgs)
assert actual_num_images == expected_num_images_total
@pytest.mark.parametrize(
'images,channels_last',
[
(torch.rand(32), False),
(torch.rand(32, 0), False), # Has zero in dimension.
(torch.rand(4, 4, 8, 32, 32), False), # > 4 dim.
        ([torch.rand(4, 32, 32, 3)], True),  # sequence with element > 3 dim.
    ])
def test_wandb_ml_log_image_errors_out(test_wandb_logger, images, channels_last):
pytest.importorskip('wandb', reason='wandb is optional')
with pytest.raises(ValueError):
test_wandb_logger.log_images(images, channels_last=channels_last)
def test_wandb_log_image_with_masks(test_wandb_logger):
pytest.importorskip('wandb', reason='wandb is optional')
    # We group all the image size variants into one test because calling wandb.init() is slow
image_variants = [
# single image, single mask, channels last
(torch.randint(0, 256, (4, 4, 3)), {
'pred': torch.randint(0, 10, (4, 4))
}, True),
# multiple images, single mask, channels last
(torch.rand(2, 4, 4, 3), {
'pred': torch.randint(0, 10, (2, 4, 4))
}, True),
# multiple images, multiple masks, channels last
(torch.rand(2, 4, 4, 3), {
'pred': torch.randint(0, 10, (2, 4, 4)),
'pred2': torch.randint(0, 10, (2, 4, 4))
}, True),
# single image, single mask, not channels last
(torch.randint(0, 256, (3, 4, 4)), {
'pred': torch.randint(0, 10, (4, 4))
}, False),
# multiple images, single mask, not channels last
(torch.rand(2, 3, 4, 4), {
'pred': torch.randint(0, 10, (2, 4, 4))
}, False),
# multiple images, multiple masks, not channels last
(torch.rand(2, 3, 4, 4), {
'pred': torch.randint(0, 10, (2, 4, 4)),
'pred2': torch.randint(0, 10, (2, 4, 4))
}, False)
]
expected_num_masks_total = 0
expected_num_images_total = 0
for (images, masks, channels_last) in image_variants:
num_masks = len(masks.keys())
expected_num_images = 1 if images.ndim < 4 else images.shape[0]
expected_num_masks = num_masks * expected_num_images
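        # e.g. a (2, 4, 4, 3) channels-last batch with two masks contributes 2 images and 2 * 2 = 4 masks.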
test_wandb_logger.log_images(images=images, masks=masks, channels_last=channels_last)
expected_num_images_total += expected_num_images
expected_num_masks_total += expected_num_masks
test_wandb_logger.post_close()
img_dir = str(Path(test_wandb_logger.run_dir) / Path('media/images'))
imgs = [
filename for filename in os.listdir(img_dir)
if not os.path.isdir(img_dir + '/' + filename) and imghdr.what(img_dir + '/' + filename) == 'png'
]
actual_num_images = len(imgs)
assert actual_num_images == expected_num_images_total
mask_dir = str(Path(test_wandb_logger.run_dir) / Path('media/images/mask'))
masks = [filename for filename in os.listdir(mask_dir) if imghdr.what(mask_dir + '/' + filename) == 'png']
actual_num_masks = len(masks)
assert actual_num_masks == expected_num_masks_total
@pytest.mark.parametrize('images,masks', [(torch.randint(0, 256, (32, 32, 3)), {
'pred': torch.randint(0, 10, (32, 32))
})])
def test_wandb_log_image_with_masks_and_table(images, masks, test_wandb_logger):
wandb = pytest.importorskip('wandb', reason='wandb is optional')
expected_num_images = 1 if images.ndim < 4 else images.shape[0]
assert wandb.run is not None
wandb_run_dir = Path(wandb.run.dir)
test_wandb_logger.log_images(images=images, masks=masks, channels_last=True, use_table=True)
test_wandb_logger.post_close()
wandb_media_dir = wandb_run_dir.parent / Path('files') / Path('media') / Path('table')
image_table_files = wandb_media_dir.glob('./*.json')
image_count = 0
for image_table_file in image_table_files:
table_columns = json.load(open(image_table_file.absolute()))['data']
num_images = sum([1 for column in table_columns if column[0] == 'Image'])
image_count += num_images
assert image_count == expected_num_images
def test_wandb_log_metrics(test_wandb_logger):
wandb = pytest.importorskip('wandb', reason='wandb is optional')
dataset_size = 40
batch_size = 4
trainer = Trainer(model=SimpleConvModel(),
loggers=test_wandb_logger,
train_dataloader=DataLoader(RandomImageDataset(size=dataset_size), batch_size),
eval_dataloader=DataLoader(RandomImageDataset(size=dataset_size), batch_size),
max_duration='1ep')
trainer.fit()
wandb_run_dir = Path(wandb.run.dir)
run_file = wandb_run_dir.parent / Path(f'run-{wandb.run.id}.wandb')
# delete trainer to force WandBLogger to clean up in post_close
del trainer
# Note, it is not clear how to correctly load this file, so we are just loading it as text
# and searching the text for expected strings
with open(run_file, encoding='latin-1') as _wandb_file:
all_run_text = _wandb_file.read()
train_metrics_accuracy_count = all_run_text.count('metrics/train/MulticlassAccuracy')
eval_metrics_accuracy_count = all_run_text.count('metrics/eval/MulticlassAccuracy')
eval_metrics_cross_entropy_count = all_run_text.count('metrics/eval/CrossEntropy')
train_loss_count = all_run_text.count('loss/train/total')
expected_number_train_loss_count = (dataset_size / batch_size) + 1 # wandb includes it in the file one extra time
expected_number_train_metrics_count = (dataset_size /
batch_size) + 2 # wandb includes it in the file two extra times
expected_number_eval_metrics_count = 2 # wandb includes it in the file twice
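    # With dataset_size=40 and batch_size=4 (10 train batches), that works out to 11 expected
    # occurrences of the train loss key, 12 of the train accuracy key, and 2 of each eval metric key.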
assert train_metrics_accuracy_count == expected_number_train_metrics_count
assert train_loss_count == expected_number_train_loss_count
assert eval_metrics_accuracy_count == expected_number_eval_metrics_count
assert eval_metrics_cross_entropy_count == expected_number_eval_metrics_count
@pytest.mark.parametrize('callback_cls', get_cbs_and_marks(callbacks=True))
def test_logged_data_is_json_serializable(callback_cls: Type[Callback]):
"""Test that all logged data is json serializable, which is a requirement to use wandb."""
pytest.importorskip('wandb', reason='wandb is optional')
from wandb.sdk.data_types.base_types.wb_value import WBValue
callback_kwargs = get_cb_kwargs(callback_cls)
callback = callback_cls(**callback_kwargs)
logger = InMemoryLogger() # using an in memory logger to manually validate json serializability
trainer = Trainer(
model=SimpleModel(),
train_dataloader=DataLoader(RandomClassificationDataset()),
train_subset_num_batches=2,
max_duration='1ep',
callbacks=callback,
loggers=logger,
)
trainer.fit()
for log_calls in logger.data.values():
for timestamp, data in log_calls:
del timestamp # unused
# manually filter out custom W&B data types and tensors, which are allowed, but cannot be json serialized
if isinstance(data, (WBValue, torch.Tensor)):
continue
json.dumps(data)
def test_wandb_is_pickleable_when_disabled(dummy_state: State):
pytest.importorskip('wandb', reason='wandb is optional')
original_wandb_mode = os.environ.get('WANDB_MODE', None)
os.environ['WANDB_MODE'] = 'disabled'
wandb_logger = WandBLogger()
# Need to initialize WandbLogger before calling .state_dict()
dummy_state.callbacks.append(wandb_logger)
logger = Logger(dummy_state, [wandb_logger])
engine = Engine(dummy_state, logger)
engine.run_event(Event.INIT)
# Just make sure this doesn't crash due to wandb.sdk.lib.disabled.RunDisabled not being pickleable
pickle.loads(pickle.dumps(wandb_logger.state_dict()))
# reset wandb mode
if original_wandb_mode is None:
del os.environ['WANDB_MODE']
else:
os.environ['WANDB_MODE'] = original_wandb_mode
@pytest.mark.world_size(2)
@pytest.mark.parametrize('rank_zero_only', [True, False])
@pytest.mark.skip('This test needs to be refactored to use a Mock API interface.')
def test_wandb_artifacts(rank_zero_only: bool, tmp_path: pathlib.Path, dummy_state: State):
"""Test that wandb artifacts logged on rank zero are accessible by all ranks."""
pytest.importorskip('wandb', reason='wandb is optional')
# Create the logger
ctx = pytest.warns(
UserWarning, match='`rank_zero_only` should be set to False.') if rank_zero_only else contextlib.nullcontext()
with ctx:
wandb_logger = WandBLogger(
rank_zero_only=rank_zero_only,
log_artifacts=True,
)
dummy_state.callbacks.append(wandb_logger)
logger = Logger(dummy_state, [wandb_logger])
engine = Engine(dummy_state, logger)
engine.run_event(Event.INIT)
# Distribute the artifact name from rank 0 to all ranks
wandb_artifact_name = 'test-wandb-artifact-' + str(uuid.uuid4())
wandb_artifact_name_list = [wandb_artifact_name]
dist.broadcast_object_list(wandb_artifact_name_list)
wandb_artifact_name = wandb_artifact_name_list[0]
if dist.get_global_rank() == 0:
# Create a dummy artifact
dummy_wandb_artifact_path = tmp_path / 'wandb_artifact.txt'
with open(dummy_wandb_artifact_path, 'w+') as f:
f.write('hello!')
# Log a wandb artifact if rank zero
logger.upload_file(
file_path=dummy_wandb_artifact_path,
remote_file_name=wandb_artifact_name,
)
# Wait for rank 0 queue the file upload
dist.barrier()
# Attempt to retrieve the artifact on all ranks
downloaded_wandb_artifact_path = tmp_path / 'downloaded_wandb_artifact'
@retry(FileNotFoundError, num_attempts=6) # 6 attempts is ~2^(6-1) seconds max wait
def _validate_wandb_artifact():
wandb_logger.download_file(wandb_artifact_name, str(downloaded_wandb_artifact_path))
with open(downloaded_wandb_artifact_path, 'r') as f:
assert f.read() == 'hello!'
_validate_wandb_artifact()
| composer-dev | tests/loggers/test_wandb_logger.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pathlib
from composer.core.state import State
from composer.loggers import Logger, LoggerDestination
def test_logger_file_upload(dummy_state: State):
file_logged = False
class DummyLoggerDestination(LoggerDestination):
def upload_file(self, state: State, remote_file_name: str, file_path: pathlib.Path, *, overwrite: bool):
nonlocal file_logged
file_logged = True
assert remote_file_name == 'foo'
assert file_path.name == 'bar'
assert overwrite
logger = Logger(state=dummy_state, destinations=[DummyLoggerDestination()])
logger.upload_file(
remote_file_name='foo',
file_path='bar',
overwrite=True,
)
assert file_logged
| composer-dev | tests/loggers/test_logger.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from composer.core import State, Time, Timestamp
from composer.loggers import InMemoryLogger, Logger
def test_in_memory_logger(dummy_state: State):
in_memory_logger = InMemoryLogger()
logger = Logger(dummy_state, destinations=[in_memory_logger])
in_memory_logger.init(dummy_state, logger)
logger.log_metrics({'epoch': 2.2})
dummy_state.timestamp = dummy_state.timestamp.to_next_batch(samples=1, tokens=1)
logger.log_metrics({'epoch': 3.3})
# no batch events should be logged, since the level is epoch
assert len(in_memory_logger.data['epoch']) == 2
# `in_memory_logger.data` should contain everything
timestamp, data = in_memory_logger.data['epoch'][0]
assert timestamp.batch == 0
assert data == 2.2
timestamp, data = in_memory_logger.data['epoch'][1]
assert timestamp.batch == 1
assert data == 3.3
# the most recent values should have just the last call to epoch
assert in_memory_logger.most_recent_values['epoch'] == 3.3
assert in_memory_logger.most_recent_timestamps['epoch'].batch == 1
def test_in_memory_logger_get_timeseries(minimal_state: State, empty_logger: Logger):
in_memory_logger = InMemoryLogger()
state = minimal_state
logger = empty_logger
in_memory_logger.init(state, logger)
data = {'accuracy/val': [], 'batch': [], 'batch_in_epoch': []}
for i in range(10):
batch = i
batch_in_epoch = i % 3
timestamp = Timestamp(
epoch=Time(0, 'ep'),
batch=Time(batch, 'ba'),
batch_in_epoch=Time(batch_in_epoch, 'ba'),
sample=Time(0, 'sp'),
sample_in_epoch=Time(0, 'sp'),
token=Time(0, 'tok'),
token_in_epoch=Time(0, 'tok'),
)
assert in_memory_logger.state is not None
in_memory_logger.state.timestamp = timestamp
datapoint = i / 3
in_memory_logger.log_metrics({'accuracy/val': datapoint}, step=state.timestamp.batch.value)
data['accuracy/val'].append(datapoint)
data['batch'].append(batch)
data['batch_in_epoch'].append(batch_in_epoch)
timeseries = in_memory_logger.get_timeseries('accuracy/val')
for k, v in data.items():
assert np.all(timeseries[k] == np.array(v))
| composer-dev | tests/loggers/test_in_memory_logger.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import os
import pathlib
import sys
from torch.utils.data import DataLoader
from composer import Callback, Event, State, Trainer
from composer.loggers import FileLogger, Logger, LoggerDestination
from composer.utils.collect_env import disable_env_report
from tests.common.datasets import RandomClassificationDataset
from tests.common.models import SimpleModel
class FileUploaderTracker(LoggerDestination):
def __init__(self) -> None:
self.uploaded_files = []
def upload_file(self, state: State, remote_file_name: str, file_path: pathlib.Path, *, overwrite: bool):
del state, overwrite # unused
self.uploaded_files.append((remote_file_name, file_path))
def test_file_logger(dummy_state: State, tmp_path: pathlib.Path):
log_file_name = os.path.join(tmp_path, 'output.log')
log_destination = FileLogger(
filename=log_file_name,
remote_file_name='{run_name}/rank{rank}.log',
buffer_size=1,
flush_interval=1,
)
file_tracker_destination = FileUploaderTracker()
logger = Logger(dummy_state, destinations=[log_destination, file_tracker_destination])
log_destination.run_event(Event.INIT, dummy_state, logger)
logger.log_hyperparameters({'foo': 3})
logger.log_metrics({'loss': 2}, step=1)
log_destination.run_event(Event.BATCH_END, dummy_state, logger)
log_destination.close(dummy_state, logger)
with open(log_file_name, 'r') as f:
assert f.readlines() == [
'[hyperparameter]: foo: 3 \n',
'[metric][batch=1]: loss: 2 \n',
]
def test_file_logger_capture_stdout_stderr(dummy_state: State, tmp_path: pathlib.Path):
log_file_name = os.path.join(tmp_path, 'output.log')
log_destination = FileLogger(filename=log_file_name,
buffer_size=1,
flush_interval=1,
capture_stderr=True,
capture_stdout=True)
# capturing should start immediately
print('Hello, stdout!\nExtra Line')
print('Hello, stderr!\nExtra Line2', file=sys.stderr)
logger = Logger(dummy_state, destinations=[log_destination])
log_destination.run_event(Event.INIT, dummy_state, logger)
log_destination.run_event(Event.EPOCH_START, dummy_state, logger)
log_destination.run_event(Event.BATCH_START, dummy_state, logger)
log_destination.run_event(Event.BATCH_END, dummy_state, logger)
log_destination.close(dummy_state, logger)
with open(log_file_name, 'r') as f:
assert f.readlines() == [
'[stdout]: Hello, stdout!\n',
'[stdout]: Extra Line\n',
'[stderr]: Hello, stderr!\n',
'[stderr]: Extra Line2\n',
]
class ExceptionRaisingCallback(Callback):
def fit_start(self, state: State, logger: Logger) -> None:
del state, logger # unused
raise RuntimeError('My Exception!')
def test_exceptions_are_printed(tmp_path: pathlib.Path):
# Test that exceptions are printed to stderr, which is captured by the file logger
# The file logger stops capturing stdout/stderr when it is closed
# Here, we construct a trainer that raises an exception on Event.FIT_START
# and assert that the exception is written to the logfile
exception_raising_callback = ExceptionRaisingCallback()
logfile_name = str(tmp_path / 'logfile.txt')
file_logger = FileLogger(filename=logfile_name, capture_stderr=True)
dataloader = DataLoader(RandomClassificationDataset())
model = SimpleModel()
trainer = Trainer(model=model,
train_dataloader=dataloader,
max_duration=1,
callbacks=[exception_raising_callback],
loggers=[file_logger])
disable_env_report() # Printing the full report in this test can cause timeouts
# manually calling `sys.excepthook` for the exception, as it is impossible to write a test
# that validates unhandled exceptions are logged, since the test validation code would by definition
# need to handle the exception!
try:
trainer.fit()
except RuntimeError:
exc_type, exc_value, tb = sys.exc_info()
assert exc_type is not None
assert exc_value is not None
assert tb is not None
sys.excepthook(exc_type, exc_value, tb)
trainer.close()
with open(logfile_name, 'r') as f:
log_lines = f.readlines()
assert '[stderr]: RuntimeError: My Exception!\n' == log_lines[-1]
# Since the trainer was closed, future prints should not appear in the file logger
print('SHOULD NOT BE CAPTURED')
with open(logfile_name, 'r') as f:
logfile = f.read()
assert 'SHOULD NOT BE CAPTURED' not in logfile
| composer-dev | tests/loggers/test_file_logger.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""
Test inference APIs.
"""
import os
import tempfile
from functools import partial
from unittest.mock import patch
import pytest
from torch.utils.data import DataLoader
from composer.callbacks import ExportForInferenceCallback, export_for_inference
from composer.models import composer_resnet
from composer.trainer import Trainer
from tests.common.datasets import RandomImageDataset
@pytest.mark.parametrize(
'model_cls',
[partial(composer_resnet, 'resnet18')],
)
def test_inference_callback_torchscript(model_cls):
with patch('composer.callbacks.export_for_inference.export_with_logger'):
save_format = 'torchscript'
model = model_cls()
with tempfile.TemporaryDirectory() as tempdir:
            save_path = os.path.join(tempdir, 'model.pt')
exp_for_inf_callback = ExportForInferenceCallback(save_format=save_format, save_path=str(save_path))
# Construct the trainer and train
trainer = Trainer(
model=model,
callbacks=exp_for_inf_callback,
train_dataloader=DataLoader(RandomImageDataset(shape=(3, 224, 224))),
max_duration='1ba',
)
trainer.fit()
# Assert export_for_inference utility called with expected inputs
export_for_inference.export_with_logger.assert_called_once_with(
model=model,
save_format=save_format,
save_path=save_path,
logger=trainer.logger,
save_object_store=None,
sample_input=(exp_for_inf_callback.sample_input, {}),
transforms=None)
@pytest.mark.parametrize(
'model_cls',
[partial(composer_resnet, 'resnet18')],
)
def test_inference_callback_onnx(model_cls):
with patch('composer.callbacks.export_for_inference.export_with_logger'):
save_format = 'onnx'
model = model_cls()
with tempfile.TemporaryDirectory() as tempdir:
            save_path = os.path.join(tempdir, 'model.onnx')
exp_for_inf_callback = ExportForInferenceCallback(save_format=save_format, save_path=str(save_path))
# Construct the trainer and train
trainer = Trainer(model=model,
callbacks=exp_for_inf_callback,
train_dataloader=DataLoader(RandomImageDataset(shape=(3, 224, 224))),
max_duration='1ba')
trainer.fit()
# Assert export_for_inference utility called with expected inputs
export_for_inference.export_with_logger.assert_called_once_with(
model=model,
save_format=save_format,
save_path=save_path,
logger=trainer.logger,
save_object_store=None,
sample_input=(exp_for_inf_callback.sample_input, {}),
transforms=None)
| composer-dev | tests/callbacks/test_inference.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import imghdr
import json
import math
import os
import zipfile
from pathlib import Path
import pytest
from torch.utils.data import DataLoader
from composer.callbacks import ImageVisualizer
from composer.core import TimeUnit
from composer.loggers import WandBLogger
from composer.loggers.logger import Logger
from composer.trainer import Trainer
from tests.common.datasets import RandomImageDataset, RandomSegmentationDataset
from tests.common.models import SimpleConvModel, SimpleSegmentationModel
@pytest.fixture
def test_wandb_logger(tmp_path, dummy_state):
pytest.importorskip('wandb', reason='wandb is optional')
os.environ['WANDB_DIR'] = str(tmp_path)
os.environ['WANDB_MODE'] = 'offline'
dummy_state.run_name = 'wand-test-log-image'
logger = Logger(dummy_state, [])
wandb_logger = WandBLogger()
wandb_logger.init(dummy_state, logger)
return wandb_logger
@pytest.fixture
def comet_offline_directory(tmp_path):
return str(tmp_path / Path('my_cometml_runs'))
@pytest.fixture
def comet_logger(monkeypatch, comet_offline_directory):
comet_ml = pytest.importorskip('comet_ml', reason='comet_ml is optional')
monkeypatch.setattr(comet_ml, 'Experiment', comet_ml.OfflineExperiment)
from composer.loggers import CometMLLogger
# Set offline directory.
os.environ['COMET_OFFLINE_DIRECTORY'] = comet_offline_directory
comet_logger = CometMLLogger()
return comet_logger
@pytest.mark.parametrize('interval', ['9ba', '2ep', '7ep'])
def test_image_visualizer_with_wandb(test_wandb_logger, interval):
wandb = pytest.importorskip('wandb', reason='wandb is optional')
image_visualizer = ImageVisualizer(interval=interval)
dataset_size = 40
batch_size = 4
images_per_table = batch_size if batch_size < image_visualizer.num_images else image_visualizer.num_images
max_duration = 9
trainer = Trainer(model=SimpleConvModel(),
callbacks=image_visualizer,
loggers=test_wandb_logger,
train_dataloader=DataLoader(RandomImageDataset(size=dataset_size), batch_size),
eval_dataloader=DataLoader(RandomImageDataset(size=dataset_size), batch_size),
max_duration=f'{max_duration}ep')
trainer.fit()
num_train_steps = int(trainer.state.timestamp.batch)
assert wandb.run is not None
wandb_run_dir = Path(wandb.run.dir)
# delete trainer to force WandBLogger to clean up in post_close
del trainer
wandb_media_dir = wandb_run_dir.parent / Path('files') / Path('media') / Path('table') / Path('Images')
image_table_files = wandb_media_dir.glob('./*.json')
train_image_count, eval_image_count = 0, 0
for image_table_file in image_table_files:
table_columns = json.load(open(image_table_file.absolute()))['data']
num_images = sum([1 for column in table_columns if column[0] == 'Image'])
if str(image_table_file.name).startswith('Train'):
train_image_count += num_images
elif str(image_table_file.name).startswith('Eval'):
eval_image_count += num_images
num_train_epochs = max_duration
    expected_number_train_tables = (math.ceil(num_train_steps / image_visualizer.interval.value)
                                    if image_visualizer.interval.unit == TimeUnit.BATCH else
                                    math.ceil(num_train_epochs / image_visualizer.interval.value))
expected_number_eval_tables = max_duration
expected_number_train_images = expected_number_train_tables * images_per_table
expected_number_eval_images = expected_number_eval_tables * images_per_table
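    # For example, with interval='2ep' and max_duration=9 epochs this expects ceil(9 / 2) = 5 train
    # tables and 9 eval tables, each contributing `images_per_table` images.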
assert train_image_count == expected_number_train_images
assert eval_image_count == expected_number_eval_images
def test_image_visualizer_with_comet(comet_offline_directory, comet_logger):
pytest.importorskip('comet_ml', reason='comet_ml is optional')
image_interval = 2
image_visualizer = ImageVisualizer(interval=f'{image_interval}ba')
dataset_size = 40
batch_size = 4
max_duration = 6
eval_interval = 6
trainer = Trainer(model=SimpleConvModel(),
callbacks=image_visualizer,
loggers=comet_logger,
train_dataloader=DataLoader(RandomImageDataset(size=dataset_size), batch_size),
eval_dataloader=DataLoader(RandomImageDataset(size=dataset_size), batch_size),
eval_interval=f'{eval_interval}ba',
max_duration=f'{max_duration}ba')
trainer.fit()
comet_logger.experiment.end()
expected_number_train_images = int((batch_size * max_duration) / image_interval)
expected_number_eval_images = int((max_duration / eval_interval) * batch_size)
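    # With batch_size=4, max_duration=6ba, image_interval=2ba, and eval_interval=6ba, this works out
    # to 12 expected train images and 4 expected eval images.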
# Extract all files saved to comet offline directory.
assert comet_logger.experiment is not None
comet_exp_dump_filepath = str(Path(comet_offline_directory) / Path(comet_logger.experiment.id).with_suffix('.zip'))
zf = zipfile.ZipFile(comet_exp_dump_filepath)
zf.extractall(comet_offline_directory)
# Count the number of files that are images.
actual_num_images = 0
for filename in Path(comet_offline_directory).iterdir():
file_path = str(Path(comet_offline_directory) / Path(filename))
if imghdr.what(file_path) == 'png':
actual_num_images += 1
assert actual_num_images == expected_number_train_images + expected_number_eval_images
def test_image_visualizer_segmentation_with_wandb(test_wandb_logger):
wandb = pytest.importorskip('wandb', reason='wandb is optional')
image_interval = 2
image_visualizer = ImageVisualizer(interval=f'{image_interval}ba', mode='segmentation')
dataset_size = 40
batch_size = 4
max_duration = 8
eval_interval = 4
num_classes = 2
num_channels = 3
trainer = Trainer(model=SimpleSegmentationModel(num_channels=num_channels, num_classes=num_classes),
callbacks=image_visualizer,
loggers=test_wandb_logger,
train_dataloader=DataLoader(
RandomSegmentationDataset(size=dataset_size, shape=(3, 8, 8), num_classes=num_classes),
batch_size),
eval_dataloader=DataLoader(
RandomSegmentationDataset(size=dataset_size, shape=(3, 8, 8), num_classes=num_classes),
batch_size),
eval_interval=f'{eval_interval}ba',
max_duration=f'{max_duration}ba')
trainer.fit()
assert wandb.run is not None
wandb_run_dir = Path(wandb.run.dir)
# delete trainer to force WandBLogger to clean up in post_close
del trainer
wandb_media_dir = wandb_run_dir.parent / Path('files') / Path('media') / Path('table') / Path('Images')
image_table_files = wandb_media_dir.glob('./*.json')
train_image_count, eval_image_count = 0, 0
for image_table_file in image_table_files:
table_columns = json.load(open(image_table_file.absolute()))['data']
num_images = sum([1 for column in table_columns if column[0] == 'Image'])
if str(image_table_file.name).startswith('Train'):
train_image_count += num_images
elif str(image_table_file.name).startswith('Eval'):
eval_image_count += num_images
expected_number_train_images = (max_duration / image_interval) * batch_size
expected_number_eval_images = (max_duration / eval_interval) * batch_size
assert train_image_count == expected_number_train_images
assert eval_image_count == expected_number_eval_images
def test_image_visualizer_segmentation_with_comet(comet_offline_directory, comet_logger):
pytest.importorskip('comet_ml', reason='comet_ml is optional')
image_interval = 2
image_visualizer = ImageVisualizer(interval=f'{image_interval}ba', mode='segmentation')
dataset_size = 40
batch_size = 4
max_duration = 6
eval_interval = 6
num_classes = 2
num_channels = 3
num_masks = 2
trainer = Trainer(model=SimpleSegmentationModel(num_channels=num_channels, num_classes=num_classes),
callbacks=image_visualizer,
loggers=comet_logger,
train_dataloader=DataLoader(
RandomSegmentationDataset(size=dataset_size, shape=(3, 32, 32), num_classes=num_classes),
batch_size),
eval_dataloader=DataLoader(
RandomSegmentationDataset(size=dataset_size, shape=(3, 32, 32), num_classes=num_classes),
batch_size),
eval_interval=f'{eval_interval}ba',
max_duration=f'{max_duration}ba')
trainer.fit()
    # end the Comet experiment so the offline experiment archive is flushed to disk before inspection
comet_logger.experiment.end()
expected_number_train_images = (batch_size * max_duration) / image_interval
expected_number_eval_images = (max_duration / eval_interval) * batch_size
num_additional_images_per_mask = 2 # Mask overlaid on image + mask by itself.
expected_num_masks = num_masks * num_additional_images_per_mask * (expected_number_train_images +
expected_number_eval_images)
# Extract all files saved to comet offline directory.
assert comet_logger.experiment is not None
comet_exp_dump_filepath = str(Path(comet_offline_directory) / Path(comet_logger.experiment.id).with_suffix('.zip'))
zf = zipfile.ZipFile(comet_exp_dump_filepath)
zf.extractall(comet_offline_directory)
# Count the number of files that are images.
actual_num_images = 0
for filename in Path(comet_offline_directory).iterdir():
file_path = str(Path(comet_offline_directory) / Path(filename))
if imghdr.what(file_path) == 'png':
actual_num_images += 1
assert actual_num_images == expected_number_train_images + expected_number_eval_images + expected_num_masks
| composer-dev | tests/callbacks/test_image_visualizer.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from typing import Type, cast
import pytest
from torch.utils.data import DataLoader
from composer.core import Callback, Engine, Event, State
from composer.core.time import Time
from composer.loggers import Logger, LoggerDestination
from composer.profiler import Profiler, ProfilerAction
from composer.trainer import Trainer
from tests.callbacks.callback_settings import get_cb_kwargs, get_cbs_and_marks
from tests.common import EventCounterCallback
from tests.common.datasets import RandomClassificationDataset
from tests.common.models import SimpleModel
def test_callbacks_map_to_events():
    # callback methods must map 1:1 to events,
    # except for private methods and the explicitly excluded helpers
cb = Callback()
excluded_methods = ['state_dict', 'load_state_dict', 'run_event', 'close', 'post_close']
methods = set(m for m in dir(cb) if (m not in excluded_methods and not m.startswith('_')))
event_names = set(e.value for e in Event)
assert methods == event_names
@pytest.mark.parametrize('event', list(Event))
def test_run_event_callbacks(event: Event, dummy_state: State):
callback = EventCounterCallback()
logger = Logger(dummy_state)
dummy_state.callbacks = [callback]
engine = Engine(state=dummy_state, logger=logger)
engine.run_event(event)
assert callback.event_to_num_calls[event] == 1
@pytest.mark.parametrize('cb_cls', get_cbs_and_marks(callbacks=True, loggers=True, profilers=True))
class TestCallbacks:
@classmethod
def setup_class(cls):
pytest.importorskip('wandb', reason='WandB is optional.')
def test_callback_is_constructable(self, cb_cls: Type[Callback]):
cb_kwargs = get_cb_kwargs(cb_cls)
cb = cb_cls(**cb_kwargs)
assert isinstance(cb_cls, type)
assert isinstance(cb, cb_cls)
def test_multiple_fit_start_and_end(self, cb_cls: Type[Callback], dummy_state: State):
"""Test that callbacks do not crash when Event.FIT_START and Event.FIT_END is called multiple times."""
cb_kwargs = get_cb_kwargs(cb_cls)
dummy_state.callbacks.append(cb_cls(**cb_kwargs))
dummy_state.profiler = Profiler(schedule=lambda _: ProfilerAction.SKIP, trace_handlers=[])
dummy_state.profiler.bind_to_state(dummy_state)
logger = Logger(dummy_state)
engine = Engine(state=dummy_state, logger=logger)
engine.run_event(Event.INIT) # always runs just once per engine
engine.run_event(Event.FIT_START)
engine.run_event(Event.FIT_END)
engine.run_event(Event.FIT_START)
engine.run_event(Event.FIT_END)
def test_idempotent_close(self, cb_cls: Type[Callback], dummy_state: State):
"""Test that callbacks do not crash when .close() and .post_close() are called multiple times."""
cb_kwargs = get_cb_kwargs(cb_cls)
dummy_state.callbacks.append(cb_cls(**cb_kwargs))
dummy_state.profiler = Profiler(schedule=lambda _: ProfilerAction.SKIP, trace_handlers=[])
dummy_state.profiler.bind_to_state(dummy_state)
logger = Logger(dummy_state)
engine = Engine(state=dummy_state, logger=logger)
engine.run_event(Event.INIT) # always runs just once per engine
engine.close()
engine.close()
def test_multiple_init_and_close(self, cb_cls: Type[Callback], dummy_state: State):
"""Test that callbacks do not crash when INIT/.close()/.post_close() are called multiple times in that order."""
cb_kwargs = get_cb_kwargs(cb_cls)
dummy_state.callbacks.append(cb_cls(**cb_kwargs))
dummy_state.profiler = Profiler(schedule=lambda _: ProfilerAction.SKIP, trace_handlers=[])
dummy_state.profiler.bind_to_state(dummy_state)
logger = Logger(dummy_state)
engine = Engine(state=dummy_state, logger=logger)
engine.run_event(Event.INIT)
engine.close()
        # For good measure, also test idempotent close, in case there are edge cases with a second call to INIT
engine.close()
        # Create a new engine, since the engine does not allow events to run after it has been closed
engine = Engine(state=dummy_state, logger=logger)
engine.close()
        # For good measure, also test idempotent close, in case there are edge cases with a second call to INIT
engine.close()
@pytest.mark.parametrize('cb_cls', get_cbs_and_marks(callbacks=True, loggers=True, profilers=True))
# Parameterized across @pytest.mark.remote as some loggers (e.g. wandb) support integration testing
@pytest.mark.parametrize('device_train_microbatch_size,_remote',
[(1, False),
(2, False), pytest.param(1, True, marks=pytest.mark.remote)])
@pytest.mark.filterwarnings(r'ignore:The profiler is enabled:UserWarning')
class TestCallbackTrains:
def _get_trainer(self, cb: Callback, device_train_microbatch_size: int):
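        # LoggerDestination instances are passed to the Trainer via `loggers`; all other callbacks go through `callbacks`.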
loggers = cb if isinstance(cb, LoggerDestination) else None
callbacks = cb if not isinstance(cb, LoggerDestination) else None
batch_size = 2
return Trainer(
model=SimpleModel(),
train_dataloader=DataLoader(RandomClassificationDataset(size=4), batch_size=batch_size),
eval_dataloader=DataLoader(RandomClassificationDataset(size=4), batch_size=batch_size),
max_duration=2,
device_train_microbatch_size=device_train_microbatch_size,
callbacks=callbacks,
loggers=loggers,
profiler=Profiler(schedule=lambda _: ProfilerAction.SKIP, trace_handlers=[]),
)
def test_trains(self, cb_cls: Type[Callback], device_train_microbatch_size: int, _remote: bool):
del _remote # unused. `_remote` must be passed through to parameterize the test markers.
cb_kwargs = get_cb_kwargs(cb_cls)
cb = cb_cls(**cb_kwargs)
trainer = self._get_trainer(cb, device_train_microbatch_size)
trainer.fit()
def test_trains_multiple_calls(self, cb_cls: Type[Callback], device_train_microbatch_size: int, _remote: bool):
"""
Tests that training with multiple fits complete.
Note: future functional tests should test for
idempotency (e.g functionally)
"""
del _remote # unused. `_remote` must be passed through to parameterize the test markers.
cb_kwargs = get_cb_kwargs(cb_cls)
cb = cb_cls(**cb_kwargs)
trainer = self._get_trainer(cb, device_train_microbatch_size)
trainer.fit()
assert trainer.state.max_duration is not None
trainer.state.max_duration = cast(Time[int], trainer.state.max_duration * 2)
trainer.fit()
| composer-dev | tests/callbacks/test_callbacks.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
from packaging import version
from torch.utils.data import DataLoader
from composer.callbacks import OptimizerMonitor
from composer.loggers import InMemoryLogger
from composer.optim import DecoupledAdamW
from composer.trainer import Trainer
from composer.utils import dist
from tests.common import device, world_size
from tests.common.datasets import RandomClassificationDataset
from tests.common.models import SimpleModel
@pytest.mark.parametrize('log_optimizer_metrics', [True, False])
def test_optimizer_monitor(log_optimizer_metrics: bool):
# Construct the callback
grad_monitor = OptimizerMonitor(log_optimizer_metrics=log_optimizer_metrics)
in_memory_logger = InMemoryLogger() # track the logged metrics in the in_memory_logger
model = SimpleModel()
# Construct the trainer and train
trainer = Trainer(
model=model,
callbacks=grad_monitor,
loggers=in_memory_logger,
train_dataloader=DataLoader(RandomClassificationDataset()),
optimizers=DecoupledAdamW(model.parameters()),
max_duration='3ba',
)
trainer.fit()
num_train_steps = int(trainer.state.timestamp.batch)
# Count the logged steps
grad_norm_calls = len(in_memory_logger.data['l2_norm/grad/global'])
layer_norm_calls = [len(calls) for (k, calls) in in_memory_logger.data.items() if 'l2_norm/grad' in k]
assert 'l2_norm/grad/module.2.weight' in in_memory_logger.data.keys()
if log_optimizer_metrics:
assert 'l2_norm/moment/module.2.weight' in in_memory_logger.data.keys()
assert 'cosine/moment_grad/module.2.weight' in in_memory_logger.data.keys()
assert 'l2_norm/second_moment_sqrt/module.2.weight' in in_memory_logger.data.keys()
assert 'l2_norm/update/module.2.weight' in in_memory_logger.data.keys()
assert 'cosine/update_grad/module.2.weight' in in_memory_logger.data.keys()
# Expected to log gradient norm once per step (total batch)
assert grad_norm_calls == num_train_steps
for num_calls in layer_norm_calls:
assert num_calls == num_train_steps
@device('gpu')
@world_size(1, 2)
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse('1.13.0'),
reason='requires PyTorch 1.13 or higher')
def test_fsdp_optimizer_monitor(device, world_size):
# Construct the callback
grad_monitor = OptimizerMonitor(log_optimizer_metrics=True)
in_memory_logger = InMemoryLogger() # track the logged metrics in the in_memory_logger
model = SimpleModel()
dataset = RandomClassificationDataset()
# Construct the trainer and train
trainer = Trainer(model=model,
callbacks=grad_monitor,
loggers=in_memory_logger,
train_dataloader=DataLoader(dataset, sampler=dist.get_sampler(dataset)),
optimizers=DecoupledAdamW(model.parameters()),
max_duration='3ba',
fsdp_config={
'sharding_strategy': 'FULL_SHARD',
'min_params': 10,
'cpu_offload': False,
'mixed_precision': 'PURE',
'backward_prefetch': 'BACKWARD_PRE',
'activation_checkpointing': False,
                          'activation_cpu_offload': False,
'verbose': False
})
trainer.fit()
num_train_steps = int(trainer.state.timestamp.batch)
# Count the logged steps
grad_norm_calls = len(in_memory_logger.data['l2_norm/grad/global'])
layer_norm_calls = [len(calls) for (k, calls) in in_memory_logger.data.items() if 'l2_norm/grad' in k]
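    # Under FSDP, parameters are flattened into per-module `flat_param` tensors, so metric keys use the wrapped module names.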
test_keys = [
'l2_norm/grad/module._fsdp_wrapped_module._fpw_module.4._fsdp_wrapped_module.flat_param',
'l2_norm/moment/module._fsdp_wrapped_module._fpw_module.4._fsdp_wrapped_module.flat_param',
'cosine/moment_grad/module._fsdp_wrapped_module._fpw_module.4._fsdp_wrapped_module.flat_param',
'l2_norm/second_moment_sqrt/module._fsdp_wrapped_module._fpw_module.4._fsdp_wrapped_module.flat_param',
'l2_norm/update/module._fsdp_wrapped_module._fpw_module.4._fsdp_wrapped_module.flat_param',
'cosine/update_grad/module._fsdp_wrapped_module._fpw_module.4._fsdp_wrapped_module.flat_param',
]
for key in test_keys:
assert key in in_memory_logger.data.keys()
# Expected to log gradient norm once per step (total batch)
assert grad_norm_calls == num_train_steps
for num_calls in layer_norm_calls:
assert num_calls == num_train_steps
| composer-dev | tests/callbacks/test_optimizer_monitor.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pytest
from torch.utils.data import DataLoader
from composer.callbacks import MemoryMonitor
from composer.loggers import InMemoryLogger
from composer.trainer import Trainer
from tests.common import RandomClassificationDataset, SimpleModel, device
@device('cpu', 'gpu')
def test_memory_monitor_warnings_on_cpu_models(device: str):
    # Warn if the user sets device=cpu, since the memory monitor only works on CUDA devices
del device # unused. always using cpu
with pytest.warns(UserWarning, match='The memory monitor only works on CUDA devices'):
Trainer(
model=SimpleModel(),
callbacks=MemoryMonitor(),
device='cpu',
train_dataloader=DataLoader(RandomClassificationDataset()),
max_duration='1ba',
)
@pytest.mark.gpu
def test_memory_monitor_gpu():
# Construct the trainer
memory_monitor = MemoryMonitor()
in_memory_logger = InMemoryLogger()
trainer = Trainer(
model=SimpleModel(),
callbacks=memory_monitor,
loggers=in_memory_logger,
train_dataloader=DataLoader(RandomClassificationDataset()),
max_duration='1ba',
)
trainer.fit()
num_memory_monitor_calls = len(in_memory_logger.data['memory/allocated_mem'])
assert num_memory_monitor_calls == int(trainer.state.timestamp.batch)
| composer-dev | tests/callbacks/test_memory_monitor.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import datetime
from unittest.mock import MagicMock, patch
import pytest
from composer import Timestamp
from composer.callbacks import HealthChecker
from composer.callbacks.health_checker import GPUUtilization
from composer.utils import dist
from tests.common import world_size
pynvml = pytest.importorskip('pynvml')
pytest.importorskip('slack_sdk')
class MockUtil:
def __init__(self, util):
self.gpu = util
@pytest.mark.gpu
@world_size(1, 2)
def test_gpu_utilization(world_size):
assert HealthChecker._is_available()
gpu_utilization_values = [
MockUtil(100),
MockUtil(10),
MockUtil(100),
MockUtil(100),
MockUtil(100),
MockUtil(100),
]
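    # One mocked utilization reading per device per sample; the single low value (10%) simulates an under-utilized GPU.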
with patch.multiple(pynvml,
nvmlDeviceGetUtilizationRates=MagicMock(side_effect=gpu_utilization_values),
nvmlDeviceGetCount=MagicMock(return_value=world_size)):
gpu_utilization = GPUUtilization()
gpu_utilization.sample()
gpu_utilization.sample()
gpu_utilization.sample()
_, alert = gpu_utilization.check()
should_alert = dist.get_local_rank() == 0 and world_size > 1
assert alert == should_alert
@pytest.mark.gpu
@world_size(1, 2)
def test_health_checker(world_size):
state = MagicMock()
state.run_name = 'pytest-mock-run-kwei73'
logger = MagicMock()
health_checker = HealthChecker(
sample_freq=1,
window_size=3,
wait=0,
)
gpu_utilization_values = [
MockUtil(100),
MockUtil(10),
MockUtil(100),
MockUtil(100),
MockUtil(100),
MockUtil(100),
]
with patch.multiple(pynvml,
nvmlDeviceGetUtilizationRates=MagicMock(side_effect=gpu_utilization_values),
nvmlDeviceGetCount=MagicMock(return_value=world_size)):
        # collect samples and run the checker
for seconds in [1, 2, 3]:
state.timestamp = Timestamp(total_wct=datetime.timedelta(seconds=seconds))
health_checker.after_train_batch(state, logger)
should_alert = dist.get_local_rank() == 0 and world_size > 1
assert health_checker.metrics[0].alerted == should_alert
def test_health_checker_sampling():
timestamp = Timestamp(total_wct=datetime.timedelta(seconds=0))
health_checker = HealthChecker(
sample_freq=1,
window_size=5,
wait=10,
)
config = [
(5, False), # before wait
(11, True),
(11.5, False), # below sample frequency
(12, True),
(20, True),
(11, False), # no time travel
]
for seconds, is_sample in config:
timestamp = Timestamp(total_wct=datetime.timedelta(seconds=seconds))
assert health_checker._sample(timestamp) == is_sample
| composer-dev | tests/callbacks/test_health_checker.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import collections.abc
import datetime
import pytest
from torch.utils.data import DataLoader
from composer.callbacks import SpeedMonitor
from composer.core import Time
from composer.loggers import InMemoryLogger
from composer.trainer import Trainer
from tests.common import RandomClassificationDataset, SimpleModel
def _assert_no_negative_values(logged_values):
for timestamp, v in logged_values:
del timestamp # unused
if isinstance(v, Time):
assert int(v) >= 0
elif isinstance(v, datetime.timedelta):
assert v.total_seconds() >= 0
else:
assert v >= 0
@pytest.mark.parametrize('flops_per_batch', [False, True])
def test_speed_monitor(flops_per_batch: bool):
# Construct the callbacks
speed_monitor = SpeedMonitor(window_size=2)
in_memory_logger = InMemoryLogger() # track the logged metrics in the in_memory_logger
model = SimpleModel()
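    # SpeedMonitor logs throughput/flops_per_sec only when the model provides a `flops_per_batch(batch)` callable.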
if flops_per_batch:
model.flops_per_batch = lambda batch: len(batch) * 100.0
# Construct the trainer and train
trainer = Trainer(
model=model,
callbacks=speed_monitor,
loggers=in_memory_logger,
train_dataloader=DataLoader(RandomClassificationDataset()),
eval_dataloader=DataLoader(RandomClassificationDataset()),
max_duration='1ep',
)
trainer.fit()
_assert_no_negative_values(in_memory_logger.data['wall_clock/train'])
_assert_no_negative_values(in_memory_logger.data['wall_clock/val'])
_assert_no_negative_values(in_memory_logger.data['wall_clock/total'])
_assert_no_negative_values(in_memory_logger.data['throughput/batches_per_sec'])
_assert_no_negative_values(in_memory_logger.data['throughput/samples_per_sec'])
_assert_no_negative_values(in_memory_logger.data['throughput/device/batches_per_sec'])
_assert_no_negative_values(in_memory_logger.data['throughput/device/samples_per_sec'])
if flops_per_batch:
_assert_no_negative_values(in_memory_logger.data['throughput/flops_per_sec'])
_assert_no_negative_values(in_memory_logger.data['throughput/device/flops_per_sec'])
assert isinstance(trainer.state.dataloader, collections.abc.Sized)
assert trainer.state.dataloader_label is not None
assert trainer.state.dataloader_len is not None
expected_step_calls = (trainer.state.dataloader_len - len(speed_monitor.history_samples) + 1) * int(
trainer.state.timestamp.epoch)
assert len(in_memory_logger.data['throughput/batches_per_sec']) == expected_step_calls
assert len(in_memory_logger.data['throughput/samples_per_sec']) == expected_step_calls
assert len(in_memory_logger.data['throughput/device/batches_per_sec']) == expected_step_calls
assert len(in_memory_logger.data['throughput/device/samples_per_sec']) == expected_step_calls
if flops_per_batch:
assert len(in_memory_logger.data['throughput/flops_per_sec']) == expected_step_calls
assert len(in_memory_logger.data['throughput/device/flops_per_sec']) == expected_step_calls
num_batches = int(trainer.state.timestamp.batch)
assert len(in_memory_logger.data['wall_clock/total']) == num_batches
assert len(in_memory_logger.data['wall_clock/train']) == num_batches
assert len(in_memory_logger.data['wall_clock/val']) == num_batches
| composer-dev | tests/callbacks/test_speed_monitor.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import os
from typing import Any, Dict, List, Type
import pytest
import composer.callbacks
import composer.loggers
import composer.profiler
from composer import Callback
from composer.callbacks import EarlyStopper, ImageVisualizer, MemoryMonitor, SpeedMonitor, ThresholdStopper
from composer.callbacks.export_for_inference import ExportForInferenceCallback
from composer.callbacks.mlperf import MLPerfCallback
from composer.loggers import (CometMLLogger, ConsoleLogger, LoggerDestination, MLFlowLogger, ProgressBarLogger,
RemoteUploaderDownloader, TensorboardLogger, WandBLogger)
from tests.common import get_module_subclasses
try:
import wandb
_WANDB_INSTALLED = True
del wandb # unused
except ImportError:
_WANDB_INSTALLED = False
try:
import tensorboard
_TENSORBOARD_INSTALLED = True
del tensorboard # unused
except ImportError:
_TENSORBOARD_INSTALLED = False
try:
import comet_ml
_COMETML_INSTALLED = True
os.environ['COMET_API_KEY']
del comet_ml # unused
except ImportError:
_COMETML_INSTALLED = False
# If COMET_API_KEY not set.
except KeyError:
_COMETML_INSTALLED = False
try:
import mlperf_logging
_MLPERF_INSTALLED = True
del mlperf_logging
except ImportError:
_MLPERF_INSTALLED = False
try:
import mlflow
_MLFLOW_INSTALLED = True
del mlflow
except ImportError:
_MLFLOW_INSTALLED = False
try:
import libcloud
_LIBCLOUD_INSTALLED = True
del libcloud # unused
except ImportError:
_LIBCLOUD_INSTALLED = False
_callback_kwargs: Dict[Type[Callback], Dict[str, Any]] = {
RemoteUploaderDownloader: {
'bucket_uri': 'libcloud://.',
'backend_kwargs': {
'provider': 'local',
'container': '.',
'provider_kwargs': {
'key': '.',
},
},
'use_procs': False,
'num_concurrent_uploads': 1,
},
ThresholdStopper: {
'monitor': 'MulticlassAccuracy',
'dataloader_label': 'train',
'threshold': 0.99,
},
EarlyStopper: {
'monitor': 'MulticlassAccuracy',
'dataloader_label': 'train',
},
ExportForInferenceCallback: {
'save_format': 'torchscript',
'save_path': '/tmp/model.pth',
},
MLPerfCallback: {
'root_folder': '.',
'index': 0,
},
SpeedMonitor: {
'window_size': 1,
},
}
_callback_marks: Dict[Type[Callback], List[pytest.MarkDecorator]] = {
RemoteUploaderDownloader: [
pytest.mark.filterwarnings(
# post_close might not be called if being used outside of the trainer
r'ignore:Implicitly cleaning up:ResourceWarning'),
pytest.mark.skipif(not _LIBCLOUD_INSTALLED, reason='Libcloud is optional')
],
MemoryMonitor: [
pytest.mark.filterwarnings(
r'ignore:The memory monitor only works on CUDA devices, but the model is on cpu:UserWarning')
],
MLPerfCallback: [pytest.mark.skipif(not _MLPERF_INSTALLED, reason='MLPerf is optional')],
WandBLogger: [
pytest.mark.filterwarnings(r'ignore:unclosed file:ResourceWarning'),
pytest.mark.skipif(not _WANDB_INSTALLED, reason='Wandb is optional'),
],
ProgressBarLogger: [
pytest.mark.filterwarnings(
r'ignore:Specifying the ProgressBarLogger via `loggers` is not recommended as.*:Warning')
],
ConsoleLogger: [
pytest.mark.filterwarnings(r'ignore:Specifying the ConsoleLogger via `loggers` is not recommended as.*:Warning')
],
CometMLLogger: [pytest.mark.skipif(not _COMETML_INSTALLED, reason='comet_ml is optional'),],
TensorboardLogger: [pytest.mark.skipif(not _TENSORBOARD_INSTALLED, reason='Tensorboard is optional'),],
ImageVisualizer: [pytest.mark.skipif(not _WANDB_INSTALLED, reason='Wandb is optional')],
MLFlowLogger: [pytest.mark.skipif(not _MLFLOW_INSTALLED, reason='mlflow is optional'),],
}
def get_cb_kwargs(impl: Type[Callback]):
return _callback_kwargs.get(impl, {})
def _to_pytest_param(impl):
if impl not in _callback_marks:
return pytest.param(impl)
else:
marks = _callback_marks[impl]
return pytest.param(impl, marks=marks)
def get_cbs_and_marks(callbacks: bool = False, loggers: bool = False, profilers: bool = False):
"""Returns a list of :class:`pytest.mark.param` objects for all :class:`.Callback`.
The callbacks are correctly annotated with ``skipif`` marks for optional dependencies
    and ``filterwarning`` marks for any warnings that might be emitted and are safe to ignore.
This function is meant to be used like this::
import pytest
from tests.callbacks.callback_settings import get_cbs_and_marks, get_cb_kwargs
@pytest.mark.parametrize("cb_cls",get_cbs_and_marks(callbacks=True, loggers=True, profilers=True))
def test_something(cb_cls: Type[Callback]):
cb_kwargs = get_cb_kwargs(cb_cls)
cb = cb_cls(**cb_kwargs)
assert isinstance(cb, Callback)
"""
implementations = []
if callbacks:
implementations.extend(get_module_subclasses(composer.callbacks, Callback))
if loggers:
implementations.extend(get_module_subclasses(composer.loggers, LoggerDestination))
if profilers:
implementations.extend(get_module_subclasses(composer.profiler, Callback))
ans = [_to_pytest_param(impl) for impl in implementations]
if not len(ans):
raise ValueError('callbacks, loggers, or profilers must be True')
return ans
def get_cb_hparams_and_marks():
"""Returns a list of :class:`pytest.mark.param` objects for all ``callback_registry``
    and ``logger_registry`` entries.
    The callbacks are correctly annotated with ``skipif`` marks for optional dependencies
    and ``filterwarning`` marks for any warnings that might be emitted and are safe to ignore.
This function is meant to be used like this::
import pytest
from tests.common.hparams import construct_from_yaml
from tests.callbacks.callback_settings import get_cb_hparams_and_marks, get_cb_kwargs
@pytest.mark.parametrize("constructor",get_cb_hparams_and_marks())
def test_something(constructor: Callable, yaml_dict: Dict[str, Any]):
yaml_dict = get_cb_kwargs(constructor)
construct_from_yaml(constructor, yaml_dict=yaml_dict)
"""
# TODO: (Hanlin) populate this
implementations = []
ans = [_to_pytest_param(impl) for impl in implementations]
return ans
| composer-dev | tests/callbacks/callback_settings.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
| composer-dev | tests/callbacks/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
# ignore third-party missing imports due to the mlperf logger not pip-installable
# pyright: reportMissingImports=none
import logging
from unittest.mock import Mock
import numpy as np
import pytest
import torch
from torch.utils.data import DataLoader
from torchmetrics.classification import BinaryAccuracy
from composer import State, Trainer
from composer.callbacks import MLPerfCallback
from composer.utils import dist
from tests.common import RandomClassificationDataset, SimpleModel
from tests.common.markers import device, world_size
def rank_zero() -> bool:
return dist.get_global_rank() == 0
@pytest.fixture(autouse=True)
def importor_skip_mlperf_logging():
pytest.importorskip('mlperf_logging')
# MLPerf requires a different number of results
# depending on the benchmark
NUM_TRIALS = {
'resnet': 5,
'bert': 10,
}
class MockMLLogger:
"""Mocks the MLPerf Logger interface."""
def __init__(self) -> None:
self.logs = []
self.logger = Mock()
def event(self, key, metadata, value=None):
self.logs.append({'key': key, 'value': value, 'metadata': metadata})
class TestMLPerfCallbackEvents:
@pytest.fixture
def mlperf_callback(self, monkeypatch, tmp_path) -> MLPerfCallback:
"""Returns a callback with the MockMLLogger patched."""
callback = MLPerfCallback(tmp_path, 0)
monkeypatch.setattr(callback, 'mllogger', MockMLLogger())
return callback
@pytest.fixture
def mock_state(self):
"""Mocks a state at epoch 1 with Accuracy 0.99."""
acc = BinaryAccuracy()
eval_metrics = {'eval': {'BinaryAccuracy': acc}}
acc.update(
torch.tensor([1, 1], dtype=torch.int8),
torch.tensor([1, 1], dtype=torch.int8),
)
state = Mock()
state.eval_metrics = eval_metrics
state.timestamp.epoch.value = 1
return state
def test_eval_start(self, mlperf_callback, mock_state):
mlperf_callback.eval_start(mock_state, Mock())
if not rank_zero():
assert mlperf_callback.mllogger.logs == []
return
assert mlperf_callback.mllogger.logs == [{'key': 'eval_start', 'value': None, 'metadata': {'epoch_num': 1}}]
def test_eval_end(self, mlperf_callback, mock_state):
mlperf_callback.eval_end(mock_state, Mock())
if not rank_zero():
assert mlperf_callback.success == False
assert mlperf_callback.mllogger.logs == []
return
assert mlperf_callback.success == True
assert mlperf_callback.mllogger.logs[-1] == {
'key': 'run_stop',
'value': None,
'metadata': {
'status': 'success'
}
}
@world_size(1, 2)
@device('cpu', 'gpu')
@pytest.mark.parametrize('benchmark', ['resnet', 'bert'])
class TestWithMLPerfChecker:
"""Ensures that the logs created by the MLPerfCallback pass the official package checker."""
def test_mlperf_callback_passes(self, tmp_path, monkeypatch, benchmark, world_size, device):
def mock_accuracy(self, state: State):
if state.timestamp.epoch >= 2:
return 0.99
else:
return 0.01
monkeypatch.setattr(MLPerfCallback, '_get_accuracy', mock_accuracy)
self.generate_submission(tmp_path, device, benchmark)
if rank_zero():
self.run_mlperf_checker(tmp_path, monkeypatch)
def test_mlperf_callback_fails(self, tmp_path, monkeypatch, benchmark, world_size, device):
def mock_accuracy(self, state: State):
return 0.01
monkeypatch.setattr(MLPerfCallback, '_get_accuracy', mock_accuracy)
self.generate_submission(tmp_path, device, benchmark)
with pytest.raises(ValueError, match='MLPerf checker failed'):
self.run_mlperf_checker(tmp_path, monkeypatch)
def generate_submission(self, directory, device, benchmark):
"""Generates submission files by training the benchark n=5 times."""
for run in range(NUM_TRIALS[benchmark]):
mlperf_callback = MLPerfCallback(
benchmark=benchmark,
root_folder=directory,
index=run,
cache_clear_cmd='sleep 0.1',
)
trainer = Trainer(
model=SimpleModel(),
train_dataloader=DataLoader(
dataset=RandomClassificationDataset(),
batch_size=4,
shuffle=False,
),
eval_dataloader=DataLoader(
dataset=RandomClassificationDataset(),
shuffle=False,
),
max_duration='3ep',
deterministic_mode=True,
progress_bar=False,
log_to_console=False,
loggers=[],
device=device,
callbacks=[mlperf_callback],
seed=np.random.randint(low=2048),
)
trainer.fit()
def run_mlperf_checker(self, directory, monkeypatch):
"""Runs the MLPerf package checker and fails on any errors."""
# monkeypatch the logging so that logging.error raises Exception
def fail_on_error(msg, *args, **kwargs):
print(msg.format(*args))
raise ValueError('MLPerf checker failed, see logs.')
monkeypatch.setattr(logging, 'error', fail_on_error)
from mlperf_logging.package_checker.package_checker import check_training_package
check_training_package(
folder=directory,
usage='training',
ruleset='2.1.0',
werror=True,
quiet=False,
rcp_bypass=False,
rcp_bert_train_samples=False,
log_output='package_checker.log',
)
| composer-dev | tests/callbacks/test_mlperf_callback.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import os
from typing import Type
import pytest
from torch.utils.data import DataLoader
from composer.core import Callback
from composer.loggers import ConsoleLogger, LoggerDestination, ProgressBarLogger
from composer.loggers.remote_uploader_downloader import RemoteUploaderDownloader
from composer.trainer import Trainer
from tests.callbacks.callback_settings import get_cb_kwargs, get_cbs_and_marks
from tests.common import RandomClassificationDataset, SimpleModel
@pytest.mark.parametrize('logger_cls', get_cbs_and_marks(loggers=True))
@pytest.mark.parametrize('callback_cls', get_cbs_and_marks(callbacks=True))
def test_loggers_on_callbacks(logger_cls: Type[LoggerDestination], callback_cls: Type[Callback]):
if logger_cls in [ProgressBarLogger, ConsoleLogger]:
pytest.skip()
logger_kwargs = get_cb_kwargs(logger_cls)
if issubclass(logger_cls, RemoteUploaderDownloader):
# Ensure that the remote directory does not conflict with any directory used by callbacks
logger_kwargs['backend_kwargs']['provider_kwargs']['key'] = './remote'
os.makedirs(logger_kwargs['backend_kwargs']['provider_kwargs']['key'], exist_ok=True)
logger = logger_cls(**logger_kwargs)
callback_kwargs = get_cb_kwargs(callback_cls)
callback = callback_cls(**callback_kwargs)
trainer = Trainer(
model=SimpleModel(),
train_dataloader=DataLoader(RandomClassificationDataset()),
train_subset_num_batches=2,
max_duration='1ep',
callbacks=callback,
loggers=logger,
)
trainer.fit()
| composer-dev | tests/callbacks/test_loggers_across_callbacks.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import datetime
import time
import pytest
from torch.utils.data import DataLoader
from composer.callbacks import RuntimeEstimator
from composer.core import Time
from composer.loggers import InMemoryLogger
from composer.trainer import Trainer
from tests.common import RandomClassificationDataset, SimpleModel
def _assert_no_negative_values(logged_values):
for timestamp, v in logged_values:
del timestamp # unused
if isinstance(v, Time):
assert int(v) >= 0
elif isinstance(v, datetime.timedelta):
assert v.total_seconds() >= 0
else:
assert v >= 0
@pytest.mark.parametrize('time_unit', ['seconds', 'minutes', 'hours', 'days'])
def test_runtime_estimator(time_unit: str):
# Construct the callbacks
skip_batches = 1
runtime_estimator = RuntimeEstimator(skip_batches=skip_batches, time_unit=time_unit)
in_memory_logger = InMemoryLogger() # track the logged metrics in the in_memory_logger
simple_model = SimpleModel()
original_fwd = simple_model.forward
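    # Wrap forward with a small sleep so each batch takes a measurable, roughly constant amount of wall-clock time.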
def new_fwd(x):
time.sleep(0.02)
return original_fwd(x)
simple_model.forward = new_fwd # type: ignore
# Construct the trainer and train
trainer = Trainer(
model=simple_model,
callbacks=runtime_estimator,
loggers=in_memory_logger,
train_dataloader=DataLoader(RandomClassificationDataset()),
eval_dataloader=DataLoader(RandomClassificationDataset()),
max_duration='2ep',
eval_interval='1ep',
train_subset_num_batches=5,
eval_subset_num_batches=5,
)
trainer.fit()
wall_clock_remaining_calls = len(in_memory_logger.data['wall_clock/remaining_estimate'])
_assert_no_negative_values(in_memory_logger.data['wall_clock/remaining_estimate'])
expected_calls = int(trainer.state.timestamp.batch) - skip_batches
assert wall_clock_remaining_calls == expected_calls
ba_2_estimate = in_memory_logger.data['wall_clock/remaining_estimate'][1][-1]
# Should be ~0.2 seconds
if time_unit == 'seconds':
assert ba_2_estimate < 1
assert ba_2_estimate > 0.1
elif time_unit == 'minutes':
assert ba_2_estimate < 1 / 60
assert ba_2_estimate > 0.1 / 60
elif time_unit == 'hours':
assert ba_2_estimate < 1 / 60 / 60
assert ba_2_estimate > 0.1 / 60 / 60
elif time_unit == 'days':
assert ba_2_estimate < 1 / 60 / 60 / 24
assert ba_2_estimate > 0.1 / 60 / 60 / 24
| composer-dev | tests/callbacks/test_runtime_estimator.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from typing import List
import pytest
from torch.utils.data import DataLoader
from torchmetrics.classification import MulticlassAccuracy
from composer import Trainer
from composer.callbacks import ThresholdStopper
from composer.core.time import TimeUnit
from composer.devices import DeviceCPU, DeviceGPU
from tests.common import RandomClassificationDataset, SimpleModel, device
from tests.metrics import MetricSetterCallback
@device('cpu', 'gpu')
@pytest.mark.parametrize('metric_sequence', [[0.1, 0.2, 0.4, 0.5, 0.6, 0.7, 0.8], [0.6, 0.7]])
@pytest.mark.parametrize('unit', [TimeUnit.EPOCH, TimeUnit.BATCH])
def test_threshold_stopper_eval(metric_sequence: List[float], unit: TimeUnit, device: str):
metric_threshold = 0.65
if unit == TimeUnit.EPOCH:
dataloader_label = 'eval'
stop_on_batch = False
else:
dataloader_label = 'train'
stop_on_batch = True
test_device = DeviceGPU() if device == 'gpu' else DeviceCPU()
tstop = ThresholdStopper(
'MulticlassAccuracy',
dataloader_label,
metric_threshold,
comp=None,
stop_on_batch=stop_on_batch,
)
test_metric_setter = MetricSetterCallback(
'MulticlassAccuracy',
dataloader_label,
MulticlassAccuracy,
metric_sequence,
unit,
test_device,
metric_args={
'num_classes': 2,
'average': 'micro'
},
)
trainer = Trainer(
model=SimpleModel(),
train_dataloader=DataLoader(RandomClassificationDataset()),
eval_dataloader=DataLoader(RandomClassificationDataset()),
train_subset_num_batches=1,
eval_subset_num_batches=1,
device=test_device,
max_duration='30ep',
callbacks=[test_metric_setter, tstop],
)
trainer.fit()
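    # Training should halt once the monitored metric reaches the threshold, so the elapsed time
    # equals the number of steps whose metric value was still below the threshold.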
count_before_threshold = 0
for metric in metric_sequence:
if metric_threshold > metric:
count_before_threshold += 1
assert trainer.state.timestamp.get(unit).value == count_before_threshold
| composer-dev | tests/callbacks/test_threshold_stopper.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from typing import List
import pytest
from torch.utils.data import DataLoader
from torchmetrics.classification import MulticlassAccuracy
from composer import Trainer
from composer.callbacks.early_stopper import EarlyStopper
from composer.core.time import Time, TimeUnit
from composer.devices import DeviceCPU, DeviceGPU
from tests.common import RandomClassificationDataset, SimpleModel, device
from tests.metrics import MetricSetterCallback
@device('cpu', 'gpu')
@pytest.mark.parametrize('metric_sequence', [[0.1, 0.2, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1], [0.1, 0.2]])
@pytest.mark.parametrize('unit', [TimeUnit.EPOCH, TimeUnit.BATCH])
def test_early_stopper(metric_sequence: List[float], unit: TimeUnit, device: str):
if unit == TimeUnit.EPOCH:
dataloader_label = 'eval'
else:
dataloader_label = 'train'
test_device = DeviceGPU() if device == 'gpu' else DeviceCPU()
early_stopper = EarlyStopper('MulticlassAccuracy', dataloader_label, patience=Time(3, unit))
test_metric_setter = MetricSetterCallback(
'MulticlassAccuracy',
dataloader_label,
MulticlassAccuracy,
metric_sequence,
unit,
test_device,
metric_args={
'num_classes': 2,
'average': 'micro'
},
)
trainer = Trainer(
model=SimpleModel(num_features=5),
train_dataloader=DataLoader(
RandomClassificationDataset(shape=(5, 1, 1)),
batch_size=4,
),
eval_dataloader=DataLoader(
RandomClassificationDataset(shape=(5, 1, 1)),
batch_size=4,
),
max_duration='30ep',
callbacks=[test_metric_setter, early_stopper],
)
trainer.fit()
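    # The stopper waits `patience` additional units without improvement after the metric sequence
    # is exhausted, so the total elapsed time is len(metric_sequence) + patience.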
assert trainer.state.timestamp.get(unit).value == len(metric_sequence) + int(early_stopper.patience)
| composer-dev | tests/callbacks/test_early_stopper.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import contextlib
import os
from pathlib import Path
import pytest
import transformers
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from composer import Evaluator
from composer.datasets.in_context_learning_evaluation import (_get_fewshot_sample_idxs, _make_padded_input,
get_icl_task_dataloader)
from composer.loggers import InMemoryLogger
from composer.metrics import (InContextLearningLMAccuracy, InContextLearningMultipleChoiceAccuracy,
InContextLearningQAAccuracy)
from composer.models import HuggingFaceModel
from composer.trainer import Trainer
from composer.utils import dist, reproducibility
from tests.common import device, world_size
def test_fewshot_sample_idxs():
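    # Few-shot example indices are drawn from the dataset excluding the evaluated sample itself,
    # and requests larger than the remaining dataset are capped at dataset_size - 1 examples.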
fewshot_idxs = _get_fewshot_sample_idxs(dataset_size=5, num_fewshot=4, sample_idx=4)
assert fewshot_idxs == set([0, 1, 2, 3])
fewshot_idxs = _get_fewshot_sample_idxs(dataset_size=5, num_fewshot=5, sample_idx=4)
assert fewshot_idxs == set([0, 1, 2, 3])
fewshot_idxs = _get_fewshot_sample_idxs(dataset_size=5, num_fewshot=500, sample_idx=4)
assert fewshot_idxs == set([0, 1, 2, 3])
fewshot_idxs = _get_fewshot_sample_idxs(dataset_size=10, num_fewshot=7, sample_idx=4)
assert len(fewshot_idxs) == 7 and 4 not in fewshot_idxs
def test_batch_padding_logic(tiny_gpt2_tokenizer):
continuation = tiny_gpt2_tokenizer(' dog' * 2000)['input_ids']
context = tiny_gpt2_tokenizer(' cat' * 2000)['input_ids']
_, continuation_spans = _make_padded_input(context, continuation, 2048, tiny_gpt2_tokenizer.eos_token_id)
# the context (of len 2000) gets clipped to len 48 so that the whole continuation can fit
assert continuation_spans[0] == 48 and continuation_spans[-1] == 2047
@pytest.mark.parametrize('padding_side', ['left', 'right', 'middle'])
def test_make_padding(tiny_gpt2_tokenizer, padding_side):
context = tiny_gpt2_tokenizer(' cat' * 2000)['input_ids']
padding_id = tiny_gpt2_tokenizer.eos_token_id
error_context = contextlib.nullcontext() if padding_side in {'left', 'right'} else pytest.raises(ValueError)
with error_context:
input_ids, _ = _make_padded_input(context, [], 2048, padding_id, padding_side=padding_side)
if padding_side == 'left':
assert input_ids[0] == tiny_gpt2_tokenizer.eos_token_id
assert input_ids[48:].tolist() == context
elif padding_side == 'right':
assert input_ids[-1] == tiny_gpt2_tokenizer.eos_token_id
assert input_ids[:-48].tolist() == context
@pytest.mark.parametrize('dataset_uri', ['lambada_small.jsonl'])
def test_lm_task_dataloader(dataset_uri, tiny_gpt2_tokenizer, tmp_path):
pytest.importorskip('datasets')
local_data = os.path.join(os.path.dirname(__file__), 'local_data')
tokenizer = tiny_gpt2_tokenizer
dataset_uri = f'{local_data}/{dataset_uri}'
batch_size = 2
seqlen = 2048
dl = get_icl_task_dataloader('language_modeling',
dataset_uri,
tokenizer,
batch_size,
max_seq_len=seqlen,
pad_tok_id=tokenizer.eos_token_id,
num_fewshot=0,
prompt_string='',
example_delimiter='\n',
continuation_delimiter='',
destination_path=str(tmp_path / 'icl.jsonl'))
assert isinstance(dl.dataloader, DataLoader) # pyright
batch = next(dl.dataloader._get_iterator())
assert 'input_ids' in batch
assert tuple(batch['input_ids'].shape) == (batch_size, seqlen)
assert 'attention_mask' in batch
assert tuple(batch['attention_mask'].shape) == (batch_size, seqlen)
assert 'continuation_indices' in batch
assert isinstance(batch['continuation_indices'], list) and len(batch['continuation_indices']) == batch_size
assert 'mode' in batch
assert batch['mode'] == 'icl_task'
min_idx = min(batch['continuation_indices'][0]).item()
max_idx = max(batch['continuation_indices'][0]).item()
assert tokenizer.decode(batch['input_ids'][0][min_idx:max_idx + 1]) == ' glen'
@pytest.mark.parametrize('dataset_uri', ['lambada_small.jsonl'])
@pytest.mark.parametrize('num_fewshot', [0, 1])
def test_lm_task_dataloader_opt_tokenizer(dataset_uri, num_fewshot, tmp_path):
pytest.importorskip('datasets')
local_data = os.path.join(os.path.dirname(__file__), 'local_data')
tokenizer = AutoTokenizer.from_pretrained('facebook/opt-125m', use_fast=False)
dataset_uri = f'{local_data}/{dataset_uri}'
batch_size = 2
seqlen = 2048
dl = get_icl_task_dataloader('language_modeling',
dataset_uri,
tokenizer,
batch_size,
max_seq_len=seqlen,
pad_tok_id=tokenizer.eos_token_id,
num_fewshot=num_fewshot,
prompt_string='',
example_delimiter='\n',
continuation_delimiter='',
destination_path=str(tmp_path / 'icl.jsonl'))
assert isinstance(dl.dataloader, DataLoader) # pyright
batch = next(dl.dataloader._get_iterator())
assert 'input_ids' in batch
assert tuple(batch['input_ids'].shape) == (batch_size, seqlen)
assert 'attention_mask' in batch
assert tuple(batch['attention_mask'].shape) == (batch_size, seqlen)
assert 'continuation_indices' in batch
assert isinstance(batch['continuation_indices'], list) and len(batch['continuation_indices']) == batch_size
assert 'mode' in batch
assert batch['mode'] == 'icl_task'
min_idx = min(batch['continuation_indices'][0]).item()
max_idx = max(batch['continuation_indices'][0]).item()
assert tokenizer.decode(batch['input_ids'][0][min_idx:max_idx + 1]) == ' glen'
assert tokenizer.decode(batch['input_ids'][0][0:min_idx]).startswith('</s>')
assert tokenizer.decode(batch['input_ids'][0][0:min_idx]).count('</s>') == 1
@pytest.mark.parametrize('dataset_uri', ['piqa_small.jsonl'])
@pytest.mark.parametrize('num_fewshot', [0, 1])
def test_mc_task_dataloader_opt_tokenizer(dataset_uri, num_fewshot, tmp_path):
pytest.importorskip('datasets')
local_data = os.path.join(os.path.dirname(__file__), 'local_data')
tokenizer = AutoTokenizer.from_pretrained('facebook/opt-125m', use_fast=False)
dataset_uri = f'{local_data}/{dataset_uri}'
batch_size = 2
seqlen = 2048
dl = get_icl_task_dataloader('multiple_choice',
dataset_uri,
tokenizer,
batch_size,
max_seq_len=seqlen,
pad_tok_id=tokenizer.eos_token_id,
num_fewshot=num_fewshot,
prompt_string='',
example_delimiter='\n',
continuation_delimiter=': ',
destination_path=str(tmp_path / 'icl.jsonl'))
assert isinstance(dl.dataloader, DataLoader) # pyright
batch = next(dl.dataloader._get_iterator())
choices_per_question = 2
assert 'input_ids' in batch
assert tuple(batch['input_ids'].shape) == (batch_size, seqlen)
assert 'attention_mask' in batch
assert tuple(batch['attention_mask'].shape) == (batch_size, seqlen)
assert 'continuation_indices' in batch
assert isinstance(batch['continuation_indices'], list) and len(batch['continuation_indices']) == batch_size
assert 'mode' in batch
assert batch['mode'] == 'icl_task'
assert 'gold_indices' in batch
assert isinstance(batch['gold_indices'], list) and len(batch['gold_indices']) == batch_size // choices_per_question
assert 'choice_groupings' in batch
assert isinstance(batch['choice_groupings'], list) and len(
batch['choice_groupings']) == batch_size // choices_per_question
min_idx = min(batch['continuation_indices'][0]).item()
max_idx = max(batch['continuation_indices'][0]).item()
assert tokenizer.decode(batch['input_ids'][0][min_idx:max_idx + 1]) == ': Pour it onto a plate'
assert tokenizer.decode(batch['input_ids'][0][0:min_idx]).startswith('</s>')
assert tokenizer.decode(batch['input_ids'][0][0:min_idx]).count('</s>') == 1
@pytest.mark.parametrize('dataset_uri', ['triviaqa_small.jsonl'])
@pytest.mark.parametrize('num_fewshot', [0, 1, 2])
@pytest.mark.parametrize('prompt_string', ['I am a prompt', ''])
def test_qa_task_dataloader(dataset_uri, tiny_gpt2_tokenizer, tmp_path, num_fewshot, prompt_string):
pytest.importorskip('datasets')
local_data = os.path.join(os.path.dirname(__file__), 'local_data')
tokenizer = tiny_gpt2_tokenizer
dataset_uri = f'{local_data}/{dataset_uri}'
batch_size = 2
seqlen = 2048
# empirical number from the small test dataset
maximum_answer_length = 9
dl = get_icl_task_dataloader('question_answering',
dataset_uri,
tokenizer,
batch_size,
max_seq_len=seqlen,
pad_tok_id=tokenizer.eos_token_id,
num_fewshot=num_fewshot,
prompt_string=prompt_string,
example_delimiter='\n',
question_prelimiter='Q: ',
continuation_delimiter='\nA:',
destination_path=str(tmp_path / f'icl_{num_fewshot}.jsonl'))
assert isinstance(dl.dataloader, DataLoader) # pyright
batch = next(dl.dataloader._get_iterator())
assert tuple(batch['input_ids'].shape) == (batch_size, seqlen - maximum_answer_length)
assert tuple(batch['attention_mask'].shape) == (batch_size, seqlen - maximum_answer_length)
assert batch['mode'] == 'generate'
# the maximum generation length from the small test data
assert batch['generation_length'] == maximum_answer_length
assert all(item[0] == tokenizer.eos_token_id for item in batch['input_ids'])
decoded_batch = tokenizer.batch_decode(batch['input_ids'])
assert all([item.count('Q: ') == num_fewshot + 1 for item in decoded_batch])
assert all([item.count('\nA:') == num_fewshot + 1 for item in decoded_batch])
if len(prompt_string) > 0:
assert all([item.count('I am a prompt') == 1 for item in decoded_batch])
assert batch['labels'] == [['David Seville'], ['Scorpio', 'Skorpio']]
assert decoded_batch[0].endswith('Q: Who was the man behind The Chipmunks?\nA:')
assert decoded_batch[1].endswith('Q: What star sign is Jamie Lee Curtis?\nA:')
@pytest.mark.parametrize('dataset_uri', ['piqa_small.jsonl'])
def test_mc_task_dataloader(dataset_uri, tiny_gpt2_tokenizer, tmp_path):
pytest.importorskip('datasets')
local_data = os.path.join(os.path.dirname(__file__), 'local_data')
tokenizer = tiny_gpt2_tokenizer
dataset_uri = f'{local_data}/{dataset_uri}'
batch_size = 2
seqlen = 2048
dl = get_icl_task_dataloader('multiple_choice',
dataset_uri,
tokenizer,
batch_size,
max_seq_len=seqlen,
pad_tok_id=tokenizer.eos_token_id,
num_fewshot=1,
prompt_string='',
example_delimiter='\n',
continuation_delimiter=': ',
destination_path=str(tmp_path / 'icl.jsonl'))
assert isinstance(dl.dataloader, DataLoader) # pyright
batch = next(dl.dataloader._get_iterator())
choices_per_question = 2
assert 'input_ids' in batch
assert tuple(batch['input_ids'].shape) == (batch_size, seqlen)
assert 'attention_mask' in batch
assert tuple(batch['attention_mask'].shape) == (batch_size, seqlen)
assert 'continuation_indices' in batch
assert isinstance(batch['continuation_indices'], list) and len(batch['continuation_indices']) == batch_size
assert 'mode' in batch
assert batch['mode'] == 'icl_task'
assert 'gold_indices' in batch
assert isinstance(batch['gold_indices'], list) and len(batch['gold_indices']) == batch_size // choices_per_question
assert 'choice_groupings' in batch
assert isinstance(batch['choice_groupings'], list) and len(
batch['choice_groupings']) == batch_size // choices_per_question
min_idx = min(batch['continuation_indices'][0]).item()
max_idx = max(batch['continuation_indices'][0]).item()
assert tokenizer.decode(batch['input_ids'][0][min_idx:max_idx + 1]) == ': Pour it onto a plate'
@pytest.mark.parametrize('dataset_uri', ['lambada_small.jsonl'])
@pytest.mark.parametrize('num_fewshot', [0, 5])
@device('gpu')
def test_lm_task_evaluation(device, dataset_uri, num_fewshot, tiny_gpt2_tokenizer, tmp_path):
pytest.importorskip('datasets')
in_memory_logger = InMemoryLogger() # track the logged metrics in the in_memory_logger
local_data = os.path.join(os.path.dirname(__file__), 'local_data')
dataset_uri = f'{local_data}/{dataset_uri}'
tokenizer = tiny_gpt2_tokenizer
dl = get_icl_task_dataloader(
'language_modeling',
dataset_uri,
tokenizer,
2,
max_seq_len=2048,
pad_tok_id=tokenizer.eos_token_id,
num_fewshot=num_fewshot,
prompt_string='',
example_delimiter='\n',
continuation_delimiter='',
destination_path=str(tmp_path / 'icl.jsonl'),
)
evaluator = Evaluator(label='lambada', dataloader=dl, metric_names=['InContextLearningLMAccuracy'])
config = transformers.AutoConfig.from_pretrained('EleutherAI/gpt-neo-125M')
model = transformers.AutoModelForCausalLM.from_config(config)
model = HuggingFaceModel(
model=model,
tokenizer=None,
eval_metrics=[InContextLearningLMAccuracy()],
use_logits=True,
)
trainer = Trainer(model=model, max_duration='1ep', loggers=in_memory_logger)
trainer.eval(eval_dataloader=evaluator, subset_num_batches=2)
assert 'metrics/lambada/InContextLearningLMAccuracy' in in_memory_logger.data.keys()
assert in_memory_logger.data['metrics/lambada/InContextLearningLMAccuracy'][0][1].item() == 0
@pytest.mark.parametrize('dataset_uri', ['piqa_small.jsonl', 'hellaswag_small.jsonl'])
@device('gpu')
@pytest.mark.parametrize('num_fewshot', [0, 5])
def test_mc_task_evaluation(device, num_fewshot, dataset_uri, tiny_gpt2_tokenizer, tmp_path, tiny_gpt2_model):
pytest.importorskip('datasets')
in_memory_logger = InMemoryLogger() # track the logged metrics in the in_memory_logger
local_data = os.path.join(os.path.dirname(__file__), 'local_data')
dataset_uri = f'{local_data}/{dataset_uri}'
tokenizer = tiny_gpt2_tokenizer
# seed because the fewshot selection is currently unseeded
reproducibility.seed_all(1234)
dl = get_icl_task_dataloader(
'multiple_choice',
dataset_uri,
tokenizer,
8,
max_seq_len=1024,
pad_tok_id=tokenizer.eos_token_id,
num_fewshot=num_fewshot,
prompt_string='',
example_delimiter='\n',
continuation_delimiter=': ',
destination_path=str(tmp_path / 'icl.jsonl'),
)
evaluator = Evaluator(label='lambada', dataloader=dl, metric_names=['InContextLearningMultipleChoiceAccuracy'])
model = HuggingFaceModel(
model=tiny_gpt2_model,
tokenizer=None,
eval_metrics=[InContextLearningMultipleChoiceAccuracy()],
use_logits=True,
)
trainer = Trainer(model=model, max_duration='1ba', loggers=in_memory_logger)
trainer.eval(eval_dataloader=evaluator, subset_num_batches=2)
assert 'metrics/lambada/InContextLearningMultipleChoiceAccuracy' in in_memory_logger.data.keys()
assert in_memory_logger.data['metrics/lambada/InContextLearningMultipleChoiceAccuracy'][0][1].item() > 0
@pytest.mark.parametrize('dataset_uri', ['triviaqa_small.jsonl'])
@device('gpu')
@world_size(1, 2)
@pytest.mark.parametrize('num_fewshot', [0, 5])
def test_qa_task_evaluation(device, world_size, num_fewshot, dataset_uri, tiny_gpt2_tokenizer, tiny_gpt2_model,
tmp_path):
pytest.importorskip('datasets')
in_memory_logger = InMemoryLogger() # track the logged metrics in the in_memory_logger
local_data = os.path.join(os.path.dirname(__file__), 'local_data')
dataset_uri = f'{local_data}/{dataset_uri}'
tokenizer = tiny_gpt2_tokenizer
tmp_path_to_broadcast = str(os.path.abspath(tmp_path))
gathered_paths = dist.all_gather_object(tmp_path_to_broadcast)
dl = get_icl_task_dataloader(
'question_answering',
dataset_uri,
tokenizer,
2,
max_seq_len=1024,
pad_tok_id=tokenizer.eos_token_id,
num_fewshot=num_fewshot,
prompt_string='',
example_delimiter='\n',
continuation_delimiter=': ',
destination_path=str(Path(gathered_paths[0]) / 'icl.jsonl'),
)
evaluator = Evaluator(label='triviaqa', dataloader=dl, metric_names=['InContextLearningQAAccuracy'])
model = HuggingFaceModel(
model=tiny_gpt2_model,
tokenizer=tiny_gpt2_tokenizer,
eval_metrics=[InContextLearningQAAccuracy()],
use_logits=True,
)
trainer = Trainer(model=model, max_duration='1ba', loggers=in_memory_logger)
trainer.eval(eval_dataloader=evaluator, subset_num_batches=2)
assert 'metrics/triviaqa/InContextLearningQAAccuracy' in in_memory_logger.data.keys()
assert in_memory_logger.data['metrics/triviaqa/InContextLearningQAAccuracy'][0][1].item() == 0
| composer-dev | tests/datasets/test_in_context_learning_datasets.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pytest
from composer.datasets import build_cifar10_dataloader, build_synthetic_cifar10_dataloader
@pytest.mark.parametrize('is_train', [False, True])
@pytest.mark.parametrize('synthetic', [pytest.param(False, marks=pytest.mark.daily), True])
def test_cifar10_shape_length(is_train, synthetic):
batch_size = 1
if synthetic:
dataspec = build_synthetic_cifar10_dataloader(global_batch_size=batch_size, is_train=is_train)
else:
dataspec = build_cifar10_dataloader(datadir='/tmp', global_batch_size=batch_size, is_train=is_train)
    samples = list(dataspec.dataloader)
if is_train:
assert len(samples) == 50000 // batch_size
else:
assert len(samples) == 10000 // batch_size
assert samples[0][0].shape == (1, 3, 32, 32)
| composer-dev | tests/datasets/test_cifar.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from typing import List, Tuple
import numpy as np
import pytest
import torch
from PIL import Image
from composer.datasets.utils import pil_image_collate
@pytest.fixture
def num_samples():
return 4
@pytest.fixture
def image_size():
return (16, 16)
@pytest.fixture
def pil_image_list(num_samples: int, image_size: Tuple[int, int]):
return [Image.new(mode='RGB', size=image_size, color=(i, i, i)) for i in range(num_samples)]
@pytest.fixture
def pil_target_list(num_samples: int, image_size: Tuple[int, int]):
return [Image.new(mode='L', size=image_size, color=i) for i in range(num_samples)]
@pytest.fixture
def correct_image_tensor(num_samples: int, image_size: Tuple[int, int]):
return torch.arange(num_samples).expand(3, *image_size, -1).permute(3, 0, 1, 2)
@pytest.fixture
def scalar_target_list(num_samples: int):
return np.arange(num_samples)
def test_scalar_target_collate(pil_image_list: List[Image.Image], scalar_target_list: np.ndarray,
correct_image_tensor: torch.Tensor):
batch = [(img, target) for img, target in zip(pil_image_list, scalar_target_list)]
image_tensor, target_tensor = pil_image_collate(batch=batch)
correct_target_tensor = torch.arange(correct_image_tensor.shape[0])
assert torch.all(image_tensor == correct_image_tensor) and torch.all(target_tensor == correct_target_tensor)
def test_image_target_collate(pil_image_list: List[Image.Image], pil_target_list: List[Image.Image],
correct_image_tensor):
batch = [(img, target) for img, target in zip(pil_image_list, pil_target_list)]
image_tensor, target_tensor = pil_image_collate(
batch=batch) # type: ignore "Image" is incompatible with "ndarray[Unknown, Unknown]"
assert torch.all(image_tensor == correct_image_tensor) and torch.all(target_tensor == correct_image_tensor[:, 0])
| composer-dev | tests/datasets/test_dataset_utils.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
| composer-dev | tests/datasets/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pytest
from composer.datasets import build_mnist_dataloader, build_synthetic_mnist_dataloader
@pytest.mark.parametrize('is_train', [False, True])
@pytest.mark.parametrize('synthetic', [pytest.param(False, marks=pytest.mark.daily), True])
def test_mnist_shape_length(is_train, synthetic):
batch_size = 1
if synthetic:
loader = build_synthetic_mnist_dataloader(global_batch_size=batch_size, is_train=is_train)
else:
loader = build_mnist_dataloader(datadir='/tmp', global_batch_size=batch_size, is_train=is_train)
samples = [_ for _ in loader]
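# MNIST has 60,000 training images and 10,000 test images, each 1x28x28.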
if is_train:
assert len(samples) == 60000 // batch_size
else:
assert len(samples) == 10000 // batch_size
assert samples[0][0].shape == (1, 1, 28, 28)
| composer-dev | tests/datasets/test_mnist.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from typing import Optional
import pytest
import torch
from composer.datasets.synthetic import (SyntheticBatchPairDataset, SyntheticDataLabelType, SyntheticDataType,
SyntheticPILDataset)
@pytest.mark.parametrize('data_type', [
SyntheticDataType.GAUSSIAN,
SyntheticDataType.SEPARABLE,
])
@pytest.mark.parametrize('label_type', [
SyntheticDataLabelType.CLASSIFICATION_ONE_HOT,
SyntheticDataLabelType.CLASSIFICATION_INT,
])
def test_synthetic_batch_pair_creation(data_type: SyntheticDataType, label_type: SyntheticDataLabelType):
if data_type == SyntheticDataType.SEPARABLE:
if label_type != SyntheticDataLabelType.CLASSIFICATION_INT:
pytest.skip('Separable data requires classification int labels')
num_classes = 2
label_shape = None
else:
num_classes = 10
label_shape = (1, 10, 12)
if data_type == SyntheticDataType.GAUSSIAN and label_type == SyntheticDataLabelType.CLASSIFICATION_INT:
pytest.xfail('classification_int is not currently supported with gaussian data')
dataset_size = 1000
data_shape = (3, 32, 32)
num_samples_to_create = 10
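# Only `num_unique_samples_to_create` samples are allocated in memory; indices beyond that range reuse them (see the assertions below).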
dataset = SyntheticBatchPairDataset(total_dataset_size=dataset_size,
data_shape=data_shape,
num_unique_samples_to_create=num_samples_to_create,
data_type=data_type,
label_type=label_type,
num_classes=num_classes,
label_shape=label_shape)
assert len(dataset) == dataset_size
# verify datapoints are correct
x, y = dataset[0]
assert x.size() == data_shape
if label_type == SyntheticDataLabelType.CLASSIFICATION_INT:
assert isinstance(y.item(), int)
elif label_type == SyntheticDataLabelType.CLASSIFICATION_ONE_HOT:
assert y.size() == (num_classes,)
assert torch.min(y) == 0
assert torch.max(y) == 1
# check that points were allocated in memory after the first call to __getitem__
assert dataset.input_data is not None
assert dataset.input_target is not None
# check that the correct number of points were allocated in memory
assert dataset.input_data.size()[0] == num_samples_to_create
assert dataset.input_target.size()[0] == num_samples_to_create
# verify that you can fetch points outside the num_samples_to_create range
# (still within the total dataset size range)
x, y = dataset[num_samples_to_create + 1]
assert x is not None
assert y is not None
@pytest.mark.parametrize('label_type', [
SyntheticDataLabelType.CLASSIFICATION_ONE_HOT,
SyntheticDataLabelType.CLASSIFICATION_INT,
])
@pytest.mark.parametrize('num_classes', [None, 0])
def test_synthetic_classification_param_validation(label_type: SyntheticDataLabelType, num_classes: Optional[int]):
with pytest.raises(ValueError):
SyntheticBatchPairDataset(total_dataset_size=10,
data_shape=(2, 2),
label_type=label_type,
num_classes=num_classes)
@pytest.mark.parametrize('data_type', [
SyntheticDataType.GAUSSIAN,
SyntheticDataType.SEPARABLE,
])
@pytest.mark.parametrize('label_type', [
SyntheticDataLabelType.CLASSIFICATION_ONE_HOT,
SyntheticDataLabelType.CLASSIFICATION_INT,
])
def test_synthetic_image_data_creation(data_type: SyntheticDataType, label_type: SyntheticDataLabelType):
if data_type == SyntheticDataType.SEPARABLE:
if label_type != SyntheticDataLabelType.CLASSIFICATION_INT:
pytest.skip('Separable data requires classification int labels')
num_classes = 2
label_shape = None
else:
num_classes = 10
label_shape = (1, 10, 12)
if data_type == SyntheticDataType.GAUSSIAN and label_type == SyntheticDataLabelType.CLASSIFICATION_INT:
pytest.xfail('classification_int is not currently supported with gaussian data')
dataset_size = 1000
data_shape = (32, 32)
num_samples_to_create = 100
dataset = SyntheticPILDataset(total_dataset_size=dataset_size,
data_shape=data_shape,
num_unique_samples_to_create=num_samples_to_create,
data_type=data_type,
label_type=label_type,
num_classes=num_classes,
label_shape=label_shape)
assert len(dataset) == dataset_size
# verify datapoints are correct
x, y = dataset[0]
assert x.size == data_shape
if label_type == SyntheticDataLabelType.CLASSIFICATION_INT:
assert isinstance(y.item(), int)
elif label_type == SyntheticDataLabelType.CLASSIFICATION_ONE_HOT:
assert y.size() == (num_classes,)
assert torch.min(y) == 0
assert torch.max(y) == 1
# check that points were allocated in memory after the first call to __getitem__
assert dataset._dataset.input_data is not None
assert dataset._dataset.input_target is not None
# check that the correct number of points were allocated in memory
assert dataset._dataset.input_data.shape[0] == num_samples_to_create
assert dataset._dataset.input_target.shape[0] == num_samples_to_create
# verify that you can fetch points outside the num_samples_to_create range
# (still within the total dataset size range)
x, y = dataset[num_samples_to_create + 1]
assert x is not None
assert y is not None
| composer-dev | tests/datasets/test_synthetic_data.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pytest
from torchvision import transforms
from composer.datasets.synthetic import SyntheticPILDataset
from composer.datasets.utils import add_vision_dataset_transform
image_size = 32
def generate_synthetic_dataset(data_transforms):
return SyntheticPILDataset(total_dataset_size=1000,
data_shape=[image_size, image_size],
num_classes=2,
transform=data_transforms)
def generate_default_transforms():
return transforms.Compose([transforms.RandomCrop(32), transforms.ToTensor(), transforms.RandomRotation(5)])
def generate_composition_no_tensor():
return transforms.Compose(
[transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(5)])
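# add_vision_dataset_transform should place PIL-space transforms before ToTensor, tensor-space
# transforms after it, and simply append when no ToTensor is present.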
@pytest.mark.parametrize('is_tensor_transform,index', [(False, 1), (True, 2)])
def test_pre_post_to_tensor_compose(is_tensor_transform, index):
dataset = generate_synthetic_dataset(generate_default_transforms())
add_vision_dataset_transform(dataset, transforms.RandomAutocontrast(), is_tensor_transform=is_tensor_transform)
assert dataset.transform is not None
assert type(dataset.transform.transforms[index]) == transforms.RandomAutocontrast
@pytest.mark.parametrize('is_tensor_transform,index', [(False, 0), (True, 1)])
def test_pre_post_to_tensor(is_tensor_transform, index):
dataset = generate_synthetic_dataset(transforms.ToTensor())
add_vision_dataset_transform(dataset, transforms.RandomAutocontrast(), is_tensor_transform=is_tensor_transform)
assert dataset.transform is not None
assert type(dataset.transform.transforms[index]) == transforms.RandomAutocontrast
@pytest.mark.parametrize('data_transforms', [(generate_composition_no_tensor()), (transforms.RandomHorizontalFlip())])
def test_default_to_append(data_transforms):
dataset = generate_synthetic_dataset(data_transforms)
add_vision_dataset_transform(dataset, transforms.RandomAutocontrast())
assert dataset.transform is not None
assert type(dataset.transform.transforms[-1]) == transforms.RandomAutocontrast
def test_add_to_none_transform():
dataset = generate_synthetic_dataset(None)
add_vision_dataset_transform(dataset, transforms.RandomAutocontrast())
assert type(dataset.transform) == transforms.RandomAutocontrast
| composer-dev | tests/datasets/test_add_dataset_transform.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import os
import pathlib
import pytest
from composer.datasets.ffcv_utils import write_ffcv_dataset
from composer.datasets.synthetic import SyntheticDataLabelType, SyntheticPILDataset
@pytest.mark.vision
def test_write_ffcv_dataset(tmp_path: pathlib.Path):
dataset = SyntheticPILDataset(total_dataset_size=1,
num_classes=1,
data_shape=[1, 1, 3],
label_type=SyntheticDataLabelType.CLASSIFICATION_INT,
num_unique_samples_to_create=1)
output_file = str(tmp_path / 'ffcv')
write_ffcv_dataset(dataset, write_path=output_file, num_workers=1)
assert os.path.exists(output_file)
| composer-dev | tests/datasets/test_ffcv_utils.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import os
from pathlib import Path
import numpy as np
import pytest
from torch.utils.data import DataLoader
from composer.metrics.nlp import LanguageCrossEntropy, MaskedAccuracy
from composer.models import HuggingFaceModel
from composer.trainer import Trainer
from composer.utils import dist
from tests.common import device, world_size
@pytest.mark.skip(reason='CO-1735, failing intermittently on different nodes, additional debug required')
@pytest.mark.daily
@pytest.mark.remote
@device('cpu', 'gpu')
@world_size(1, 2)
@pytest.mark.parametrize('num_workers', [0, 1, 2])
@pytest.mark.parametrize('dataset,dataset_args,seed',
[('c4', {
'remote': 's3://mosaicml-internal-dataset-c4/mds/2/',
'tokenizer_name': 'bert-base-uncased',
'max_seq_len': 256,
'group_method': 'truncate'
}, 1),
('pile', {
'remote': 's3://mosaicml-internal-dataset-the-pile/mds/2/',
'tokenizer_name': 'bert-base-uncased',
'max_seq_len': 256,
'group_method': 'truncate'
}, 2), ('enwiki', {
'remote': 's3://mosaicml-internal-dataset-enwiki-20200101/mds/2b/'
}, 3)])
def test_streaming_datasets(num_workers, dataset, dataset_args, seed, tiny_bert_tokenizer, tiny_bert_model, world_size,
device, tmp_path):
# Need to initialize dist before we get to streaming, because streaming always uses NCCL
if not dist.is_initialized():
dist.initialize_dist(device=device)
streaming = pytest.importorskip('streaming')
transformers = pytest.importorskip('transformers')
name_to_cls = {
'c4': streaming.text.c4.StreamingC4,
'pile': streaming.text.pile.StreamingPile,
'enwiki': streaming.text.enwiki.StreamingEnWiki
}
full_seed = seed + (num_workers + 1) * 10 + (world_size + 1) * 100 + (1 if device == 'cpu' else 2) * 1000
# This seed setting is necessary to prevent a shared memory collision due to a streaming bug
np.random.seed(full_seed)
# distribute the local dataset path from rank 0
local_path = [os.path.abspath(tmp_path)]
dist.broadcast_object_list(local_path, src=0)
local_path = Path(local_path[0]) / dataset
streaming_dataset = name_to_cls[dataset](local=local_path,
split='val',
predownload=None,
batch_size=4,
**dataset_args)
pretraining_metrics = [
LanguageCrossEntropy(ignore_index=-100, vocab_size=tiny_bert_tokenizer.vocab_size),
MaskedAccuracy(ignore_index=-100)
]
model = HuggingFaceModel(model=tiny_bert_model, use_logits=True, metrics=pretraining_metrics)
collator = transformers.DataCollatorForLanguageModeling(tokenizer=tiny_bert_tokenizer,
mlm_probability=0.15) if dataset != 'enwiki' else None
dataloader = DataLoader(streaming_dataset, batch_size=4, num_workers=num_workers, collate_fn=collator)
trainer = Trainer(model=model, train_dataloader=dataloader, max_duration='2ba', device=device)
trainer.fit()
# Necessary for some reason, otherwise streaming does not clean up properly, and tests fail
trainer.close()
if trainer.state.train_dataloader and trainer.state.train_dataloader._iterator is not None: # type: ignore [reportGeneralTypeIssues]
trainer.state.train_dataloader._iterator._shutdown_workers() # type: ignore [reportGeneralTypeIssues]
| composer-dev | tests/datasets/test_streaming_datasets_train.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
from PIL import Image
from composer.datasets.ade20k import (PadToSize, PhotometricDistoration, RandomCropPair, RandomHFlipPair,
RandomResizePair)
@pytest.fixture
def size():
return 16, 16
@pytest.fixture
def sample_pair(size):
img = Image.new(mode='RGB', size=size)
target = Image.new(mode='L', size=size)
return img, target
def test_random_resize(sample_pair, size):
random_resize_transform = RandomResizePair(min_scale=0.5, max_scale=2.0, base_size=size)
# Test that the resized image remains within bounds for 10 iterations
for _ in range(10):
resized_img, resized_target = random_resize_transform(sample_pair)
assert resized_img.size == resized_target.size
assert resized_img.size[0] >= size[0] // 2 and resized_img.size[0] <= size[0] * 2
assert resized_img.size[1] >= size[1] // 2 and resized_img.size[1] <= size[1] * 2
@pytest.mark.parametrize('crop_size', [(8, 8), (32, 32)])
def test_random_crop(sample_pair, crop_size):
random_crop_transform = RandomCropPair(crop_size)
image, target = random_crop_transform(sample_pair)
assert image.size == target.size
final_size = min(crop_size[0], sample_pair[0].height), min(crop_size[1], sample_pair[0].width)
assert final_size == image.size
def test_random_hflip(sample_pair):
old_image, old_target = np.array(sample_pair[0]), np.array(sample_pair[1])
# Always flip
always_hflip_transform = RandomHFlipPair(probability=1.0)
new_image, new_target = always_hflip_transform(sample_pair)
new_image, new_target = np.array(new_image), np.array(new_target)
assert np.allclose(new_image, old_image[:, ::-1]) and np.allclose(new_target, old_target[:, ::-1])
# Never flip
always_hflip_transform = RandomHFlipPair(probability=0.0)
new_image, new_target = always_hflip_transform(sample_pair)
new_image, new_target = np.array(new_image), np.array(new_target)
assert np.allclose(new_image, old_image) and np.allclose(new_target, old_target)
@pytest.mark.parametrize('pad_size', [(32, 32), (8, 8)])
def test_pad_transform(sample_pair, pad_size):
image = sample_pair[0]
pad_transform = PadToSize(size=pad_size, fill=255)
padded_image = pad_transform(image)
final_size = max(pad_size[1], image.width), max(pad_size[0], image.height)
# Check for correct size and number of padding elements
assert padded_image.size == final_size
# Check appropriate amount of padding is used
padded_image = np.array(padded_image)
initial_area = image.width * image.height
final_area = final_size[0] * final_size[1]
n_channels = padded_image.shape[2]
pad_volume = n_channels * (final_area - initial_area)
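# The original image is all zeros (the default fill for Image.new), so every element equal to 255 must come from the padding.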
assert pad_volume == (padded_image == 255).sum()
def test_photometric_distortion(sample_pair):
old_image = sample_pair[0]
# Test no transform case
photometric_transform = PhotometricDistoration(brightness=1.0, contrast=1.0, saturation=1.0, hue=0)
new_image = photometric_transform(old_image)
old_image, new_image = np.array(old_image), np.array(new_image)
assert np.allclose(old_image, new_image)
| composer-dev | tests/datasets/test_segmentation_transforms.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from typing import List, Optional, Type
from unittest.mock import Mock
import pytest
import torch
import torch.nn.functional as F
from torchvision.models.resnet import Bottleneck
from composer.algorithms import StochasticDepth
from composer.algorithms.stochastic_depth.stochastic_depth import _STOCHASTIC_LAYER_MAPPING, apply_stochastic_depth
from composer.algorithms.stochastic_depth.stochastic_layers import make_resnet_bottleneck_stochastic
from composer.core import Event, State
from composer.core.time import TimeUnit
from composer.models import composer_resnet
from composer.utils import module_surgery
@pytest.fixture()
def state(minimal_state: State):
"""stochastic depth tests require ResNet model."""
minimal_state.model = composer_resnet(model_name='resnet50', num_classes=100)
return minimal_state
@pytest.fixture()
def target_layer_name() -> str:
return 'ResNetBottleneck'
@pytest.fixture()
def stochastic_method():
return 'block'
def count_sd_forward(module: torch.nn.Module, target_block: Type[torch.nn.Module], count: int = 0):
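# Recursively count modules of the target block type that expose a `drop_rate` attribute,
# i.e. blocks that were replaced by their stochastic variants.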
if (len(list(module.children()))) == 0 and len(list(module.parameters())) > 0:
return count
else:
for child in module.children():
if isinstance(child, target_block) and hasattr(child, 'drop_rate'):
count += 1
count = count_sd_forward(child, target_block, count)
return count
@pytest.mark.parametrize('stochastic_method', ['block', 'sample'])
@pytest.mark.parametrize('target_layer_name', ['ResNetBottleneck'])
def test_sd_algorithm(state: State, stochastic_method: str, target_layer_name: str):
target_layer, _ = _STOCHASTIC_LAYER_MAPPING[target_layer_name]
target_block_count = module_surgery.count_module_instances(state.model, target_layer)
sd = StochasticDepth(stochastic_method=stochastic_method,
target_layer_name=target_layer_name,
drop_rate=0.5,
drop_distribution='linear',
drop_warmup=0.0)
sd.apply(Event.INIT, state, logger=Mock())
stochastic_forward_count = count_sd_forward(state.model, target_layer)
assert target_block_count == stochastic_forward_count
@pytest.mark.parametrize('stochastic_method', ['block', 'sample'])
@pytest.mark.parametrize('target_layer_name', ['ResNetBottleneck'])
def test_sd_functional(state: State, stochastic_method: str, target_layer_name: str):
target_layer, _ = _STOCHASTIC_LAYER_MAPPING[target_layer_name]
target_block_count = module_surgery.count_module_instances(state.model, target_layer)
apply_stochastic_depth(model=state.model,
stochastic_method=stochastic_method,
target_layer_name=target_layer_name,
drop_rate=0.5,
drop_distribution='linear')
stochastic_forward_count = count_sd_forward(state.model, target_layer)
assert target_block_count == stochastic_forward_count
class TestStochasticBottleneckForward:
@pytest.mark.parametrize('drop_rate', [1.0])
def test_block_stochastic_bottleneck_drop(self, drop_rate: float):
X = torch.randn(4, 4, 16, 16)
bottleneck_block = Bottleneck(inplanes=4, planes=1)
stochastic_block = make_resnet_bottleneck_stochastic(module=bottleneck_block,
module_index=0,
module_count=1,
drop_rate=drop_rate,
drop_distribution='linear',
stochastic_method='block')
stochastic_X = stochastic_block(X)
assert stochastic_X is X
@pytest.mark.parametrize('drop_rate', [0.0])
def test_block_stochastic_bottleneck_keep(self, drop_rate: float):
X = torch.randn(4, 4, 16, 16)
bottleneck_block = Bottleneck(inplanes=4, planes=1)
stochastic_block = make_resnet_bottleneck_stochastic(module=bottleneck_block,
module_index=0,
module_count=1,
drop_rate=drop_rate,
drop_distribution='linear',
stochastic_method='block')
stochastic_X = stochastic_block(X)
assert stochastic_X is not X
@pytest.mark.parametrize('drop_rate', [1.0])
def test_sample_stochastic_bottleneck_drop_all(self, drop_rate: float):
X = F.relu(torch.randn(4, 4, 16, 16)) # inputs and outputs will match if the input has been ReLUed
bottleneck_block = Bottleneck(inplanes=4, planes=1)
stochastic_block = make_resnet_bottleneck_stochastic(module=bottleneck_block,
module_index=0,
module_count=1,
drop_rate=drop_rate,
drop_distribution='linear',
stochastic_method='sample')
stochastic_X = stochastic_block(X)
assert torch.all(X == stochastic_X)
class TestStochasticDepthDropRate:
@pytest.fixture
def algorithm(
self,
target_layer_name: str,
stochastic_method: str,
drop_rate: float,
drop_distribution: str,
drop_warmup: str,
):
return StochasticDepth(
target_layer_name,
stochastic_method,
drop_rate,
drop_distribution,
drop_warmup,
)
def get_drop_rate_list(self, module: torch.nn.Module, drop_rates: Optional[List] = None):
if drop_rates is None:
drop_rates = []
if (len(list(module.children())) == 0 and len(list(module.parameters())) > 0):
return
else:
for _, child in module.named_children():
if hasattr(child, 'drop_rate'):
drop_rates.append(child.drop_rate)
self.get_drop_rate_list(child, drop_rates)
@pytest.mark.parametrize('step', [50, 100, 1000])
@pytest.mark.parametrize('drop_rate', [0.0, 0.5, 1.0])
@pytest.mark.parametrize('drop_distribution', ['uniform', 'linear'])
@pytest.mark.parametrize('drop_warmup', ['0.1dur'])
def test_drop_rate_warmup(self, algorithm: StochasticDepth, step: int, state: State):
old_drop_rates = []
self.get_drop_rate_list(state.model, drop_rates=old_drop_rates)
state.timestamp._batch._value = step
algorithm.apply(Event.BATCH_START, state, logger=Mock())
new_drop_rates = []
self.get_drop_rate_list(state.model, drop_rates=new_drop_rates)
assert state.max_duration is not None
assert state.max_duration.unit == TimeUnit.EPOCH
assert state.dataloader_len is not None
drop_warmup_iters = int(int(state.dataloader_len) * int(state.max_duration.value) * algorithm.drop_warmup)
assert torch.all(torch.tensor(new_drop_rates) == ((step / drop_warmup_iters) * torch.tensor(old_drop_rates)))
class TestStochasticDepthInputValidation():
@pytest.mark.parametrize('stochastic_method', ['nonsense'])
def test_invalid_method_name(self, stochastic_method: str, target_layer_name: str):
with pytest.raises(ValueError):
StochasticDepth(stochastic_method=stochastic_method, target_layer_name=target_layer_name)
@pytest.mark.parametrize('target_layer_name', ['nonsense_pt2'])
def test_invalid_layer_name(self, stochastic_method: str, target_layer_name: str):
with pytest.raises(ValueError):
StochasticDepth(stochastic_method=stochastic_method, target_layer_name=target_layer_name)
@pytest.mark.parametrize('drop_rate', [-0.5, 1.7])
def test_invalid_drop_rate(self, stochastic_method: str, target_layer_name: str, drop_rate: float):
with pytest.raises(ValueError):
StochasticDepth(
stochastic_method=stochastic_method,
target_layer_name=target_layer_name,
drop_rate=drop_rate,
)
@pytest.mark.parametrize('drop_distribution', ['nonsense_pt3'])
def test_invalid_drop_distribution(self, stochastic_method: str, target_layer_name: str, drop_distribution: str):
with pytest.raises(ValueError):
StochasticDepth(stochastic_method=stochastic_method,
target_layer_name=target_layer_name,
drop_distribution=drop_distribution)
| composer-dev | tests/algorithms/test_stochastic_depth.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from unittest.mock import Mock
import pytest
import torch
from packaging import version
from torch import nn
import composer.algorithms.gradient_clipping.gradient_clipping as gc_module
from composer.algorithms.gradient_clipping import GradientClipping, apply_gradient_clipping
from composer.algorithms.gradient_clipping.gradient_clipping import _apply_agc, _get_clipped_gradient_coeff
from composer.core import Engine, State
from composer.core.event import Event
from tests.common import world_size
from tests.common.datasets import dummy_tiny_bert_classification_batch, dummy_transformer_classifier_batch
from tests.common.models import SimpleTransformerClassifier, configure_tiny_bert_config
def simple_model_with_grads():
# Set up small NN with one linear layer with no bias + softmax, so only
# one set of params and get some gradients.
N, hin, num_classes = 8, 4, 3
x = torch.rand((N, hin))
y = torch.randint(high=num_classes - 1, size=(N,))
model = nn.Sequential(nn.Linear(hin, num_classes, bias=False), nn.Softmax(dim=1))
# Force wrap every module in FSDP, to allow for testing FSDP
# gradient clipping properly.
for module in model:
module._fsdp_wrap = True
model._fsdp_wrap = True
o = model(x)
loss_fn = nn.CrossEntropyLoss()
loss = loss_fn(o, y)
loss.backward()
return model
def cnn_model_with_grads():
# Make a CNN with all the common parameters: bias, weight matrix, conv filters.
class myNN(nn.Module):
def __init__(self, n_ch, num_fmaps, h, num_classes, filter_size):
super().__init__()
self.conv_model = nn.Sequential(nn.Conv2d(n_ch, num_fmaps, kernel_size=filter_size), nn.ReLU())
self.mlp = nn.Sequential(nn.Linear(num_fmaps, h), nn.ReLU(), nn.Linear(h, h), nn.ReLU(),
nn.Linear(h, num_classes), nn.Softmax(dim=1))
def forward(self, x):
fmaps = self.conv_model(x)
vec = torch.mean(fmaps, dim=(2, 3))
out = self.mlp(vec)
return out
# Generate some gradients.
N, n_ch, num_fmaps, h, num_classes, filter_size = 8, 3, 4, 4, 3, 3
x = torch.rand((N, n_ch, 16, 16))
y = torch.randint(high=num_classes - 1, size=(N,))
model = myNN(n_ch, num_fmaps, h, num_classes, filter_size)
# Force wrap every module in FSDP, to allow for testing FSDP
# gradient clipping properly.
for layer in model.modules():
layer._fsdp_wrap = True
o = model(x)
loss_fn = nn.CrossEntropyLoss()
loss = loss_fn(o, y)
loss.backward()
return model
def simple_transformer_model_with_grads():
# Make a Transformer model.
model = SimpleTransformerClassifier(vocab_size=100, num_classes=3)
# Force wrap every module in FSDP, to allow for testing FSDP
# gradient clipping properly.
for layer in model.modules():
layer._fsdp_wrap = True
x = dummy_transformer_classifier_batch(num_classes=3)
o = model(x)
y = torch.randint(high=1, size=o.shape, dtype=o.dtype)
loss_fn = nn.CrossEntropyLoss()
loss = loss_fn(o, y)
loss.backward()
return model
def hf_model_with_grads():
# Make a HuggingFace BERT model.
transformers = pytest.importorskip('transformers')
from composer.models import HuggingFaceModel
tiny_bert_config = configure_tiny_bert_config()
tiny_bert_config.num_labels = 3 # type: ignore
hf_model = transformers.AutoModelForSequenceClassification.from_config(
tiny_bert_config) # type: ignore (thirdparty)
model = HuggingFaceModel(hf_model, metrics=[], use_logits=True)
# Force wrap every module in FSDP, to allow for testing FSDP
# gradient clipping properly.
for layer in model.modules():
layer._fsdp_wrap = True
x = dummy_tiny_bert_classification_batch(num_classes=3)
o = model(x).logits
y = torch.randint(high=1, size=o.shape, dtype=o.dtype)
loss_fn = nn.CrossEntropyLoss()
loss = loss_fn(o, y)
loss.backward()
return model
def test_gradient_clipping_functional(monkeypatch):
model = Mock()
new_gc_fn = Mock()
monkeypatch.setattr(gc_module, '_apply_agc', new_gc_fn)
apply_gradient_clipping(model, 'adaptive', 0.1, fsdp_enabled=False)
new_gc_fn.assert_called_once_with(model.parameters(), clipping_threshold=0.1)
new_gc_fn = Mock()
monkeypatch.setattr(torch.nn.utils, 'clip_grad_norm_', new_gc_fn)
apply_gradient_clipping(model, 'norm', 0.1, fsdp_enabled=False)
new_gc_fn.assert_called_once()
new_gc_fn = Mock()
monkeypatch.setattr(torch.nn.utils, 'clip_grad_value_', new_gc_fn)
apply_gradient_clipping(model, 'value', 0.1, fsdp_enabled=False)
new_gc_fn.assert_called_once()
@pytest.mark.parametrize('clipping_type', ['adaptive', 'norm', 'value'])
@pytest.mark.parametrize(
'model_with_grads',
[simple_model_with_grads, cnn_model_with_grads, simple_transformer_model_with_grads, hf_model_with_grads])
def test_gradient_clipping_algorithm(monkeypatch, clipping_type, model_with_grads, dummy_state: State):
model = model_with_grads()
apply_gc_fn = Mock()
monkeypatch.setattr(gc_module, 'apply_gradient_clipping', apply_gc_fn)
state = dummy_state
state.model = model
state.callbacks = []
state.algorithms = [GradientClipping(clipping_type=clipping_type, clipping_threshold=0.01)]
logger = Mock()
engine = Engine(state, logger)
# Run the Event that should cause gradient_clipping.apply to be called.
engine.run_event(Event.AFTER_TRAIN_BATCH)
apply_gc_fn.assert_called_once()
@pytest.mark.parametrize(
'model_with_grads',
[simple_model_with_grads(),
cnn_model_with_grads(),
simple_transformer_model_with_grads(),
hf_model_with_grads()])
def test_gradient_clipping_algorithm_with_deepspeed_enabled(
monkeypatch: pytest.MonkeyPatch,
model_with_grads,
dummy_state: State,
):
clipping_threshold = 0.1191
apply_gc_fn = Mock()
monkeypatch.setattr(gc_module, 'apply_gradient_clipping', apply_gc_fn)
state = dummy_state
# Set clipping_type to norm to ensure that apply_gradient_clipping
# is not called.
state.algorithms = [GradientClipping(clipping_type='norm', clipping_threshold=clipping_threshold)]
# Enable deepspeed.
state.deepspeed_config = {}
model = model_with_grads
state.model = model
logger = Mock()
engine = Engine(state, logger)
# Run the Event that should cause gradient_clipping.apply to be called and deepspeed_config to be modified.
engine.run_event(Event.INIT)
# Make sure deepspeed_config's gradient_clipping field is set properly.
assert 'gradient_clipping' in state.deepspeed_config and state.deepspeed_config[
'gradient_clipping'] == clipping_threshold
# Make sure apply_gradient_clipping is not called.
apply_gc_fn.assert_not_called()
def _auto_wrap_policy(module: torch.nn.Module, recurse: bool, unwrapped_params: int) -> bool:
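# FSDP auto-wrap policy: always recurse into children and wrap any module the model builders above tagged with `_fsdp_wrap`.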
if recurse:
return True
if hasattr(module, '_fsdp_wrap'):
return bool(module._fsdp_wrap)
return False
@pytest.mark.parametrize('model_with_grads', [
simple_model_with_grads, cnn_model_with_grads,
pytest.param(simple_transformer_model_with_grads,
marks=pytest.mark.xfail(reason='SimpleTransformerBase cannot be recursively FSDP wrapped.')),
hf_model_with_grads
])
@pytest.mark.parametrize('clipping_type', ['norm', 'value'])
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse('1.13.0'),
reason='requires PyTorch 1.13 or higher')
@pytest.mark.gpu
@world_size(2)
def test_gradient_clipping_algorithm_with_fsdp_enabled_does_not_error(
monkeypatch,
model_with_grads,
clipping_type,
dummy_state: State,
world_size: int,
):
from torch.distributed.fsdp import FullyShardedDataParallel
clipping_threshold = 0.1191
state = dummy_state
state.model = FullyShardedDataParallel(model_with_grads(),
auto_wrap_policy=_auto_wrap_policy,
device_id=torch.cuda.current_device())
state.algorithms = [GradientClipping(clipping_type=clipping_type, clipping_threshold=clipping_threshold)]
logger = Mock()
engine = Engine(state, logger)
engine.run_event(Event.AFTER_TRAIN_BATCH)
@pytest.mark.parametrize(
'model_with_grads',
[simple_model_with_grads, cnn_model_with_grads, simple_transformer_model_with_grads, hf_model_with_grads])
def test_algorithm_with_deepspeed_enabled_errors_out_for_non_norm(
monkeypatch: pytest.MonkeyPatch,
dummy_state: State,
model_with_grads,
):
clipping_threshold = 0.1191
apply_gc_fn = Mock()
monkeypatch.setattr(gc_module, 'apply_gradient_clipping', apply_gc_fn)
state = dummy_state
# Enable deepspeed with a non-norm clipping_type ('value'), which is not supported with deepspeed,
# so running Event.INIT should raise a NotImplementedError.
state.algorithms = [GradientClipping(clipping_type='value', clipping_threshold=clipping_threshold)]
state.deepspeed_config = {}
model = model_with_grads()
state.model = model
logger = Mock()
engine = Engine(state, logger)
# Clipping type is not set to norm and deepspeed is enabled so NotImplementedError should be raised.
with pytest.raises(NotImplementedError):
engine.run_event(Event.INIT)
# Clipping threshold is less than zero and deepspeed is enabled, so a ValueError should be raised.
state.algorithms = [GradientClipping(clipping_type='norm', clipping_threshold=-2.0)]
with pytest.raises(ValueError):
engine.run_event(Event.INIT)
#### Tests Specific to AGC ######
@pytest.mark.parametrize(
'model_with_grads',
[simple_model_with_grads, cnn_model_with_grads, simple_transformer_model_with_grads, hf_model_with_grads])
def test_apply_agc(model_with_grads):
model = model_with_grads()
# Make sure after calling apply_agc, the gradients inside the model are
# the same as if we manually called _get_clipped_gradients on the weights and
# gradients.
weights = next(model.parameters())
grad = weights.grad
expected_clipped_grad = grad.detach() * _get_clipped_gradient_coeff(weights, grad)
_apply_agc(model.parameters(), 0.01)
current_grad = next(model.parameters()).grad
assert torch.equal(current_grad, expected_clipped_grad)
@pytest.mark.parametrize(
'model_with_grads',
[simple_model_with_grads(),
cnn_model_with_grads(),
simple_transformer_model_with_grads(),
hf_model_with_grads()])
def test_apply_agc_does_not_error(model_with_grads):
"""This test is just to ensure that no errors are raised.
Accuracy of the AGC calculations is tested in other tests.
"""
model = model_with_grads
# Call apply_agc. If this function returns then we know that nothing errored out.
_apply_agc(model.parameters(), 0.01)
def test_get_clipped_gradients_1D():
weights = torch.Tensor([3., 4.])
grad = torch.Tensor([7., 24.])
clipping_threshold = 0.5
expected = torch.Tensor([0.7, 2.4])
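# ||weights|| = 5 and ||grad|| = 25, so with threshold 0.5 the gradient norm is capped at 0.5 * 5 = 2.5,
# i.e. the gradient is scaled by 0.1, matching `expected`.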
clipped_grads = grad * _get_clipped_gradient_coeff(
weights=weights, grad=grad, clipping_threshold=clipping_threshold)
assert torch.equal(clipped_grads, expected)
@pytest.mark.parametrize('weights,grad,expected',
[(torch.Tensor([0., 0.]), torch.Tensor([1., 1.]), torch.Tensor([0., 0.])),
(torch.Tensor([1., 1.]), torch.Tensor([0., 0.]), torch.Tensor([0., 0.])),
(torch.Tensor([0., 0.]), torch.Tensor([0., 0.]), torch.Tensor([0., 0.]))])
def test_get_clipped_gradients_1D_with_zeros(weights: torch.Tensor, grad: torch.Tensor, expected: torch.Tensor):
clipping_threshold = 1e-4
clipped_grads = grad * _get_clipped_gradient_coeff(
weights=weights, grad=grad, clipping_threshold=clipping_threshold)
assert torch.equal(clipped_grads, expected)
def test_get_clipped_gradients_2D():
weights = torch.Tensor([[3., 4.], [9., 40.]])
grad = torch.Tensor([[7., 24.], [5., 12.]])
clipping_threshold = 0.5
expected = torch.Tensor([[0.7, 2.4], [5., 12.]])
clipped_grads = grad * _get_clipped_gradient_coeff(
weights=weights, grad=grad, clipping_threshold=clipping_threshold)
assert torch.equal(clipped_grads, expected)
def test_get_clipped_gradients_3D():
weights = torch.Tensor([[[3., 8.], [2., 2.]], [[1., 3.], [3., 9.]]])
grad = torch.Tensor([[[1., 1.], [3., 5.]], [[1., 1.], [1., 1.]]])
clipping_threshold = 1 / 3.
expected = torch.Tensor([[[0.5000, 0.5000], [1.5000, 2.5000]], [[1.0000, 1.0000], [1.0000, 1.0000]]])
clipped_grads = grad * _get_clipped_gradient_coeff(
weights=weights, grad=grad, clipping_threshold=clipping_threshold)
assert torch.equal(clipped_grads, expected)
def test_get_clipped_gradients_4D():
weights = torch.Tensor([[[[3.], [8.]], [[2.], [2.]]], [[[1.], [3.]], [[3.], [9.]]]])
grad = torch.Tensor([[[[1.], [1.]], [[3.], [5.]]], [[[1.], [1.]], [[1.], [1.]]]])
clipping_threshold = 1 / 3.
expected = torch.Tensor([[[[0.5], [0.5]], [[1.5], [2.5]]], [[[1.0], [1.0]], [[1.0], [1.0]]]])
clipped_grads = grad * _get_clipped_gradient_coeff(
weights=weights, grad=grad, clipping_threshold=clipping_threshold)
assert torch.equal(clipped_grads, expected)
| composer-dev | tests/algorithms/test_gradient_clipping.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Test Ghost Batch Normalization, both as an algorithm and module."""
import contextlib
import math
from typing import Any, Dict, Optional, Sequence, Union, cast
from unittest.mock import MagicMock, Mock
import pytest
import torch
from torchmetrics import Metric
from composer.algorithms import ghost_batchnorm as ghostbn
from composer.algorithms.ghost_batchnorm.ghost_batchnorm import GhostBatchNorm, _GhostBatchNorm
from composer.core import Batch, Event, State
from composer.models import ComposerModel
from composer.utils import module_surgery
_GHOSTBN_MODULE_CLASS = _GhostBatchNorm
_GHOSTBN_CORRECT_EVENT = Event.INIT
_TEST_NUM_DIMS = [1, 2, 3]
_TEST_GHOST_BATCH_SIZES = [1, 2, 3, 5]
_TEST_BATCH_SIZES = [12] # multiple of some, but not all, ghost batch sizes
class ModuleWithBatchnorm(ComposerModel):
def __init__(self, num_dims, num_features=4):
super().__init__()
eps = 0 # makes it easier to check normalization correctness
factory_func = {
1: torch.nn.BatchNorm1d,
2: torch.nn.BatchNorm2d,
3: torch.nn.BatchNorm3d,
}
self.bn = factory_func[num_dims](num_features, eps=eps)
self.num_dims = num_dims
self.num_features = num_features
self.non_batchnorm_module = torch.nn.Conv2d(4, 5, (1, 1))
def forward(self, input: torch.Tensor):
return self.bn(input)
def loss(self, outputs: Any, batch: Batch, *args, **kwargs) -> Union[torch.Tensor, Sequence[torch.Tensor]]:
raise NotImplementedError()
def get_metrics(self, is_train: bool = False) -> Dict[str, Metric]:
raise NotImplementedError()
def eval_forward(self, batch: Batch, outputs: Optional[Any] = None):
raise NotImplementedError()
@pytest.fixture
def state(num_dims: int) -> State:
return MagicMock(model=ModuleWithBatchnorm(num_dims=num_dims))
@pytest.fixture
def algo_instance(ghost_batch_size: int):
return GhostBatchNorm(ghost_batch_size=ghost_batch_size)
@pytest.mark.parametrize('num_dims', [1, 2, 3, 4, -1])
def test_batchnorm_gets_replaced_functional(num_dims: int):
if num_dims < 1 or num_dims > 3:
ctx = pytest.raises(KeyError)
else:
ctx = contextlib.nullcontext()
with ctx:
"""GhostBatchNorm{1,2,3}d should work, but other ints should throw."""
module = ModuleWithBatchnorm(num_dims)
assert module_surgery.count_module_instances(module, _GHOSTBN_MODULE_CLASS) == 0
ghostbn.apply_ghost_batchnorm(module, ghost_batch_size=1)
assert module_surgery.count_module_instances(module, _GHOSTBN_MODULE_CLASS) == 1
@pytest.mark.parametrize('num_dims', _TEST_NUM_DIMS)
@pytest.mark.parametrize('ghost_batch_size', _TEST_GHOST_BATCH_SIZES)
@pytest.mark.parametrize('batch_size', _TEST_BATCH_SIZES)
class TestGhostBatchesNormalized:
def _assert_ghost_batches_normalized(self, module: ModuleWithBatchnorm, ghost_batch_size: int,
batch_size: int) -> None:
torch.manual_seed(123)
size = [batch_size, module.num_features] + ([3] * module.num_dims)
X = torch.randn(size=size)
module.train()
out = module(X)
n_channels = out.shape[1]
# reduce over everything but channel idx
reduce_dims = (0,) + tuple(range(2, out.ndim))
nchunks = int(math.ceil(batch_size / ghost_batch_size))
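# Each chunk is one ghost batch; after normalization it should have zero mean and unit variance per channel.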
for ghost_batch in out.chunk(nchunks):
channel_variances, channel_means = torch.var_mean(ghost_batch, dim=reduce_dims, unbiased=False)
torch.testing.assert_close(channel_variances, torch.ones(n_channels))
torch.testing.assert_close(channel_means, torch.zeros(n_channels))
def test_normalization_correct_functional(self, num_dims: int, ghost_batch_size: int, batch_size: int) -> None:
module = ModuleWithBatchnorm(num_dims=num_dims)
ghostbn.apply_ghost_batchnorm(module, ghost_batch_size=ghost_batch_size)
self._assert_ghost_batches_normalized(module=module, ghost_batch_size=ghost_batch_size, batch_size=batch_size)
def test_normalization_correct_algorithm(self, state, algo_instance, num_dims: int, ghost_batch_size: int,
batch_size: int) -> None:
algo_instance.apply(_GHOSTBN_CORRECT_EVENT, state, logger=Mock())
module = cast(ModuleWithBatchnorm, state.model)
self._assert_ghost_batches_normalized(module=module, ghost_batch_size=ghost_batch_size, batch_size=batch_size)
@pytest.mark.parametrize('ghost_batch_size', [4])
def test_correct_event_matches(algo_instance):
assert algo_instance.match(_GHOSTBN_CORRECT_EVENT, Mock(side_effect=ValueError))
@pytest.mark.parametrize('ghost_batch_size', [4])
@pytest.mark.parametrize('event', Event) # enum iteration
def test_incorrect_event_does_not_match(event: Event, algo_instance):
if event == _GHOSTBN_CORRECT_EVENT:
return
assert not algo_instance.match(event, Mock(side_effect=ValueError))
@pytest.mark.parametrize('ghost_batch_size', [4])
@pytest.mark.parametrize('num_dims', [2])
def test_algorithm_logging(state, algo_instance):
logger_mock = Mock()
algo_instance.apply(Event.INIT, state, logger_mock)
logger_mock.log_hyperparameters.assert_called_once_with({
'GhostBatchNorm/num_new_modules': 1,
})
| composer-dev | tests/algorithms/test_ghost_batchnorm.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import copy
import numpy as np
import pytest
import torch
from composer.algorithms import EMA
from composer.algorithms.ema.ema import EMAParameters, compute_ema
from composer.core import Event, Time, Timestamp, TimeUnit
from tests.common import SimpleConvModel, SimpleTransformerClassifier
from tests.common.models import configure_tiny_bert_hf_model
def validate_ema(model, original_model, ema_model, smoothing):
model_params, model_buffers = dict(model.named_parameters()), dict(model.named_buffers())
original_params, original_buffers = dict(original_model.named_parameters()), dict(original_model.named_buffers())
ema_params, ema_buffers = dict(ema_model.named_parameters()), dict(ema_model.named_buffers())
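# After a single EMA update, each parameter (and buffer) should equal smoothing * original + (1 - smoothing) * current.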
for name, param in model_params.items():
new_param = (original_params[name] * smoothing + (1. - smoothing) * param)
torch.testing.assert_close(ema_params[name].data, new_param)
for name, buffer in model_buffers.items():
new_buffer = (original_buffers[name] * smoothing + (1. - smoothing) * buffer).type(ema_buffers[name].data.dtype)
torch.testing.assert_close(ema_buffers[name].data, new_buffer)
def validate_model(model1, model2):
model1_params, model1_buffers = dict(model1.named_parameters()), dict(model1.named_buffers())
model2_params, model2_buffers = dict(model2.named_parameters()), dict(model2.named_buffers())
for name, _ in model1_params.items():
torch.testing.assert_close(model1_params[name].data, model2_params[name].data)
for name, _ in model1_buffers.items():
torch.testing.assert_close(model1_buffers[name].data, model2_buffers[name].data)
@pytest.mark.parametrize('smoothing', [0, 0.5, 0.99, 1])
@pytest.mark.parametrize('model_cls', [(SimpleConvModel), (SimpleTransformerClassifier),
(configure_tiny_bert_hf_model)])
def test_ema(smoothing, model_cls):
model = model_cls()
ema_model = model_cls()
original_model = copy.deepcopy(ema_model)
compute_ema(model=model, ema_model=ema_model, smoothing=smoothing)
validate_ema(model, original_model, ema_model, smoothing)
# params = [(half_life, update_interval)]
@pytest.mark.parametrize('params', [{
'half_life': '10ba',
'update_interval': '1ba'
}, {
'half_life': '1ep',
'update_interval': '1ep'
}, {
'smoothing': 0.999,
'update_interval': '1ba'
}])
@pytest.mark.parametrize('model_cls', [(SimpleConvModel), (SimpleTransformerClassifier),
(configure_tiny_bert_hf_model)])
def test_ema_algorithm(params, model_cls, minimal_state, empty_logger):
# Initialize input tensor
input = torch.rand((32, 5))
if 'smoothing' in params:
smoothing, update_interval = params['smoothing'], params['update_interval']
algorithm = EMA(half_life=None, smoothing=smoothing, update_interval=update_interval)
else:
half_life, update_interval = params['half_life'], params['update_interval']
algorithm = EMA(half_life=half_life, update_interval=update_interval)
state = minimal_state
state.model = model_cls()
state.batch = (input, torch.Tensor())
# Start EMA
algorithm.ema_model = EMAParameters(state.model)
# Check if ema correctly calculated smoothing
update_interval = Time.from_timestring(params['update_interval'])
if 'half_life' in params:
half_life = Time.from_timestring(params['half_life'])
smoothing = np.exp(-np.log(2) * (update_interval.value / half_life.value))
np.testing.assert_allclose(np.asarray(smoothing), np.asarray(algorithm.smoothing))
# Fake a training update by replacing state.model after ema grabbed it.
original_model = copy.deepcopy(state.model)
state.model = model_cls()
training_updated_model = copy.deepcopy(state.model)
# Do the EMA update
state.timestamp = Timestamp()
if update_interval.unit == TimeUnit.BATCH:
state.timestamp._batch = update_interval
algorithm.apply(Event.BATCH_END, state, empty_logger)
elif update_interval.unit == TimeUnit.EPOCH:
state.timestamp._epoch = update_interval
algorithm.apply(Event.EPOCH_END, state, empty_logger)
else:
raise ValueError(f'Invalid time string for parameter half_life')
# Check if EMA correctly computed the average.
validate_ema(state.model, original_model, algorithm.ema_model, algorithm.smoothing)
ema_updated_model = copy.deepcopy(algorithm.ema_model)
# Check if the EMA model is swapped in for testing
algorithm.apply(Event.EVAL_START, state, empty_logger)
validate_model(state.model, ema_updated_model)
# Check if the training model is swapped back in for training
algorithm.apply(Event.EVAL_END, state, empty_logger)
validate_model(state.model, training_updated_model)
| composer-dev | tests/algorithms/test_ema.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
import torch.nn.utils.parametrize as parametrize
from composer.algorithms import WeightStandardization
from composer.core import Event, State
from composer.functional import apply_weight_standardization
from composer.loggers import Logger
from tests.common import SimpleConvModel
def _count_parametrize(module: torch.nn.Module):
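# Count modules whose 'weight' is parametrized, i.e. layers wrapped by weight standardization.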
count = 0
for m in module.modules():
if parametrize.is_parametrized(m, 'weight'):
count += 1
return count
def test_ws_calculation():
"""Check if convolution weights are properly standardized."""
model = SimpleConvModel()
apply_weight_standardization(module=model)
var1, mean1 = torch.var_mean(model.conv1.weight, dim=[1, 2, 3], unbiased=False)
var2, mean2 = torch.var_mean(model.conv2.weight, dim=[1, 2, 3], unbiased=False)
torch.testing.assert_close(var1, torch.ones_like(var1))
torch.testing.assert_close(var2, torch.ones_like(var2))
torch.testing.assert_close(mean1, torch.zeros_like(mean1))
torch.testing.assert_close(mean2, torch.zeros_like(mean2))
@pytest.mark.parametrize('n_last_layers_ignore', [0, 1, 3])
def test_ws_replacement(n_last_layers_ignore: int):
"""Check if the proper number of layers have been parametrized."""
model = SimpleConvModel()
apply_weight_standardization(module=model, n_last_layers_ignore=n_last_layers_ignore)
ws_count = _count_parametrize(model)
expected_count = max(2 - n_last_layers_ignore, 0) # Expected number of weight standardization layers
assert ws_count == expected_count
@pytest.mark.parametrize('n_last_layers_ignore', [0, 1, 3])
def test_ws_algorithm(n_last_layers_ignore: int, minimal_state: State, empty_logger: Logger):
"""Check if the algorithm is applied at the proper event."""
minimal_state.model = SimpleConvModel()
ws_algorithm = WeightStandardization(n_last_layers_ignore=n_last_layers_ignore)
ws_algorithm.apply(Event.INIT, minimal_state, empty_logger)
ws_count = _count_parametrize(minimal_state.model)
expected_count = max(2 - n_last_layers_ignore, 0)
assert ws_count == expected_count
| composer-dev | tests/algorithms/test_weight_standardization.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""This file provides the canonical settings (dataset, model, algorithms, arguments)
for each algorithm to be tested. This can be used throughout the codebase for
functional tests, serialization tests, etc.
Each algorithm is keyed based on its name in the algorithm registry.
"""
from typing import Any, Dict, Optional, Type
import pytest
from torch.utils.data import DataLoader
import composer
import composer.algorithms
from composer import Algorithm
from composer.algorithms import (EMA, SAM, SWA, Alibi, AugMix, BlurPool, ChannelsLast, ColOut, CutMix, CutOut,
Factorize, FusedLayerNorm, GatedLinearUnits, GhostBatchNorm, GradientClipping,
GyroDropout, LabelSmoothing, LayerFreezing, LowPrecisionGroupNorm,
LowPrecisionLayerNorm, MixUp, NoOpModel, ProgressiveResizing, RandAugment,
SelectiveBackprop, SeqLengthWarmup, SqueezeExcite, StochasticDepth,
WeightStandardization)
from composer.models import composer_resnet
from composer.models.base import ComposerModel
from composer.utils import dist
from tests.common import get_module_subclasses
from tests.common.datasets import RandomImageDataset, SimpleDataset, dummy_bert_lm_dataloader, dummy_gpt_lm_dataloader
from tests.common.models import (SimpleConvModel, SimpleModelWithDropout, configure_tiny_bert_hf_model,
configure_tiny_gpt2_hf_model)
simple_bert_settings = {
'model': configure_tiny_bert_hf_model,
'dataloader': (dummy_bert_lm_dataloader, {
'size': 8
}),
'kwargs': {},
}
simple_gpt2_settings = {
'model': configure_tiny_gpt2_hf_model,
'dataloader': (dummy_gpt_lm_dataloader, {
'size': 8
}),
'kwargs': {},
}
simple_vision_settings = {
'model': SimpleConvModel,
'dataset': RandomImageDataset,
'kwargs': {},
}
simple_vision_pil_settings = {
'model': SimpleConvModel,
'dataset': (RandomImageDataset, {
'is_PIL': True
}),
'kwargs': {},
}
simple_resnet_settings = {
'model': (composer_resnet, {
'model_name': 'resnet18',
'num_classes': 2
}),
'dataset': (RandomImageDataset, {
'shape': (3, 224, 224),
}),
'kwargs': {},
}
_settings: Dict[Type[Algorithm], Optional[Dict[str, Any]]] = {
GradientClipping: {
'model': SimpleConvModel,
'dataset': RandomImageDataset,
'kwargs': {
'clipping_type': 'norm',
'clipping_threshold': 0.1
},
},
Alibi: {
'model': configure_tiny_bert_hf_model,
'dataloader': (dummy_bert_lm_dataloader, {
'size': 8
}),
'kwargs': {
'max_sequence_length': 256
},
},
AugMix: simple_vision_settings,
BlurPool: {
'model': SimpleConvModel,
'dataset': RandomImageDataset,
'kwargs': {
'min_channels': 0,
},
},
ChannelsLast: simple_vision_settings,
ColOut: simple_vision_settings,
CutMix: {
'model': SimpleConvModel,
'dataset': RandomImageDataset,
'kwargs': {},
},
CutOut: simple_vision_settings,
EMA: {
'model': SimpleConvModel,
'dataset': RandomImageDataset,
'kwargs': {
'half_life': '1ba',
},
},
Factorize: simple_resnet_settings,
FusedLayerNorm: simple_bert_settings,
GatedLinearUnits: simple_bert_settings,
GhostBatchNorm: {
'model': (SimpleConvModel, {
'norm': 'group',
}),
'dataset': RandomImageDataset,
'kwargs': {
'ghost_batch_size': 2,
}
},
LabelSmoothing: simple_vision_settings,
LayerFreezing: simple_vision_settings,
LowPrecisionLayerNorm: simple_bert_settings,
LowPrecisionGroupNorm: {
'model': (SimpleConvModel, {
'norm': 'group',
}),
'dataset': RandomImageDataset,
'kwargs': {},
},
MixUp: simple_vision_settings,
ProgressiveResizing: simple_vision_settings,
RandAugment: simple_vision_settings,
NoOpModel: simple_vision_settings,
SAM: simple_vision_settings,
SelectiveBackprop: simple_vision_settings,
SeqLengthWarmup: {
'model': configure_tiny_bert_hf_model,
'dataloader': (dummy_bert_lm_dataloader, {
'size': 8
}),
'kwargs': {
'duration': 0.5,
'min_seq_length': 8,
'max_seq_length': 16
},
},
SqueezeExcite: simple_resnet_settings,
StochasticDepth: {
'model': (composer_resnet, {
'model_name': 'resnet50',
'num_classes': 2
}),
'dataset': (RandomImageDataset, {
'shape': (3, 224, 224),
}),
'kwargs': {
'stochastic_method': 'block',
'target_layer_name': 'ResNetBottleneck',
'drop_rate': 0.2,
'drop_distribution': 'linear',
'drop_warmup': '0.0dur',
}
},
SWA: {
'model': SimpleConvModel,
'dataset': RandomImageDataset,
'kwargs': {
'swa_start': '0.2dur',
'swa_end': '0.97dur',
'update_interval': '1ep',
'schedule_swa_lr': True,
}
},
WeightStandardization: simple_vision_settings,
GyroDropout: {
'model': SimpleModelWithDropout,
'dataloader': (DataLoader, {
'dataset': SimpleDataset(batch_size=2, feature_size=64, num_classes=10)
}),
'kwargs': {
'p': 0.5,
'sigma': 2,
'tau': 1
}
},
}
def _get_alg_settings(alg_cls: Type[Algorithm]):
if alg_cls not in _settings or _settings[alg_cls] is None:
raise ValueError(f'Algorithm {alg_cls.__name__} not in the settings dictionary.')
settings = _settings[alg_cls]
assert settings is not None
return settings
def get_alg_kwargs(alg_cls: Type[Algorithm]) -> Dict[str, Any]:
"""Return the kwargs for an algorithm."""
return _get_alg_settings(alg_cls)['kwargs']
def get_alg_model(alg_cls: Type[Algorithm]) -> ComposerModel:
"""Return an instance of the model for an algorithm."""
settings = _get_alg_settings(alg_cls)['model']
if isinstance(settings, tuple):
(cls, kwargs) = settings
else:
(cls, kwargs) = (settings, {})
return cls(**kwargs)
def get_alg_dataloader(alg_cls: Type[Algorithm], multigpu=False) -> DataLoader:
"""Return an instance of the dataset for an algorithm."""
settings = _get_alg_settings(alg_cls)
if 'dataloader' in settings:
dataloader_cls, kwargs = settings['dataloader']
if 'dataset' in kwargs and multigpu:
kwargs['sampler'] = dist.get_sampler(kwargs['dataset'])
dataloader = dataloader_cls(**kwargs)
elif 'dataset' in settings:
if isinstance(settings['dataset'], tuple):
dataset_cls, kwargs = settings['dataset']
else:
dataset_cls = settings['dataset']
kwargs = {}
dataset = dataset_cls(**kwargs)
sampler = dist.get_sampler(dataset) if multigpu else None
dataloader = DataLoader(dataset=dataset, batch_size=4, sampler=sampler)
else:
raise ValueError(f'Neither dataset nor dataloader have been provided for algorithm {alg_cls}')
return dataloader
def get_algs_with_marks():
"""Returns a list of algorithms appropriate markers for a subsequent call to pytest.mark.parameterize.
It applies markers as appropriate (e.g. XFAIL for algs missing config)
It reads from the algorithm registry
E.g. @pytest.mark.parametrize("alg_class", get_algs_with_marks())
"""
ans = []
for alg_cls in get_module_subclasses(composer.algorithms, Algorithm):
marks = []
settings = _settings[alg_cls]
if alg_cls in (Alibi, GatedLinearUnits, SeqLengthWarmup):
try:
import transformers
transformers_available = True
del transformers
except ImportError:
transformers_available = False
marks.append(pytest.mark.skipif(not transformers_available, reason='transformers not available'))
if alg_cls == SWA:
# TODO(matthew): Fix
marks.append(
pytest.mark.filterwarnings(
r'ignore:Detected call of `lr_scheduler.step\(\)` before `optimizer.step\(\)`:UserWarning'))
marks.append(
pytest.mark.filterwarnings('ignore:SWA has known issues when resuming from a checkpoint.*:UserWarning'))
if alg_cls == GyroDropout:
marks.append(
pytest.mark.filterwarnings(
'ignore:GyroDropout is not implemented in a way that allows correct resumption.*:UserWarning'))
if alg_cls == SAM:
marks.append(
pytest.mark.filterwarnings(
'ignore:SAM has known issues of weight mismatch when loading from a checkpoint.*:UserWarning'))
if alg_cls == MixUp:
# TODO(Landen): Fix
marks.append(
pytest.mark.filterwarnings(r'ignore:Some targets have less than 1 total probability:UserWarning'))
if alg_cls == FusedLayerNorm:
# FusedLayerNorm requires a GPU in order for the class to exist
marks.append(pytest.mark.gpu)
if alg_cls == SelectiveBackprop:
marks.append(
pytest.mark.filterwarnings(
r'ignore:Cannot split tensor of length .* into batches of size .*:UserWarning'))
if settings is None:
marks.append(pytest.mark.xfail(reason=f'Algorithm {alg_cls.__name__} is missing settings.'))
ans.append(pytest.param(alg_cls, marks=marks, id=alg_cls.__name__))
return ans
| composer-dev | tests/algorithms/algorithm_settings.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
from torch.nn.functional import gelu, relu
from composer.algorithms.gated_linear_units import GatedLinearUnits, apply_gated_linear_units
from composer.algorithms.gated_linear_units.gated_linear_unit_layers import BERTGatedFFOutput
from composer.core import Event, State
from composer.devices import DeviceCPU
from composer.loggers import Logger
from tests.common.datasets import dummy_bert_lm_dataloader, dummy_text_classification_dataloader
from tests.common.models import SimpleTransformerClassifier, configure_tiny_bert_hf_model
def _layernorm(input_tensor, layernorm_eps):
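# Reference layer norm without learnable affine parameters, used to reproduce the module's output by hand.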
mean = torch.mean(input_tensor, dim=-1, keepdim=True)
var = torch.square(input_tensor - mean).mean(dim=-1, keepdim=True)
return (input_tensor - mean) / torch.sqrt(var + layernorm_eps)
@pytest.mark.parametrize('batch_size', [1])
@pytest.mark.parametrize('seq_length', [128, 512])
@pytest.mark.parametrize('d_embed', [768])
@pytest.mark.parametrize('d_ff', [3072])
@pytest.mark.parametrize('dropout_rate', [0.0])
@pytest.mark.parametrize('act_fn', [relu, gelu])
@pytest.mark.parametrize('layernorm_eps', [1e-6])
def test_glu_outputs(batch_size, seq_length, d_embed, d_ff, dropout_rate, act_fn, layernorm_eps):
gated_ff = BERTGatedFFOutput(d_embed=d_embed,
d_ff=d_ff,
dropout_rate=dropout_rate,
act_fn=act_fn,
layernorm_eps=layernorm_eps,
gated_layer_bias=False,
non_gated_layer_bias=False)
hidden_states = torch.rand(batch_size, seq_length, d_embed)
residual_connection = torch.zeros_like(hidden_states)
model_output = gated_ff(hidden_states, residual_connection)
# get rid of the batch dimension when computing the result by hand
hidden_states = hidden_states[0]
manual_output = torch.matmul(hidden_states, gated_ff.gated_layer.weight.transpose(0, 1))
manual_output = act_fn(manual_output)
manual_output = manual_output * torch.matmul(hidden_states, gated_ff.non_gated_layer.weight.transpose(0, 1))
manual_output = torch.matmul(manual_output, gated_ff.wo.weight.transpose(0, 1)) + gated_ff.wo.bias
manual_output = _layernorm(manual_output + residual_connection, layernorm_eps)
assert torch.allclose(manual_output, model_output)
def assert_is_glu_instance(model):
pytest.importorskip('transformers')
from transformers import BertForMaskedLM, BertForSequenceClassification
from transformers.models.bert.modeling_bert import BertOutput
assert isinstance(model, BertForMaskedLM) or isinstance(model, BertForSequenceClassification)
# ensure that within the entire model, no BertOutput exists, and at least one BERTGatedFFOutput does.
assert model.modules is not None, 'model has .modules method'
for module_class in model.modules():
assert not isinstance(
module_class, BertOutput
), 'A transformers.models.bert.modeling_bert.BertOutput should not be found in the model after surgery is applied.'
assert any(
isinstance(module_class, BERTGatedFFOutput) for module_class in model.modules()
), 'composer.algorithms.gated_linear_units.gated_linear_unit_layers.BERTGatedFFOutput is not found in the post-surgery model.'
@pytest.mark.parametrize('model,dataloader', [
(configure_tiny_bert_hf_model, dummy_bert_lm_dataloader),
(pytest.param(
SimpleTransformerClassifier,
dummy_text_classification_dataloader,
marks=pytest.mark.xfail(reason='Gated Linear Units does not currently support non-HuggingFace models'))),
])
def test_gated_linear_units_functional(model, dataloader):
model = model()
dataloader = dataloader()
state = State(
model=model,
rank_zero_seed=0,
run_name='run_name',
device=DeviceCPU(),
dataloader=dataloader,
dataloader_label='train',
max_duration='1ep',
)
apply_gated_linear_units(state.model, state.optimizers)
assert_is_glu_instance(state.model.model)
@pytest.mark.parametrize('model,dataloader', [
(configure_tiny_bert_hf_model, dummy_bert_lm_dataloader),
(pytest.param(
SimpleTransformerClassifier,
dummy_text_classification_dataloader,
marks=pytest.mark.xfail(reason='Gated Linear Units does not currently support non-HuggingFace models'))),
])
def test_gated_linear_units_algorithm(model, dataloader, empty_logger: Logger):
pytest.importorskip('transformers')
from transformers import BertForMaskedLM, BertForSequenceClassification
model = model()
dataloader = dataloader()
state = State(
model=model,
rank_zero_seed=0,
run_name='run_name',
device=DeviceCPU(),
dataloader=dataloader,
dataloader_label='train',
max_duration='1ep',
)
gated_linear_units = GatedLinearUnits()
# state.model is wrapped in a HuggingFaceModel, so check the class of the inner model it wraps
assert isinstance(state.model.model, BertForMaskedLM) or isinstance(state.model.model,
BertForSequenceClassification)
gated_linear_units.apply(Event.INIT, state, empty_logger)
assert_is_glu_instance(state.model.model)
| composer-dev | tests/algorithms/test_gated_linear_units.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pytest
from torch.nn import LayerNorm
from composer.algorithms.fused_layernorm import FusedLayerNorm, apply_fused_layernorm
from composer.core import Event, State
from composer.loggers import Logger
from composer.models.huggingface import HuggingFaceModel
from composer.utils import get_device
from tests.common import device
from tests.common.datasets import dummy_bert_lm_dataloader, dummy_text_classification_dataloader
from tests.common.models import SimpleTransformerClassifier, configure_tiny_bert_hf_model
def assert_is_fln_instance(model):
pytest.importorskip('apex')
from apex.normalization.fused_layer_norm import FusedLayerNorm as APEXFusedLayerNorm
# When checking modules of a HuggingFace model, we need to parse the model object it wraps
# This is not necessary for SimpleTransformerClassifier models.
if isinstance(model, HuggingFaceModel):
model = model.model
# ensure that within the entire model, no PyTorch LayerNorm exists, and at least one APEX FLN does.
assert model.modules is not None, 'model has .modules method'
for module_class in model.modules():
assert not isinstance(
module_class, LayerNorm), 'A torch.nn.LayerNorm should not be found in the model after surgery is applied.'
assert any(isinstance(module_class, APEXFusedLayerNorm) for module_class in model.modules()
), 'apex.normalization.fused_layer_norm is not found in the post-surgery model.'
@device('gpu')
@pytest.mark.parametrize('model,dataloader', [
(configure_tiny_bert_hf_model, dummy_bert_lm_dataloader),
(SimpleTransformerClassifier, dummy_text_classification_dataloader),
])
def test_fused_layernorm_functional(model, dataloader, device: str, request: pytest.FixtureRequest):
model = model()
dataloader = dataloader()
state = State(
model=model,
rank_zero_seed=0,
run_name='run_name',
device=get_device(device),
dataloader=dataloader,
dataloader_label='train',
max_duration='1ep',
)
if device == 'gpu':
state.model = state.model.cuda() # move the model to gpu
apply_fused_layernorm(state.model, state.optimizers)
assert_is_fln_instance(state.model)
@device('gpu')
@pytest.mark.parametrize('model,dataloader', [
(configure_tiny_bert_hf_model, dummy_bert_lm_dataloader),
(SimpleTransformerClassifier, dummy_text_classification_dataloader),
])
def test_fused_layernorm_algorithm(model, dataloader, empty_logger: Logger, device: str):
model = model()
dataloader = dataloader()
state = State(
model=model,
rank_zero_seed=0,
run_name='run_name',
device=get_device(device),
dataloader=dataloader,
dataloader_label='train',
max_duration='1ep',
)
fused_layernorm = FusedLayerNorm()
if device == 'gpu':
state.model = state.model.cuda() # move the model to gpu
fused_layernorm.apply(Event.INIT, state, empty_logger)
assert_is_fln_instance(state.model)
| composer-dev | tests/algorithms/test_fused_layernorm.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import copy
import os
import pathlib
from typing import Type
import pytest
import torch
from composer import Algorithm, Trainer
from composer.algorithms import SAM, SWA, GyroDropout, LayerFreezing, SeqLengthWarmup, StochasticDepth
from composer.utils import dist
from tests.algorithms.algorithm_settings import get_alg_dataloader, get_alg_kwargs, get_alg_model, get_algs_with_marks
from tests.common import deep_compare
from tests.common.markers import world_size
@pytest.mark.gpu
@pytest.mark.parametrize('alg_cls', get_algs_with_marks())
@pytest.mark.filterwarnings('ignore:Detected call of `lr_scheduler.step()'
) # optimizer.step() sometimes skipped when NaN/inf on low batch size
@world_size(1, 2)
def test_algorithm_resumption(
tmp_path: pathlib.Path,
alg_cls: Type[Algorithm],
world_size,
):
folder1 = os.path.join(tmp_path, 'folder1')
folder2 = os.path.join(tmp_path, 'folder2')
os.makedirs(folder1, exist_ok=True)
os.makedirs(folder2, exist_ok=True)
model = get_alg_model(alg_cls)
alg_kwargs = get_alg_kwargs(alg_cls)
copied_model = copy.deepcopy(model) # copy the model so the params will start from the same point
if alg_cls is LayerFreezing:
pytest.xfail('Known issues')
if alg_cls in (SAM, StochasticDepth):
pytest.xfail('Mismatch in weights when resuming from a checkpoint.')
if alg_cls is GyroDropout:
pytest.xfail('GyroDropoutLayer is not implemented in a way that allows correct resumption.')
if alg_cls is SWA and world_size > 1:
pytest.xfail('SWA is not implemented in a way that is compatible with correct resumption on multiple devices.')
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5)
shared_config = {
'max_duration': '2ep',
'save_filename': 'ep{epoch}-rank{rank}',
'save_interval': '1ep',
'train_subset_num_batches': 2,
'precision': 'amp_fp16',
}
train_dataloader = get_alg_dataloader(alg_cls) if world_size == 1 else get_alg_dataloader(alg_cls, multigpu=True)
# train model once, saving checkpoints every epoch
trainer1 = Trainer(
model=model,
train_dataloader=train_dataloader,
optimizers=optimizer,
schedulers=scheduler,
save_folder=folder1,
algorithms=alg_cls(**alg_kwargs),
**shared_config,
)
trainer1.fit()
# create second trainer, load an intermediate checkpoint
# and continue training
optimizer = torch.optim.Adam(copied_model.parameters(), lr=0.01)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5)
alg = alg_cls(**alg_kwargs)
# SeqLengthWarmup has a call to ._activate_model() that happens on the first call to the algorithm
# in order to get complete matching of the rng state, we have to cause that extra call to be skipped
# when reloading.
if alg_cls is SeqLengthWarmup:
alg._activated = True # type: ignore
train_dataloader = get_alg_dataloader(alg_cls) if world_size == 1 else get_alg_dataloader(alg_cls, multigpu=True)
trainer2 = Trainer(
model=copied_model,
train_dataloader=train_dataloader,
load_path=os.path.join(folder1, 'ep1-rank{rank}'),
load_weights_only=False,
load_strict_model_weights=False,
optimizers=optimizer,
schedulers=scheduler,
save_folder=folder2,
algorithms=alg,
**shared_config,
)
trainer2.fit()
# check that the checkpoints are equal
if world_size == 1 or dist.get_global_rank() == 0:
_assert_checkpoints_equal(
file1=os.path.join(folder1, 'ep2-rank0'),
file2=os.path.join(folder2, 'ep2-rank0'),
)
# check that different epoch checkpoints are _not_ equal
# this ensures that the model weights are being updated.
if world_size == 1 or dist.get_global_rank() == 0:
with pytest.raises(AssertionError):
_assert_model_weights_equal(
file1=os.path.join(folder1, 'ep1-rank0'),
file2=os.path.join(folder1, 'ep2-rank0'),
)
def _assert_checkpoints_equal(file1, file2):
checkpoint1 = torch.load(file1)
checkpoint2 = torch.load(file2)
# compare rng
deep_compare(checkpoint1['rng'], checkpoint2['rng'])
# compare state
# remove the wall clock time fields since they will always differ
del checkpoint1['state']['timestamp']['Timestamp']['total_wct']
del checkpoint1['state']['timestamp']['Timestamp']['epoch_wct']
del checkpoint1['state']['timestamp']['Timestamp']['batch_wct']
del checkpoint2['state']['timestamp']['Timestamp']['total_wct']
del checkpoint2['state']['timestamp']['Timestamp']['epoch_wct']
del checkpoint2['state']['timestamp']['Timestamp']['batch_wct']
# delete run_name since it's time-dependent
del checkpoint1['state']['run_name']
del checkpoint2['state']['run_name']
# Remove algorithm representations which are memory addresses
for i, algo_info in enumerate(checkpoint1['state']['algorithms']):
if '0x' in algo_info[1]['repr']:
del checkpoint1['state']['algorithms'][i]
for i, algo_info in enumerate(checkpoint2['state']['algorithms']):
if '0x' in algo_info[1]['repr']:
del checkpoint2['state']['algorithms'][i]
deep_compare(checkpoint1['state'], checkpoint2['state'])
def _assert_model_weights_equal(file1, file2):
checkpoint1 = torch.load(file1)
checkpoint2 = torch.load(file2)
deep_compare(checkpoint1['state']['model'], checkpoint2['state']['model'])
| composer-dev | tests/algorithms/test_algorithm_resumption.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
import torch
from composer.algorithms.channels_last import apply_channels_last
from composer.algorithms.channels_last.channels_last import ChannelsLast
from composer.core.event import Event
from composer.core.state import State
from composer.loggers import Logger
from tests.common import SimpleConvModel
def _has_singleton_dimension(tensor: torch.Tensor) -> bool:
return any(s == 1 for s in tensor.shape)
def _infer_memory_format(tensor: torch.Tensor) -> str:
if _has_singleton_dimension(tensor):
raise ValueError(f'Tensor of shape {tensor.shape} has singleton dimensions, '
'memory format cannot be inferred from strides.')
base_order = list('nchw') # type: ignore
strides = tensor.stride()
if isinstance(strides, tuple) and len(strides) == 4:
order = np.argsort(strides)
# smallest stride should be last in format, so reverse order
memory_format = ''.join([base_order[o] for o in reversed(order)])
return memory_format
else:
raise ValueError(f'Tensor must be 4-dim, got shape {tensor.shape}')
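# Illustrative sketch (not part of the original tests): how stride ordering maps to the
# memory-format string returned above. A contiguous NCHW tensor of shape (2, 3, 4, 5) has strides
# (60, 20, 5, 1), so the ascending stride order, reversed, spells 'nchw'; converting it to
# torch.channels_last gives strides (60, 1, 15, 3), which spells 'nhwc'.
def _memory_format_example():
    x = torch.randn(2, 3, 4, 5)
    assert _infer_memory_format(x) == 'nchw'
    assert _infer_memory_format(x.to(memory_format=torch.channels_last)) == 'nhwc'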
@pytest.fixture()
def state(minimal_state: State):
minimal_state.model = SimpleConvModel()
return minimal_state
@pytest.fixture()
def simple_conv_model():
return SimpleConvModel()
def test_channels_last_functional(simple_conv_model: SimpleConvModel):
model = simple_conv_model
conv = model.conv1
assert _infer_memory_format(conv.weight) == 'nchw'
apply_channels_last(simple_conv_model)
assert _infer_memory_format(conv.weight) == 'nhwc'
@pytest.mark.parametrize(
'device',
[pytest.param('cpu'), pytest.param('gpu', marks=pytest.mark.gpu)],
)
def test_channels_last_algorithm(state: State, empty_logger: Logger, device: str):
channels_last = ChannelsLast()
if device == 'gpu':
state.model = state.model.cuda() # move the model to gpu
assert isinstance(state.model, SimpleConvModel)
assert _infer_memory_format(state.model.conv1.weight) == 'nchw'
channels_last.apply(Event.INIT, state, empty_logger)
assert isinstance(state.model, SimpleConvModel)
assert _infer_memory_format(state.model.conv1.weight) == 'nhwc'
# Test helper utility _infer_memory_format
@pytest.fixture(params=[True, False])
def tensor(request) -> torch.Tensor:
strided = request.param
tensor = torch.randn((16, 32, 32, 64))
if strided:
tensor = tensor[::2, ::2, ::2, ::2]
return tensor
def test_infer_memory_format_nhwc(tensor):
tensor = tensor.to(memory_format=torch.channels_last)
assert _infer_memory_format(tensor) == 'nhwc'
def test_infer_memory_format_nchw(tensor):
tensor = tensor.to(memory_format=torch.contiguous_format)
assert _infer_memory_format(tensor) == 'nchw'
def test_infer_memory_format_wcnh(tensor):
tensor = tensor.to(memory_format=torch.contiguous_format)
tensor = tensor.permute(2, 1, 3, 0)
assert _infer_memory_format(tensor) == 'wcnh'
def test_infer_memory_format_incorrect_ndims():
tensor = torch.randn((16, 32, 32))
with pytest.raises(ValueError):
_infer_memory_format(tensor)
def test_infer_memory_format_singleton():
tensor = torch.randn((16, 32, 1, 64))
with pytest.raises(ValueError):
_infer_memory_format(tensor)
| composer-dev | tests/algorithms/test_channels_last.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
import torch
from composer.algorithms import CutMix
from composer.algorithms.cutmix.cutmix import _rand_bbox, cutmix_batch
from composer.core import Event
from composer.models import ComposerClassifier
# (N, C, d1, d2, num_classes)
@pytest.fixture(params=[(7, 11, 3, 5, 10)])
def fake_data(request):
# Generate some fake data
N, C, d1, d2, num_classes = request.param
x_fake = torch.randn(N, C, d1, d2)
y_fake = torch.randint(num_classes, size=(N,))
indices = torch.randperm(N)
return x_fake, y_fake, indices
def validate_cutmix(x, y, indices, x_cutmix, y_perm, bbox):
# Create shuffled version of x, y for reference checking
x_perm = x[indices]
y_perm_ref = y[indices]
# Explicitly check that the pixels and labels have been mixed correctly.
for i in range(x.size(0)): # Grab N
# Check every pixel of the input data
for j in range(x.size(2)):
for k in range(x.size(3)):
if (j >= bbox[0] and j < bbox[2]) and (k >= bbox[1] and k < bbox[3]):
torch.testing.assert_close(x_perm[i, :, j, k], x_cutmix[i, :, j, k])
else:
torch.testing.assert_close(x[i, :, j, k], x_cutmix[i, :, j, k])
# Check the label
torch.testing.assert_close(y_perm_ref, y_perm)
@pytest.mark.parametrize('alpha', [0.2, 1])
@pytest.mark.parametrize('uniform_sampling', [True, False])
@pytest.mark.parametrize('interpolate_loss', [True, False])
class TestCutMix:
def test_cutmix(self, fake_data, alpha, uniform_sampling, interpolate_loss):
# Generate fake data
x_fake, y_fake, indices = fake_data
# Get lambda based on alpha hparam
cutmix_lambda = np.random.beta(alpha, alpha)
# Get a random bounding box based on cutmix_lambda
cx = np.random.randint(x_fake.shape[2])
cy = np.random.randint(x_fake.shape[3])
bbx1, bby1, bbx2, bby2 = _rand_bbox(W=x_fake.shape[2],
H=x_fake.shape[3],
cutmix_lambda=cutmix_lambda,
cx=cx,
cy=cy,
uniform_sampling=uniform_sampling)
bbox = (bbx1, bby1, bbx2, bby2)
# Adjust lambda
cutmix_lambda = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (x_fake.size()[-1] * x_fake.size()[-2]))
# Apply cutmix
x_cutmix, y_perm, _, _ = cutmix_batch(x_fake,
y_fake,
alpha=1.0,
bbox=bbox,
indices=indices,
uniform_sampling=uniform_sampling)
# Validate results
validate_cutmix(x=x_fake, y=y_fake, indices=indices, x_cutmix=x_cutmix, y_perm=y_perm, bbox=bbox)
def test_cutmix_algorithm(self, fake_data, alpha, uniform_sampling, minimal_state, empty_logger, interpolate_loss):
# Generate fake data
x_fake, y_fake, _ = fake_data
algorithm = CutMix(alpha=alpha, uniform_sampling=uniform_sampling, interpolate_loss=interpolate_loss)
state = minimal_state
state.model = ComposerClassifier(torch.nn.Flatten(), num_classes=2)
state.batch = (x_fake, y_fake)
# Apply algo, use test hooks to specify indices and override internally generated interpolation lambda for testability
algorithm.apply(Event.BEFORE_FORWARD, state, empty_logger)
x, _ = state.batch
y_perm = algorithm._permuted_target
# Validate results
validate_cutmix(x=x_fake, y=y_fake, indices=algorithm._indices, x_cutmix=x, y_perm=y_perm, bbox=algorithm._bbox)
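# Illustrative sketch (not part of the original tests): the adjusted lambda computed in test_cutmix
# above is simply the fraction of original pixels that survive the pasted box. For example, a 4 x 5
# box cut into a 10 x 10 image keeps 1 - (4 * 5) / (10 * 10) = 0.8 of the original pixels. The
# helper below is hypothetical and only restates that formula.
def _adjusted_cutmix_lambda(bbox, height, width):
    bbx1, bby1, bbx2, bby2 = bbox
    return 1 - ((bbx2 - bbx1) * (bby2 - bby1)) / (height * width)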
| composer-dev | tests/algorithms/test_cutmix.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pytest
from torch.nn import GroupNorm
from torch.utils.data import DataLoader
from composer.algorithms.low_precision_groupnorm import LowPrecisionGroupNorm, apply_low_precision_groupnorm
from composer.algorithms.low_precision_groupnorm.low_precision_groupnorm import LPGroupNorm
from composer.core import Event, State
from composer.loggers import Logger
from composer.utils import get_device
from tests.common import RandomImageDataset
from tests.common.models import SimpleConvModel
def assert_is_lpgn_instance(model):
# ensure that within the entire model, no PyTorch GroupNorm exists, and at least one LPGN does.
assert model.modules is not None, 'model has .modules method'
for module_class in model.modules():
if isinstance(module_class, GroupNorm):
assert isinstance(module_class, LPGroupNorm)
assert any(isinstance(module_class, LPGroupNorm) for module_class in model.modules())
@pytest.mark.parametrize('affine', [True, False])
def test_low_precision_groupnorm_functional(affine):
model = SimpleConvModel(norm='group', norm_affine=affine)
dataloader = DataLoader(RandomImageDataset(), batch_size=2)
state = State(
model=model,
rank_zero_seed=0,
run_name='run_name',
dataloader=dataloader,
dataloader_label='train',
max_duration='1ep',
precision='amp_fp16',
device=get_device('cpu'),
)
apply_low_precision_groupnorm(state.model, state._precision, state.optimizers)
assert_is_lpgn_instance(state.model)
@pytest.mark.parametrize('affine', [True, False])
def test_low_precision_groupnorm_algorithm(affine, empty_logger: Logger):
model = SimpleConvModel(norm='group', norm_affine=affine)
dataloader = DataLoader(RandomImageDataset(), batch_size=2)
state = State(
model=model,
rank_zero_seed=0,
run_name='run_name',
dataloader=dataloader,
dataloader_label='train',
max_duration='1ep',
precision='amp_fp16',
device=get_device('cpu'),
)
low_precision_groupnorm = LowPrecisionGroupNorm()
low_precision_groupnorm.apply(Event.INIT, state, empty_logger)
assert_is_lpgn_instance(state.model)
| composer-dev | tests/algorithms/test_low_precision_groupnorm.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from unittest.mock import Mock
import pytest
import torch
from composer.algorithms import Factorize
from composer.algorithms.factorize import FactorizedConv2d, FactorizedLinear
from composer.algorithms.factorize.factorize import LOG_NUM_CONV2D_REPLACEMENTS_KEY, LOG_NUM_LINEAR_REPLACEMENTS_KEY
from composer.core import Event, State
from composer.loggers import Logger
from composer.models import HuggingFaceModel
from composer.utils import module_surgery
from tests.common import SimpleConvModel, SimpleTransformerClassifier
from tests.common.datasets import dummy_tiny_bert_lm_batch, dummy_transformer_classifier_batch
from tests.common.models import configure_tiny_bert_hf_model
def create_state(minimal_state: State, model):
minimal_state.model = model
return minimal_state
def create_algo_instance(replace_convs, replace_linears):
return Factorize(factorize_convs=replace_convs,
factorize_linears=replace_linears,
min_channels=1,
latent_channels=2,
min_features=1,
latent_features=2)
@pytest.mark.parametrize('model_cls, model_params', [(SimpleConvModel, (3, 100)), (SimpleTransformerClassifier, ()),
(configure_tiny_bert_hf_model, ())])
@pytest.mark.parametrize('replace_convs', [False, True])
@pytest.mark.parametrize('replace_linears', [False, True])
def test_factorize_surgery(minimal_state: State, model_cls, model_params, empty_logger: Logger, replace_convs: bool,
replace_linears: bool):
model = model_cls(*model_params)
state = create_state(minimal_state, model)
if (isinstance(model, SimpleTransformerClassifier) or isinstance(model, HuggingFaceModel)) and replace_convs:
pytest.skip('Skipping: NLP models do not contain conv layers.')
algo_instance = create_algo_instance(replace_convs, replace_linears)
num_conv_layers = module_surgery.count_module_instances(state.model, torch.nn.Conv2d)
num_linear_layers = module_surgery.count_module_instances(state.model, torch.nn.Linear)
algo_instance.apply(event=Event.INIT, state=state, logger=empty_logger)
# Each Conv2d/Linear is either unmodified or replaced with a factorized version,
# which contains two Conv2d/Linears submodules.
if algo_instance.factorize_convs:
num_factorized_layers = module_surgery.count_module_instances(state.model, FactorizedConv2d)
num_non_factorized_layers = module_surgery.count_module_instances(state.model,
torch.nn.Conv2d) - 2 * num_factorized_layers
assert num_conv_layers == num_factorized_layers + num_non_factorized_layers
assert num_factorized_layers > 0
if algo_instance.factorize_linears:
num_factorized_layers = module_surgery.count_module_instances(state.model, FactorizedLinear)
num_non_factorized_layers = module_surgery.count_module_instances(state.model,
torch.nn.Linear) - 2 * num_factorized_layers
assert num_linear_layers == num_factorized_layers + num_non_factorized_layers
assert num_factorized_layers > 0
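# Worked example (not part of the original test) of the counting above: if the model starts with
# 4 nn.Linear layers and 3 of them get factorized, each FactorizedLinear contributes 2 nn.Linear
# submodules, so count_module_instances(state.model, torch.nn.Linear) returns 1 + 2 * 3 = 7,
# num_non_factorized_layers = 7 - 2 * 3 = 1, and the assertion 4 == 3 + 1 holds.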
@pytest.mark.parametrize('model_cls, model_params, batch',
[(SimpleConvModel, (3, 100), (torch.Tensor(64, 3, 32, 32), torch.Tensor())),
(SimpleTransformerClassifier, (), dummy_transformer_classifier_batch()),
(configure_tiny_bert_hf_model, (), dummy_tiny_bert_lm_batch())])
@pytest.mark.parametrize('replace_convs', [False, True])
@pytest.mark.parametrize('replace_linears', [False, True])
def test_forward_shape(minimal_state: State, model_cls, model_params, empty_logger: Logger, batch, replace_convs,
replace_linears):
model = model_cls(*model_params)
if (isinstance(model, SimpleTransformerClassifier) or isinstance(model, HuggingFaceModel)) and replace_convs:
pytest.skip('Skipping: NLP models do not contain conv layers.')
if isinstance(model, SimpleTransformerClassifier):
pytest.xfail(
'Factorize does not support torch.nn.MultiheadAttention layers, which are part of the SimpleTransformerClassifier.'
)
state = create_state(minimal_state, model)
algo_instance = create_algo_instance(replace_convs, replace_linears)
output = state.model.forward(batch)
algo_instance.apply(event=Event.INIT, state=state, logger=empty_logger)
new_output = state.model.forward(batch)
if isinstance(model, HuggingFaceModel):
assert output.logits.size() == new_output.logits.size()
else:
assert output.size() == new_output.size()
@pytest.mark.parametrize('model_cls, model_params', [(SimpleConvModel, (3, 100)), (SimpleTransformerClassifier, ()),
(configure_tiny_bert_hf_model, ())])
@pytest.mark.parametrize('replace_convs', [False, True])
@pytest.mark.parametrize('replace_linears', [False, True])
def test_algorithm_logging(minimal_state: State, model_cls, model_params, replace_convs, replace_linears):
model = model_cls(*model_params)
state = create_state(minimal_state, model)
if (isinstance(model, SimpleTransformerClassifier) or isinstance(model, HuggingFaceModel)) and replace_convs:
pytest.skip('Skipping: NLP models do not contain conv layers.')
algo_instance = create_algo_instance(replace_convs, replace_linears)
logger_mock = Mock()
algo_instance.apply(Event.INIT, state, logger=logger_mock)
factorize_convs = algo_instance.factorize_convs
factorize_linears = algo_instance.factorize_linears
mock_obj = logger_mock.log_hyperparameters
if factorize_convs:
num_factorized_convs = module_surgery.count_module_instances(state.model, FactorizedConv2d)
mock_obj.assert_any_call({LOG_NUM_CONV2D_REPLACEMENTS_KEY: num_factorized_convs})
if factorize_linears:
num_factorized_linears = module_surgery.count_module_instances(state.model, FactorizedLinear)
mock_obj.assert_any_call({LOG_NUM_LINEAR_REPLACEMENTS_KEY: num_factorized_linears})
target_count = 0
target_count += 1 if factorize_convs else 0
target_count += 1 if factorize_linears else 0
assert mock_obj.call_count == target_count
| composer-dev | tests/algorithms/test_factorize_algorithm.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
from composer.algorithms.progressive_resizing import ProgressiveResizing, resize_batch
from composer.core import Event
from composer.core.state import State
from composer.core.time import TimeUnit
from composer.loggers import Logger
def check_scaled_shape(orig: torch.Tensor, scaled: torch.Tensor, scale_factor: float) -> bool:
"""Asserts that the scaled shape is correct, given orig shape and scale_factor."""
N, C, H, W = orig.shape
Hc = int(scale_factor * H)
Wc = int(scale_factor * W)
return scaled.shape == (N, C, Hc, Wc)
@pytest.fixture
def Wx() -> int:
return 32
@pytest.fixture
def Hx(Wx: int) -> int:
return Wx
@pytest.fixture
def X(Wx: int, Hx: int):
torch.manual_seed(0)
return torch.randn(16, 8, Hx, Wx)
@pytest.fixture
def Wy(Wx: int) -> int:
return Wx
@pytest.fixture
def Hy(Hx: int) -> int:
return Hx
@pytest.fixture
def y(Wy: int, Hy: int):
torch.manual_seed(0)
return torch.randn(16, 8, Hy, Wy)
@pytest.fixture(params=[0.5, 0.75, 1])
def scale_factor(request) -> float:
return request.param
@pytest.fixture(params=['resize', 'crop'])
def mode(request) -> str:
return request.param
@pytest.fixture
def initial_scale() -> float:
return 0.5
@pytest.fixture
def finetune_fraction() -> float:
return 0.2
@pytest.fixture
def delay_fraction() -> float:
return 0.2
@pytest.fixture
def size_increment() -> float:
return 8
@pytest.fixture
def resize_targets() -> bool:
return False
class TestResizeInputs:
def test_resize_noop(self, X, y, mode):
"""Tests that no operation is performed when scale_factor == 1."""
Xc, _ = resize_batch(X, y, 1.0, mode, resize_targets=False)
assert X is Xc
@pytest.mark.parametrize('y', [None])
def test_without_target(self, X, y):
"""Test that resizing works properly with no target present."""
try:
resize_batch(X, y, 1.0, 'crop', resize_targets=False)
except Exception:
pytest.fail('apply_progressive_resizing failed with y == None')
@pytest.mark.parametrize('Wx,Hx', [(31, 31), (32, 32), (32, 16)])
def test_resize_batch_shape(self, X: torch.Tensor, y: torch.Tensor, mode: str, scale_factor: float):
"""Test scaling works for different input shapes."""
Xc, _ = resize_batch(X, y, scale_factor, mode, resize_targets=False)
assert check_scaled_shape(X, Xc, scale_factor)
def test_resize_outputs_shape(self, X: torch.Tensor, y: torch.Tensor, mode: str, scale_factor: float):
"""Test that resizing outputs works."""
_, yc = resize_batch(X, y, scale_factor, mode, resize_targets=True)
assert check_scaled_shape(y, yc, scale_factor)
def test_resize_outputs_crop(self, X: torch.Tensor, scale_factor: float):
"""Test that resizing outputs in crop mode gives the right targets."""
xc, yc = resize_batch(X, X, scale_factor, 'crop', resize_targets=True)
assert torch.equal(xc, yc)
@pytest.mark.parametrize('Wx,Hx,Wy,Hy', [(32, 32, 16, 16)])
def test_resize_outputs_different_shape(self, X, y, scale_factor: float, mode: str):
"""Test that resizing works when X and y have different shapes."""
_, yc = resize_batch(X, y, scale_factor, mode, resize_targets=True)
assert check_scaled_shape(y, yc, scale_factor)
@pytest.mark.parametrize('mode,initial_scale,finetune_fraction,delay_fraction,size_increment',
[('foo', 0.5, 0.2, 0.2, 8), ('crop', 1.2, 0.2, 0.2, 8), ('crop', 0.5, 1.2, 0.2, 8),
('resize', 0.5, 0.6, 0.5, 8)])
def test_invalid_hparams(mode: str, initial_scale: float, finetune_fraction: float, delay_fraction: float,
size_increment: int):
"""Test that invalid hyperparameters error."""
with pytest.raises(ValueError):
ProgressiveResizing(mode, initial_scale, finetune_fraction, delay_fraction, size_increment, False)
class TestProgressiveResizingAlgorithm:
@pytest.fixture
def pr_algorithm(self, mode, initial_scale, finetune_fraction, delay_fraction, size_increment, resize_targets):
return ProgressiveResizing(mode, initial_scale, finetune_fraction, delay_fraction, size_increment,
resize_targets)
@pytest.mark.parametrize('event', [Event.AFTER_DATALOADER])
def test_match_correct(self, event: Event, pr_algorithm, minimal_state: State):
"""Algo should match AFTER_DATALOADER."""
assert pr_algorithm.match(event, minimal_state)
@pytest.mark.parametrize('event', [Event.INIT])
def test_match_incorrect(self, event: Event, pr_algorithm: ProgressiveResizing, minimal_state: State):
"""Algo should NOT match INIT."""
assert not pr_algorithm.match(event, minimal_state)
@pytest.mark.parametrize('epoch_frac', [0.0, 0.6, 0.8, 1.0])
def test_apply(self, epoch_frac: float, X: torch.Tensor, y: torch.Tensor, pr_algorithm: ProgressiveResizing,
minimal_state: State, empty_logger: Logger):
"""Test apply at different epoch fractions (fraction of max epochs)"""
assert minimal_state.max_duration is not None
assert minimal_state.max_duration.unit == TimeUnit.EPOCH
minimal_state.timestamp.epoch._value = int(epoch_frac * minimal_state.max_duration.value)
s = pr_algorithm.initial_scale
f = pr_algorithm.finetune_fraction
d = pr_algorithm.delay_fraction
p = pr_algorithm.size_increment
if epoch_frac >= d:
scale_factor_elapsed = min([(epoch_frac - d) / (1 - f - d), 1.0])
else:
scale_factor_elapsed = 0.0
scale_factor = s + (1 - s) * scale_factor_elapsed
minimal_state.batch = (X, y)
width = X.shape[3]
scaled_width_pinned = round(width * scale_factor / p) * p
scale_factor_pinned = scaled_width_pinned / width
pr_algorithm.apply(Event.AFTER_DATALOADER, minimal_state, empty_logger)
last_input, _ = minimal_state.batch
assert check_scaled_shape(X, last_input, scale_factor_pinned)
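# Illustrative sketch (not part of the original tests): the schedule math exercised by test_apply,
# written as a hypothetical standalone helper. With initial_scale=0.5, finetune_fraction=0.2,
# delay_fraction=0.2, size_increment=8, epoch_frac=0.6 and a width of 32, the raw scale is
# 0.5 + 0.5 * (0.6 - 0.2) / (1 - 0.2 - 0.2) = 5/6, the width is pinned to
# round(32 * 5/6 / 8) * 8 = 24, and the effective scale becomes 24 / 32 = 0.75.
def _expected_pinned_scale(epoch_frac, width, initial_scale, finetune_fraction, delay_fraction, size_increment):
    if epoch_frac >= delay_fraction:
        elapsed = min((epoch_frac - delay_fraction) / (1 - finetune_fraction - delay_fraction), 1.0)
    else:
        elapsed = 0.0
    scale_factor = initial_scale + (1 - initial_scale) * elapsed
    pinned_width = round(width * scale_factor / size_increment) * size_increment
    return pinned_width / width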
| composer-dev | tests/algorithms/test_progressive_resizing.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from copy import deepcopy
from operator import attrgetter
import pytest
import torch
from composer.algorithms.alibi import Alibi, apply_alibi
from composer.core import Event, State
from composer.devices import DeviceCPU
from composer.loggers import Logger
from tests.common.datasets import dummy_bert_lm_dataloader, dummy_gpt_lm_dataloader
from tests.common.models import configure_tiny_bert_hf_model, configure_tiny_gpt2_hf_model
def _double_batch_sequence_length(batch):
for k, v in batch.items():
if v.ndim >= 2:
batch[k] = torch.cat([v, v], dim=1)
return batch
def check_position_embeddings(model, max_sequence_length):
transformers = pytest.importorskip('transformers')
if isinstance(model.config, transformers.GPT2Config):
position_embedding_attribute = 'model.transformer.wpe'
elif isinstance(model.config, transformers.BertConfig):
position_embedding_attribute = 'model.bert.embeddings.position_embeddings'
else:
raise NotImplementedError('Tests not implemented for model with config=' + str(type(model.config)))
pos_embedding_module = attrgetter(position_embedding_attribute)(model)
pos_embedding_weight = getattr(pos_embedding_module, 'weight')
assert pos_embedding_weight.shape[0] == max_sequence_length
assert not pos_embedding_weight.requires_grad
assert torch.max(torch.abs(pos_embedding_weight)) == 0.0
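# Context note (not from the original file): ALiBi removes learned position embeddings and instead
# biases the attention scores in proportion to key-query distance, which is why the check above
# expects the position-embedding table to be zeroed out and frozen after surgery.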
def check_forward_backward(model, batch):
model.zero_grad()
output = model.forward(batch)
output['loss'].backward()
def check_batch_reshaping(before, after, length):
# Make sure all the batch tensors have the same shape
input_ids_after_shape = after['input_ids'].shape
# Just make sure the lengths are correct
for k in before.keys():
assert k in after, 'No keys should be removed during sequence reshaping.'
assert after[
k].shape == input_ids_after_shape, 'All tensors should have the same size after sequence reshaping.'
b_numel = before[k].shape[0] * before[k].shape[1]
a_numel = after[k].shape[0] * after[k].shape[1]
assert a_numel >= b_numel - length, 'Sequence reshaping should throw away at most curr_sequence_length tokens.'
assert torch.all(after[k][0] == before[k][
0, :input_ids_after_shape[1]]), 'Sequence reshaping should not change the token order.'
for k in after.keys():
assert k in before, 'No keys should be added during sequence reshaping.'
def encountered_alibi_warning(caplog):
"""Return true if the caplog shows an alibi warning in the log"""
for (logger_name, level, _) in caplog.record_tuples:
if 'alibi' in logger_name and level >= 30: # Warnings are level 30
return True
return False
def test_warning_is_triggered(caplog):
"""Test that Alibi triggers a warning when it has no effect."""
pytest.importorskip('transformers')
apply_alibi(
model=torch.nn.Sequential(torch.nn.Linear(20, 10), torch.nn.Linear(10, 5)),
max_sequence_length=64,
)
assert encountered_alibi_warning(caplog), 'A warning should be generated when Alibi has no effect.'
def test_registry(caplog):
"""Test that registry additions are used by Alibi."""
pytest.importorskip('transformers')
from composer.algorithms.alibi.attention_surgery_functions import policy_registry
@policy_registry.register(torch.nn.Linear)
def zero_linear_weights( # pyright: reportUnusedFunction = none
module: torch.nn.Module, idx: int, max_sequence_length: int) -> torch.nn.Module:
assert isinstance(module, torch.nn.Linear)
old_weight = getattr(module, 'weight')
new_weight = torch.nn.Parameter(torch.zeros_like(old_weight))
setattr(module, 'weight', new_weight)
return module
apply_alibi(
model=torch.nn.Sequential(torch.nn.Linear(20, 10), torch.nn.Linear(10, 5)),
max_sequence_length=64,
)
assert not encountered_alibi_warning(caplog), 'No warnings should be generated after adding to the registry.'
del (policy_registry[torch.nn.Linear])
@pytest.mark.parametrize('model,dataloader', [(configure_tiny_bert_hf_model, dummy_bert_lm_dataloader),
(configure_tiny_gpt2_hf_model, dummy_gpt_lm_dataloader)])
class TestAlibi:
def test_functional(
self,
model,
dataloader,
caplog,
):
transformers = pytest.importorskip('transformers')
model = model()
if isinstance(model.config, transformers.GPT2Config):
max_sequence_length = model.config.n_positions
elif isinstance(model.config, transformers.BertConfig):
max_sequence_length = model.config.max_position_embeddings
else:
raise NotImplementedError('Tests not implemented for model with config=' + str(type(model.config)))
dataloader = dataloader(sequence_length=max_sequence_length)
#### With default sequence length ####
# Apply ALiBi using the functional
apply_alibi(
model=model,
max_sequence_length=max_sequence_length,
)
assert not encountered_alibi_warning(caplog) # This should not generate any warnings
# Ensure that the position embeddings are properly shaped and zeroed
check_position_embeddings(model, max_sequence_length)
# Try a forward/backward at the max sequence length
batch = next(iter(dataloader))
assert batch['input_ids'].shape[1] == max_sequence_length
check_forward_backward(model, batch)
#### With double sequence length ####
# Apply ALiBi using the functional
apply_alibi(
model=model,
max_sequence_length=2 * max_sequence_length,
)
assert not encountered_alibi_warning(caplog) # This should not generate any warnings
# Ensure that the position embeddings are properly shaped and zeroed
check_position_embeddings(model, 2 * max_sequence_length)
# Try a forward/backward at the max sequence length
batch = next(iter(dataloader))
batch = _double_batch_sequence_length(batch)
assert batch['input_ids'].shape[1] == 2 * max_sequence_length
check_forward_backward(model, batch)
@pytest.mark.parametrize('train_sequence_length_scaling', [0.25, 1.0])
def test_algorithm(self, model, dataloader, empty_logger: Logger, train_sequence_length_scaling: float, caplog,
request: pytest.FixtureRequest):
transformers = pytest.importorskip('transformers')
model = model()
dataloader = dataloader()
state = State(
model=model,
rank_zero_seed=0,
run_name='run_name',
device=DeviceCPU(),
dataloader=dataloader,
dataloader_label='train',
max_duration='1ep',
)
if isinstance(model.config, transformers.GPT2Config):
max_sequence_length: int = state.model.config.n_positions # type: ignore
elif isinstance(model.config, transformers.BertConfig):
max_sequence_length: int = state.model.config.max_position_embeddings # type: ignore
else:
raise NotImplementedError('Tests not implemented for model with config=' + str(type(model.config)))
# Synthetic dataset has a size of 2 batches per epoch (max duration = 1ep)
alibi = Alibi(
max_sequence_length=max_sequence_length,
train_sequence_length_scaling=train_sequence_length_scaling,
)
# Apply ALiBi to the model
alibi.apply(Event.INIT, state, empty_logger)
assert not encountered_alibi_warning(caplog) # This should not generate any warnings
batch_before = next(iter(dataloader))
state.batch = deepcopy(batch_before)
# Apply any batch reshaping
alibi.apply(Event.AFTER_DATALOADER, state, empty_logger)
# Ensure proper batch reshaping
check_batch_reshaping(batch_before, state.batch, int(train_sequence_length_scaling * max_sequence_length))
# Ensure that the model runs forwards/backwards
check_forward_backward(state.model, state.batch)
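# Illustrative sketch (not part of the original tests), and only one possible way to do the
# reshaping that check_batch_reshaping validates, not necessarily Composer's implementation:
# shrinking the sequence length while keeping (almost) all tokens in order is a flat reshape.
def _shrink_sequence_length(tensor_2d, new_length):
    batch, length = tensor_2d.shape
    usable = (batch * length // new_length) * new_length  # drops at most new_length - 1 tokens
    return tensor_2d.reshape(-1)[:usable].reshape(-1, new_length)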
| composer-dev | tests/algorithms/test_alibi.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""
Tests a variety of export options with our surgery methods applied, including
torchscript, torch.fx, and ONNX.
"""
import os
import pathlib
from typing import Any, Callable, Type
import pytest
import torch
import torch.fx
from composer.algorithms import BlurPool, ChannelsLast, Factorize, GhostBatchNorm, SqueezeExcite, StochasticDepth
from composer.core import Algorithm
from composer.functional import (apply_blurpool, apply_channels_last, apply_factorization, apply_ghost_batchnorm,
apply_squeeze_excite, apply_stochastic_depth)
from tests.algorithms.algorithm_settings import get_alg_kwargs, get_alg_model, get_algs_with_marks
algo_kwargs = {
apply_stochastic_depth: {
'stochastic_method': 'block',
'target_layer_name': 'ResNetBottleneck',
},
apply_ghost_batchnorm: {
'ghost_batch_size': 2
},
}
@pytest.fixture
def input():
# input batch to ComposerModel is (input, target) tuple
return (torch.rand(4, 3, 112, 112), torch.Tensor())
torchscript_algs_with_marks = [
x for x in get_algs_with_marks()
if x.values[0] in (BlurPool, Factorize, GhostBatchNorm, SqueezeExcite, StochasticDepth, ChannelsLast)
]
# <--- torchscript export --->
def get_surgery_method(alg_cls: Type[Algorithm]) -> Callable:
if alg_cls is BlurPool:
return apply_blurpool
if alg_cls is Factorize:
return apply_factorization
if alg_cls is GhostBatchNorm:
return apply_ghost_batchnorm
if alg_cls is SqueezeExcite:
return apply_squeeze_excite
if alg_cls is StochasticDepth:
return apply_stochastic_depth
if alg_cls is ChannelsLast:
return apply_channels_last
raise ValueError(f'Unknown algorithm class {alg_cls}')
@pytest.mark.parametrize('alg_cls', torchscript_algs_with_marks)
def test_surgery_torchscript_train(input: Any, alg_cls: Type[Algorithm]):
"""Tests torchscript model in train mode."""
if alg_cls in (Factorize, GhostBatchNorm, StochasticDepth):
pytest.xfail('Unsupported')
alg_kwargs = get_alg_kwargs(alg_cls)
model = get_alg_model(alg_cls)
surgery_method = get_surgery_method(alg_cls)
alg_kwargs = algo_kwargs.get(surgery_method, alg_kwargs)
surgery_method(model, **alg_kwargs)
scripted_func = torch.jit.script(model)
scripted_func.train() # type: ignore (third-party)
model.train()
torch.testing.assert_close(scripted_func(input), model(input)) # type: ignore (third-party)
@pytest.mark.parametrize('alg_cls', torchscript_algs_with_marks)
def test_surgery_torchscript_eval(input: Any, alg_cls: Type[Algorithm]):
"""Tests torchscript model in eval mode."""
if alg_cls is Factorize:
pytest.xfail('Unsupported')
surgery_method = get_surgery_method(alg_cls)
alg_kwargs = get_alg_kwargs(alg_cls)
model = get_alg_model(alg_cls)
alg_kwargs = algo_kwargs.get(surgery_method, alg_kwargs)
surgery_method(model, **alg_kwargs)
scripted_func = torch.jit.script(model)
scripted_func.eval() # type: ignore (third-party)
model.eval()
torch.testing.assert_close(scripted_func(input), model(input)) # type: ignore (third-party)
# <--- torch.fx export --->
@pytest.mark.parametrize('alg_cls', torchscript_algs_with_marks)
def test_surgery_torchfx_eval(
input: Any,
alg_cls: Type[Algorithm],
):
"""Tests torch.fx model in eval mode."""
alg_kwargs = get_alg_kwargs(alg_cls)
model = get_alg_model(alg_cls)
surgery_method = get_surgery_method(alg_cls)
if alg_cls in (BlurPool, GhostBatchNorm):
pytest.xfail('Control flow')
alg_kwargs = algo_kwargs.get(surgery_method, alg_kwargs)
surgery_method(model, **alg_kwargs)
model.eval()
traced_func = torch.fx.symbolic_trace(model)
torch.testing.assert_close(traced_func(input), model(input)) # type: ignore (third-party)
# <--- onnx export --->
@pytest.mark.parametrize('alg_cls', torchscript_algs_with_marks)
@pytest.mark.filterwarnings(
r'ignore:Converting a tensor to a Python .* might cause the trace to be incorrect:torch.jit._trace.TracerWarning')
@pytest.mark.filterwarnings('ignore:__floordiv__ is deprecated')
def test_surgery_onnx(
input: Any,
alg_cls: Type[Algorithm],
tmp_path: pathlib.Path,
):
"""Tests onnx export and runtime"""
pytest.importorskip('onnx')
pytest.importorskip('onnxruntime')
import onnx
import onnxruntime as ort
surgery_method = get_surgery_method(alg_cls)
model = get_alg_model(alg_cls)
alg_kwargs = get_alg_kwargs(alg_cls)
alg_kwargs = algo_kwargs.get(surgery_method, alg_kwargs)
surgery_method(model, **alg_kwargs)
model.eval()
onnx_path = os.path.join(tmp_path, 'model.onnx')
torch.onnx.export(
model,
(input,),
onnx_path,
input_names=['input'],
output_names=['output'],
)
# check onnx model
onnx_model = onnx.load(onnx_path)
onnx.checker.check_model(onnx_model) # type: ignore (third-party)
# run inference
ort_session = ort.InferenceSession(onnx_path)
outputs = ort_session.run(
None,
{'input': input[0].numpy()},
)
torch.testing.assert_close(
outputs[0],
model(input).detach().numpy(),
rtol=1e-4, # lower tolerance for ONNX
atol=1e-3, # lower tolerance for ONNX
)
| composer-dev | tests/algorithms/test_torch_export.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
| composer-dev | tests/algorithms/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
from composer.algorithms.factorize import FactorizedConv2d, FactorizedLinear
@pytest.mark.parametrize('batch_size', [1, 2])
@pytest.mark.parametrize('h', [5])
@pytest.mark.parametrize('w', [6])
@pytest.mark.parametrize('in_channels', [4, 8])
@pytest.mark.parametrize('out_channels', [4, 8])
@pytest.mark.parametrize('kernel_size', [(1, 1), (2, 2), (3, 3), (1, 3), (3, 1), (5, 5)])
def test_factorized_conv2d_shapes(batch_size, h, w, in_channels, out_channels, kernel_size):
X = torch.randn(batch_size, in_channels, h, w)
conv = FactorizedConv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size)
Y = conv(X)
assert Y.ndim == 4
assert Y.shape[:2] == (batch_size, out_channels)
@pytest.mark.parametrize('batch_size', [1, 2])
@pytest.mark.parametrize('in_features', [7, 8, 11])
@pytest.mark.parametrize('out_features', [4, 6, 8])
def test_factorized_linear_shapes(batch_size, in_features, out_features):
X = torch.randn(batch_size, in_features)
module = FactorizedLinear(in_features=in_features, out_features=out_features)
Y = module(X)
assert Y.ndim == 2
assert Y.shape == (batch_size, out_features)
def test_update_factorized_conv2d_twice():
batch_size = 2
h = 5
w = 6
C_in = 36
C_out = 40
C_latent = 16
X = torch.randn(batch_size, C_in, h, w)
kernel_size = (3, 3)
module = FactorizedConv2d(in_channels=C_in,
out_channels=C_out,
latent_channels=C_latent,
kernel_size=kernel_size,
padding=0)
def _check_conv_shapes(module: FactorizedConv2d, C_in, C_out, C_latent):
assert module.latent_channels == C_latent
assert module.module0 is not None
assert module.module0.in_channels == C_in
assert module.module0.out_channels == C_latent
assert isinstance(module.module0.weight, torch.Tensor)
assert module.module0.weight.shape[:2] == (C_latent, C_in)
assert module.module1 is not None
assert module.module1.in_channels == C_latent
assert module.module1.out_channels == C_out
assert isinstance(module.module1.weight, torch.Tensor)
assert module.module1.weight.shape[:2] == (C_out, C_latent)
for new_C_latent in [12, 8]:
module.set_rank(X, new_C_latent)
_check_conv_shapes(module, C_in=C_in, C_out=C_out, C_latent=new_C_latent)
def test_update_factorized_linear_twice():
batch_size = 2
d_in = 36
d_out = 40
d_latent = 16
X = torch.randn(batch_size, d_in)
module = FactorizedLinear(in_features=d_in, out_features=d_out, latent_features=d_latent)
def _check_shapes(module: FactorizedLinear, d_in, d_out, d_latent):
assert module.latent_features == d_latent
assert module.module0.in_features == d_in
assert module.module0.out_features == d_latent
# linear layer weights have shape (out_features, in_features)
assert module.module0.weight.shape == (d_latent, d_in)
assert module.module1 is not None
assert module.module1.in_features == d_latent
assert module.module1.out_features == d_out
assert module.module1.weight.shape == (d_out, d_latent)
_check_shapes(module, d_in=d_in, d_out=d_out, d_latent=d_latent)
for new_d_latent in [12, 8]:
module.set_rank(X, new_d_latent)
_check_shapes(module, d_in=d_in, d_out=d_out, d_latent=new_d_latent)
@pytest.mark.parametrize('sizes', [(8, 8, 4), (7, 5, 3), (10, 10, 0.5)])
def test_factorized_conv_init_throws_if_latent_too_big(sizes):
with pytest.raises(ValueError):
FactorizedConv2d(in_channels=sizes[0], out_channels=sizes[1], latent_channels=sizes[2], kernel_size=(1, 1))
@pytest.mark.parametrize('sizes', [(8, 8, 4), (7, 5, 3), (10, 10, 0.5)])
def test_factorized_linear_init_throws_if_latent_too_big(sizes):
with pytest.raises(ValueError):
FactorizedLinear(in_features=sizes[0], out_features=sizes[1], latent_features=sizes[2])
@pytest.mark.parametrize('sizes', [(8, 8, 3, 4), (7, 5, 2, 4), (12, 13, 0.3, 5)])
def test_factorized_conv2d_set_rank_throws_if_latent_too_big(sizes):
module = FactorizedConv2d(in_channels=sizes[0], out_channels=sizes[1], kernel_size=1, latent_channels=sizes[2])
X = torch.randn(2, module.in_channels, 8, 8)
with pytest.raises(ValueError):
module.set_rank(X, sizes[3])
@pytest.mark.parametrize('sizes', [(8, 8, 3, 4), (7, 5, 2, 4), (12, 13, 0.3, 5)])
def test_factorized_linear_set_rank_throws_if_latent_too_big(sizes):
module = FactorizedLinear(in_features=sizes[0], out_features=sizes[1], latent_features=sizes[2])
X = torch.randn(2, module.in_features)
with pytest.raises(ValueError):
module.set_rank(X, sizes[3])
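# Illustrative sketch (not part of the original tests): factorization replaces one
# (out_features x in_features) weight with an (latent x in_features) and an (out_features x latent)
# weight. For d_in=36, d_out=40, d_latent=16 that is 16 * 36 + 40 * 16 = 1216 parameters instead of
# 40 * 36 = 1440; when the latent size is too large to give any reduction, the ValueError cases
# exercised by the *_throws_if_latent_too_big tests above apply.
def _factorized_weight_params(d_in, d_out, d_latent):
    return d_latent * d_in + d_out * d_latent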
| composer-dev | tests/algorithms/test_factorized_modules.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from typing import Callable
import pytest
import torch
from composer.algorithms.selective_backprop import SelectiveBackprop
from composer.algorithms.selective_backprop.selective_backprop import select_using_loss, should_selective_backprop
from composer.core import Event
from composer.core.state import State
from composer.loggers import Logger
from composer.models import ComposerClassifier
@pytest.fixture
def N() -> int:
"""Batch size."""
return 16
@pytest.fixture
def D() -> int:
"""Input dimension."""
return 8
@pytest.fixture
def X(N: int, D: int) -> torch.Tensor:
"""2D input."""
torch.manual_seed(42)
return torch.randn(N, D)
@pytest.fixture
def X3D(N: int, D: int) -> torch.Tensor:
"""3D input."""
torch.manual_seed(42)
return torch.randn(N, D, D)
@pytest.fixture
def X5D(N: int, D: int) -> torch.Tensor:
"""5D input."""
torch.manual_seed(42)
return torch.randn(N, D, D, D, D)
@pytest.fixture
def Ximage(N: int) -> torch.Tensor:
"""4D image input."""
torch.manual_seed(42)
return torch.randn(N, 3, 32, 32)
@pytest.fixture
def y(N: int) -> torch.Tensor:
"""Target."""
torch.manual_seed(42)
return torch.randint(2, (N,))
@pytest.fixture
def loss_fun() -> Callable:
"""Fake loss function."""
def loss(output, target, reduction='none'):
return torch.ones_like(target)
return loss
@pytest.fixture
def loss_fun_tuple() -> Callable:
"""Fake loss function that requires a batch tuple."""
def loss(output, batch, reduction='none'):
_, target = batch
return torch.ones_like(target)
return loss
@pytest.fixture
def bad_loss() -> Callable:
"""Fake loss function that will error."""
def loss(output, target):
return 0
return loss
@pytest.fixture
def model(X: torch.Tensor) -> torch.nn.Module:
"""Simple fake linear model."""
return torch.nn.Linear(X.shape[1], 1)
@pytest.fixture
def model3D(X3D: torch.Tensor) -> torch.nn.Module:
"""Simple fake linear model."""
return torch.nn.Sequential(torch.nn.AdaptiveAvgPool1d(1), torch.nn.Flatten(), torch.nn.Linear(X3D.shape[1], 1))
@pytest.fixture
def model5D(X5D: torch.Tensor) -> torch.nn.Module:
"""Simple fake linear model."""
return torch.nn.Sequential(torch.nn.AdaptiveAvgPool3d(1), torch.nn.Flatten(), torch.nn.Linear(X5D.shape[1], 1))
@pytest.fixture
def keep() -> float:
"""keep hparam."""
return 0.5
@pytest.fixture
def scale_factor() -> float:
"""scale_factor hparam."""
return 0.5
@pytest.fixture
def epoch() -> int:
"""Default epoch."""
return 5
@pytest.fixture
def batch() -> int:
"""Default batch."""
return 0
@pytest.fixture
def conv_model(Ximage: torch.Tensor, D: int) -> ComposerClassifier:
"""Dummy conv model."""
return ComposerClassifier(torch.nn.Conv2d(Ximage.shape[1], D, 3), num_classes=2)
@pytest.fixture
def state(minimal_state: State, conv_model: ComposerClassifier, loss_fun_tuple: Callable, epoch: int,
batch: int) -> State:
"""State with required values set for Selective Backprop."""
assert minimal_state.dataloader_len is not None
conv_model.loss = loss_fun_tuple
minimal_state.model = conv_model
minimal_state.timestamp = minimal_state.timestamp.copy(
epoch=epoch,
batch=epoch * int(minimal_state.dataloader_len) + batch,
batch_in_epoch=batch,
)
return minimal_state
# tests of the functional API
class TestSelectiveBackprop:
@pytest.mark.parametrize('epoch,batch,interrupt', [(10, 0, 0), (10, 0, 2), (10, 2, 2)])
def test_select_using_loss_true(self, epoch: int, batch: int, interrupt: int) -> None:
"""Test functional match when epoch is within interval."""
start = 5
end = 15
is_chosen = should_selective_backprop(epoch, batch, start, end, interrupt)
assert is_chosen
@pytest.mark.parametrize('epoch,batch,interrupt', [(0, 0, 0), (20, 0, 0), (10, 1, 2)])
def test_select_using_loss_false(self, epoch: int, batch: int, interrupt: int) -> None:
"""Test functional doesn't match when epoch is outside of interval."""
start = 5
end = 15
is_chosen = should_selective_backprop(epoch, batch, start, end, interrupt)
assert not is_chosen
@pytest.mark.parametrize('keep', [0.5])
@pytest.mark.parametrize('scale_factor', [0.5])
@pytest.mark.xfail()
def test_selective_output_shape_3D(self, X3D: torch.Tensor, y: torch.Tensor, model: torch.nn.Module,
loss_fun: Callable, keep: float, scale_factor: float) -> None:
"""Test functional selection on 3D inputs."""
N, D, _ = X3D.shape
X_scaled, y_scaled = select_using_loss(X3D, y, model, loss_fun, keep, scale_factor)
assert X_scaled.shape == (int(N * keep), D, D)
assert y_scaled.shape == (int(N * keep),)
@pytest.mark.parametrize('keep', [1, 0.5, 0.75])
@pytest.mark.parametrize('scale_factor', [1])
def test_selective_output_shape(self, X: torch.Tensor, y: torch.Tensor, model: torch.nn.Module, loss_fun: Callable,
keep: float, scale_factor: float) -> None:
"""Test functional selection on 2D inputs."""
N, D = X.shape
X_scaled, y_scaled = select_using_loss(X, y, model, loss_fun, keep, scale_factor)
assert X_scaled.shape == (int(N * keep), D)
assert y_scaled.shape == (int(N * keep),)
@pytest.mark.parametrize('keep', [0.5, 0.75, 1])
@pytest.mark.parametrize('scale_factor', [0.5, 0.75])
def test_selective_output_shape_scaled(self, Ximage: torch.Tensor, y: torch.Tensor, conv_model: ComposerClassifier,
loss_fun: Callable, keep: float, scale_factor: float) -> None:
"""Test functional selection on 4D inputs."""
N, C, H, W = Ximage.shape
X_scaled, y_scaled = select_using_loss(Ximage, y, conv_model.module, loss_fun, keep, scale_factor)
assert X_scaled.shape == (int(N * keep), C, H, W)
assert y_scaled.shape == (int(N * keep),)
def test_selective_backprop_interp_dim_error(self, X: torch.Tensor, y: torch.Tensor, model: torch.nn.Module,
loss_fun: Callable) -> None:
"""Ensure that ValueError is raised when input tensor can't be scaled."""
with pytest.raises(ValueError):
select_using_loss(X, y, model, loss_fun, 1, 0.5)
def test_selective_backprop_bad_loss_error(self, X: torch.Tensor, y: torch.Tensor, model: torch.nn.Module,
bad_loss: Callable) -> None:
"""Ensure that ValueError is raised when loss function doesn't have `reduction` kwarg."""
with pytest.raises(TypeError) as execinfo:
select_using_loss(X, y, model, bad_loss, 1, 1)
MATCH = 'must take a keyword argument `reduction`.'
assert MATCH in str(execinfo.value)
class TestSelectiveBackpropAlgorithm:
"""
Test Selective Backprop Algorithm
"""
@pytest.fixture
def sb_algorithm(self, scale_factor, keep) -> SelectiveBackprop:
return SelectiveBackprop(
start=0.5,
end=0.8,
keep=keep,
scale_factor=scale_factor,
interrupt=2,
)
@pytest.mark.parametrize('event', [Event.AFTER_DATALOADER])
@pytest.mark.parametrize('epoch,batch', [(5, 0), (7, 0), (5, 2)])
def test_match_correct(self, event: Event, sb_algorithm: SelectiveBackprop, state: State) -> None:
"""Algo should match AFTER_DATALOADER in the right interval."""
state.max_duration = '10ep'
assert sb_algorithm.match(event, state)
@pytest.mark.parametrize('event,epoch,batch', [(Event.AFTER_DATALOADER, 0, 0), (Event.AFTER_DATALOADER, 5, 1)])
def test_match_incorrect(self, event: Event, sb_algorithm: SelectiveBackprop, state: State) -> None:
"""Algo should NOT match the wrong interval."""
state.max_duration = '10ep'
assert not sb_algorithm.match(event, state)
@pytest.mark.parametrize('epoch,batch', [(5, 0)])
@pytest.mark.parametrize('keep', [0.5, 0.75, 1])
@pytest.mark.parametrize('scale_factor', [0.5, 1])
def test_apply(self, Ximage: torch.Tensor, y: torch.Tensor, sb_algorithm: SelectiveBackprop, state: State,
empty_logger: Logger, keep: float) -> None:
"""Test apply with image inputs gives the right output shape."""
N, C, H, W = Ximage.shape
state.max_duration = '10ep'
state.batch = (Ximage, y)
sb_algorithm.apply(Event.INIT, state, empty_logger)
sb_algorithm.apply(Event.AFTER_DATALOADER, state, empty_logger)
X_scaled, y_scaled = state.batch
assert X_scaled.shape == (int(N * keep), C, H, W)
assert y_scaled.shape == (int(N * keep),)
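# Worked example (not part of the original tests) of the interval logic exercised above: with
# start=5, end=15 and interrupt=2, should_selective_backprop returns True for (epoch=10, batch=0)
# and (epoch=10, batch=2) but False for (epoch=10, batch=1) and for any epoch outside [5, 15),
# i.e. selection only happens inside the epoch window and is periodically interrupted so that some
# batches still get a full backward pass.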
| composer-dev | tests/algorithms/test_selective_backprop.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Test the blurpool algorithm.
Primitives are tested separately in the blurpool layer tests.
"""
from typing import List
from unittest.mock import Mock
import pytest
import torch
from composer.algorithms import BlurPool
from composer.algorithms.blurpool import apply_blurpool
from composer.algorithms.blurpool.blurpool_layers import BlurConv2d, BlurMaxPool2d
from composer.algorithms.warnings import NoEffectWarning
from composer.core import Event, State
from composer.loggers import Logger
from composer.utils import module_surgery
from tests.common import ConvModel
@pytest.fixture
def state(minimal_state: State):
minimal_state.model = ConvModel()
return minimal_state
@pytest.fixture(params=[
# replace_conv, replace_pool, blur_first
(True, True, True),
(True, True, False),
(True, False, True),
(True, False, False),
(False, True, True),
(False, True, False),
])
def blurpool_instance(request) -> BlurPool:
replace_conv, replace_pool, blur_first = request.param
return BlurPool(
replace_convs=replace_conv,
replace_maxpools=replace_pool,
blur_first=blur_first,
)
def test_blurconv(state: State, blurpool_instance: BlurPool, empty_logger: Logger):
blurpool_instance.apply(Event.INIT, state, empty_logger)
assert isinstance(state.model, ConvModel)
if blurpool_instance.replace_convs:
assert type(state.model.conv1) is BlurConv2d
else:
assert type(state.model.conv1) is torch.nn.Conv2d
def test_maybe_replace_strided_conv_stride(state: State, blurpool_instance: BlurPool, empty_logger: Logger):
blurpool_instance.apply(Event.INIT, state, empty_logger)
assert isinstance(state.model, ConvModel)
assert type(state.model.conv3) is torch.nn.Conv2d # stride = 1, should be no replacement
def test_maybe_replace_strided_conv_channels(state: State, blurpool_instance: BlurPool, empty_logger: Logger):
blurpool_instance.apply(Event.INIT, state, empty_logger)
assert isinstance(state.model, ConvModel)
assert type(state.model.conv2) is torch.nn.Conv2d # channels < 16, should be no replacement
def test_blurconv_weights_preserved(state: State, blurpool_instance: BlurPool, empty_logger: Logger):
assert isinstance(state.model, ConvModel)
original_weights = state.model.conv1.weight.clone()
blurpool_instance.apply(Event.INIT, state, empty_logger)
if isinstance(state.model.conv1, BlurConv2d):
new_weights = state.model.conv1.conv.weight
elif isinstance(state.model.conv1, torch.nn.Conv2d):
new_weights = state.model.conv1.weight
else:
raise TypeError(f'Layer type {type(state.model.conv1)} not expected.')
assert torch.allclose(original_weights, new_weights)
def test_blurpool(state: State, blurpool_instance: BlurPool, empty_logger: Logger):
blurpool_instance.apply(Event.INIT, state, empty_logger)
assert isinstance(state.model, ConvModel)
if blurpool_instance.replace_maxpools:
assert type(state.model.pool1) is BlurMaxPool2d
else:
assert type(state.model.pool1) is torch.nn.MaxPool2d
def test_blurpool_wrong_event(state: State, blurpool_instance: BlurPool):
    assert not blurpool_instance.match(Event.BATCH_START, state)
def test_blurpool_correct_event(state: State, blurpool_instance: BlurPool):
    assert blurpool_instance.match(Event.INIT, state)
def test_blurpool_algorithm_logging(state: State, blurpool_instance: BlurPool):
mock_logger = Mock()
blurpool_instance.apply(Event.INIT, state, mock_logger)
mock_logger.log_hyperparameters.assert_called_once_with({
'blurpool/num_blurpool_layers': 1 if blurpool_instance.replace_maxpools else 0,
'blurpool/num_blurconv_layers': 1 if blurpool_instance.replace_convs else 0,
})
def test_blurpool_noeffectwarning():
model = torch.nn.Linear(in_features=16, out_features=32)
with pytest.warns(NoEffectWarning):
apply_blurpool(model)
def test_blurpool_min_channels():
model = torch.nn.Conv2d(in_channels=32, out_channels=64, stride=1, kernel_size=(3, 3))
with pytest.warns(NoEffectWarning):
apply_blurpool(model, min_channels=64)
def test_blurconv2d_optimizer_params_updated():
model = ConvModel()
original_layer = model.conv1
assert original_layer.stride == (2, 2) # fail fast if test model changes
optimizer = torch.optim.SGD(model.parameters(), lr=.01)
apply_blurpool(model, optimizers=optimizer)
new_layer = model.conv1
param_list: List[torch.Tensor] = optimizer.param_groups[0]['params']
# assert old parameters removed
assert not module_surgery._tensor_in(original_layer.weight, param_list)
# new params added
new_conv_layer = new_layer.conv
assert isinstance(new_conv_layer, torch.nn.Conv2d)
assert new_conv_layer.weight is not original_layer.weight
assert module_surgery._tensor_in(new_conv_layer.weight, param_list)
| composer-dev | tests/algorithms/test_blurpool.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import itertools
import pytest
import torch
from composer.algorithms import blurpool
def generate_pool_args():
n_vals = [2]
c_vals = [2]
size_vals = [(3, 3), (3, 7), (4, 4)]
strides = [1, 2]
filter_size_vals = [(1, 1), (1, 3), (3, 3)]
return list(itertools.product(n_vals, c_vals, size_vals, strides, filter_size_vals))
@pytest.mark.parametrize('pool_args', generate_pool_args())
def test_blurmaxpool_shapes(pool_args):
n, c, sz, stride, kernel_size = pool_args
X = torch.randn(n, c, sz[0], sz[1])
layer_args = {'kernel_size': kernel_size, 'stride': stride, 'dilation': 1}
blurpool_layer = blurpool.BlurMaxPool2d(**layer_args)
maxpool_layer = torch.nn.MaxPool2d(**layer_args)
assert blurpool_layer(X).shape == maxpool_layer(X).shape
@pytest.mark.parametrize('blur_first', [True, False])
@pytest.mark.parametrize('pool_args', generate_pool_args())
def test_blurconv2d_shapes(pool_args, blur_first):
n, c, sz, stride, kernel_size = pool_args
X = torch.randn(n, c, sz[0], sz[1])
layer_args = {'kernel_size': kernel_size, 'stride': stride, 'dilation': 1, 'in_channels': c, 'out_channels': c + 1}
blurconv2d_layer = blurpool.BlurConv2d(**layer_args, blur_first=blur_first)
conv2d_layer = torch.nn.Conv2d(**layer_args)
assert blurconv2d_layer(X).shape == conv2d_layer(X).shape
@pytest.mark.parametrize('pool_args', generate_pool_args())
def test_blur2d_shapes(pool_args):
n, c, sz, _, _ = pool_args
X = torch.randn(n, c, sz[0], sz[1])
out = blurpool.blur_2d(X)
assert out.shape == X.shape
def test_default_2d_filter():
def reference_filter():
filt = torch.FloatTensor([1, 2, 1])
filt = torch.outer(filt, filt)
filt *= 1. / filt.sum()
filt = torch.Tensor(filt)
return filt.view(1, 1, *filt.shape)
torch.testing.assert_close(
blurpool.blurpool_layers._default_2d_filter(), # type: ignore
reference_filter(),
)
@pytest.mark.parametrize('pool_args', generate_pool_args())
def test_blur2d_std(pool_args):
n, c, sz, _, _ = pool_args
X = torch.randn(n, c, sz[0], sz[1])
out = blurpool.blur_2d(X)
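    # blur_2d applies a normalized (sum-to-one) smoothing kernel, so it averages neighboring
    # pixels; this should not increase the spread of values, hence std(out) <= std(X).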
assert torch.std(out) <= torch.std(X)
def test_blurpool_blurconv2d_params_match_original_params():
conv2d = torch.nn.Conv2d(16, 32, 3, stride=1, bias=True)
blurconv = blurpool.BlurConv2d.from_conv2d(conv2d)
torch.testing.assert_close(blurconv.conv.weight, conv2d.weight)
torch.testing.assert_close(blurconv.conv.bias, conv2d.bias)
assert blurconv.conv.weight.requires_grad
assert blurconv.conv.bias is not None
assert blurconv.conv.bias.requires_grad
| composer-dev | tests/algorithms/test_blurpool_layers.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
import torch.nn.functional as F
from composer.algorithms import LabelSmoothing, label_smoothing
from composer.core import Event
def _generate_tensors_classification(batch_size: int, num_classes: int):
"""Helper functions to generate input, target pairs for image classification (1d indices)"""
N = batch_size
C = num_classes
target_indices = torch.randint(0, C, [N])
target_onehot = F.one_hot(target_indices, num_classes=C)
input = F.softmax(torch.randn((N, C)), dim=1)
return (input, target_indices, target_onehot)
def _generate_tensors_segmentation(batch_size: int, num_classes: int, H: int, W: int):
"""Helper functions to generate input, target pairs for image segmentation (2d indices)"""
N = batch_size
C = num_classes
target_indices = torch.randint(0, C, (N, H, W))
target_onehot = F.one_hot(target_indices, num_classes=C) # NHWC
target_onehot = torch.movedim(target_onehot, -1, 1).contiguous() # NCHW
input = F.softmax(torch.randn((N, C, H, W)), dim=1)
return (input, target_indices, target_onehot)
def xfail(val):
"""shorthand to mark xfail parameters."""
return pytest.param(val, marks=pytest.mark.xfail)
def generate_tensors():
return [
# binary classification
_generate_tensors_classification(batch_size=64, num_classes=2),
# classification
_generate_tensors_classification(batch_size=64, num_classes=10),
# segmentation
_generate_tensors_segmentation(batch_size=64, num_classes=2, H=5, W=5),
_generate_tensors_segmentation(batch_size=64, num_classes=10, H=5, W=5)
]
@pytest.mark.parametrize('smoothing', [0, 0.1, 0.5, 0.9, 1.0])
@pytest.mark.parametrize('tensors', generate_tensors())
class TestLabelSmoothing:
@staticmethod
def reference_smooth_labels(targets, smoothing):
num_classes = targets.shape[1]
return targets * (1 - smoothing) + smoothing / num_classes
def test_label_smoothing(self, tensors, smoothing):
(input, target_indices, target_onehot) = tensors
labels_onehot = label_smoothing.smooth_labels(input, target_onehot, smoothing)
labels_indices = label_smoothing.smooth_labels(input, target_indices, smoothing)
labels_ref = self.reference_smooth_labels(target_onehot, smoothing)
torch.testing.assert_close(labels_onehot, labels_ref)
torch.testing.assert_close(labels_indices, labels_ref)
@pytest.mark.parametrize('target_type', ['onehot', 'indices'])
def test_label_smoothing_algorithm(self, tensors, smoothing, target_type, empty_logger, minimal_state):
(outputs, target_indices, target_onehot) = tensors
target = target_indices if target_type == 'indices' else target_onehot
algorithm = LabelSmoothing(smoothing=smoothing)
state = minimal_state
state.batch = (torch.Tensor(), target)
state.outputs = outputs
# BEFORE_LOSS should smooth the labels
algorithm.apply(Event.BEFORE_LOSS, state, empty_logger)
smoothed_reference = self.reference_smooth_labels(target_onehot, smoothing)
_, labels = state.batch
torch.testing.assert_close(labels, smoothed_reference)
# AFTER_LOSS should restore the original targets
algorithm.apply(Event.AFTER_LOSS, state, empty_logger)
_, labels = state.batch
torch.testing.assert_close(labels, target)
| composer-dev | tests/algorithms/test_label_smoothing.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from typing import Type
import pytest
from composer import Algorithm, Trainer
from composer.algorithms import GyroDropout, LayerFreezing
from tests.algorithms.algorithm_settings import get_alg_dataloader, get_alg_kwargs, get_alg_model, get_algs_with_marks
@pytest.mark.gpu
@pytest.mark.parametrize('alg_cls', get_algs_with_marks())
def test_algorithm_trains(alg_cls: Type[Algorithm]):
alg_kwargs = get_alg_kwargs(alg_cls)
model = get_alg_model(alg_cls)
dataloader = get_alg_dataloader(alg_cls)
trainer = Trainer(
model=model,
train_dataloader=dataloader,
max_duration='2ep',
algorithms=alg_cls(**alg_kwargs),
)
trainer.fit()
if alg_cls is LayerFreezing:
pytest.xfail(('Layer freezing is incompatible with a second call to .fit() '
'since all layers are frozen, and it does not unfreeze layers.'))
if alg_cls is GyroDropout:
pytest.xfail(
'GyroDropout is implemented to be applied on Event.FIT_START, so is not compatible with multiple calls to fit.'
)
# fit again for another epoch
trainer.fit(duration='1ep')
| composer-dev | tests/algorithms/test_algorithms_train.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
from composer.algorithms import MixUp
from composer.algorithms.mixup.mixup import _gen_mixing_coef, mixup_batch
from composer.core import Event
from composer.models import ComposerClassifier
# (N, C, d1, d2, num_classes)
@pytest.fixture(params=[(7, 11, 3, 5, 10)])
def fake_data(request):
# Generate some fake data
N, C, d1, d2, num_classes = request.param
torch.manual_seed(0)
x_fake = torch.randn(N, C, d1, d2)
y_fake = torch.randint(num_classes, size=(N,))
indices = torch.randperm(N)
return x_fake, y_fake, indices
def validate_mixup_batch(x, y, indices, x_mix, y_perm, mixing):
# Explicitly check that the batches and labels have been mixed correctly.
for i in range(x.size(0)): # Grab N
j = indices[i]
# Check the input data
x_mix_test = (1 - mixing) * x[i] + mixing * x[j]
torch.testing.assert_close(x_mix_test, x_mix[i])
# Check the label
perm_label = y[j]
torch.testing.assert_close(perm_label, y_perm[i])
@pytest.mark.parametrize('alpha', [.2, 1])
@pytest.mark.parametrize('interpolate_loss', [True, False])
class TestMixUp:
def test_mixup_batch(self, fake_data, alpha, interpolate_loss):
# Generate fake data
x_fake, y_fake, indices = fake_data
# Get interpolation lambda based on alpha hparam
mixing = _gen_mixing_coef(alpha)
# Apply mixup
x_mix, y_perm, _ = mixup_batch(x_fake, y_fake, mixing=mixing, indices=indices)
# Validate results
validate_mixup_batch(x_fake, y_fake, indices, x_mix, y_perm, mixing)
def test_mixup_algorithm(self, fake_data, alpha, interpolate_loss, minimal_state, empty_logger):
# Generate fake data
x_fake, y_fake, _ = fake_data
algorithm = MixUp(alpha=alpha, interpolate_loss=interpolate_loss)
state = minimal_state
state.model = ComposerClassifier(torch.nn.Flatten(), num_classes=y_fake.shape[0])
state.batch = (x_fake, y_fake)
# Apply algo, use test hooks to specify indices and override internally generated interpolation lambda for testability
algorithm.apply(Event.BEFORE_FORWARD, state, empty_logger)
x, _ = state.batch
# Use algorithm generated indices and mixing_coef for validation
validate_mixup_batch(x_fake, y_fake, algorithm.indices, x, algorithm.permuted_target, algorithm.mixing)
| composer-dev | tests/algorithms/test_mixup.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import functools
from typing import Tuple
import numpy as np
import pytest
import torch
from PIL import Image
from torch.utils.data import DataLoader
from composer.algorithms import ColOut
from composer.algorithms.colout.colout import ColOutTransform, colout_batch
from composer.core import Event, State
from composer.loggers import Logger
from tests.common import RandomImageDataset
def verify_shape_image(orig: Image.Image, new: Image.Image, p_row: float, p_col: float) -> None:
"""Verify the shape of a transformed PIL Image."""
H_o, W_o = orig.height, orig.width
H_n, W_n = new.height, new.width
H_t = int((1 - p_row) * H_o)
W_t = int((1 - p_col) * W_o)
assert (H_n, W_n) == (H_t, W_t), f'Image shape mismatch: {(H_n, W_n)} != {(H_t, W_t)}'
def verify_shape_image_pair(orig_sample: Tuple[Image.Image, Image.Image], new_sample: Tuple[Image.Image, Image.Image],
p_row: float, p_col: float):
"""Verify the shape of a pair of transformed PIL images."""
H_o, W_o = orig_sample[0].height, orig_sample[0].width
H_t = int((1 - p_row) * H_o)
W_t = int((1 - p_col) * W_o)
assert new_sample[0].size == (H_t, W_t), f'Input Image shape mismatch: {new_sample[0].size} != {(H_t, W_t)}'
assert new_sample[1].size == (H_t, W_t), f'Target image shape mismatch: {new_sample[1].size} != {(H_t, W_t)}'
def verify_shape_tensor(orig: torch.Tensor, new: torch.Tensor, p_row: float, p_col: float) -> None:
"""Verify the shape of a transformed image tensor."""
C, H_o, W_o = orig.shape
H_t = int((1 - p_row) * H_o)
W_t = int((1 - p_col) * W_o)
assert new.shape == (C, H_t, W_t), f'Image tensor shape mismatch: {new.shape} != {(C, H_t, W_t)}'
def verify_shape_tensor_pair(orig_sample: Tuple[torch.Tensor, torch.Tensor],
new_sample: Tuple[torch.Tensor, torch.Tensor], p_row: float, p_col: float) -> None:
"""Verify the shape of a transformed image tensor."""
C, H_o, W_o = orig_sample[0].shape
H_t = int((1 - p_row) * H_o)
W_t = int((1 - p_col) * W_o)
assert new_sample[0].shape == (C, H_t, W_t), f'Input shape mismatch: {new_sample[0].shape} != {(C, H_t, W_t)}'
    assert new_sample[1].shape == (C, H_t, W_t), f'Target shape mismatch: {new_sample[1].shape} != {(C, H_t, W_t)}'
def verify_shape_batch(orig: torch.Tensor, new: torch.Tensor, p_row: float, p_col: float) -> None:
"""Verify the shape of a transformed batch of images."""
N, C, H_o, W_o = orig.shape
H_t = int((1 - p_row) * H_o)
W_t = int((1 - p_col) * W_o)
assert new.shape == (N, C, H_t, W_t), f'Image batch shape mismatch: {new.shape} != {(N, C, H_t, W_t)}'
def verify_shape_batch_pair(orig_sample: Tuple[torch.Tensor, torch.Tensor],
new_sample: Tuple[torch.Tensor, torch.Tensor], p_row: float, p_col: float) -> None:
"""Verify the shape of a transformed batch of images."""
N, C, H_o, W_o = orig_sample[0].shape
H_t = int((1 - p_row) * H_o)
W_t = int((1 - p_col) * W_o)
assert new_sample[0].shape == (N, C, H_t, W_t), f'Input shape mismatch: {new_sample[0].shape} != {(N, C, H_t, W_t)}'
assert new_sample[1].shape == (N, C, H_t,
W_t), f'Target shape mismatch: {new_sample[1].shape} != {(N, C, H_t, W_t)}'
@pytest.fixture(params=[False, True])
def batch(request) -> bool:
"""Algorithm batch parameter."""
return request.param
@pytest.fixture(params=[0, 0.15])
def p_row(request) -> float:
"""Algorithm p_row parameter."""
return request.param
@pytest.fixture
def p_col(p_row) -> float:
"""Algorithm p_col parameter."""
return p_row
@pytest.fixture(params=[1, 3])
def C(request) -> int:
"""Number of image channels.
Testing BW and RGB.
"""
return request.param
@pytest.fixture
def H(request) -> int:
"""Default image height."""
return 32
@pytest.fixture
def W(H) -> int:
"""Default image width (equal to height)"""
return H
@pytest.fixture
def fake_image(H: int, W: int, C: int) -> Image.Image:
"""Fake PIL Image."""
return Image.fromarray((255 * np.random.uniform(size=(H, W, C)).squeeze()).astype(np.uint8))
@pytest.fixture
def fake_image_tensor(H: int, W: int, C: int) -> torch.Tensor:
"""Fake image tensor."""
return torch.rand(C, H, W)
@pytest.fixture
def fake_image_batch(H: int, W: int, C: int) -> torch.Tensor:
"""Fake batch of images."""
return torch.rand(16, C, H, W)
@pytest.fixture
def colout_algorithm(p_row: float, p_col: float, batch: bool) -> ColOut:
"""Reusable algorithm instance."""
return ColOut(p_row, p_col, batch)
class TestColOutTransform:
def test_single_image_drop_size(self, fake_image: Image.Image, p_row: float, p_col: float):
"""Test application to single PIL image."""
transform = ColOutTransform(p_row, p_col)
new_image = transform(fake_image)
assert isinstance(new_image, Image.Image)
verify_shape_image(fake_image, new_image, p_row, p_col)
def test_image_pair_drop_size(self, fake_image: Image.Image, p_row: float, p_col: float):
"""Test application to a 2-tuple of PIL images."""
transform = ColOutTransform(p_row, p_col)
orig_sample = (fake_image, fake_image)
new_sample = transform(orig_sample)
assert isinstance(new_sample, Tuple)
verify_shape_image_pair(orig_sample, new_sample, p_row, p_col)
@pytest.mark.parametrize('W', [48])
def test_rectangular_image(self, fake_image: Image.Image, p_row: float, p_col: float):
"""Test application to a rectangular PIL image."""
transform = ColOutTransform(p_row, p_col)
new_image = transform(fake_image)
verify_shape_image(fake_image, new_image, p_row, p_col) # type: ignore
def test_single_image_tensor_drop_size(self, fake_image_tensor: torch.Tensor, p_row: float, p_col: float):
"""Test application to a single torch image tensor."""
transform = ColOutTransform(p_row, p_col)
new_image = transform(fake_image_tensor)
verify_shape_tensor(fake_image_tensor, new_image, p_row, p_col) # type: ignore
def test_image_tensor_pair_drop_size(self, fake_image_tensor: torch.Tensor, p_row: float, p_col: float):
"""Test application to a single torch image tensor."""
transform = ColOutTransform(p_row, p_col)
orig_sample = (fake_image_tensor, fake_image_tensor)
new_sample = transform(orig_sample)
verify_shape_tensor_pair(orig_sample, new_sample, p_row, p_col) # type: ignore
def test_reproducibility_image(self, fake_image_tensor: torch.Tensor, p_row: float, p_col: float):
"""Test that transform is reproducible given the same seed."""
transform_1 = ColOutTransform(p_row, p_col)
transform_2 = ColOutTransform(p_row, p_col)
torch.manual_seed(42)
new_image_1 = transform_1(fake_image_tensor)
assert isinstance(new_image_1, torch.Tensor)
torch.manual_seed(42)
new_image_2 = transform_2(fake_image_tensor)
assert isinstance(new_image_2, torch.Tensor)
assert torch.allclose(new_image_1, new_image_2)
class TestColOutFunctional:
def test_reproducibility_batch(self, fake_image_batch: torch.Tensor, p_row: float, p_col: float):
"""Test that batch augmentation is reproducible given the same seed."""
transform_1 = functools.partial(colout_batch, p_row=p_row, p_col=p_col)
transform_2 = functools.partial(colout_batch, p_row=p_row, p_col=p_col)
torch.manual_seed(42)
new_batch_1 = transform_1(fake_image_batch)
torch.manual_seed(42)
new_batch_2 = transform_2(fake_image_batch)
assert isinstance(new_batch_1, torch.Tensor)
assert isinstance(new_batch_2, torch.Tensor)
assert torch.allclose(new_batch_1, new_batch_2)
def test_batch_drop_size(self, fake_image_batch: torch.Tensor, p_row: float, p_col: float):
"""Test application to a batch of images."""
colout = functools.partial(colout_batch, p_row=p_row, p_col=p_col)
new_batch = colout(fake_image_batch)
assert isinstance(new_batch, torch.Tensor)
verify_shape_batch(fake_image_batch, new_batch, p_row, p_col)
def test_batch_pair_drop_size(self, fake_image_batch: torch.Tensor, p_row: float, p_col: float):
colout = functools.partial(colout_batch, p_row=p_row, p_col=p_col)
sample = (fake_image_batch, fake_image_batch)
new_batch = colout(sample)
assert isinstance(new_batch, Tuple) and isinstance(new_batch[0], torch.Tensor) and isinstance(
new_batch[1], torch.Tensor)
verify_shape_batch_pair(sample, new_batch, p_row, p_col)
@pytest.mark.parametrize('p_col', [0.05, 0.25])
def test_rectangle_batch_drop_size(self, fake_image_batch: torch.Tensor, p_row: float, p_col: float):
"""Test that unequal values of p_row and p_col work properly."""
colout = functools.partial(colout_batch, p_row=p_row, p_col=p_col)
new_batch = colout(fake_image_batch)
assert isinstance(new_batch, torch.Tensor)
verify_shape_batch(fake_image_batch, new_batch, p_row, p_col)
class TestColOutAlgorithm:
@pytest.mark.parametrize('event,batch', [(Event.AFTER_DATALOADER, True), (Event.FIT_START, False)])
def test_match_correct(self, event: Event, colout_algorithm: ColOut, minimal_state: State):
"""Algo should match AFTER_DATALOADER if batch else FIT_START."""
assert colout_algorithm.match(event, minimal_state)
@pytest.mark.parametrize('event,batch', [(Event.FIT_START, True), (Event.AFTER_DATALOADER, False),
(Event.EPOCH_END, True)])
def test_match_incorrect(self, event: Event, colout_algorithm: ColOut, minimal_state: State):
"""Algo should NOT match FIT_START if batch else AFTER_DATALOADER."""
assert not colout_algorithm.match(event, minimal_state)
@pytest.mark.parametrize('batch', [True])
def test_apply_batch(self, fake_image_batch: torch.Tensor, colout_algorithm: ColOut, minimal_state: State,
empty_logger: Logger):
"""Applies the algorithm to a fake batch."""
p_row = colout_algorithm.p_row
p_col = colout_algorithm.p_col
        original_target = torch.Tensor()
        minimal_state.batch = (fake_image_batch, original_target)
        colout_algorithm.apply(Event.AFTER_DATALOADER, minimal_state, empty_logger)
        last_input, last_target = minimal_state.batch
        verify_shape_batch(fake_image_batch, last_input, p_row, p_col)
        assert last_target is original_target  # The target should be passed through unchanged
@pytest.mark.parametrize('batch', [True])
def test_apply_batch_pair(self, fake_image_batch: torch.Tensor, colout_algorithm: ColOut, minimal_state: State,
empty_logger: Logger):
"""Applies batch ColOut to 2-tuple of images."""
p_row = colout_algorithm.p_row
p_col = colout_algorithm.p_col
orig_sample = (fake_image_batch, fake_image_batch)
minimal_state.batch = orig_sample
colout_algorithm.apply(Event.AFTER_DATALOADER, minimal_state, empty_logger)
new_sample = minimal_state.batch
verify_shape_batch_pair(orig_sample, new_sample, p_row, p_col)
@pytest.mark.parametrize('batch', [False])
def test_apply_sample(self, colout_algorithm: ColOut, minimal_state: State, empty_logger: Logger):
"""Test that augmentation is added to dataset and functioning properly."""
p_row = colout_algorithm.p_row
p_col = colout_algorithm.p_col
dataset = RandomImageDataset(is_PIL=True)
dataloader = DataLoader(dataset)
original_image, _ = dataset[0]
assert isinstance(original_image, Image.Image)
minimal_state.set_dataloader(dataloader, 'train')
colout_algorithm.apply(Event.FIT_START, minimal_state, empty_logger)
new_image, _ = dataset[0]
assert isinstance(new_image, Image.Image)
verify_shape_image(original_image, new_image, p_row, p_col)
@pytest.mark.parametrize('p_row,p_col', [(1.5, 0.15), (0.15, 1.5)])
def test_invalid_hparams(p_row: float, p_col: float):
"""Test that invalid hyperparameters error."""
with pytest.raises(ValueError):
ColOut(p_row, p_col, False)
| composer-dev | tests/algorithms/test_colout.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from copy import deepcopy
import pytest
import torch
from composer.algorithms.seq_length_warmup import SeqLengthWarmup, set_batch_sequence_length
from composer.core import Event, State
from composer.devices import DeviceCPU
from composer.loggers import Logger
from tests.common.datasets import (dummy_bert_lm_dataloader, dummy_gpt_lm_dataloader,
dummy_text_classification_dataloader)
from tests.common.models import SimpleTransformerClassifier, configure_tiny_bert_hf_model, configure_tiny_gpt2_hf_model
def check_batch_truncation(before, after, length, preserve_end_of_sequence=False):
before_lengths = [int(m.sum()) for m in before['attention_mask']]
# Just make sure the lengths are correct
for k in before.keys():
assert k in after, 'No keys should be removed during sequence truncation.'
assert before[k].shape[0] == after[k].shape[
0], 'The batch size should not be changed during sequence truncation.'
if before[k].ndim >= 2:
assert after[k].shape[1] == min(before[k].shape[1], length), 'Incorrect sequence length after truncation.'
if preserve_end_of_sequence:
# The last valid token before truncation should still be the last valid token
for seq_before, seq_after, before_length in zip(before[k], after[k], before_lengths):
assert seq_after[min(length, before_length) - 1] == seq_before[before_length - 1]
for k in after.keys():
assert k in before, 'No keys should be added during sequence truncation'
def check_batch_non_truncation(before, after, length):
# Make sure all the batch tensors have the same shape
input_ids_after_shape = after['input_ids'].shape
# Just make sure the lengths are correct
for k in before.keys():
assert k in after, 'No keys should be removed during sequence reshaping.'
assert after[
k].shape == input_ids_after_shape, 'All tensors should have the same size after sequence reshaping.'
b_numel = before[k].shape[0] * before[k].shape[1]
a_numel = after[k].shape[0] * after[k].shape[1]
assert a_numel >= b_numel - length, 'Sequence reshaping should throw away at most curr_sequence_length tokens.'
assert torch.all(after[k][0] == before[k][
0, :input_ids_after_shape[1]]), 'Sequence reshaping should not change the token order.'
for k in after.keys():
assert k in before, 'No keys should be added during sequence reshaping.'
def check_batch(before, after, length, truncate: bool, preserve_end_of_sequence: bool):
if truncate:
check_batch_truncation(before, after, length, preserve_end_of_sequence)
else:
check_batch_non_truncation(before, after, length)
def check_forward_backward(model, batch):
model.zero_grad()
output = model.forward(batch)
output['loss'].backward()
@pytest.mark.parametrize('model, dataloader', [
(configure_tiny_bert_hf_model, dummy_bert_lm_dataloader),
(configure_tiny_gpt2_hf_model, dummy_gpt_lm_dataloader),
(pytest.param(
SimpleTransformerClassifier,
dummy_text_classification_dataloader,
marks=pytest.mark.xfail(reason='Gated Linear Units does not currently support non-HuggingFace models'))),
])
@pytest.mark.parametrize('truncate,preserve_end_of_sequence', [(True, True), (True, False), (False, False)])
class TestSeqLengthWarmup:
@pytest.mark.parametrize('curr_seq_length', [8, 64])
def test_functional(self, model, dataloader, curr_seq_length: int, truncate: bool, preserve_end_of_sequence: bool):
model = model()
dataloader = dataloader()
state = State(
model=model,
rank_zero_seed=0,
run_name='run_name',
device=DeviceCPU(),
dataloader=dataloader,
dataloader_label='train',
max_duration='1ep',
)
batch_before = next(iter(dataloader))
batch_after = set_batch_sequence_length(deepcopy(batch_before), curr_seq_length, truncate,
preserve_end_of_sequence)
check_batch(batch_before, batch_after, curr_seq_length, truncate, preserve_end_of_sequence)
check_forward_backward(state.model, batch_after)
def test_algorithm(self, model, dataloader, empty_logger: Logger, truncate: bool, preserve_end_of_sequence: bool):
model = model()
dataloader = dataloader()
state = State(
model=model,
rank_zero_seed=0,
run_name='run_name',
device=DeviceCPU(),
dataloader=dataloader,
dataloader_label='train',
max_duration='1ep',
)
# Synthetic dataset has a size of 2 batches per epoch (max duration = 1ep)
seq_length_warmup = SeqLengthWarmup(duration=0.5,
min_seq_length=8,
max_seq_length=16,
truncate=truncate,
preserve_end_of_sequence=preserve_end_of_sequence)
seq_length_warmup.apply(Event.INIT, state, empty_logger)
batch_before = next(iter(dataloader))
state.batch = deepcopy(batch_before)
seq_length_warmup.apply(Event.AFTER_DATALOADER, state, empty_logger)
# At this point, we should see the MINIMUM sequence length after truncation
check_batch(batch_before, state.batch, seq_length_warmup.min_seq_length, truncate, preserve_end_of_sequence)
check_forward_backward(state.model, state.batch)
# Note: max duration is 1 epoch
state.timestamp = state.timestamp.to_next_batch(samples=state.batch['input_ids'].shape[0])
batch_before = next(iter(dataloader))
state.batch = deepcopy(batch_before)
seq_length_warmup.apply(Event.AFTER_DATALOADER, state, empty_logger)
# At this point, we should see the MAXIMUM sequence length after truncation
check_batch(batch_before, state.batch, seq_length_warmup.max_seq_length, truncate, preserve_end_of_sequence)
check_forward_backward(state.model, state.batch)
| composer-dev | tests/algorithms/test_seq_length_warmup.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
# Written by Gihyun Park, Junyeol Lee, and Jiwon Seo
import torch
from composer.algorithms.gyro_dropout import GyroDropoutLayer
def test_gyro_dropout_masking():
batch_size = 256
output_feature = 512
x = torch.randn(batch_size, output_feature)
dropout_layer = GyroDropoutLayer(
iters_per_epoch=196,
max_epoch=100,
p=0.5,
sigma=256,
tau=16,
)
y = dropout_layer(x)
mask = dropout_layer.dropout_mask
p = dropout_layer.p
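    # GyroDropout uses inverted-dropout scaling: surviving activations are multiplied by
    # 1 / (1 - p), so each output element should equal x * mask * (1 / (1 - p)).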
for i in range(batch_size):
for j in range(output_feature):
assert x[i][j] * mask[i][j] * (1 / (1 - p)) == y[i][j]
def test_gyro_dropout_mask_pattern():
batch_size = 256
output_feature = 512
x = torch.randn(batch_size, output_feature)
dropout_layer = GyroDropoutLayer(
iters_per_epoch=196,
max_epoch=100,
p=0.5,
sigma=256,
tau=16,
)
_ = dropout_layer(x)
mask = dropout_layer.dropout_mask
tau = dropout_layer.tau
pivot = 0
for i in range(output_feature):
for j in range(batch_size):
if j % tau == 0:
pivot = mask[j][i]
else:
assert pivot == mask[j][i]
| composer-dev | tests/algorithms/test_gyro_dropout.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import pytest
import torch
from composer.algorithms import factorize
@dataclass
class _RankReduce(object):
"""This is just here for convenience when testing."""
batch_size: int = 100
C_out: int = 64
C_in: int = 32
C_latent_now: int = 16
C_latent_new: int = 8
n_iters: int = 2
seed: int = 123
op: str = 'linear'
def __call__(self, already_factorized=False):
torch.manual_seed(self.seed)
X = torch.randn(self.batch_size, self.C_in)
bias = torch.randn(self.C_out)
if already_factorized:
Wa = torch.randn(self.C_in, self.C_latent_now)
Wb = torch.randn(self.C_latent_now, self.C_out)
Y = (X @ Wa) @ Wb + bias
else:
Wa = torch.randn(self.C_in, self.C_out)
Wb = None
Y = X @ Wa + bias
return factorize.factorize_matrix(X, Y, Wa, Wb, bias=bias, rank=self.C_latent_new, n_iters=self.n_iters)
@dataclass
class _RankReduceConv2d(object):
"""This is just here for convenience when testing."""
batch_size: int = 1
H: int = 4
W: int = 4
kernel_size: Tuple[int, int] = (3, 3)
C_in: int = 32
C_latent_now: int = 16
C_latent_new: int = 6
C_out: int = 24
n_iters: int = 2
seed: int = 123
op: str = 'conv2d'
def __call__(self, already_factorized=False):
torch.manual_seed(self.seed)
X = torch.randn(1, self.C_in, self.H, self.W) # NCHW
if already_factorized:
Wa = torch.randn(self.C_latent_now, self.C_in, *self.kernel_size)
Wb = torch.randn(self.C_out, self.C_latent_now, 1, 1)
biasA = torch.randn(self.C_latent_now)
biasB = torch.randn(self.C_out)
else:
Wa = torch.randn(self.C_out, self.C_in, *self.kernel_size)
Wb = None
biasA = torch.randn(self.C_out)
biasB = None
return factorize.factorize_conv2d(X,
Wa,
Wb,
biasA=biasA,
biasB=biasB,
rank=self.C_latent_new,
n_iters=self.n_iters)
@pytest.fixture(params=[_RankReduce(), _RankReduceConv2d()])
def factorize_task(request):
return request.param
def _check_factorization(f: Union[_RankReduce, _RankReduceConv2d],
prev_nmse: Optional[float] = None,
already_factorized: bool = True):
info = f(already_factorized=already_factorized)
Wa = info.Wa
Wb = info.Wb
bias = info.bias # one bias because only 2nd op needs one
nmse = info.nmse
op = f.op
if op == 'linear':
in_dim = 0
out_dim = 1
elif op == 'conv2d':
in_dim = 1
out_dim = 0
else:
raise ValueError('Invalid op: ', op)
k = f.C_latent_new
if k >= f.C_in or k >= f.C_out:
# no point in factorizing with latent dim bigger than
# either input or output; so just regress input onto output
assert Wa is not None
assert Wb is None
assert Wa.shape[in_dim] == f.C_in
assert Wa.shape[out_dim] == f.C_out
elif k >= f.C_latent_now:
        # no need to factorize any further than the current factorization
assert Wa is not None
assert Wa.shape[in_dim] == f.C_in
assert Wa.shape[out_dim] == f.C_latent_now
else:
# actually needed to factorize
assert bias is not None
assert Wa is not None
assert Wb is not None
assert bias.ndim == 1
assert bias.shape[0] == f.C_out
assert Wa.shape[in_dim] == f.C_in
assert Wa.shape[out_dim] == f.C_latent_new
assert Wb is not None # should have actually factorized weights
assert Wb.shape[in_dim] == f.C_latent_new
assert Wb.shape[out_dim] == f.C_out
assert nmse < 1.0 # should explain variance better than just predicting mean
if prev_nmse is not None:
assert nmse <= prev_nmse + 1e-8 # error decreases over time
return nmse # new "previous" nmse
@pytest.mark.parametrize(
'shapes',
[
(16, 16, 16, 16), # all the same
(16, 8, 16, 16), # already low rank
(16, 8, 16, 16), # requested rank > current latent rank
(16, 16, 32, 16), # requested rank > input rank
(16, 16, 16, 8), # requested rank > output rank
(32, 16, 16, 16), # requested rank >= output rank, and underdetermined
])
@pytest.mark.parametrize('already_factorized', [False, True])
def test_factorize_edge_cases(shapes, factorize_task, already_factorized):
"""Test edge cases regarding current and requested matrix shapes."""
C_in, C_latent_now, C_latent_new, C_out = shapes
factorize_task.C_in = C_in
factorize_task.C_latent_now = C_latent_now
factorize_task.C_latent_new = C_latent_new
factorize_task.C_out = C_out
_check_factorization(factorize_task, already_factorized=already_factorized)
@pytest.mark.parametrize('already_factorized', [False, True])
def test_factorize_more_dims_better(factorize_task, already_factorized):
"""More latent dimensions should yield nonincreasing error."""
prev_nmse = np.inf
for C_latent_new in [1, 4, 16, 32]:
factorize_task.C_latent_new = C_latent_new
maybe_nmse = _check_factorization(factorize_task, prev_nmse, already_factorized=already_factorized)
prev_nmse = maybe_nmse if maybe_nmse else prev_nmse
@pytest.mark.parametrize('already_factorized', [False, True])
def test_factorize_more_iters_better(factorize_task, already_factorized):
"""More optimization iters should yield nonincreasing error."""
prev_nmse = np.inf
for n_iters in [0, 1, 2, 4]:
factorize_task.n_iters = n_iters
maybe_nmse = _check_factorization(factorize_task, prev_nmse, already_factorized=already_factorized)
prev_nmse = maybe_nmse if maybe_nmse else prev_nmse
| composer-dev | tests/algorithms/test_factorize_core.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import contextlib
import copy
import os
import pathlib
from typing import Type
import pytest
import torch
from composer import Trainer, algorithms
from composer.callbacks import CheckpointSaver
from composer.core import Algorithm, Time, TimeUnit # type: ignore imports used in `eval(representation)`
from composer.models import ComposerClassifier, ComposerModel, composer_resnet
from tests.common import ConvModel
def initialize_algorithm(algo_cls: Type):
"""Initialize algorithm with dummy values."""
if algo_cls == algorithms.Alibi:
return algo_cls(max_sequence_length=1)
elif algo_cls == algorithms.StochasticDepth:
return algo_cls(target_layer_name='ResNetBottleneck')
    elif algo_cls in (algorithms.FusedLayerNorm, algorithms.LowPrecisionLayerNorm):
pytest.importorskip('apex')
return algo_cls()
elif algo_cls == algorithms.GatedLinearUnits:
pytest.importorskip('transformers')
return algo_cls()
elif algo_cls == algorithms.Factorize:
return algo_cls(min_features=48, latent_features=24)
elif algo_cls == algorithms.SqueezeExcite:
return algo_cls(min_channels=32)
else:
return algo_cls()
@pytest.mark.parametrize('algo_name', algorithms.__all__)
def test_required_on_load_has_repr(algo_name: str):
algo_cls = getattr(algorithms, algo_name)
if issubclass(algo_cls, Algorithm) and algo_cls.required_on_load():
representation = repr(initialize_algorithm(algo_cls))
# Default repr prints memory address
assert 'at 0x' not in representation
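        # The repr should also round-trip: evaluating it as a constructor call must succeed.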
eval(f'algorithms.{representation}')
def compare_models(model_1: torch.nn.Module, model_2: torch.nn.Module, is_equal: bool = True):
"""Check if two models are the same.
To check equivalence, we first verify the modules are the same in each model by checking the
list of attributes for each module. This checks if the same set of modules are used and if any
module has been marked by adding an attribute. Next, we directly iterate over the state dict
and verify the parameters are equal.
This is not a comprehensive comparison. For example, an algorithm could silently monkeypatch
over a forward function for a module. However, it is sufficient to provide coverage for our
existing set of algorithms.
"""
with contextlib.nullcontext() if is_equal else pytest.raises(Exception):
# Compare model module attributes since algorithms like StochasticDepth monkeypatch
# on new attributes. We only check this on ComposerClassifier models that have .module
if isinstance(model_1, ComposerClassifier) and isinstance(model_2, ComposerClassifier):
model_1_modules = list(model_1.module.modules())
model_2_modules = list(model_2.module.modules())
assert len(model_1_modules) == len(model_2_modules)
for module_1, module_2 in zip(model_1_modules, model_2_modules):
assert sorted(list(module_1.__dict__.keys())) == sorted(list(module_2.__dict__.keys()))
# Compare model parameters
for (name0, tensor0), (name1, tensor1) in zip(model_1.state_dict().items(), model_2.state_dict().items()):
assert name0 == name1
assert torch.equal(tensor0, tensor1)
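# Hedged usage note: two freshly constructed ConvModel() instances share the same module
# structure but (almost surely) different randomly initialized weights, so
# compare_models(model_a, model_b, is_equal=False) is expected to pass, while is_equal=True
# would fail on the parameter comparison.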
@pytest.mark.filterwarnings('ignore:No instances of')
@pytest.mark.parametrize('algo_name', algorithms.__all__)
def test_idempotent(algo_name: str, tiny_bert_config):
algo_cls = getattr(algorithms, algo_name)
if issubclass(algo_cls, Algorithm) and algo_cls.required_on_load():
algorithm = initialize_algorithm(algo_cls)
original_model = None
if algo_name == 'StochasticDepth':
original_model = composer_resnet(model_name='resnet50')
elif algo_name in ['Alibi', 'GatedLinearUnits']:
transformers = pytest.importorskip('transformers')
from composer.models import HuggingFaceModel
hf_model = transformers.AutoModelForSequenceClassification.from_config(tiny_bert_config)
original_model = HuggingFaceModel(hf_model, use_logits=True)
else:
original_model = ConvModel()
applied_once_model = Trainer(
model=copy.deepcopy(original_model),
algorithms=algorithm,
).state.model
assert isinstance(applied_once_model, ComposerModel) # Assert type for pyright deepcopy
applied_twice_model = Trainer(
model=copy.deepcopy(applied_once_model),
algorithms=algorithm,
).state.model
compare_models(original_model, applied_twice_model, is_equal=False) # Surgery actually changes model
compare_models(applied_once_model, applied_twice_model, is_equal=True) # Multiple applications are no-ops
@pytest.mark.parametrize('algo_name', algorithms.__all__)
@pytest.mark.parametrize('load_weights_only,already_added,exclude', [
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
])
def test_autoload(algo_name: str, load_weights_only: bool, already_added: bool, exclude: bool, tmp_path: pathlib.Path,
tiny_bert_config):
algo_cls = getattr(algorithms, algo_name)
if issubclass(algo_cls, Algorithm) and algo_cls.required_on_load():
algorithm = initialize_algorithm(algo_cls)
original_model = None
if algo_name == 'StochasticDepth':
original_model = composer_resnet(model_name='resnet50')
elif algo_name in ['Alibi', 'GatedLinearUnits']:
transformers = pytest.importorskip('transformers')
from composer.models import HuggingFaceModel
hf_model = transformers.AutoModelForSequenceClassification.from_config(tiny_bert_config)
original_model = HuggingFaceModel(hf_model, use_logits=True)
else:
original_model = ConvModel()
trainer1 = Trainer(model=copy.deepcopy(original_model),
algorithms=algorithm,
save_folder=str(tmp_path),
save_filename='ckpt.pt')
checkpoint_saver = [cb for cb in trainer1.state.callbacks if isinstance(cb, CheckpointSaver)][0]
checkpoint_saver._save_checkpoint(trainer1.state, trainer1.logger)
context = contextlib.nullcontext()
# Emit warning when autoloading
if not already_added and not exclude:
context = pytest.warns(UserWarning, match='Automatically adding required_on_load algorithm*')
# Excluding some algorithms leads to errors when loading
elif exclude:
if algo_name in ['Factorize', 'SqueezeExcite']:
context = pytest.raises(
ValueError,
match=
"loaded state dict contains a parameter group that doesn't match the size of optimizer's group",
)
elif algo_name == 'Alibi':
context = pytest.raises(RuntimeError)
with context:
trainer2 = Trainer(
model=copy.deepcopy(original_model),
algorithms=[initialize_algorithm(algo_cls)] if already_added else [],
load_path=os.path.join(str(tmp_path), 'ckpt.pt'),
load_weights_only=load_weights_only,
load_exclude_algorithms=[algo_name] if exclude else None,
)
# No algorithms are added if we've excluded them
if exclude:
assert len(trainer2.state.algorithms) == 0
# Otherwise, check exactly one copy of algorithm is present
else:
assert len(trainer2.state.algorithms) == 1
assert isinstance(trainer2.state.algorithms[0], algo_cls)
| composer-dev | tests/algorithms/test_required_on_load.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pytest
@pytest.fixture
def state(minimal_state):
# make any test-specific needed modifications
# e.g. adding a conv model, or changing the dataloader
return minimal_state
# Every algorithm test should demonstrate both the functional API and the Algorithm class usage,
# as in the stubs below (a hedged example sketch follows the stubs).
def test_myalgo_functional():
...
def test_myalgo_algorithm(state, empty_logger):
...
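# A hedged, minimal sketch of what the two stubs above could look like, using the existing
# LabelSmoothing algorithm as a stand-in for "MyAlgo" (the real tests live in
# tests/algorithms/test_label_smoothing.py); names such as `targets` are illustrative only:
#
#     def test_myalgo_functional():
#         import torch
#         import torch.nn.functional as F
#         from composer.algorithms import label_smoothing
#
#         targets = F.one_hot(torch.randint(0, 10, (8,)), num_classes=10).float()
#         smoothed = label_smoothing.smooth_labels(torch.randn(8, 10), targets, 0.1)
#         assert smoothed.shape == targets.shape
#
#     def test_myalgo_algorithm(state, empty_logger):
#         import torch
#         from composer.algorithms import LabelSmoothing
#         from composer.core import Event
#
#         state.outputs = torch.randn(8, 10)
#         state.batch = (torch.Tensor(), torch.randint(0, 10, (8,)))
#         LabelSmoothing(smoothing=0.1).apply(Event.BEFORE_LOSS, state, empty_logger)
#         _, smoothed_targets = state.batch
#         assert smoothed_targets.shape == (8, 10)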
# Results from logging and hparams initialization should also be tested.
def test_myalgo_logging(state):
"""Test that the logging is as expected.
Example:
logger_mock = Mock()
algorithm = AlgorithmThatLogsSomething()
algorithm.apply(Event.INIT, state, logger=logger_mock)
        logger_mock.log_hyperparameters.assert_called_once_with({
'some_key': some_value
})
"""
# The above is the minimal set; the rest of the test suite will vary depending
# on the exact algorithm under test.
| composer-dev | tests/algorithms/algorithm_test_template.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from copy import deepcopy
from unittest.mock import Mock
import pytest
import torch
from composer.algorithms import LayerFreezing
from composer.core import Event, Precision, State, Timestamp
from composer.devices import DeviceCPU, DeviceGPU
from composer.loggers import Logger
from tests.common import SimpleConvModel, SimpleTransformerClassifier
from tests.common.models import configure_tiny_bert_hf_model
def _generate_state(request: pytest.FixtureRequest, model_cls, epoch: int, max_epochs: int):
"""Generates a state and fast forwards the timestamp by epochs."""
model = model_cls()
device = None
for item in request.session.items:
device = DeviceCPU() if item.get_closest_marker('gpu') is None else DeviceGPU()
break
    assert device is not None
state = State(model=model,
rank_zero_seed=0,
device=device,
run_name='run_name',
optimizers=torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.99),
precision=Precision.FP32,
dataloader=Mock(__len__=lambda x: 100),
dataloader_label='train',
device_train_microbatch_size=1,
max_duration=f'{max_epochs}ep')
# fast forward by epochs
state.timestamp = Timestamp(epoch=epoch)
return state
def _assert_param_groups_equal(expected_groups, actual_groups):
assert len(expected_groups) == len(actual_groups), 'Incorrect number of param groups'
for i, expected_group in enumerate(expected_groups):
assert len(expected_group) == len(actual_groups[i]), \
f'Group {i} has the wrong number of parameters'
for j, expected_params in enumerate(expected_group['params']):
assert (actual_groups[i]['params'][j] == expected_params).all()
@pytest.mark.parametrize('model_cls', [SimpleConvModel, SimpleTransformerClassifier, configure_tiny_bert_hf_model])
def test_freeze_layers_no_freeze(model_cls, empty_logger: Logger, request: pytest.FixtureRequest):
state = _generate_state(request, model_cls, epoch=10, max_epochs=100)
first_optimizer = state.optimizers[0]
expected_param_groups = deepcopy(first_optimizer.param_groups)
freezing = LayerFreezing(freeze_start=0.5, freeze_level=1.0)
freezing.apply(event=Event.EPOCH_END, state=state, logger=empty_logger)
updated_param_groups = first_optimizer.param_groups
_assert_param_groups_equal(expected_param_groups, updated_param_groups)
@pytest.mark.parametrize('model_cls', [SimpleConvModel, SimpleTransformerClassifier, configure_tiny_bert_hf_model])
def test_freeze_layers_with_freeze(model_cls, empty_logger: Logger, request: pytest.FixtureRequest):
state = _generate_state(request, model_cls, epoch=80, max_epochs=100)
first_optimizer = state.optimizers[0]
expected_param_groups = deepcopy(first_optimizer.param_groups)
freezing = LayerFreezing(freeze_start=0.05, freeze_level=1.0)
freezing.apply(event=Event.EPOCH_END, state=state, logger=empty_logger)
updated_param_groups = first_optimizer.param_groups
# The first group should be removed due to freezing
expected_param_groups[0]['params'] = []
_assert_param_groups_equal(expected_param_groups, updated_param_groups)
| composer-dev | tests/algorithms/test_layer_freezing.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
from composer.algorithms.cutout.cutout import CutOut, _generate_mask
from composer.core import Event
# Box validation checks for a contiguous rectangle; it cannot handle multiple/coalesced boxes along the x, y dimensions
def _box_validate(mask_box: torch.Tensor) -> None:
# Box is not contiguous if there are any 0's in the tensor
box_is_contiguous = not (0 in mask_box)
assert box_is_contiguous
def _find_box(img_2d: torch.Tensor) -> torch.Tensor:
height, width = img_2d.size()
# Generate helper tensors
ones = torch.ones(height, width)
zeros = torch.zeros(height, width)
# Find the box
# First create h x w filter populated with ones where it thinks there's a box, then find coordinates of ones
filter_box = torch.where(img_2d == 0, ones, zeros)
box_x, box_y = torch.nonzero(filter_box, as_tuple=True) # Find all points where filter_box is 1
# Check for no box found
if ((box_x.size()[0], box_y.size()[0]) == (0, 0)):
# Return valid box as this is possible when cutout_length=1
return torch.ones(1, 1)
else:
# Returns box defined by longest diagonal
return filter_box[box_x[0]:box_x[-1] + 1, box_y[0]:box_y[-1] + 1]
def check_box(batch_size, channels, input):
for b in range(batch_size):
for c in range(channels):
mask_box = _find_box(input[b, c, :, :])
_box_validate(mask_box)
# Test square, rectangle inputs
@pytest.fixture(params=[(1, 1, 16, 16), (1, 1, 16, 32)])
def tensor_sizes(request):
return request.param
# cutout_length=1 won't zero anything out (no box is valid)
# cutout_length=3 should produce a 2x2 box due to floor division, except when clipped at the boundary
# cutout_length=4 should produce a 4x4 box, except when clipped at the boundary
# cutout_length=0.5 should produce a box with half the side length of the input
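# Worked example of the floor-division arithmetic above (assuming the standard cutout box
# [x - length // 2, x + length // 2) in each dimension): length=3 gives 3 // 2 = 1, so the box
# spans 2 pixels per side; length=4 gives 4 // 2 = 2, so the box spans 4 pixels per side.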
@pytest.fixture(params=[1, 3, 4])
def cutout_length_pixels(request):
return request.param
# Check corners, edges and middle
@pytest.fixture(params=[(0, 0), (16, 0), (0, 16), (16, 16), (7, 7)])
def anchors(request):
return request.param
def test_cutout_mask(tensor_sizes, cutout_length_pixels, anchors):
batch_size, channels, height, width = tensor_sizes
x, y = anchors
test_mask = torch.ones(tensor_sizes)
test_mask = _generate_mask(mask=test_mask, width=width, height=height, x=x, y=y, cutout_length=cutout_length_pixels)
check_box(batch_size, channels, test_mask)
@pytest.mark.parametrize('batch_size', [1, 4])
@pytest.mark.parametrize('channels', [1, 4])
@pytest.mark.parametrize('height', [32, 64])
@pytest.mark.parametrize('width', [32, 71])
@pytest.mark.parametrize('cutout_length', [0.25, 0.5])
@pytest.mark.parametrize('uniform_sampling', [True, False])
def test_cutout_algorithm(batch_size, channels, height, width, cutout_length, empty_logger, minimal_state,
uniform_sampling):
    # Initialize input tensor
    # - Add a bias to prevent 0-valued pixels, which would cause check_box() to fail since it searches for 0's
    # - Real data can contain 0-valued pixels, but this does not affect the cutout algorithm since the mask is generated independently of the input data
    input = torch.rand((batch_size, channels, height, width)) + 1
    # Fix num_holes=1: mask generation is additive and the box validation isn't smart enough to detect multiple/coalesced boxes
algorithm = CutOut(num_holes=1, length=cutout_length, uniform_sampling=uniform_sampling)
state = minimal_state
state.batch = (input, torch.Tensor())
algorithm.apply(Event.AFTER_DATALOADER, state, empty_logger)
input, _ = state.batch
check_box(batch_size, channels, input)
| composer-dev | tests/algorithms/test_cutout.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from unittest.mock import Mock
import pytest
import torch
from composer.algorithms import SqueezeExcite, SqueezeExciteConv2d
from composer.core import Event, State
from composer.functional import apply_squeeze_excite as apply_se
from composer.loggers import Logger
from composer.utils import module_surgery
from tests.common import SimpleConvModel
@pytest.fixture
def state(minimal_state: State):
"""SE tests require a conv model."""
minimal_state.model = SimpleConvModel(num_channels=32)
return minimal_state
def test_se_functional():
model = SimpleConvModel()
num_conv_layers = module_surgery.count_module_instances(model, torch.nn.Conv2d)
apply_se(model, latent_channels=64, min_channels=3)
num_se_layers = module_surgery.count_module_instances(model, SqueezeExciteConv2d)
assert num_conv_layers == num_se_layers
def test_se_algorithm(state: State, empty_logger: Logger):
num_conv_layers = module_surgery.count_module_instances(state.model, torch.nn.Conv2d)
algorithm = SqueezeExcite(latent_channels=64, min_channels=3)
algorithm.apply(
event=Event.INIT,
state=state,
logger=empty_logger,
)
num_se_layers = module_surgery.count_module_instances(state.model, SqueezeExciteConv2d)
assert num_conv_layers == num_se_layers
def test_se_logging(state: State, empty_logger: Logger):
logger_mock = Mock()
se = SqueezeExcite(latent_channels=64, min_channels=3)
se.apply(Event.INIT, state, logger=logger_mock)
conv_count = module_surgery.count_module_instances(state.model, torch.nn.Conv2d)
logger_mock.log_hyperparameters.assert_called_once_with({
'squeeze_excite/num_squeeze_excite_layers': conv_count,
})
def test_se_forward_shape(state: State):
batch = (torch.Tensor(8, 32, 64, 64), None) # NCHW
output = state.model.forward(batch)
apply_se(state.model, latent_channels=32, min_channels=3)
new_output = state.model.forward(batch)
assert output.size() == new_output.size()
| composer-dev | tests/algorithms/test_squeeze_excite.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from typing import Callable, Tuple, Union, cast
import numpy as np
import pytest
import torch
from PIL.Image import Image as PillowImage
from PIL.Image import fromarray
from composer.algorithms.utils.augmentation_common import image_as_type
from composer.functional import augmix_image, colout_batch, cutout_batch, randaugment_image
AnyImage = Union[torch.Tensor, PillowImage]
InputAugFunction = Callable[[AnyImage], AnyImage]
def _input_image(img_type: str, dtype: torch.dtype) -> AnyImage:
rng = np.random.default_rng(123)
torch.manual_seed(123)
N, H, W, C = 4, 6, 5, 3
if img_type == 'pillow':
ints = rng.integers(256, size=(H, W, C)).astype(np.uint8)
return fromarray(ints, mode='RGB')
elif dtype == torch.uint8:
if img_type == 'single_tensor':
return torch.randint(256, size=(C, H, W)).to(dtype=torch.uint8)
return torch.randint(256, size=(N, C, H, W)).to(dtype=torch.uint8)
elif dtype in (torch.float16, torch.float, torch.float64):
if img_type == 'single_tensor':
return torch.rand(size=(C, H, W)).to(dtype=dtype)
return torch.rand(size=(N, C, H, W)).to(dtype=dtype)
else:
raise ValueError(f'Invalid dtype: {dtype}')
def _input_output_pair(img_type: str, img_dtype: torch.dtype, f_aug: InputAugFunction) -> Tuple[AnyImage, AnyImage]:
img = _input_image(img_type, dtype=img_dtype)
return img, f_aug(img)
@pytest.fixture(params=(torch.uint8, torch.float16, torch.float, torch.float64))
def img_dtype(request) -> torch.dtype:
return request.param
@pytest.mark.parametrize('img_type', ['pillow', 'single_tensor', 'batch_tensor'])
@pytest.mark.parametrize('f_aug', [colout_batch, cutout_batch, augmix_image, randaugment_image])
def test_batch_augmentation_funcs_preserve_type(img_type: str, img_dtype: torch.dtype, f_aug: InputAugFunction):
img, out = _input_output_pair(img_type, img_dtype, f_aug)
assert type(out) == type(img)
@pytest.mark.parametrize('img_type', ['pillow', 'single_tensor', 'batch_tensor'])
@pytest.mark.parametrize('f_aug', [cutout_batch, augmix_image, randaugment_image]) # colout changes shape
def test_batch_augmentation_funcs_preserve_shape(img_type: str, img_dtype: torch.dtype, f_aug: InputAugFunction):
img, out = _input_output_pair(img_type, img_dtype, f_aug)
if img_type == 'pillow':
img = cast(PillowImage, img)
out = cast(PillowImage, out)
img = image_as_type(img, torch.Tensor)
out = image_as_type(out, torch.Tensor)
assert isinstance(img, torch.Tensor)
assert isinstance(out, torch.Tensor)
assert out.shape == img.shape
| composer-dev | tests/algorithms/test_augmentations_functional.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pytest
from torch.nn import LayerNorm
from composer.algorithms.low_precision_layernorm import LowPrecisionLayerNorm, apply_low_precision_layernorm
from composer.algorithms.low_precision_layernorm.low_precision_layernorm import LPLayerNorm
from composer.core import Event, State
from composer.loggers import Logger
from composer.models.huggingface import HuggingFaceModel
from composer.utils import get_device
from tests.common import device
from tests.common.datasets import dummy_bert_lm_dataloader, dummy_text_classification_dataloader
from tests.common.models import SimpleTransformerClassifier, configure_tiny_bert_hf_model
def assert_is_lpln_instance(model):
pytest.importorskip('transformers')
# When checking modules of a HuggingFace model, we need to parse the model object it wraps
# This is not necessary for SimpleTransformerClassifier models.
if isinstance(model, HuggingFaceModel):
model = model.model
# ensure that within the entire model, no PyTorch LayerNorm exists, and at least one LPLN does.
assert model.modules is not None, 'model has .modules method'
for module_class in model.modules():
if isinstance(module_class, LayerNorm):
assert isinstance(module_class, LPLayerNorm)
assert any(isinstance(module_class, LPLayerNorm) for module_class in model.modules())
@device('gpu')
@pytest.mark.parametrize('model,dataloader', [
(configure_tiny_bert_hf_model, dummy_bert_lm_dataloader),
(SimpleTransformerClassifier, dummy_text_classification_dataloader),
])
def test_low_precision_layernorm_functional(model, dataloader, device: str):
model = model()
# Remove biases and weights from some LayerNorms to test LPLN robustness
if isinstance(model, SimpleTransformerClassifier):
model.module[0].net[1].layers[0].norm1.bias = None # type: ignore
model.module[0].net[1].layers[0].norm2.weight = None # type: ignore
model.module[0].net[1].layers[0].norm2.bias = None # type: ignore
dataloader = dataloader()
state = State(
model=model,
rank_zero_seed=0,
run_name='run_name',
device=get_device(device),
dataloader=dataloader,
dataloader_label='train',
max_duration='1ep',
precision='amp_fp16',
)
if device == 'gpu':
state.model = state.model.cuda() # move the model to gpu
apply_low_precision_layernorm(state.model, state._precision, state.optimizers)
assert_is_lpln_instance(state.model)
@device('gpu')
@pytest.mark.parametrize('model,dataloader', [
(configure_tiny_bert_hf_model, dummy_bert_lm_dataloader),
(SimpleTransformerClassifier, dummy_text_classification_dataloader),
])
def test_low_precision_layernorm_algorithm(model, dataloader, empty_logger: Logger, device: str):
model = model()
# Remove biases and weights from some LayerNorms to test LPLN robustness
if isinstance(model, SimpleTransformerClassifier):
model.module[0].net[1].layers[0].norm1.bias = None # type: ignore
model.module[0].net[1].layers[0].norm2.weight = None # type: ignore
model.module[0].net[1].layers[0].norm2.bias = None # type: ignore
dataloader = dataloader()
state = State(
model=model,
rank_zero_seed=0,
run_name='run_name',
device=get_device(device),
dataloader=dataloader,
dataloader_label='train',
max_duration='1ep',
precision='amp_fp16',
)
low_precision_layernorm = LowPrecisionLayerNorm()
if device == 'gpu':
state.model = state.model.cuda() # move the model to gpu
low_precision_layernorm.apply(Event.INIT, state, empty_logger)
assert_is_lpln_instance(state.model)
| composer-dev | tests/algorithms/test_low_precision_layernorm.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
| composer-dev | tests/optim/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import contextlib
from typing import List, Optional, Type
import pytest
from torch.utils.data import DataLoader
from composer.core import State, Time
from composer.core.time import TimeUnit
from composer.devices import DeviceCPU, DeviceGPU
from composer.optim.scheduler import (ComposerScheduler, ConstantWithWarmupScheduler, CosineAnnealingScheduler,
CosineAnnealingWarmRestartsScheduler, CosineAnnealingWithWarmupScheduler,
ExponentialScheduler, LinearScheduler, LinearWithWarmupScheduler,
MultiStepScheduler, MultiStepWithWarmupScheduler, PolynomialScheduler,
PolynomialWithWarmupScheduler, StepScheduler)
from composer.trainer.trainer import Trainer
from tests.common.datasets import RandomClassificationDataset
from tests.common.models import SimpleModel
MAX_DURATION = '1000ep'
STEPS_PER_EPOCH = 1000
@pytest.fixture
def dummy_schedulers_state(rank_zero_seed: int, request: pytest.FixtureRequest):
device = None
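    # Use a GPU device only when the collected test items carry the 'gpu' marker
    # (only the first collected item is inspected); otherwise fall back to CPU.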
for item in request.session.items:
device = DeviceCPU() if item.get_closest_marker('gpu') is None else DeviceGPU()
break
    assert device is not None
state = State(
model=SimpleModel(),
run_name='run_name',
device=device,
rank_zero_seed=rank_zero_seed,
max_duration=MAX_DURATION,
)
state.set_dataloader([None] * STEPS_PER_EPOCH, 'train')
return state
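# Note on the time strings in the parametrizations below: Composer parses values such as
# '10ba' (batches), '1ep' (epochs), and '0.002dur' (a fraction of max_duration) via
# Time.from_timestring; e.g. Time.from_timestring('10ba') yields a Time with value 10 and
# unit TimeUnit.BATCH.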
@pytest.mark.parametrize('scheduler,ssr,test_times,expected_lrs', [
pytest.param(StepScheduler(step_size='10ba'), 1.0, ['5ba', '15ba', '35ba'], [1.0, 0.1, 0.001]),
pytest.param(StepScheduler(step_size='0.002dur', gamma=0.8), 1.0, ['1000ba', '3000ba', '7000ba'],
[1.0, 0.8, 0.512]),
pytest.param(StepScheduler(step_size='1ep', gamma=0.5), 1.0, ['500ba', '1500ba', '3500ba'], [1.0, 0.5, 0.125]),
pytest.param(StepScheduler(step_size='10ba', gamma=0.5), 0.5, ['3ba', '8ba', '18ba'], [1.0, 0.5, 0.125]),
pytest.param(MultiStepScheduler(milestones=['10ba', '30ba', '70ba']), 1.0, ['5ba', '20ba', '50ba', '100ba'],
[1.0, 0.1, 0.01, 0.001]),
pytest.param(MultiStepScheduler(milestones=['100ba', '1ep', '0.01dur'], gamma=0.5), 1.0,
['50ba', '500ba', '5000ba', '50000ba'], [1.0, 0.5, 0.25, 0.125]),
pytest.param(MultiStepScheduler(milestones=['100ba', '1ep', '0.01dur'], gamma=0.5), 4.0,
['200ba', '2000ba', '20000ba', '200000ba'], [1.0, 0.5, 0.25, 0.125]),
pytest.param(LinearScheduler(), 1.0, ['100000ba', '200000ba', '400000ba'], [0.9, 0.8, 0.6]),
pytest.param(LinearScheduler(alpha_i=0.0, alpha_f=2.0), 1.0, ['100000ba', '200000ba', '400000ba'], [0.2, 0.4, 0.8]),
pytest.param(LinearScheduler(alpha_i=0.0, alpha_f=1.0, t_max='0.25dur'), 1.0, ['100000ba', '200000ba', '400000ba'],
[0.4, 0.8, 1.0]),
pytest.param(LinearScheduler(alpha_i=0.0, alpha_f=1.0, t_max='0.25dur'), 2.0, ['100000ba', '200000ba', '400000ba'],
[0.2, 0.4, 0.8]),
pytest.param(ExponentialScheduler(gamma=0.5), 1.0, ['1ep', '2ep', '4ep'], [0.5, 0.25, 0.0625]),
pytest.param(ExponentialScheduler(gamma=0.5), 2.0, ['2ep', '4ep', '8ep'], [0.5, 0.25, 0.0625]),
pytest.param(CosineAnnealingScheduler(), 1.0, ['0ba', '333333ba', '500000ba', '666667ba', '1000000ba'],
[1.0, 0.75, 0.5, 0.25, 0.0]),
pytest.param(CosineAnnealingScheduler(t_max='30ba', alpha_f=0.5), 1.0,
['0ba', '10ba', '15ba', '20ba', '30ba', '50ba'], [1.0, 0.875, 0.75, 0.625, 0.5, 0.5]),
pytest.param(CosineAnnealingScheduler(t_max='30ba', alpha_f=0.5), 0.2, ['0ba', '2ba', '3ba', '4ba', '6ba', '10ba'],
[1.0, 0.875, 0.75, 0.625, 0.5, 0.5]),
pytest.param(CosineAnnealingWarmRestartsScheduler(t_0='30ba'), 1.0, ['0ba', '10ba', '15ba', '20ba', '30ba', '40ba'],
[1.0, 0.75, 0.5, 0.25, 1.0, 0.75]),
pytest.param(CosineAnnealingWarmRestartsScheduler(t_0='0.003dur', t_mult=1.5), 1.0,
['0ba', '1000ba', '3000ba', '4500ba', '7500ba', '14250ba'], [1.0, 0.75, 1.0, 0.75, 1.0, 1.0]),
pytest.param(CosineAnnealingWarmRestartsScheduler(t_0='30ep', t_mult=2.0, alpha_f=0.5), 0.5,
['0ba', '5000ba', '15000ba', '25000ba'], [1.0, 0.875, 1.0, 0.875]),
pytest.param(PolynomialScheduler(power=2.0), 1.0, ['0ba', '100000ba', '200000ba', '500000ba'],
[1.0, 0.81, 0.64, 0.25]),
pytest.param(PolynomialScheduler(power=2.0, t_max='100ba', alpha_f=0.5), 1.0, ['0ba', '10ba', '20ba', '50ba'],
[1.0, 0.905, 0.82, 0.625]),
pytest.param(PolynomialScheduler(power=2.0, t_max='100ba', alpha_f=0.5), 0.5, ['0ba', '10ba', '20ba', '50ba'],
[1.0, 0.82, 0.68, 0.5]),
pytest.param(MultiStepWithWarmupScheduler(t_warmup='10ba', milestones=['20ba', '40ba']), 1.0,
['0ba', '5ba', '15ba', '25ba', '45ba'], [0.0, 0.5, 1.0, 0.1, 0.01]),
pytest.param(MultiStepWithWarmupScheduler(t_warmup='10ba', milestones=['2ep', '4ep'], gamma=0.5), 0.5,
['0ba', '5ba', '15ba', '1500ba', '2500ba'], [0.0, 0.5, 1.0, 0.5, 0.25]),
pytest.param(MultiStepWithWarmupScheduler(t_warmup='10ba', milestones=['2ep', '4ep'], gamma=0.5, scale_warmup=True),
0.5, ['0ba', '5ba', '15ba', '1500ba', '2500ba'], [0.0, 1.0, 1.0, 0.5, 0.25]),
pytest.param(MultiStepWithWarmupScheduler(t_warmup='1000ep', milestones=[]), 1.0, ['0ep', '100ep', '1000ep'],
[0.0, 0.1, 1.0]),
pytest.param(ConstantWithWarmupScheduler(t_warmup='500ep'), 1.0,
['0ba', '250000ba', '500000ba', '750000ba', '1000000ba'], [0.0, 0.5, 1.0, 1.0, 1.0]),
pytest.param(ConstantWithWarmupScheduler(t_warmup='500ep', alpha=3.0), 1.0,
['0ba', '250000ba', '500000ba', '500500ba', '501000ba', '502000ba'], [0.0, 1.5, 3.0, 3.0, 3.0, 3.0]),
pytest.param(ConstantWithWarmupScheduler(t_warmup='500ep', alpha=3.0), 1.0,
['0ba', '250000ba', '500000ba', '500500ba', '501000ba', '502000ba'], [0.0, 1.5, 3.0, 3.0, 3.0, 3.0]),
pytest.param(ConstantWithWarmupScheduler(t_warmup='0.0005dur'), 1.0, ['0ba', '250ba', '500ba', '499750ba'],
[0.0, 0.5, 1.0, 1.0]),
pytest.param(ConstantWithWarmupScheduler(t_warmup='500ep', alpha=3.0, t_max='501000ep'), 0.5,
['0ba', '250000ba', '500000ba', '500500ba', '501000ba', '502000ba'], [0.0, 1.5, 3.0, 3.0, 3.0, 3.0]),
pytest.param(ConstantWithWarmupScheduler(t_warmup='1000ep'), 1.0, ['0ep', '100ep', '1000ep'], [0.0, 0.1, 1.0]),
pytest.param(LinearWithWarmupScheduler(t_warmup='500ep'), 1.0,
['0ba', '250000ba', '500000ba', '750000ba', '1000000ba'], [0.0, 0.5, 1.0, 0.5, 0.0]),
pytest.param(LinearWithWarmupScheduler(t_warmup='500ep', alpha_i=3.0, alpha_f=2.0, t_max='1002ep'), 0.5,
['0ba', '250000ba', '500000ba', '500500ba', '501000ba', '502000ba'], [0.0, 1.5, 3.0, 2.5, 2.0, 2.0]),
pytest.param(LinearWithWarmupScheduler(t_warmup='0.0005dur'), 1.0, ['0ba', '250ba', '500ba', '499750ba'],
[0.0, 0.5, 1.0, 0.5]),
pytest.param(LinearWithWarmupScheduler(t_warmup='500ba', scale_warmup=False), 0.5,
['0ba', '250ba', '500ba', '249875ba'], [0.0, 0.5, 1.0, 0.5]),
pytest.param(LinearWithWarmupScheduler(t_warmup='500ba', scale_warmup=True), 0.5,
['0ba', '125ba', '250ba', '249875ba'], [0.0, 0.5, 1.0, 0.5]),
pytest.param(LinearWithWarmupScheduler(t_warmup='1000ep'), 1.0, ['0ep', '100ep', '1000ep'], [0.0, 0.1, 1.0]),
pytest.param(CosineAnnealingWithWarmupScheduler(t_warmup='0.9dur'), 1.0,
['0ba', '450000ba', '900000ba', '933333ba', '950000ba', '1000000ba'], [0.0, 0.5, 1.0, 0.75, 0.5, 0.0]),
pytest.param(CosineAnnealingWithWarmupScheduler(t_warmup='0.9dur', alpha_f=0.5), 0.01,
['0ba', '4500ba', '9000ba', '9333ba', '9500ba', '10000ba'], [0.0, 0.5, 1.0, 0.875, 0.75, 0.5]),
pytest.param(CosineAnnealingWithWarmupScheduler(t_warmup='0.9dur', alpha_f=0.5, scale_warmup=True), 0.01,
['0ba', '4500ba', '9000ba', '9333ba', '9500ba', '10000ba'], [0.0, 0.5, 1.0, 0.875, 0.75, 0.5]),
pytest.param(CosineAnnealingWithWarmupScheduler(t_warmup='1000ep'), 1.0, ['0ep', '100ep', '1000ep'],
[0.0, 0.1, 1.0]),
pytest.param(PolynomialWithWarmupScheduler(t_warmup='0.9dur'), 1.0,
['0ba', '450000ba', '900000ba', '913397ba', '929289ba', '1000000ba'], [0.0, 0.5, 1.0, 0.75, 0.5, 0.0]),
pytest.param(PolynomialWithWarmupScheduler(t_warmup='0.9dur', alpha_f=0.5), 0.01,
['0ba', '4500ba', '9000ba', '9134ba', '9293ba', '10000ba'], [0.0, 0.5, 1.0, 0.875, 0.75, 0.5]),
pytest.param(PolynomialWithWarmupScheduler(t_warmup='0.9dur', alpha_f=0.5, scale_warmup=True), 0.01,
['0ba', '4500ba', '9000ba', '9134ba', '9293ba', '10000ba'], [0.0, 0.5, 1.0, 0.875, 0.75, 0.5]),
pytest.param(PolynomialWithWarmupScheduler(t_warmup='1000ep'), 1.0, ['0ep', '100ep', '1000ep'], [0.0, 0.1, 1.0]),
])
def test_scheduler_init(scheduler: ComposerScheduler, ssr: float, test_times: List[str], expected_lrs: List[float],
dummy_schedulers_state: State):
state = dummy_schedulers_state
assert state.dataloader_len is not None
assert state.max_duration is not None
state.max_duration = Time(value=int(state.max_duration.value * ssr), unit=state.max_duration.unit)
for test_time, expected_lr in zip(test_times, expected_lrs):
parsed_time = Time.from_timestring(test_time)
assert parsed_time.unit in [TimeUnit.EPOCH, TimeUnit.BATCH]
if parsed_time.unit == TimeUnit.EPOCH:
state.timestamp = state.timestamp.copy(
epoch=parsed_time,
batch=Time(int(state.dataloader_len) * int(parsed_time), TimeUnit.BATCH),
)
else:
state.timestamp = state.timestamp.copy(
batch=parsed_time,
epoch=Time(int(parsed_time) // int(state.dataloader_len), TimeUnit.EPOCH),
)
lr = scheduler(state, ssr)
assert lr == pytest.approx(expected_lr, abs=1e-3)
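# Illustrative sketch only (not used by the tests): a ComposerScheduler is simply a
# callable mapping (state, ssr) to a learning-rate multiplier, as exercised above via
# scheduler(state, ssr). The name _example_step_scheduler is hypothetical.
def _example_step_scheduler(state: State, ssr: float = 1.0) -> float:
    # Full LR for the first 100 (SSR-scaled) batches, then decay to 10%.
    if state.timestamp.batch < Time(int(100 * ssr), TimeUnit.BATCH):
        return 1.0
    return 0.1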
@pytest.mark.parametrize(
'scheduler,ssr,should_raise',
[
(StepScheduler(step_size='2ba'), 1.0, None),
(StepScheduler(step_size='0.2dur', gamma=0.8), 0.5, None),
        (lambda state, ssr=1.0: 0.01 * ssr, 1.5, None),  # lambdas are also allowed as a ComposerScheduler
(lambda state: 0.01, 1.0, None), # if the ssr = 1.0, then the lambda need not take the ssr parameter
(lambda state: 0.01, 1.5,
ValueError), # this should error since the ssr != 1.0 and the lambda doesn't support ssr
])
def test_scheduler_trains(
scheduler: ComposerScheduler,
ssr: float,
rank_zero_seed: int,
should_raise: Optional[Type[Exception]],
):
with pytest.raises(should_raise) if should_raise is not None else contextlib.nullcontext():
trainer = Trainer(
model=SimpleModel(),
train_dataloader=DataLoader(RandomClassificationDataset()),
max_duration='2ep',
train_subset_num_batches=5,
scale_schedule_ratio=ssr,
schedulers=scheduler,
seed=rank_zero_seed,
)
trainer.fit()
| composer-dev | tests/optim/test_scheduler.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""
Test inference APIs.
"""
import os
import tempfile
from functools import partial
from unittest.mock import ANY, patch
import pytest
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from composer.core import Precision, State
from composer.devices import DeviceCPU, DeviceGPU
from composer.functional import apply_gated_linear_units
from composer.loggers import InMemoryLogger, Logger
from composer.loggers.logger_destination import LoggerDestination
from composer.models import composer_resnet
from composer.trainer.dist_strategy import prepare_ddp_module
from composer.trainer.trainer import Trainer
from composer.utils import dist, export_with_logger, inference
from composer.utils.device import get_device
from tests.common import SimpleTransformerClassifier, device
from tests.common.datasets import RandomImageDataset, dummy_transformer_classifier_batch
class MockFileUploader(LoggerDestination):
"""Mocks a generic file uploader interface."""
def can_upload_files(self) -> bool:
return True
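# Returning True from can_upload_files() is what makes export_with_logger (used in
# test_export_with_file_uploading_logger below) route the exported artifact through the
# logger destination rather than saving it directly to save_path.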
@pytest.mark.parametrize('model_cls, sample_input', [
(partial(composer_resnet, 'resnet18'), (torch.rand(4, 3, 224, 224), torch.randint(10, (4,)))),
(SimpleTransformerClassifier, dummy_transformer_classifier_batch(vocab_size=10)),
])
def test_export_for_inference_torchscript(model_cls, sample_input):
model = model_cls()
model.eval()
orig_out = model(sample_input)
save_format = 'torchscript'
with tempfile.TemporaryDirectory() as tempdir:
        save_path = os.path.join(tempdir, 'model.pt')
inference.export_for_inference(
model=model,
save_format=save_format,
save_path=save_path,
)
loaded_model = torch.jit.load(save_path)
loaded_model.eval()
loaded_model_out = loaded_model(sample_input)
torch.testing.assert_close(
orig_out,
loaded_model_out,
msg=f'output mismatch with {save_format}',
)
def test_huggingface_export_for_inference_onnx(tiny_bert_config):
pytest.importorskip('onnx')
pytest.importorskip('onnxruntime')
pytest.importorskip('transformers')
import onnx
import onnx.checker
import onnxruntime as ort
import transformers
from composer.models import HuggingFaceModel
    # HuggingFace BERT model
    # Dummy sequence batch with 2 labels, sequence length 32, and the BERT vocab size (30522).
input_ids = torch.randint(low=0, high=30522, size=(2, 32))
    labels = torch.randint(low=0, high=2, size=(2,))
    token_type_ids = torch.zeros(size=(2, 32), dtype=torch.int64)
    attention_mask = torch.randint(low=0, high=2, size=(2, 32))
sample_input = {
'input_ids': input_ids,
'labels': labels,
'token_type_ids': token_type_ids,
'attention_mask': attention_mask,
}
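    # dynamic_axes tells the ONNX exporter which tensor dimensions may vary at runtime
    # (batch size and sequence length here) instead of baking in the sample input's shapes.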
dynamic_axes = {
'input_ids': {
0: 'batch_size',
1: 'seq_len'
},
'labels': {
0: 'batch_size'
},
'token_type_ids': {
0: 'batch_size',
1: 'seq_len'
},
'attention_mask': {
0: 'batch_size',
1: 'seq_len'
},
}
tiny_bert_config.num_labels = 2
tiny_bert_config.hidden_act = 'gelu_new'
hf_model = transformers.AutoModelForSequenceClassification.from_config(
tiny_bert_config) # type: ignore (thirdparty)
model = HuggingFaceModel(hf_model)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
apply_gated_linear_units(model, optimizer)
model.eval()
orig_out = model(sample_input)
save_format = 'onnx'
with tempfile.TemporaryDirectory() as tempdir:
save_path = os.path.join(tempdir, f'model.{save_format}')
inference.export_for_inference(
model=model,
save_format=save_format,
save_path=save_path,
sample_input=(sample_input, {}),
dynamic_axes=dynamic_axes,
)
loaded_model = onnx.load(save_path)
onnx.checker.check_model(loaded_model)
ort_session = ort.InferenceSession(save_path)
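        # ONNX Runtime expects numpy inputs, so convert the torch tensors in place before running.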
for key, value in sample_input.items():
sample_input[key] = value.numpy()
loaded_model_out = ort_session.run(None, sample_input)
torch.testing.assert_close(
orig_out['logits'].detach().numpy(),
loaded_model_out[1],
rtol=1e-4, # lower tolerance for ONNX
atol=1e-3, # lower tolerance for ONNX
msg=f'output mismatch with {save_format}',
)
@pytest.mark.gpu
def test_gpu_huggingface_export_for_inference_onnx():
pytest.importorskip('onnx')
pytest.importorskip('onnxruntime')
pytest.importorskip('transformers')
import onnx
import onnx.checker
import onnxruntime as ort
import transformers
from composer.functional import apply_low_precision_layernorm
from composer.models import HuggingFaceModel
    # HuggingFace BERT model
    # Dummy sequence batch with 2 labels, sequence length 32, and the BERT vocab size (30522).
input_ids = torch.randint(low=0, high=30522, size=(2, 32))
    labels = torch.randint(low=0, high=2, size=(2,))
    token_type_ids = torch.zeros(size=(2, 32), dtype=torch.int64)
    attention_mask = torch.randint(low=0, high=2, size=(2, 32))
sample_input = {
'input_ids': input_ids,
'labels': labels,
'token_type_ids': token_type_ids,
'attention_mask': attention_mask,
}
dynamic_axes = {
'input_ids': {
0: 'batch_size',
1: 'seq_len'
},
'labels': {
0: 'batch_size'
},
'token_type_ids': {
0: 'batch_size',
1: 'seq_len'
},
'attention_mask': {
0: 'batch_size',
1: 'seq_len'
},
}
    # Build the model from a config (no pretrained weights) so the test avoids downloading the full BERT weights.
config = transformers.AutoConfig.from_pretrained('bert-base-uncased', num_labels=2, hidden_act='gelu_new')
hf_model = transformers.AutoModelForSequenceClassification.from_config(config) # type: ignore (thirdparty)
model = HuggingFaceModel(hf_model)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
apply_gated_linear_units(model, optimizer)
apply_low_precision_layernorm(model, Precision('amp_fp16'), optimizer)
model.eval()
orig_out = model(sample_input)
gpu = torch.device('cuda:0')
model.to(gpu)
for key, val in sample_input.items():
sample_input[key] = val.to(gpu)
save_format = 'onnx'
with tempfile.TemporaryDirectory() as tempdir:
save_path = os.path.join(tempdir, f'model.{save_format}')
inference.export_for_inference(
model=model,
save_format=save_format,
save_path=save_path,
sample_input=(sample_input, {}),
dynamic_axes=dynamic_axes,
)
loaded_model = onnx.load(save_path)
onnx.checker.check_model(loaded_model)
ort_session = ort.InferenceSession(save_path)
for key, value in sample_input.items():
sample_input[key] = value.cpu().numpy()
loaded_model_out = ort_session.run(None, sample_input)
torch.testing.assert_close(
orig_out['logits'].detach().numpy(),
loaded_model_out[1],
rtol=1e-4, # lower tolerance for ONNX
atol=1e-3, # lower tolerance for ONNX
msg=f'output mismatch with {save_format}',
)
@device('cpu', 'gpu')
@pytest.mark.parametrize(
'model_cls, sample_input',
[
(partial(composer_resnet, 'resnet18'), (torch.rand(4, 3, 224, 224), torch.randint(10, (4,)))),
],
)
def test_export_for_inference_onnx(model_cls, sample_input, device):
pytest.importorskip('onnx')
pytest.importorskip('onnxruntime')
import onnx
import onnx.checker
import onnxruntime as ort
model = model_cls()
model.eval()
composer_device = get_device(device)
cpu_device = get_device('cpu')
sample_input = (composer_device.tensor_to_device(sample_input[0]),
composer_device.tensor_to_device(sample_input[1]))
composer_device.module_to_device(model)
orig_out = model(sample_input)
save_format = 'onnx'
with tempfile.TemporaryDirectory() as tempdir:
save_path = os.path.join(tempdir, f'model.{save_format}')
inference.export_for_inference(
model=model,
save_format=save_format,
save_path=save_path,
sample_input=(sample_input, {}),
)
loaded_model = onnx.load(save_path)
onnx.checker.check_model(loaded_model)
ort_session = ort.InferenceSession(save_path)
loaded_model_out = ort_session.run(
None,
{'input': cpu_device.tensor_to_device(sample_input[0]).numpy()},
)
torch.testing.assert_close(
cpu_device.tensor_to_device(orig_out.detach()).numpy(),
loaded_model_out[0],
rtol=1e-4 if isinstance(composer_device, DeviceCPU) else 1e-3, # lower tolerance for ONNX
atol=1e-3 if isinstance(composer_device, DeviceCPU) else 1e-2, # lower tolerance for ONNX
msg=lambda msg: f'output mismatch with {save_format}\n\nOriginal message: {msg}',
)
@pytest.mark.parametrize(
'model_cls, sample_input',
[
(partial(composer_resnet, 'resnet18'), (torch.rand(1, 3, 224, 224), torch.randint(10, (1,)))),
],
)
@pytest.mark.world_size(2)
def test_export_for_inference_onnx_ddp(model_cls, sample_input, request: pytest.FixtureRequest):
pytest.importorskip('onnx')
pytest.importorskip('onnxruntime')
import onnx
import onnx.checker
import onnxruntime as ort
model = model_cls()
optimizer = torch.optim.SGD(model.parameters(), 0.1)
device = None
for item in request.session.items:
device = DeviceCPU() if item.get_closest_marker('gpu') is None else DeviceGPU()
break
    assert device is not None
state = State(
model=model,
rank_zero_seed=0,
device=device,
run_name='run_name',
optimizers=optimizer,
max_duration='1ep',
dataloader=DataLoader(RandomImageDataset(shape=(3, 224, 224))),
dataloader_label='train',
precision='fp32',
)
state.model = prepare_ddp_module(state.model, find_unused_parameters=True)
state.model.eval()
orig_out = state.model(sample_input)
save_format = 'onnx'
# Only one rank needs to save/load model
if dist.get_local_rank() == 0:
with tempfile.TemporaryDirectory() as tempdir:
save_path = os.path.join(str(tempdir), f'model.{save_format}')
assert isinstance(state.model.module, nn.Module)
inference.export_for_inference(
model=state.model.module,
save_format=save_format,
save_path=save_path,
sample_input=(sample_input, {}),
)
loaded_model = onnx.load(save_path)
onnx.checker.check_model(loaded_model)
ort_session = ort.InferenceSession(save_path)
loaded_model_out = ort_session.run(
None,
{'input': sample_input[0].numpy()},
)
torch.testing.assert_close(
orig_out.detach().numpy(),
loaded_model_out[0],
rtol=1e-4, # lower tolerance for ONNX
atol=1e-3, # lower tolerance for ONNX
)
@pytest.mark.parametrize(
'model_cls, sample_input',
[
(partial(composer_resnet, 'resnet18'), (torch.rand(1, 3, 224, 224), torch.randint(10, (1,)))),
],
)
@pytest.mark.world_size(2)
def test_export_for_inference_torchscript_ddp(model_cls, sample_input, request: pytest.FixtureRequest):
model = model_cls()
optimizer = torch.optim.SGD(model.parameters(), 0.1)
device = None
for item in request.session.items:
device = DeviceCPU() if item.get_closest_marker('gpu') is None else DeviceGPU()
break
    assert device is not None
state = State(
model=model,
rank_zero_seed=0,
device=device,
run_name='run_name',
optimizers=optimizer,
max_duration='1ep',
dataloader=DataLoader(RandomImageDataset(shape=(3, 224, 224))),
dataloader_label='train',
precision='fp32',
)
state.model = prepare_ddp_module(state.model, find_unused_parameters=True)
state.model.eval()
orig_out = state.model(sample_input)
save_format = 'torchscript'
# Only one rank needs to save/load model
if dist.get_local_rank() == 0:
with tempfile.TemporaryDirectory() as tempdir:
            save_path = os.path.join(str(tempdir), 'model.pt')
assert isinstance(state.model.module, nn.Module)
inference.export_for_inference(
model=state.model.module,
save_format=save_format,
save_path=save_path,
)
loaded_model = torch.jit.load(save_path)
loaded_model.eval()
loaded_model_out = loaded_model(sample_input)
torch.testing.assert_close(orig_out, loaded_model_out)
@pytest.mark.parametrize(
'model_cls, sample_input',
[
(partial(composer_resnet, 'resnet18'), (torch.rand(1, 3, 224, 224), torch.randint(10, (1,)))),
],
)
def test_export_with_file_uploading_logger(model_cls, sample_input):
with patch('composer.utils.inference.export_for_inference'):
save_format = 'torchscript'
model = model_cls()
mock_obj_logger = MockFileUploader()
with tempfile.TemporaryDirectory() as tempdir:
            save_path = os.path.join(tempdir, 'model.pt')
# Construct the trainer and train
trainer = Trainer(
model=model,
train_dataloader=DataLoader(RandomImageDataset(shape=(3, 224, 224))),
max_duration='1ba',
)
trainer.fit()
mock_logger = Logger(state=trainer.state, destinations=[mock_obj_logger])
export_with_logger(
model=model,
save_format=save_format,
save_path=save_path,
logger=mock_logger,
)
# Assert export_for_inference utility called with expected inputs
inference.export_for_inference.assert_called_once_with(
model=model,
save_format=save_format,
save_path=ANY,
sample_input=ANY,
transforms=None,
)
@pytest.mark.parametrize(
'model_cls, sample_input',
[
(partial(composer_resnet, 'resnet18'), (torch.rand(1, 3, 224, 224), torch.randint(10, (1,)))),
],
)
def test_export_with_other_logger(model_cls, sample_input):
with patch('composer.utils.inference.export_for_inference'):
save_format = 'torchscript'
model = model_cls()
non_file_uploading_logger = InMemoryLogger()
with tempfile.TemporaryDirectory() as tempdir:
            save_path = os.path.join(tempdir, 'model.pt')
# Construct the trainer and train
trainer = Trainer(
model=model,
train_dataloader=DataLoader(RandomImageDataset(shape=(3, 224, 224))),
max_duration='1ba',
)
trainer.fit()
mock_logger = Logger(
state=trainer.state,
destinations=[non_file_uploading_logger],
)
export_with_logger(
model=model,
save_format=save_format,
save_path=save_path,
logger=mock_logger,
)
# Assert export_for_inference utility called with expected inputs
inference.export_for_inference.assert_called_once_with(
model=model,
save_format=save_format,
save_path=save_path,
save_object_store=None,
sample_input=ANY,
transforms=None,
)
class LinModel(nn.Module):
def __init__(self):
super().__init__()
self.lin1 = nn.Linear(256, 128)
self.lin2 = nn.Linear(128, 256)
def forward(self, x):
x = self.lin1(x)
x = self.lin2(x)
return x
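# test_dynamic_quantize below expects roughly a 4x size reduction, which is what dynamic
# quantization gives by storing the Linear layers' fp32 weights as int8.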
@pytest.mark.parametrize(
'model_cls',
[
(LinModel),
],
)
def test_dynamic_quantize(model_cls):
model = model_cls()
save_format = 'torchscript'
with tempfile.TemporaryDirectory() as tempdir:
        save_path_no_quantize = os.path.join(tempdir, 'model_no_quantize.pt')
inference.export_for_inference(
model=model,
save_format=save_format,
save_path=save_path_no_quantize,
)
        save_path_quantize = os.path.join(tempdir, 'model_quantize.pt')
inference.export_for_inference(
model=model,
save_format=save_format,
save_path=save_path_quantize,
transforms=[inference.quantize_dynamic],
)
no_quantize_size = os.path.getsize(save_path_no_quantize)
quantize_size = os.path.getsize(save_path_quantize)
        # The size difference should be almost 4x
assert no_quantize_size > 3 * quantize_size, "Quantization didn't work"
| composer-dev | tests/utils/test_inference.py |