python_code | repo_name | file_path
---|---|---|
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import datetime
import os
import pathlib
from unittest.mock import MagicMock, patch
import pytest
import pytest_httpserver
from composer import loggers
from composer.core.time import Time, Timestamp, TimeUnit
from composer.utils import file_helpers
from composer.utils.file_helpers import (ensure_folder_has_no_conflicting_files, ensure_folder_is_empty,
format_name_with_dist, format_name_with_dist_and_time, get_file, is_tar,
maybe_create_object_store_from_uri,
maybe_create_remote_uploader_downloader_from_uri, parse_uri)
from composer.utils.object_store.libcloud_object_store import LibcloudObjectStore
from tests.common.markers import world_size
from tests.loggers.test_remote_uploader_downloader import DummyObjectStore
@pytest.mark.xfail(reason='Occasionally hits the timeout. Should refactor to use a local webserver.')
def test_get_file_uri(tmp_path: pathlib.Path, httpserver: pytest_httpserver.HTTPServer):
httpserver.expect_request('/hi').respond_with_data('hi')
get_file(
path=httpserver.url_for('/hi'),
object_store=None,
destination=str(tmp_path / 'example'),
)
with open(str(tmp_path / 'example'), 'r') as f:
assert f.readline().startswith('<!')
@pytest.mark.xfail(reason='Occasionally hits the timeout. Should refactor to use a local webserver.')
def test_get_file_uri_not_found(tmp_path: pathlib.Path, httpserver: pytest_httpserver.HTTPServer):
with pytest.raises(FileNotFoundError):
get_file(
path=httpserver.url_for('/not_found_url'),
object_store=None,
destination=str(tmp_path / 'example'),
)
def test_get_file_object_store(tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch):
pytest.importorskip('libcloud')
remote_dir = tmp_path / 'remote_dir'
os.makedirs(remote_dir)
monkeypatch.setenv('OBJECT_STORE_KEY', str(remote_dir)) # for the local option, the key is the path
provider = LibcloudObjectStore(
provider='local',
key_environ='OBJECT_STORE_KEY',
container='.',
)
with open(str(remote_dir / 'checkpoint.txt'), 'wb') as f:
f.write(b'checkpoint1')
get_file(
path='checkpoint.txt',
object_store=provider,
destination=str(tmp_path / 'example'),
)
with open(str(tmp_path / 'example'), 'rb') as f:
assert f.read() == b'checkpoint1'
def test_get_file_auto_object_store(tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch):
with patch('composer.utils.file_helpers.S3ObjectStore', DummyObjectStore):
object_store = DummyObjectStore(pathlib.Path('my-test-bucket'))
with open(str(tmp_path / 'test-file.txt'), 'w') as _txt_file:
_txt_file.write('testing')
object_store.upload_object('test-file.txt', str(tmp_path / 'test-file.txt'))
get_file(f's3://my-test-bucket/test-file.txt', str(tmp_path / 'loaded-test-file.txt'))
with open(str(tmp_path / 'loaded-test-file.txt')) as _txt_file:
loaded_content = _txt_file.read()
assert loaded_content.startswith('testing')
def test_get_file_object_store_with_symlink(tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch):
pytest.importorskip('libcloud')
remote_dir = tmp_path / 'remote_dir'
os.makedirs(remote_dir)
monkeypatch.setenv('OBJECT_STORE_KEY', str(remote_dir)) # for the local option, the key is the path
provider = LibcloudObjectStore(
provider='local',
key_environ='OBJECT_STORE_KEY',
container='.',
)
# Add file to object store
with open(str(remote_dir / 'checkpoint.txt'), 'wb') as f:
f.write(b'checkpoint1')
# Add symlink to object store
with open(str(remote_dir / 'latest.symlink'), 'w') as f:
f.write('checkpoint.txt')
# Fetch object, should automatically follow symlink
get_file(
path='latest.symlink',
object_store=provider,
destination=str(tmp_path / 'example'),
)
with open(str(tmp_path / 'example'), 'rb') as f:
assert f.read() == b'checkpoint1'
# Fetch object without specifying .symlink, should automatically follow
get_file(
path='latest',
object_store=provider,
destination=str(tmp_path / 'example'),
overwrite=True,
)
with open(str(tmp_path / 'example'), 'rb') as f:
assert f.read() == b'checkpoint1'
def test_get_file_object_store_not_found(tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch):
pytest.importorskip('libcloud')
remote_dir = tmp_path / 'remote_dir'
os.makedirs(remote_dir)
monkeypatch.setenv('OBJECT_STORE_KEY', str(remote_dir)) # for the local option, the key is the path
provider = LibcloudObjectStore(
provider='local',
key_environ='OBJECT_STORE_KEY',
container='.',
)
with pytest.raises(FileNotFoundError):
get_file(
path='checkpoint.txt',
object_store=provider,
destination=str(tmp_path / 'example'),
)
def test_get_file_local_path(tmp_path: pathlib.Path):
tmpfile_name = os.path.join(tmp_path, 'file.txt')
with open(tmpfile_name, 'x') as f:
f.write('hi!')
get_file(
path=tmpfile_name,
object_store=None,
destination=str(tmp_path / 'example'),
)
with open(str(tmp_path / 'example'), 'r') as f:
assert f.read() == 'hi!'
def test_get_file_local_path_not_found():
with pytest.raises(FileNotFoundError):
get_file(
path='/path/does/not/exist',
object_store=None,
destination='destination',
)
def test_is_tar():
assert is_tar('x.tar')
assert is_tar('x.tgz')
assert is_tar('x.tar.gz')
assert is_tar('x.tar.bz2')
assert is_tar('x.tar.lzma')
assert not is_tar('x')
def test_format_name_with_dist():
vars = ['run_name', 'rank', 'node_rank', 'world_size', 'local_world_size', 'local_rank', 'extra']
format_str = ','.join(f'{x}={{{x}}}' for x in vars)
expected_str = 'run_name=awesome_run,rank=0,node_rank=0,world_size=1,local_world_size=1,local_rank=0,extra=42'
assert format_name_with_dist(format_str, 'awesome_run', extra=42) == expected_str
@world_size(2)
def test_safe_format_name_with_dist(monkeypatch: pytest.MonkeyPatch, world_size):
"""node rank deleted, but not in format string, so format should complete."""
vars = ['run_name', 'world_size']
format_str = ','.join(f'{x}={{{x}}}' for x in vars)
expected_str = 'run_name=awesome_run,world_size=2'
monkeypatch.delenv('NODE_RANK')
assert format_name_with_dist(format_str, 'awesome_run') == expected_str
@world_size(2)
def test_unsafe_format_name_with_dist(monkeypatch: pytest.MonkeyPatch, world_size):
"""Node rank is deleted, but also in the format string, so expect error."""
vars = ['run_name', 'node_rank']
format_str = ','.join(f'{x}={{{x}}}' for x in vars)
monkeypatch.delenv('NODE_RANK')
with pytest.raises(KeyError):
assert format_name_with_dist(format_str, 'awesome_run') == 'run_name=awesome_run,node_rank=3'
def test_format_name_with_dist_and_time():
vars = [
'run_name',
'rank',
'node_rank',
'world_size',
'local_world_size',
'local_rank',
'extra',
'epoch',
'batch',
'batch_in_epoch',
'sample',
'sample_in_epoch',
'token',
'token_in_epoch',
'total_wct',
'epoch_wct',
'batch_wct',
]
format_str = ','.join(f'{x}={{{x}}}' for x in vars)
expected_str = ('run_name=awesome_run,rank=0,node_rank=0,world_size=1,local_world_size=1,local_rank=0,extra=42,'
'epoch=0,batch=1,batch_in_epoch=1,sample=2,sample_in_epoch=2,token=3,token_in_epoch=3,'
'total_wct=36000.0,epoch_wct=3000.0,batch_wct=5.0')
timestamp = Timestamp(
epoch=Time.from_timestring('0ep'),
batch=Time.from_timestring('1ba'),
batch_in_epoch=Time.from_timestring('1ba'),
sample=Time.from_timestring('2sp'),
sample_in_epoch=Time.from_timestring('2sp'),
token=Time.from_timestring('3tok'),
token_in_epoch=Time.from_timestring('3tok'),
total_wct=datetime.timedelta(hours=10), # formatted as seconds
epoch_wct=datetime.timedelta(minutes=50), # formatted as seconds
batch_wct=datetime.timedelta(seconds=5), # formatted as seconds
)
assert format_name_with_dist_and_time(format_str, 'awesome_run', timestamp=timestamp, extra=42) == expected_str
@pytest.mark.parametrize('input_uri,expected_parsed_uri', [
('backend://bucket/path', ('backend', 'bucket', 'path')),
('backend://bucket@namespace/path', ('backend', 'bucket', 'path')),
('backend://bucket/a/longer/path', ('backend', 'bucket', 'a/longer/path')),
('a/long/path', ('', '', 'a/long/path')),
('/a/long/path', ('', '', '/a/long/path')),
('backend://bucket/', ('backend', 'bucket', '')),
('backend://bucket', ('backend', 'bucket', '')),
('backend://', ('backend', '', '')),
])
def test_parse_uri(input_uri, expected_parsed_uri):
actual_parsed_uri = parse_uri(input_uri)
assert actual_parsed_uri == expected_parsed_uri
def test_maybe_create_object_store_from_uri(monkeypatch):
mock_s3_obj = MagicMock()
monkeypatch.setattr(file_helpers, 'S3ObjectStore', mock_s3_obj)
mock_oci_obj = MagicMock()
monkeypatch.setattr(file_helpers, 'OCIObjectStore', mock_oci_obj)
mock_gs_libcloud_obj = MagicMock()
monkeypatch.setattr(file_helpers, 'LibcloudObjectStore', mock_gs_libcloud_obj)
assert maybe_create_object_store_from_uri('checkpoint/for/my/model.pt') is None
maybe_create_object_store_from_uri('s3://my-bucket/path')
mock_s3_obj.assert_called_once_with(bucket='my-bucket')
with pytest.raises(NotImplementedError):
maybe_create_object_store_from_uri('wandb://my-cool/checkpoint/for/my/model.pt')
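# gs:// URIs fall back to a Libcloud-backed store and require GCS_KEY / GCS_SECRET in the
# environment; before those variables are set below, creation is expected to fail.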
with pytest.raises(ValueError):
maybe_create_object_store_from_uri('gs://my-bucket/path')
os.environ['GCS_KEY'] = 'foo'
os.environ['GCS_SECRET'] = 'foo'
maybe_create_object_store_from_uri('gs://my-bucket/path')
mock_gs_libcloud_obj.assert_called_once_with(
provider='google_storage',
container='my-bucket',
key_environ='GCS_KEY',
secret_environ='GCS_SECRET',
)
del os.environ['GCS_KEY']
del os.environ['GCS_SECRET']
maybe_create_object_store_from_uri('oci://my-bucket/path')
mock_oci_obj.assert_called_once_with(bucket='my-bucket')
with pytest.raises(NotImplementedError):
maybe_create_object_store_from_uri('ms://bucket/checkpoint/for/my/model.pt')
def test_maybe_create_remote_uploader_downloader_from_uri(monkeypatch):
assert maybe_create_remote_uploader_downloader_from_uri('checkpoint/for/my/model.pt', loggers=[]) is None
from composer.loggers import RemoteUploaderDownloader
mock_remote_ud_obj = MagicMock()
mock_remote_ud_obj.remote_backend_name = 's3'
mock_remote_ud_obj.remote_bucket_name = 'my-nifty-bucket'
mock_remote_ud_obj.__class__ = RemoteUploaderDownloader
with pytest.warns(Warning, match='There already exists a RemoteUploaderDownloader object to handle'):
maybe_create_remote_uploader_downloader_from_uri('s3://my-nifty-bucket/path', loggers=[mock_remote_ud_obj])
del RemoteUploaderDownloader
with monkeypatch.context() as m:
mock_remote_ud = MagicMock()
m.setattr(loggers, 'RemoteUploaderDownloader', mock_remote_ud)
maybe_create_remote_uploader_downloader_from_uri('s3://my-nifty-s3-bucket/path/to/checkpoints.pt', loggers=[])
mock_remote_ud.assert_called_once_with(bucket_uri='s3://my-nifty-s3-bucket')
with monkeypatch.context() as m:
mock_remote_ud = MagicMock()
m.setattr(loggers, 'RemoteUploaderDownloader', mock_remote_ud)
maybe_create_remote_uploader_downloader_from_uri('oci://my-nifty-oci-bucket/path/to/checkpoints.pt', loggers=[])
mock_remote_ud.assert_called_once_with(bucket_uri='oci://my-nifty-oci-bucket')
with monkeypatch.context() as m:
mock_remote_ud = MagicMock()
m.setattr(loggers, 'RemoteUploaderDownloader', mock_remote_ud)
with pytest.raises(ValueError):
maybe_create_remote_uploader_downloader_from_uri('gs://my-nifty-gs-bucket/path/to/checkpoints.pt',
loggers=[])
os.environ['GCS_KEY'] = 'foo'
os.environ['GCS_SECRET'] = 'foo'
maybe_create_remote_uploader_downloader_from_uri('gs://my-nifty-gs-bucket/path/to/checkpoints.pt', loggers=[])
mock_remote_ud.assert_called_once_with(bucket_uri='libcloud://my-nifty-gs-bucket',
backend_kwargs={
'provider': 'google_storage',
'container': 'my-nifty-gs-bucket',
'key_environ': 'GCS_KEY',
'secret_environ': 'GCS_SECRET',
})
del os.environ['GCS_KEY']
del os.environ['GCS_SECRET']
with pytest.raises(NotImplementedError):
maybe_create_remote_uploader_downloader_from_uri('wandb://my-cool/checkpoint/for/my/model.pt', loggers=[])
with pytest.raises(NotImplementedError):
maybe_create_remote_uploader_downloader_from_uri('ms://bucket/checkpoint/for/my/model.pt', loggers=[])
def test_ensure_folder_is_empty(tmp_path: pathlib.Path):
ensure_folder_is_empty(tmp_path)
@pytest.mark.parametrize(
'filename,new_file,success',
[
[
'blazing-unicorn-ep{epoch}-batch{batch}-tie{token_in_epoch}-rank{rank}.pt',
'blazing-unicorn-ep1-batch3-tie6-rank0.pt', True
], # Ignore timestamps in past
[
'blazing-unicorn-ep{epoch}-batch{batch}-tie{token_in_epoch}-rank{rank}.pt',
'blazing-unicorn-ep2-batch6-tie7-rank0.pt', True
], # Ignore timestamps with the same time as current
[
'blazing-unicorn-ep{epoch}-batch{batch}-tie{token_in_epoch}-rank{rank}.pt',
'blazing-unicorn-ep1-batch6-tie9-rank0.pt', True
], # Ignore timestamps with earlier epochs but later samples in epoch
[
'blazing-unicorn-ep{epoch}-batch{batch}-tie{token_in_epoch}-rank{rank}.pt',
'inglorious-monkeys-ep1-batch3-tie6-rank0.pt', True
], # Ignore timestamps of different runs
[
'blazing-unicorn-ep{epoch}-batch{batch}-tie{token_in_epoch}-rank{rank}.pt', 'blazing-unicorn-ep3-rank0.pt',
True
], # Ignore timestamps with same run name but different format
[
'blazing-unicorn-ep{epoch}-batch{batch}-tie{token_in_epoch}-rank{rank}.pt',
'blazing-unicorn-ep3-batch9-tie6-rank0.pt', False
], # Error if in future
[
'blazing-unicorn-ep{epoch}-batch{batch}-tie{token_in_epoch}-rank{rank}.pt',
'blazing-unicorn-ep3-batch9-tie6-rank1.pt', False
], # Error if in future with different rank
[
'blazing-unicorn-ep{epoch}-batch{batch}-tie{token_in_epoch}-rank{rank}.pt',
'blazing-unicorn-ep1-batch9-tie6-rank0.pt', False
], # Error if in future for batches but not epochs
[
'blazing-unicorn-ep{epoch}-batch{batch}-tie{token_in_epoch}-rank{rank}.pt',
'blazing-unicorn-ep2-batch7-tie9-rank0.pt', False
], # Error if in same epoch but later sample in epoch
[
'charging-chungus-ep{epoch}-b{batch}-s{sample}-t{token}-bie{batch_in_epoch}-sie{sample_in_epoch}-tie{token_in_epoch}.pt',
'charging-chungus-ep1-b3-s6-t12-bie0-sie0-tie0.pt', True
], # Ignore timestamps in past
[
'charging-chungus-ep{epoch}-b{batch}-s{sample}-t{token}-bie{batch_in_epoch}-sie{sample_in_epoch}-tie{token_in_epoch}.pt',
'charging-chungus-ep2-b7-s15-t31-bie1-sie3-tie8.pt', False
], # Error if in future
],
)
def test_ensure_folder_has_no_conflicting_files(
tmp_path: pathlib.Path,
filename: str,
new_file: str,
success: bool,
):
timestamp = Timestamp(epoch=Time(2, TimeUnit.EPOCH),
batch=Time(7, TimeUnit.BATCH),
batch_in_epoch=Time(1, TimeUnit.BATCH),
sample=Time(15, TimeUnit.SAMPLE),
sample_in_epoch=Time(3, TimeUnit.SAMPLE),
token=Time(31, TimeUnit.TOKEN),
token_in_epoch=Time(7, TimeUnit.TOKEN))
with open(os.path.join(tmp_path, new_file), 'w') as f:
f.write('hello')
if success:
ensure_folder_has_no_conflicting_files(tmp_path, filename, timestamp)
else:
with pytest.raises(FileExistsError):
ensure_folder_has_no_conflicting_files(tmp_path, filename, timestamp)
| composer-dev | tests/utils/test_file_helpers.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import itertools
from typing import Mapping, Type, cast
from unittest.mock import Mock
import pytest
import torch
from torch import nn
from composer.algorithms.blurpool import BlurMaxPool2d
from composer.utils import module_surgery
from tests.common import SimpleModel
class RecursiveLinear(nn.Linear):
def __init__(self, in_features: int, out_features: int):
super().__init__(in_features, out_features)
# submodule has modified out_features to prevent infinite recursion during test
self.submodule = nn.Linear(in_features, out_features - 1)
class SimpleReplacementPolicy(nn.Module):
"""Bundle the model, replacement function, and validation into one class."""
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(in_features=16, out_features=32)
self.fc2 = nn.Linear(in_features=32, out_features=10)
self.pool = nn.MaxPool2d(kernel_size=3)
@staticmethod
def maybe_replace_linear(module: torch.nn.Module, module_index: int):
del module_index # unused
if module.out_features in (10, 9) and not isinstance(module, RecursiveLinear):
return RecursiveLinear(cast(int, module.in_features), cast(int, module.out_features))
return None
@staticmethod
def replace_pool(module: torch.nn.Module, module_index: int):
assert isinstance(module, nn.MaxPool2d)
return BlurMaxPool2d.from_maxpool2d(module, module_index)
def policy(self) -> Mapping[Type[torch.nn.Module], module_surgery.ReplacementFunction]:
return {
nn.Linear: self.maybe_replace_linear,
nn.MaxPool2d: self.replace_pool,
}
def validate_replacements(self, recurse_on_replacements: bool):
assert type(self.fc1) is nn.Linear
assert type(self.fc2) is RecursiveLinear
assert type(self.pool) is BlurMaxPool2d
if recurse_on_replacements:
assert type(self.fc2.submodule) is RecursiveLinear
assert type(self.fc2.submodule.submodule) is nn.Linear
else:
assert type(self.fc2.submodule) is nn.Linear
class ModuleIdxReplacementPolicy(SimpleReplacementPolicy):
"""Test replacing only the first instance of a Linear layer."""
@staticmethod
def maybe_replace_linear(module: torch.nn.Module, module_index: int):
if module_index == 0:
return RecursiveLinear(cast(int, module.in_features), cast(int, module.out_features))
return None
def validate_replacements(self, recurse_on_replacements: bool):
del recurse_on_replacements # unused
assert type(self.fc1) is RecursiveLinear
assert type(self.fc2) is nn.Linear
assert type(self.fc1.submodule) is nn.Linear
class NoOpReplacementPolicy(SimpleReplacementPolicy):
def policy(self):
return {nn.Conv2d: Mock(side_effect=AssertionError('test should not match on this layer'))}
def validate_replacements(self, recurse_on_replacements: bool):
del recurse_on_replacements # unused
assert type(self.fc1) is nn.Linear
assert type(self.fc2) is nn.Linear
@pytest.mark.parametrize('recurse_on_replacements', [True, False])
@pytest.mark.parametrize('model_cls', [
SimpleReplacementPolicy,
ModuleIdxReplacementPolicy,
NoOpReplacementPolicy,
])
def test_module_replacement(
model_cls: Type[SimpleReplacementPolicy],
recurse_on_replacements: bool,
):
model = model_cls()
module_surgery.replace_module_classes(
model,
optimizers=None,
policies=model.policy(),
recurse_on_replacements=recurse_on_replacements,
)
model.validate_replacements(recurse_on_replacements)
@pytest.mark.gpu
def test_module_replacement_gpu():
model = SimpleReplacementPolicy()
model = model.cuda()
module_surgery.replace_module_classes(
model,
optimizers=None,
policies=model.policy(),
recurse_on_replacements=False,
)
model.validate_replacements(False)
# Validate the model devices are correct
for p in itertools.chain(model.parameters(), model.buffers()):
assert p.device.type == 'cuda'
class _CopyLinear(torch.nn.Module):
def __init__(self, in_features: int, out_features: int):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = torch.nn.parameter.Parameter(torch.empty((out_features, in_features)))
self.bias = None
@staticmethod
def from_linear(module: torch.nn.Module, module_index: int = -1):
assert isinstance(module.in_features, int)
assert isinstance(module.out_features, int)
ret = _CopyLinear(
in_features=module.in_features,
out_features=module.out_features,
)
with torch.no_grad():
# new param object
assert isinstance(module.weight, torch.Tensor)
ret.weight.copy_(module.weight)
ret.bias = module.bias # same param object
return ret
@pytest.fixture
def optimizer_surgery_state():
"""Returns a tuple of (old_layers, new_layers, and optimizer)."""
model = SimpleModel(num_features=1, num_classes=10)
policy: Mapping[Type[torch.nn.Module], module_surgery.ReplacementFunction] = {
torch.nn.Linear: _CopyLinear.from_linear
}
opt = torch.optim.SGD(model.parameters(), lr=.001)
orig_linear_modules = [model.fc1, model.fc2]
module_surgery.replace_module_classes(model, policies=policy, optimizers=opt)
new_linear_modules = [model.fc1, model.fc2]
return orig_linear_modules, new_linear_modules, opt
def test_no_duplicate_params(optimizer_surgery_state):
_, _, opt = optimizer_surgery_state
params_list = opt.param_groups[0]['params']
params_set = set(params_list)
assert len(params_list) == len(params_set)
def _param_in_optimizer(param: torch.nn.parameter.Parameter, opt: torch.optim.Optimizer):
return module_surgery._find_param_in_optimizer(param, opt) >= 0
def test_params_removed(optimizer_surgery_state):
orig_linear_modules, _, opt = optimizer_surgery_state
for module in orig_linear_modules:
assert isinstance(module.weight, torch.nn.parameter.Parameter)
assert not _param_in_optimizer(module.weight, opt)
def test_new_params_added(optimizer_surgery_state):
_, new_linear_modules, opt = optimizer_surgery_state
for module in new_linear_modules:
assert isinstance(module.weight, torch.nn.parameter.Parameter)
assert _param_in_optimizer(module.weight, opt)
assert isinstance(module.bias, torch.nn.parameter.Parameter)
assert _param_in_optimizer(module.bias, opt)
def test_params_kept(optimizer_surgery_state):
orig_linear_modules, _, opt = optimizer_surgery_state
for module in orig_linear_modules:
assert isinstance(module.bias, torch.nn.parameter.Parameter)
assert _param_in_optimizer(module.bias, opt)
class ParamTestModel(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(8, 8)
self.fc2 = nn.Linear(16, 16)
self.fc3 = nn.Linear(32, 32)
self.fc4 = nn.Linear(64, 64)
def test_update_params_in_optimizer():
m1 = ParamTestModel()
m2 = ParamTestModel()
optimizer = torch.optim.Adam(m1.parameters(), lr=0.01)
current_order = list(m2.parameters())
module_surgery.update_params_in_optimizer(old_params=m1.parameters(),
new_params=m2.parameters(),
optimizers=optimizer)
post_replacement_order = optimizer.param_groups[0]['params']
for idx, value in enumerate(current_order):
assert torch.all(value.eq(post_replacement_order[idx]))
| composer-dev | tests/utils/test_module_surgery.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from functools import partial
from composer.utils import import_object
def test_dynamic_import_object():
assert import_object('functools:partial') is partial
| composer-dev | tests/utils/test_dynamic_import.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pytest
from composer.utils import retry
@pytest.mark.parametrize('with_args', [True, False])
def test_retry(with_args: bool):
num_tries = 0
return_after = 2
if with_args:
decorator = retry(RuntimeError, num_attempts=3, initial_backoff=0.01, max_jitter=0.01)
return_after = 2
else:
decorator = retry
# Need to return immediately to avoid timeouts
return_after = 0
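# `retry` supports both usages exercised here: called with arguments to configure the
# exception type, attempt count, and backoff, or applied as a bare decorator with defaults.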
@decorator
def flaky_function():
nonlocal num_tries
if num_tries < return_after:
num_tries += 1
raise RuntimeError('Called too soon!')
return "Third time's a charm"
assert flaky_function() == "Third time's a charm"
| composer-dev | tests/utils/test_retrying.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from torch.optim import Adam
from torch.utils.data import DataLoader
from composer.algorithms import EMA
from composer.callbacks import SpeedMonitor
from composer.loggers import InMemoryLogger
from composer.trainer import Trainer
from composer.utils import convert_flat_dict_to_nested_dict, convert_nested_dict_to_flat_dict, extract_hparams
from tests.common.datasets import RandomClassificationDataset
from tests.common.models import SimpleModel
def test_convert_nested_dict_to_flat_dict():
test_nested_dict = {'a': 1, 'b': {'c': 2, 'd': 3}, 'e': {'f': {'g': 4}}}
expected_flat_dict = {'a': 1, 'b/c': 2, 'b/d': 3, 'e/f/g': 4}
actual_flat_dict = convert_nested_dict_to_flat_dict(test_nested_dict)
assert actual_flat_dict == expected_flat_dict
def test_convert_flat_dict_to_nested_dict():
expected_nested_dict = {'a': 1, 'b': {'c': 2, 'd': 3}, 'e': {'f': {'g': 4}}}
test_flat_dict = {'a': 1, 'b/c': 2, 'b/d': 3, 'e/f/g': 4}
actual_nested_dict = convert_flat_dict_to_nested_dict(test_flat_dict)
assert actual_nested_dict == expected_nested_dict
def test_extract_hparams():
class Foo:
def __init__(self):
self.g = 7
class Bar:
def __init__(self):
self.local_hparams = {'m': 11}
locals_dict = {
'a': 1.5,
'b': {
'c': 2.5,
'd': 3
},
'e': [4, 5, 6.2],
'f': Foo(),
'p': Bar(),
'_g': 7,
'h': None,
'i': True
}
expected_parsed_dict = {
'a': 1.5,
'b': {
'c': 2.5,
'd': 3
},
'e': [4, 5, 6.2],
'f': 'Foo',
'p': {
'Bar': {
'm': 11,
}
},
'h': None,
'i': True,
}
parsed_dict = extract_hparams(locals_dict)
assert parsed_dict == expected_parsed_dict
def test_extract_hparams_trainer():
train_dl = DataLoader(RandomClassificationDataset(), batch_size=16)
model = SimpleModel()
optimizer = Adam(model.parameters(), eps=1e-3)
trainer = Trainer(
model=model,
train_dataloader=train_dl,
device_train_microbatch_size=16,
optimizers=optimizer,
auto_log_hparams=True,
progress_bar=False,
log_to_console=False,
run_name='test',
seed=3,
algorithms=[EMA()],
loggers=[InMemoryLogger()],
callbacks=[SpeedMonitor()],
)
expected_hparams = {
'model': 'SimpleModel',
# Train Dataloader
'train_dataloader': 'DataLoader',
'train_dataloader_label': 'train',
'train_subset_num_batches': -1,
# Stopping Condition
'max_duration': None,
# Algorithms
'algorithms': ['EMA'],
# Engine Pass Registration
'algorithm_passes': None,
# Optimizers and Scheduling
'optimizers': 'Adam',
'schedulers': None,
'scale_schedule_ratio': 1.0,
'step_schedulers_every_batch': None,
# Evaluators
'eval_dataloader': None,
'eval_interval': 1,
'eval_subset_num_batches': -1,
# Callbacks and Logging
'callbacks': ['SpeedMonitor'],
'loggers': ['InMemoryLogger'],
'run_name': 'test',
'progress_bar': False,
'log_to_console': False,
'console_stream': 'stderr',
'console_log_interval': '1ba',
'log_traces': False,
'auto_log_hparams': True,
# Load Checkpoint
'load_path': None,
'load_object_store': None,
'load_weights_only': False,
'load_strict_model_weights': False,
'load_progress_bar': True,
'load_ignore_keys': None,
'load_exclude_algorithms': None,
# Save Checkpoint
'save_folder': None,
'save_filename': 'ep{epoch}-ba{batch}-rank{rank}.pt',
'save_latest_filename': 'latest-rank{rank}.pt',
'save_overwrite': False,
'save_interval': '1ep',
'save_weights_only': False,
'save_num_checkpoints_to_keep': -1,
# Graceful Resumption
'autoresume': False,
# DeepSpeed
'deepspeed_config': None,
'fsdp_config': None,
# System/Numerics
'device': 'DeviceCPU',
'precision': 'Precision',
'device_train_microbatch_size': 16,
# Reproducibility
'seed': 3,
'deterministic_mode': False,
# Distributed Training
'dist_timeout': 1800.0,
'ddp_sync_strategy': None,
# Profiling
'profiler': None,
# Python logging
'python_log_level': None,
'auto_microbatching': False,
'rank_zero_seed': 3,
'latest_remote_file_name': None,
'num_optimizers': 1,
'remote_ud_has_format_string': [False],
}
assert trainer.local_hparams == expected_hparams
| composer-dev | tests/utils/test_autolog_hparams.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
| composer-dev | tests/utils/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from collections import ChainMap, Counter, OrderedDict, defaultdict, deque
from typing import NamedTuple
import numpy as np
import pytest
import torch
from composer.utils.batch_helpers import batch_get, batch_set
my_list = [3, 4, 5, 6, 7, 8, 9, 10]
keys = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
class MyClass(object):
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
list_types = [type(element) for element in my_list]
my_named_tuple = NamedTuple('nt', **dict(zip(keys, list_types)))
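# Repeat each key my_list[i] times so that Counter(counter_list) produces the same
# key -> value mapping as dict(zip(keys, my_list)).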
counter_list = []
for char, num in zip(keys, my_list):
counter_list.extend(num * [char])
@pytest.fixture(scope='module', params=[
my_list,
tuple(my_list),
deque(my_list),
])
def example_sequence(request):
return request.param
@pytest.fixture(scope='module', params=[list, tuple])
def example_dequeless_sequence(request):
my_list = [3, 4, 5, 6, 7, 8, 9, 10]
return request.param(my_list)
# All key-value pair data structures that have a __getitem__ function that takes str.
@pytest.fixture(scope='module',
params=[
dict(zip(keys, my_list)),
defaultdict(list, **dict(zip(keys, my_list))),
ChainMap(dict(zip(keys, my_list)), dict(a=7, j=3)),
Counter(counter_list),
OrderedDict(**dict(zip(keys, my_list)))
])
def example_map(request):
return request.param
@pytest.fixture(scope='module', params=[MyClass(**dict(zip(keys, my_list))), my_named_tuple(*my_list)])
def example_attr_store(request):
return request.param
@pytest.fixture(scope='module', params=[
torch.tensor(my_list),
np.asarray(my_list),
])
def example_array_tensor(request):
return request.param
@pytest.fixture
def example_tensor():
return torch.tensor([3, 4, 5, 6, 7, 8, 9, 10])
@pytest.fixture
def example_array():
return np.asarray([3, 4, 5, 6, 7, 8, 9, 10])
@pytest.fixture
def example_2D_array():
return np.arange(12).reshape(4, 3)
@pytest.fixture
def example_2D_tensor():
return torch.arange(12).reshape(4, 3)
@pytest.fixture(scope='module', params=[np.arange(12).reshape(4, 3), torch.arange(12).reshape(4, 3)])
def example_2d(request):
return request.param
@pytest.fixture
def example_dict():
return [{'a': [1, 2], 'b': [2, 4]}, {'c': [3, 6], 'd': [5, 7]}]
@pytest.fixture
def dict_getter():
def my_get_callable(batch):
return batch[1]['d'][0]
return my_get_callable
@pytest.fixture
def dict_setter():
def my_set_callable(batch, value):
batch[1]['d'][0] = value
return batch
return my_set_callable
@pytest.fixture
def example_list():
return my_list
@pytest.fixture
def example_tuple():
return tuple(my_list)
# Test whether sequences can be indexed by an int.
def test_int_key(example_sequence, key=2, expected=5):
assert batch_get(example_sequence, key) == expected
# Test whether sequences can be indexed by an int.
def test_int_key_array_tensor(example_array_tensor, key=2, expected=5):
assert batch_get(example_array_tensor, key) == expected
# Test whether kv pair data structures can be indexed by a str.
def test_map_str_key(example_map, key='d', expected=6):
assert batch_get(example_map, key) == expected
# Test whether kv pair data structures can be indexed by a str.
def test_attr_store_str_key(example_attr_store, key='d', expected=6):
assert batch_get(example_attr_store, key) == expected
# Test whether sequences can be indexed by a sequence of ints.
def test_sequence_of_ints_key(example_sequence):
key = [2, 5, 7]
expected = [5, 8, 10]
assert list(batch_get(example_sequence, key)) == expected
# Test whether sequences can be indexed by a sequence of ints.
def test_sequence_of_ints_key_array_tensor(example_array_tensor):
key = [2, 5, 7]
expected = [5, 8, 10]
assert list(batch_get(example_array_tensor, key)) == expected
# Test whether kv pair data structures can be indexed by a sequence of strings.
def test_sequence_of_strs_key(example_map):
key = ['c', 'f']
expected = [5, 8]
assert list(batch_get(example_map, key)) == expected
# Test whether kv pair data structures can be indexed by a sequence of strings.
def test_sequence_of_strs_key_attr_store(example_attr_store):
key = ['c', 'f']
expected = [5, 8]
assert list(batch_get(example_attr_store, key)) == expected
# Test whether sequences can be indexed by a slice object.
def test_batch_get_seq_with_slice_key(example_dequeless_sequence):
key = slice(1, 6, 2)
expected = (4, 6, 8)
assert tuple(batch_get(example_dequeless_sequence, key)) == expected
# Test whether sequences can be indexed by a slice object.
def test_batch_get_array_tensor_slice_key(example_array_tensor):
key = slice(1, 6, 2)
expected = [4, 6, 8]
assert list(batch_get(example_array_tensor, key)) == expected
# Test whether arrays and tensors can be indexed by a sequence of int objects.
@pytest.mark.parametrize('key,expected', [([1, 4], [4, 7])])
def test_batch_get_seq_key_for_1D_tensors_and_arrays(example_array_tensor, key, expected):
assert batch_get(example_array_tensor, key).tolist() == expected
def test_batch_get_callable(example_dict, dict_getter):
assert batch_get(example_dict, dict_getter) == 5
def test_batch_get_pair_of_callables(example_dict, dict_getter, dict_setter):
assert batch_get(example_dict, (dict_getter, dict_setter)) == 5
assert batch_get(example_dict, [dict_getter, dict_setter]) == 5
def test_batch_get_with_setter_errors_out(example_dict, dict_setter):
with pytest.raises(TypeError):
batch_get(example_dict, (dict_setter, dict_setter))
with pytest.raises(TypeError):
batch_get(example_dict, dict_setter)
def test_batch_get_not_pair_of_callables(example_dict, dict_getter):
# >2 callables
with pytest.raises(ValueError):
batch_get(example_dict, (dict_getter, dict_getter, dict_getter))
# Singleton of callable
with pytest.raises(ValueError):
batch_get(example_dict, (dict_getter,))
# Test whether arrays and tensors can be indexed by a sequence of slice objects.
@pytest.mark.parametrize('batch,key,expected', [(torch.tensor(my_list), [slice(1, 4), slice(
5, 7)], [torch.tensor([4, 5, 6]), torch.tensor([8, 9])])])
def test_batch_get_seq_of_slices_key_for_1D_tensors_and_arrays(batch, key, expected):
for actual, expectation in zip(batch_get(batch, key), expected):
assert all(actual == expectation)
@pytest.mark.parametrize('key,expected', [((1, 2), 5)])
def test_batch_get_2D_array_tensor_2D_tuple_key(example_2d, key, expected):
actual = batch_get(example_2d, key)
assert int(actual) == expected
@pytest.mark.parametrize('key,expected', [([1, 2], [[3, 4, 5], [6, 7, 8]]),
(np.asarray([1, 2]), [[3, 4, 5], [6, 7, 8]]),
(torch.tensor([1, 2]), [[3, 4, 5], [6, 7, 8]])])
def test_batch_get_2D_array_tensor_2D_key(example_2d, key, expected):
actual = batch_get(example_2d, key)
assert actual.tolist() == expected
@pytest.mark.parametrize('key,expected', [([slice(2, 4), slice(1, 3)], [[7, 8], [10, 11]])])
def test_batch_get_2D_array_tensor_2D_slice_key(example_2D_tensor, key, expected):
actual = batch_get(example_2D_tensor, key)
assert actual.tolist() == expected
### SET
def test_batch_set_sequence_int_key(example_sequence, key=3, value=23):
new_batch = batch_set(example_sequence, key=key, value=value)
assert batch_get(new_batch, key) == value
def test_batch_set_array_tensor_int_key(example_array_tensor, key=3, value=23):
new_batch = batch_set(example_array_tensor, key=key, value=value)
assert batch_get(new_batch, key) == value
def test_batch_set_map_str_key(example_map, key='b', value=-10):
new_batch = batch_set(example_map, key=key, value=value)
assert batch_get(new_batch, key) == value
def test_batch_set_attr_store_str_key(example_attr_store, key='b', value=23):
new_batch = batch_set(example_attr_store, key=key, value=value)
assert batch_get(new_batch, key) == value
def test_batch_set_sequence_slice_key(example_dequeless_sequence):
key = slice(1, 6, 2)
value = [-1, -3, -5]
new_batch = batch_set(example_dequeless_sequence, key=key, value=value)
assert tuple(batch_get(new_batch, key)) == tuple(value)
def test_batch_set_tensor_slice_key(example_tensor):
key = slice(1, 6, 2)
value = torch.tensor([-1, -3, -5])
new_batch = batch_set(example_tensor, key=key, value=value)
assert torch.equal(batch_get(new_batch, key), value)
def test_batch_set_array_slice_key(example_array):
key = slice(1, 6, 2)
value = np.asarray([-1, -3, -5])
new_batch = batch_set(example_array, key=key, value=value)
assert np.array_equal(batch_get(new_batch, key), value)
@pytest.mark.parametrize('key,value', [([2, 5], (11, 13))])
def test_batch_set_seq_list_key(example_sequence, key, value):
new_batch = batch_set(example_sequence, key=key, value=value)
assert tuple(batch_get(new_batch, key)) == tuple(value)
@pytest.mark.parametrize('key,value', [(['d', 'e'], (100, 101))])
def test_batch_set_map_seq_key(example_map, key, value):
new_batch = batch_set(example_map, key=key, value=value)
assert batch_get(new_batch, key) == value
@pytest.mark.parametrize('key,value', [(['d', 'e'], (100, 101))])
def test_batch_set_attr_store_seq_key(example_attr_store, key, value):
new_batch = batch_set(example_attr_store, key=key, value=value)
assert batch_get(new_batch, key) == value
@pytest.mark.parametrize('key,value', [([2, 5], np.asarray([11, 13]))])
def test_batch_set_array_list_key(example_array, key, value):
new_batch = batch_set(example_array, key=key, value=value)
assert np.array_equal(batch_get(new_batch, key), value)
@pytest.mark.parametrize('key,value', [([2, 5], torch.tensor([11, 13]))])
def test_batch_set_tensor_list_key(example_tensor, key, value):
new_batch = batch_set(example_tensor, key=key, value=value)
assert torch.equal(batch_get(new_batch, key), value)
@pytest.mark.parametrize('key,value', [([slice(0, 3, 1), slice(4, 7, 1)], ([10, 11, 12], [13, 14, 15]))])
def test_batch_set_list_list_of_slices_key(example_list, key, value):
new_batch = batch_set(example_list, key=key, value=value)
assert batch_get(new_batch, key) == value
@pytest.mark.parametrize('key,value', [([slice(0, 3, 1), slice(4, 7, 1)], ((10, 11, 12), (13, 14, 15)))])
def test_batch_set_tuple_list_of_slices_key(example_tuple, key, value):
new_batch = batch_set(example_tuple, key=key, value=value)
assert batch_get(new_batch, key) == value
# Test whether tensors can be set using batch_set with a list of slices.
def test_batch_set_1D_tensor_list_of_slices_key(example_tensor):
key = [slice(0, 3, 1), slice(4, 7, 1)]
value = [torch.tensor([10, 11, 12]), torch.tensor([13, 14, 15])]
new_batch = batch_set(example_tensor, key=key, value=value)
for actual, expectation in zip(batch_get(new_batch, key), value):
assert torch.equal(actual, expectation)
# Test whether arrays can be set using batch_set with a list of slices.
def test_batch_set_1D_array_list_of_slices_key(example_array):
key = (slice(0, 3, 1), slice(4, 7, 1))
value = [np.asarray([10, 11, 12]), np.asarray([13, 14, 15])]
new_batch = batch_set(example_array, key=key, value=value)
for actual, expectation in zip(batch_get(new_batch, key), value):
assert np.all(actual == expectation)
@pytest.mark.parametrize('key,value', [((1, 2), 6)])
def test_batch_set_2D_array_and_tensor_2D_tuple_key(example_2d, key, value):
batch = batch_set(example_2d, key=key, value=value)
assert batch_get(batch, key) == value
@pytest.mark.parametrize('key,value', [([1, 2], torch.tensor([[3, 6, 9], [6, 12, 18]])),
(np.asarray([1, 2]), torch.tensor([[3, 6, 9], [6, 12, 18]])),
(torch.tensor([1, 2]), torch.tensor([[3, 6, 9], [6, 12, 18]]))])
def test_batch_set_2D_tensor_2D_seq_key(example_2D_tensor, key, value):
new_batch = batch_set(example_2D_tensor, key=key, value=value)
assert torch.equal(batch_get(new_batch, key), value)
def test_batch_set_2D_tensor_list_of_slices(example_2D_tensor):
key = [slice(2, 4), slice(1, 3)]
value = torch.tensor([[7, 14], [10, 20]])
new_batch = batch_set(example_2D_tensor, key=key, value=value)
assert torch.equal(batch_get(new_batch, key), value)
@pytest.mark.parametrize('key,value', [([1, 2], np.asarray([[3, 6, 9], [6, 12, 18]])),
(np.asarray([1, 2]), np.asarray([[3, 6, 9], [6, 12, 18]])),
(torch.tensor([1, 2]), np.asarray([[3, 6, 9], [6, 12, 18]]))])
def test_batch_set_2D_array_2D_seq_key(example_2D_array, key, value):
new_batch = batch_set(example_2D_array, key=key, value=value)
assert np.all(np.equal(batch_get(new_batch, key), value))
def test_batch_set_2D_array_list_of_slices(example_2D_array):
key = (slice(2, 4), slice(1, 3))
value = np.asarray([[7, 14], [10, 20]])
new_batch = batch_set(example_2D_array, key=key, value=value)
assert np.all(np.equal(batch_get(new_batch, key), value))
def test_batch_set_callable(example_dict, dict_setter, dict_getter):
new_batch = batch_set(example_dict, key=dict_setter, value=11)
assert batch_get(new_batch, dict_getter) == 11
def test_batch_set_pair_of_callables(example_dict, dict_getter, dict_setter):
new_batch = batch_set(example_dict, key=(dict_getter, dict_setter), value=11)
assert batch_get(new_batch, dict_getter) == 11
def test_batch_set_with_getter_errors_out(example_dict, dict_getter):
with pytest.raises(TypeError):
batch_set(example_dict, key=(dict_getter, dict_getter), value=11)
with pytest.raises(TypeError):
batch_set(example_dict, dict_getter, value=11)
def test_batch_set_not_pair_of_callables(example_dict, dict_setter):
# >2 callables
with pytest.raises(ValueError):
batch_set(example_dict, key=(dict_setter, dict_setter, dict_setter), value=11)
# Singleton of callable
with pytest.raises(ValueError):
batch_set(example_dict, (dict_setter,), value=11)
def test_set_with_mismatched_key_values(example_list):
with pytest.raises(ValueError):
batch_set(example_list, key=[1, 3, 5], value=[1, 2])
with pytest.raises(ValueError):
batch_set(example_list, key=[1, 3, 5], value=1)
# It's almost impossible to stop Counter and defaultdict from adding
# new items, so we don't include them here.
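# (Looking up a missing key on either returns a default instead of raising, e.g.
# Counter()['x'] == 0, so a missing-key check based on lookup cannot fail for them.)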
@pytest.mark.parametrize('batch', [
dict(zip(keys, my_list)),
MyClass(**dict(zip(keys, my_list))),
my_named_tuple(*my_list),
ChainMap(dict(zip(keys, my_list)), dict(a=7, j=3)),
OrderedDict(**dict(zip(keys, my_list)))
])
def test_batch_set_with_new_key_fails(batch):
with pytest.raises(Exception):
batch_set(batch, key='key_that_is_certainly_not_present', value=5)
| composer-dev | tests/utils/test_batch_helpers.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
# disabling unused class checks in this test, as string enum checks happen during class construction
# pyright: reportUnusedClass=none
import pytest
from composer.utils.string_enum import StringEnum
def test_string_enum_invalid_name():
with pytest.raises(ValueError):
# names must be uppercase
class TestStringEnum(StringEnum):
names_must_be_uppercase = 'names_must_be_uppercase'
def test_string_enum_invalid_value():
with pytest.raises(ValueError):
class TestStringEnum(StringEnum):
VALUES_MUST_BE_LOWERCASE = 'VALUES_MUST_BE_LOWERCASE'
def test_string_enum_comparison():
class TestStringEnum(StringEnum):
HELLO_WORLD = 'hello_world'
with pytest.warns(UserWarning):
assert TestStringEnum.HELLO_WORLD == 'hello_world'
with pytest.warns(UserWarning):
assert TestStringEnum.HELLO_WORLD == 'HeLlO_WoRlD'
def test_missing():
class TestStringEnum(StringEnum):
HELLO_WORLD = 'hello_world'
real_val = TestStringEnum.HELLO_WORLD
assert real_val == TestStringEnum(real_val)
assert real_val == TestStringEnum('HeLlO_WoRlD')
with pytest.raises(ValueError):
TestStringEnum('unknown_name')
with pytest.raises(TypeError):
TestStringEnum(object())
| composer-dev | tests/utils/test_string_enum.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import io
import numpy as np
import pytest
import torch
from composer.utils import IteratorFileStream, ensure_tuple
def test_none_to_tuple():
assert ensure_tuple(None) == ()
@pytest.mark.parametrize('x', ['test', b'test', bytearray(b'test')])
def test_str_to_tuple(x):
assert ensure_tuple(x) == (x,)
@pytest.mark.parametrize('x', [(0, 1, 2), [0, 1, 2], range(3)])
def test_seq_to_tuple(x):
assert ensure_tuple(x) == (0, 1, 2)
@pytest.mark.parametrize('x', [{'t': 1, 'e': 2, 's': 3}])
def test_dict_to_tuple(x):
assert ensure_tuple(x) == (1, 2, 3)
@pytest.mark.parametrize('x', [torch.arange(3), np.arange(3)])
def test_obj_to_tuple(x):
assert ensure_tuple(x) == (x,)
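# IteratorFileStream adapts an iterator of byte chunks into a readable raw stream, so
# reading it through a BufferedReader should reproduce the concatenated bytes.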
def test_iter_to_stream():
x = [b'1234', b'56789', b'abcd']
iter1 = iter(x)
iter2 = iter(x)
assert b''.join(iter1) == io.BufferedReader(
IteratorFileStream(iter2),
buffer_size=io.DEFAULT_BUFFER_SIZE,
).read()
| composer-dev | tests/utils/test_iter_helpers.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from unittest.mock import patch
import pytest
from composer.utils import dist
@pytest.mark.world_size(2)
def test_run_local_rank_first_context_raises_error():
# This mocking is necessary because there is a fixture called configure_dist that
# initializes dist for ALL tests, so we need to pretend that dist is not initialized
with patch('composer.utils.dist.dist.is_initialized') as mock_dist_is_initialized:
mock_dist_is_initialized.return_value = False
with pytest.raises(RuntimeError) as e:
with dist.run_local_rank_zero_first():
pass
# Verify error raised is intended
assert 'the distributed package is not available or has not been initialized' in str(e)
@pytest.mark.world_size(2)
def test_run_local_rank_first_context_runs_properly():
# There is a fixture called configure_dist that initializes dist for ALL tests,
# so dist is initialized here and this code should run without error
with dist.run_local_rank_zero_first():
pass
| composer-dev | tests/utils/test_dist.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import operator
import pytest
import torch
from torch import nn
from torch.fx import symbolic_trace
from torch.fx.graph_module import GraphModule
from torchvision import models
from composer.utils.fx_utils import apply_stochastic_residual, count_op_instances, fuse_parallel_linears, replace_op
class MyTestModel(nn.Module):
def __init__(self):
super().__init__()
self.relu = nn.ReLU()
self.factor = 0.5
def forward(self, x):
x = torch.add(x, self.factor)
return self.relu(x)
class AddModel(nn.Module):
def forward(self, x, y):
return x + y, torch.add(x, y), x.add(y)
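# Under torch.fx symbolic tracing, `x + y` traces to a call_function node for operator.add,
# `torch.add(x, y)` to a call_function node for torch.add, and `x.add(y)` to a call_method
# node whose target is the string 'add'; the parametrization below counts each form.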
@pytest.mark.parametrize(
'model_cls, ops, count',
[
(MyTestModel, nn.ReLU, 1),
(AddModel, operator.add, 1),
(AddModel, [operator.add, torch.add], 2),
(AddModel, [operator.add, torch.add, 'add'], 3),
],
)
def test_count_op_instances(model_cls, ops, count):
model = model_cls()
traced = symbolic_trace(model)
assert isinstance(traced, GraphModule)
assert count_op_instances(traced, ops) == count
@pytest.mark.parametrize(
'model_cls, src_ops, tgt_op, count',
[
(MyTestModel, torch.add, torch.mul, 1),
],
)
def test_replace_op(model_cls, src_ops, tgt_op, count):
model = model_cls()
traced = symbolic_trace(model)
assert isinstance(traced, GraphModule)
replace_op(traced, src_ops, tgt_op)
assert count_op_instances(traced, tgt_op) == count
class SimpleParallelLinears(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(64, 64)
self.fc2 = nn.Linear(64, 64)
def forward(self, x):
y = self.fc1(x)
z = self.fc2(x)
return y + z
class ParallelLinears(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(64, 64)
self.ln = nn.LayerNorm(64)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(64, 64)
def forward(self, x):
y = self.fc1(x)
y = self.ln(y)
y = self.relu(y)
z = self.fc2(x)
return y + z
class NotFusibleLinears(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(64, 64, bias=False)
self.ln = nn.LayerNorm(64)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(64, 64)
def forward(self, x):
y = self.fc1(x)
y = self.ln(y)
y = self.relu(y)
z = self.fc2(x)
return y + z
class NotParallelLinears(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(64, 64)
self.ln = nn.LayerNorm(64)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(64, 64)
def forward(self, x):
y = self.fc1(x)
y = self.ln(y)
y = self.relu(y)
z = self.fc2(y)
return x + z
# Incorrect warning fixed in https://github.com/pytorch/pytorch/pull/61463
@pytest.mark.parametrize(
'model_cls, before_count, after_count',
[
(SimpleParallelLinears, 2, 1),
(ParallelLinears, 2, 1),
(NotParallelLinears, 2, 2),
(NotFusibleLinears, 2, 2),
],
)
@pytest.mark.filterwarnings(
r'ignore:Attempted to insert a call_module Node with no underlying reference in the owning GraphModule!.*:UserWarning'
)
def test_fuse_parallel_linears(model_cls, before_count, after_count):
model = model_cls()
traced = symbolic_trace(model)
assert isinstance(traced, GraphModule)
assert count_op_instances(traced, nn.Linear) == before_count
fuse_parallel_linears(traced)
assert count_op_instances(traced, nn.Linear) == after_count
@pytest.mark.parametrize(
'model_cls, block_count',
[(models.resnet18, 8)],
)
@pytest.mark.filterwarnings(
r'ignore:Attempted to insert a call_module Node with no underlying reference in the owning GraphModule!.*:UserWarning'
)
def test_stochastic_depth(model_cls, block_count):
model = model_cls()
traced = symbolic_trace(model)
assert isinstance(traced, GraphModule)
inp = torch.randn(1, 3, 224, 224)
traced_st_depth_no_drop, residual_count = apply_stochastic_residual(traced, 0.0)
out_traced = traced(inp)
out_traced_st_depth_no_drop = traced_st_depth_no_drop(inp)
assert torch.allclose(out_traced,
out_traced_st_depth_no_drop), 'mismatch in outputs with 0 drop rate for stochastic modules'
assert residual_count == block_count
| composer-dev | tests/utils/test_fx_utils.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import contextlib
import copy
import pathlib
from typing import Any, Dict, Tuple
from urllib.parse import urlparse
import pytest
from composer.utils.object_store import LibcloudObjectStore, ObjectStore, S3ObjectStore, SFTPObjectStore
from composer.utils.object_store.sftp_object_store import SFTPObjectStore
from tests.utils.object_store.object_store_settings import get_object_store_ctx, object_stores
@pytest.fixture
def bucket_uri_and_kwargs(request, s3_bucket: str, sftp_uri: str, test_session_name: str):
remote = request.node.get_closest_marker('remote') is not None
if request.param is LibcloudObjectStore:
if remote:
pytest.skip('Libcloud object store has no remote tests')
else:
bucket_uri = 'libcloud://.'
kwargs = {
'provider': 'local',
'container': '.',
'key_environ': 'OBJECT_STORE',
'provider_kwargs': {
'key': '.',
},
}
elif request.param is S3ObjectStore:
if remote:
bucket_uri = f's3://{s3_bucket}'
kwargs = {'bucket': s3_bucket, 'prefix': test_session_name}
else:
bucket_uri = 's3://my-bucket'
kwargs = {'bucket': 'my-bucket', 'prefix': 'folder/subfolder'}
elif request.param is SFTPObjectStore:
if remote:
bucket_uri = f"sftp://{sftp_uri.rstrip('/') + '/' + test_session_name}"
kwargs = {
'host': sftp_uri.rstrip('/') + '/' + test_session_name,
'missing_host_key_policy': 'WarningPolicy',
}
else:
bucket_uri = 'sftp://localhost:23'
kwargs = {
'host': 'localhost',
'port': 23,
'username': 'test_user',
}
else:
raise ValueError(f'Invalid object store type: {request.param.__name__}')
return bucket_uri, kwargs
class MockCallback:
def __init__(self, total_num_bytes: int) -> None:
self.total_num_bytes = total_num_bytes
self.transferred_bytes = 0
self.num_calls = 0
def __call__(self, transferred: int, total: int):
self.num_calls += 1
assert transferred == 0 or transferred >= self.transferred_bytes, 'transferred should be monotonically increasing'
self.transferred_bytes = transferred
assert total == self.total_num_bytes
def assert_all_data_transferred(self):
assert self.total_num_bytes == self.transferred_bytes
@pytest.mark.parametrize('bucket_uri_and_kwargs', object_stores, indirect=True)
@pytest.mark.parametrize('remote', [False, pytest.param(True, marks=pytest.mark.remote)])
class TestObjectStore:
@pytest.fixture
def object_store(
self,
bucket_uri_and_kwargs: Tuple[str, Dict[str, Any]],
monkeypatch: pytest.MonkeyPatch,
tmp_path: pathlib.Path,
remote: bool,
):
remote_backend_name_to_class = {'s3': S3ObjectStore, 'sftp': SFTPObjectStore, 'libcloud': LibcloudObjectStore}
bucket_uri, kwargs = bucket_uri_and_kwargs
remote_backend_name = urlparse(bucket_uri).scheme
with get_object_store_ctx(remote_backend_name_to_class[remote_backend_name],
kwargs,
monkeypatch,
tmp_path,
remote=remote):
copied_config = copy.deepcopy(kwargs)
# type error: Type[ObjectStore] is not callable
object_store = remote_backend_name_to_class[remote_backend_name](**copied_config) # type: ignore
with object_store:
yield object_store
@pytest.fixture
def dummy_obj(self, tmp_path: pathlib.Path):
tmpfile_path = tmp_path / 'file_to_upload'
with open(tmpfile_path, 'w+') as f:
f.write('dummy content')
return tmpfile_path
def test_upload(self, object_store: ObjectStore, dummy_obj: pathlib.Path, remote: bool):
del remote # unused
object_name = 'tmpfile_object_name'
cb = MockCallback(dummy_obj.stat().st_size)
object_store.upload_object(object_name, str(dummy_obj), callback=cb)
cb.assert_all_data_transferred()
def test_get_uri(self, object_store: ObjectStore, remote: bool):
if remote:
pytest.skip('This test_get_uri does not make any remote calls.')
uri = object_store.get_uri('tmpfile_object_name')
if isinstance(object_store, S3ObjectStore):
assert uri == 's3://my-bucket/folder/subfolder/tmpfile_object_name'
elif isinstance(object_store, LibcloudObjectStore):
assert uri == 'local://./tmpfile_object_name'
elif isinstance(object_store, SFTPObjectStore):
assert uri == 'sftp://test_user@localhost:23/tmpfile_object_name'
else:
raise NotImplementedError(f'Object store {type(object_store)} not implemented.')
def test_get_file_size(self, object_store: ObjectStore, dummy_obj: pathlib.Path, remote: bool):
del remote # unused
object_name = 'tmpfile_object_name'
object_store.upload_object(object_name, str(dummy_obj))
assert object_store.get_object_size(object_name) == dummy_obj.stat().st_size
def test_get_file_size_not_found(self, object_store: ObjectStore, remote: bool):
del remote # unused
with pytest.raises(FileNotFoundError):
object_store.get_object_size('not found object')
@pytest.mark.parametrize('overwrite', [True, False])
def test_download(
self,
object_store: ObjectStore,
dummy_obj: pathlib.Path,
tmp_path: pathlib.Path,
overwrite: bool,
remote: bool,
):
del remote # unused
object_name = 'tmpfile_object_name'
object_store.upload_object(object_name, str(dummy_obj))
filepath = str(tmp_path / 'destination_path')
cb = MockCallback(dummy_obj.stat().st_size)
object_store.download_object(object_name, filepath, callback=cb)
ctx = contextlib.nullcontext() if overwrite else pytest.raises(FileExistsError)
with ctx:
object_store.download_object(object_name, filepath, callback=cb, overwrite=overwrite)
cb.assert_all_data_transferred()
def test_download_not_found(self, object_store: ObjectStore, remote: bool):
with pytest.raises(FileNotFoundError):
object_store.download_object('not_found_object', filename='not used')
@pytest.mark.filterwarnings(r'ignore:setDaemon\(\) is deprecated:DeprecationWarning')
def test_filenames_as_environs(monkeypatch: pytest.MonkeyPatch, tmp_path: pathlib.Path):
key_filepath = tmp_path / 'keyfile'
key_filepath.touch()
monkeypatch.setenv('COMPOSER_SFTP_KEY_FILE', str(key_filepath))
hosts_file = tmp_path / 'host_file'
hosts_file.touch()
monkeypatch.setenv('COMPOSER_SFTP_KNOWN_HOSTS_FILE', str(hosts_file))
kwargs = {
'host': 'host',
'username': 'tester',
}
with get_object_store_ctx(SFTPObjectStore, kwargs, monkeypatch, tmp_path):
object_store = SFTPObjectStore(**kwargs)
assert object_store.key_filename == str(key_filepath)
assert object_store.known_hosts_filename == str(hosts_file)
| composer-dev | tests/utils/object_store/test_object_store.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
| composer-dev | tests/utils/object_store/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import contextlib
import os
import pathlib
from typing import Any, Dict, Type
import mockssh
import moto
import pytest
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
import composer.utils.object_store
import composer.utils.object_store.sftp_object_store
from composer.utils.object_store import LibcloudObjectStore, ObjectStore, OCIObjectStore, S3ObjectStore, SFTPObjectStore
from composer.utils.object_store.sftp_object_store import SFTPObjectStore
from tests.common import get_module_subclasses
try:
import libcloud
_LIBCLOUD_AVAILABLE = True
del libcloud
except ImportError:
_LIBCLOUD_AVAILABLE = False
try:
import boto3
_BOTO3_AVAILABLE = True
del boto3
except ImportError:
_BOTO3_AVAILABLE = False
try:
import paramiko
_SFTP_AVAILABLE = True
del paramiko
except ImportError:
_SFTP_AVAILABLE = False
_object_store_marks = {
LibcloudObjectStore: [pytest.mark.skipif(not _LIBCLOUD_AVAILABLE, reason='Missing dependency')],
S3ObjectStore: [
pytest.mark.skipif(not _BOTO3_AVAILABLE, reason='Missing dependency'),
pytest.mark.filterwarnings(r'ignore::ResourceWarning'),
],
SFTPObjectStore: [
pytest.mark.skipif(not _SFTP_AVAILABLE, reason='Missing dependency'),
pytest.mark.filterwarnings(r'ignore:setDaemon\(\) is deprecated:DeprecationWarning'),
pytest.mark.filterwarnings(r'ignore:Unknown .* host key:UserWarning')
],
}
object_stores = [
pytest.param(x, marks=_object_store_marks[x], id=x.__name__)
for x in get_module_subclasses(composer.utils.object_store, ObjectStore)
# Note: OCI has its own test suite, so it is exempt from being included in this one.
if not issubclass(x, OCIObjectStore)
]
@contextlib.contextmanager
def get_object_store_ctx(object_store_cls: Type[ObjectStore],
object_store_kwargs: Dict[str, Any],
monkeypatch: pytest.MonkeyPatch,
tmp_path: pathlib.Path,
remote: bool = False):
if object_store_cls is S3ObjectStore:
pytest.importorskip('boto3')
import boto3
if remote:
yield
else:
monkeypatch.setenv('AWS_ACCESS_KEY_ID', 'testing')
monkeypatch.setenv('AWS_SECRET_ACCESS_KEY', 'testing')
monkeypatch.setenv('AWS_SECURITY_TOKEN', 'testing')
monkeypatch.setenv('AWS_SESSION_TOKEN', 'testing')
monkeypatch.setenv('AWS_DEFAULT_REGION', 'us-east-1')
with moto.mock_s3():
# create the dummy bucket
s3 = boto3.client('s3')
s3.create_bucket(Bucket=object_store_kwargs['bucket'])
yield
elif object_store_cls is LibcloudObjectStore:
pytest.importorskip('libcloud')
if remote:
pytest.skip('Libcloud object store has no remote tests.')
monkeypatch.setenv(object_store_kwargs['key_environ'], '.')
remote_dir = tmp_path / 'remote_dir'
os.makedirs(remote_dir)
if 'provider_kwargs' not in object_store_kwargs:
object_store_kwargs['provider_kwargs'] = {}
object_store_kwargs['provider_kwargs']['key'] = remote_dir
yield
elif object_store_cls is SFTPObjectStore:
pytest.importorskip('paramiko')
if remote:
pytest.skip('SFTP object store has no remote tests.')
else:
private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
pem = private_key.private_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption())
private_key_path = tmp_path / 'test_rsa_key'
username = object_store_kwargs['username']
with open(private_key_path, 'wb') as private_key_file:
private_key_file.write(pem)
with mockssh.Server(users={
username: str(private_key_path),
}) as server:
client = server.client(username)
monkeypatch.setattr(client, 'connect', lambda *args, **kwargs: None)
monkeypatch.setattr(composer.utils.object_store.sftp_object_store, 'SSHClient', lambda: client)
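# route the SFTP store's SSHClient to the mock server's pre-connected client (with connect() stubbed out),
# so SFTPObjectStore talks to the in-process mockssh server instead of a real host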
yield
else:
raise NotImplementedError('Parameterization not implemented')
| composer-dev | tests/utils/object_store/object_store_settings.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from pathlib import Path
from unittest.mock import MagicMock, Mock
import pytest
from composer.utils import OCIObjectStore
@pytest.fixture
def mock_bucket_name():
return 'my_bucket'
@pytest.fixture
def test_oci_obj_store(mock_bucket_name, monkeypatch):
oci = pytest.importorskip('oci')
# Mock all the oci functions.
mock_config = MagicMock()
mock_from_file = MagicMock(return_value=mock_config)
monkeypatch.setattr(oci.config, 'from_file', mock_from_file)
mock_object_storage_client = MagicMock()
monkeypatch.setattr(oci.object_storage, 'ObjectStorageClient', mock_object_storage_client)
mock_upload_manager = MagicMock()
monkeypatch.setattr(oci.object_storage, 'UploadManager', mock_upload_manager)
# Create OCIObjectStore object.
oci_os = OCIObjectStore(mock_bucket_name)
mock_namespace = 'my_namespace'
oci_os.namespace = mock_namespace
return oci_os
@pytest.mark.parametrize('result', ['success', 'bucket_not_found'])
def test_upload_object(test_oci_obj_store, monkeypatch, tmp_path, mock_bucket_name, result: str):
oci = pytest.importorskip('oci')
oci_os = test_oci_obj_store
mock_object_name = 'my_object'
file_to_upload = str(tmp_path / Path('my_upload.bin'))
with open(file_to_upload, 'wb') as f:
f.write(bytes(range(20)))
if result == 'success':
with monkeypatch.context() as m:
mock_upload_file = MagicMock()
m.setattr(oci_os.upload_manager, 'upload_file', mock_upload_file)
oci_os.upload_object(object_name=mock_object_name, filename=file_to_upload)
mock_upload_file.assert_called_once_with(namespace_name=oci_os.namespace,
bucket_name=mock_bucket_name,
object_name=mock_object_name,
file_path=file_to_upload)
else:  # result == 'bucket_not_found'
bucket_not_found_msg = f'Either the bucket named f{mock_bucket_name} does not exist in the namespace*'
mock_upload_file_with_exception = Mock(side_effect=oci.exceptions.ServiceError(
status=404, code='BucketNotFound', headers={'opc-request-id': 'foo'}, message=bucket_not_found_msg))
with monkeypatch.context() as m:
m.setattr(oci_os.upload_manager, 'upload_file', mock_upload_file_with_exception)
with pytest.raises(
ValueError,
match=
f'Bucket specified in oci://{mock_bucket_name}/{mock_object_name} not found. {bucket_not_found_msg}'
):
oci_os.upload_object(mock_object_name, filename=file_to_upload)
@pytest.mark.parametrize('result', ['success', 'file_exists', 'obj_not_found', 'bucket_not_found'])
def test_download_object(test_oci_obj_store, monkeypatch, tmp_path, mock_bucket_name, result: str):
oci = pytest.importorskip('oci')
oci_os = test_oci_obj_store
mock_object_name = 'my_object'
if result == 'success':
mock_response_object = MagicMock()
file_content = bytes(range(4))
mock_response_object.data.content = file_content
mock_get_object = MagicMock(return_value=mock_response_object)
monkeypatch.setattr(oci_os.client, 'get_object', mock_get_object)
file_to_download_to = str(tmp_path / Path('my_download.bin'))
oci_os.download_object(object_name=mock_object_name, filename=file_to_download_to)
mock_get_object.assert_called_once_with(namespace_name=oci_os.namespace,
bucket_name=mock_bucket_name,
object_name=mock_object_name)
with open(file_to_download_to, 'rb') as f:
actual_content = f.readline()
assert actual_content == file_content
elif result == 'file_exists':
file = tmp_path / Path('file_exists.bin')
file.touch()
filename = str(file)
with pytest.raises(FileExistsError,
match=f'The file at {filename} already exists and overwrite is set to False'):
oci_os.download_object(mock_object_name, filename=filename)
elif result == 'obj_not_found':
file_to_download_to = str(tmp_path / Path('my_obj_not_found_file.bin'))
obj_not_found_msg = f"The object '{mock_object_name}' was not found in the bucket f'{mock_bucket_name}'"
mock_get_object_fn_with_exception = Mock(side_effect=oci.exceptions.ServiceError(
status=404, code='ObjectNotFound', headers={'opc-request-id': 'foo'}, message=obj_not_found_msg))
with monkeypatch.context() as m:
m.setattr(oci_os.client, 'get_object', mock_get_object_fn_with_exception)
with pytest.raises(
FileNotFoundError,
match=f'Object oci://{mock_bucket_name}/{mock_object_name} not found. {obj_not_found_msg}'):
oci_os.download_object(mock_object_name, filename=file_to_download_to)
else:  # result == 'bucket_not_found'
file_to_download_to = str(tmp_path / Path('my_bucket_not_found_file.bin'))
bucket_not_found_msg = f'Either the bucket named f{mock_bucket_name} does not exist in the namespace*'
mock_get_object_fn_with_exception = Mock(side_effect=oci.exceptions.ServiceError(
status=404, code='BucketNotFound', headers={'opc-request-id': 'foo'}, message=bucket_not_found_msg))
with monkeypatch.context() as m:
m.setattr(oci_os.client, 'get_object', mock_get_object_fn_with_exception)
with pytest.raises(
ValueError,
match=
f'Bucket specified in oci://{mock_bucket_name}/{mock_object_name} not found. {bucket_not_found_msg}'
):
oci_os.download_object(mock_object_name, filename=file_to_download_to)
@pytest.mark.parametrize('result', ['success', 'obj_not_found', 'bucket_not_found'])
def test_get_object_size(test_oci_obj_store, mock_bucket_name, monkeypatch, result: str):
oci = pytest.importorskip('oci')
oci_os = test_oci_obj_store
mock_object_name = 'my_object'
mock_object_size = 11
if result == 'success':
mock_response = MagicMock()
mock_response.status = 200
mock_response.data.headers = {'Content-Length': mock_object_size}
mock_get_object_fn = MagicMock(return_value=mock_response)
with monkeypatch.context() as m:
m.setattr(oci_os.client, 'get_object', mock_get_object_fn)
assert oci_os.get_object_size(mock_object_name) == mock_object_size
elif result == 'obj_not_found':
obj_not_found_msg = f"The object '{mock_object_name}' was not found in the bucket f'{mock_bucket_name}'"
mock_get_object_fn_with_exception = Mock(side_effect=oci.exceptions.ServiceError(
status=404, code='ObjectNotFound', headers={'opc-request-id': 'foo'}, message=obj_not_found_msg))
with monkeypatch.context() as m:
m.setattr(oci_os.client, 'get_object', mock_get_object_fn_with_exception)
with pytest.raises(
FileNotFoundError,
match=f'Object oci://{mock_bucket_name}/{mock_object_name} not found. {obj_not_found_msg}'):
oci_os.get_object_size(mock_object_name)
else:  # result == 'bucket_not_found'
bucket_not_found_msg = f'Either the bucket named f{mock_bucket_name} does not exist in the namespace*'
mock_get_object_fn_with_exception = Mock(side_effect=oci.exceptions.ServiceError(
status=404, code='BucketNotFound', headers={'opc-request-id': 'foo'}, message=bucket_not_found_msg))
with monkeypatch.context() as m:
m.setattr(oci_os.client, 'get_object', mock_get_object_fn_with_exception)
with pytest.raises(
ValueError,
match=
f'Bucket specified in oci://{mock_bucket_name}/{mock_object_name} not found. {bucket_not_found_msg}'
):
oci_os.get_object_size(mock_object_name)
| composer-dev | tests/utils/object_store/test_oci_object_store.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import os
import pathlib
import pytest
from composer.utils.object_store import LibcloudObjectStore
@pytest.fixture
def remote_dir(tmp_path: pathlib.Path):
remote_dir = tmp_path / 'remote_dir'
os.makedirs(remote_dir)
return remote_dir
@pytest.fixture
def local_dir(tmp_path: pathlib.Path):
local_dir = tmp_path / 'local_dir'
os.makedirs(local_dir)
return local_dir
def _get_provider(remote_dir: pathlib.Path, chunk_size: int = 1024 * 1024):
return LibcloudObjectStore(
provider='local',
container='.',
provider_kwargs={
'key': str(remote_dir),
},
chunk_size=chunk_size,
)
@pytest.mark.parametrize('chunk_size', [100, 128])
def test_libcloud_object_store_callback(remote_dir: pathlib.Path, local_dir: pathlib.Path, chunk_size: int):
pytest.importorskip('libcloud')
provider = _get_provider(remote_dir, chunk_size=chunk_size)
local_file_path = os.path.join(local_dir, 'dummy_file')
total_len = 1024
with open(local_file_path, 'w+') as f:
f.write('H' * total_len)
num_calls = 0
total_bytes_written = 0
def cb(bytes_written, total_bytes):
nonlocal num_calls, total_bytes_written
assert total_bytes == total_len
num_calls += 1
total_bytes_written = bytes_written
provider.upload_object('upload_object', local_file_path, callback=cb)
# the expected number of callback invocations is the ceiling of total_len / chunk_size, plus one final call
expected_num_calls = (total_len - 1) // chunk_size + 1 + 1
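# e.g. with total_len=1024 and chunk_size=100 this is 11 chunk callbacks plus 1 final callback = 12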
assert num_calls == expected_num_calls
assert total_bytes_written == total_len
num_calls = 0
total_bytes_written = 0
local_file_path_download = os.path.join(local_dir, 'dummy_file_downloaded')
provider.download_object('upload_object', local_file_path_download, callback=cb)
assert total_bytes_written == total_len
assert num_calls == expected_num_calls
| composer-dev | tests/utils/object_store/test_libcloud_object_store.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import os
import pathlib
import threading
import pytest
from composer.utils.object_store import S3ObjectStore
def _worker(bucket: str, tmp_path: pathlib.Path, tid: int):
object_store = S3ObjectStore(bucket=bucket)
os.makedirs(tmp_path / str(tid))
with pytest.raises(FileNotFoundError):
object_store.download_object('this_key_should_not_exist', filename=tmp_path / str(tid) / 'dummy_file')
# This test requires properly configured AWS credentials; otherwise the S3 client would hit a NoCredentialsError
# when constructing the Session, which occurs before the bug this test checks
@pytest.mark.remote
def test_s3_object_store_multi_threads(tmp_path: pathlib.Path, s3_bucket: str):
"""Test to verify that we do not hit https://github.com/boto/boto3/issues/1592."""
pytest.importorskip('boto3')
threads = []
# Manually tried fewer threads; it seems that 100 is needed to reliably reproduce the bug
for i in range(100):
t = threading.Thread(target=_worker, kwargs={'bucket': s3_bucket, 'tid': i, 'tmp_path': tmp_path})
t.start()
threads.append(t)
for t in threads:
t.join()
| composer-dev | tests/utils/object_store/test_s3_object_store.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import copy
import json
import os
import tempfile
from contextlib import nullcontext
from pathlib import Path
from typing import Any, Dict, List, Optional
from unittest.mock import patch
from urllib.parse import urlparse
import pytest
import torch
from packaging import version
from torch.utils.data import DataLoader
from torchmetrics import Metric
from torchmetrics.classification import MulticlassAccuracy
from composer.metrics import InContextLearningLMAccuracy, LanguageCrossEntropy, MaskedAccuracy
from composer.models import HuggingFaceModel
from composer.trainer import Trainer
from composer.utils import dist, is_model_fsdp
from tests.common.datasets import RandomTextClassificationDataset, RandomTextLMDataset
from tests.common.markers import device, world_size
from tests.common.models import (configure_tiny_bert_model, configure_tiny_bert_tokenizer, configure_tiny_gpt2_model,
configure_tiny_gpt2_tokenizer, configure_tiny_t5_model, configure_tiny_t5_tokenizer)
from tests.loggers.test_remote_uploader_downloader import DummyObjectStore
@pytest.mark.parametrize('num_classes', [2, 3])
def test_hf_train_eval_predict(num_classes: int, tiny_bert_config):
transformers = pytest.importorskip('transformers')
tiny_bert_config.num_labels = num_classes
hf_model = transformers.AutoModelForSequenceClassification.from_config(
tiny_bert_config) # type: ignore (thirdparty)
metrics = MulticlassAccuracy(num_classes=num_classes, average='micro')
model = HuggingFaceModel(hf_model, metrics=[metrics], use_logits=True)
vocab_size = 30522 # Match bert vocab size
sequence_length = 4
num_classes = num_classes
size = 16
batch_size = 8
train_dataset = RandomTextClassificationDataset(size=size,
vocab_size=vocab_size,
sequence_length=sequence_length,
num_classes=num_classes,
use_keys=True)
eval_dataset = RandomTextClassificationDataset(size=size,
vocab_size=vocab_size,
sequence_length=sequence_length,
num_classes=num_classes,
use_keys=True)
predict_dataset = RandomTextClassificationDataset(size=size,
vocab_size=vocab_size,
sequence_length=sequence_length,
num_classes=num_classes,
use_keys=True)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, sampler=dist.get_sampler(train_dataset))
eval_dataloader = DataLoader(eval_dataset, batch_size=batch_size, sampler=dist.get_sampler(eval_dataset))
predict_dataloader = DataLoader(predict_dataset, batch_size=batch_size)
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
max_duration='1ep',
eval_dataloader=eval_dataloader,
)
trainer.fit()
trainer.eval()
# Check that there is some train/eval accuracy
assert trainer.state.train_metrics['MulticlassAccuracy'].compute() != 0.0
assert trainer.state.eval_metrics['eval']['MulticlassAccuracy'].compute() != 0.0
predictions = trainer.predict(predict_dataloader)
# Check that the output predictions are the expected shape
num_predict_batches_expected = ((size - 1) // batch_size) + 1
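# e.g. size=16 with batch_size=8 yields 2 predict batches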
assert len(predictions) == num_predict_batches_expected
assert predictions[0]['logits'].shape == (batch_size, num_classes)
def check_hf_tokenizer_equivalence(tokenizer1, tokenizer2):
"""This is a best effort attempt to compare two tokenizers for equivalence
This is not a perfect test, but it should catch most issues. We first check that the vocab is identical
and that a string is tokenized the same one. Then we compare the __dict__ of the tokenizers, but we remove
some keys that are not important for equivalence. See the inline explanations for each one.
"""
# First, check that the vocabularies match and that both tokenizers produce the same output
assert tokenizer1.vocab == tokenizer2.vocab
assert type(tokenizer1) == type(tokenizer2)
expected_tokenizer_output = tokenizer2('This is some text that should get tokenizer !? @ totallyarealtoken')
actual_tokenizer_output = tokenizer1('This is some text that should get tokenizer !? @ totallyarealtoken')
assert expected_tokenizer_output == actual_tokenizer_output
# we remove the actual _tokenizer object because it is an instantiated object and so does not pass an equality check
# the tokenizers are not usable after these pops
tokenizer1.__dict__.pop('_tokenizer')
tokenizer2.__dict__.pop('_tokenizer')
# extra key that is not important
tokenizer1.__dict__.pop('deprecation_warnings')
tokenizer2.__dict__.pop('deprecation_warnings')
# name_or_path will be the path that the tokenizer was loaded from, which will just be a temporary directory for
# the reloaded tokenizer, so we remove it and don't compare it between the two tokenizers
tokenizer1.__dict__.pop('name_or_path')
tokenizer2.__dict__.pop('name_or_path')
tokenizer1.init_kwargs.pop('name_or_path', None)
tokenizer2.init_kwargs.pop('name_or_path', None)
# tokenizer.init_kwargs['model_max_length'] is unset when the tokenizer does not specify it, but is set
# to a very large number when you save and reload, so here we just check that it's the same if it is present in
# both tokenizers. There is a separate tokenizer.model_max_length that will still get checked for equivalence
model_max_length_1 = tokenizer1.init_kwargs.get('model_max_length', None)
model_max_length_2 = tokenizer2.init_kwargs.get('model_max_length', None)
if model_max_length_1 is not None and model_max_length_2 is not None:
assert model_max_length_1 == model_max_length_2
tokenizer1.__dict__['init_kwargs'].pop('model_max_length', None)
tokenizer2.__dict__['init_kwargs'].pop('model_max_length', None)
# tokenizer.init_kwargs['tokenizer_file'] is unset when the tokenizer does not specify it, but is set to
# None when you save and reload, so here we just check that it's the same if it is present in both tokenizers.
tokenizer_file_1 = tokenizer1.init_kwargs.get('tokenizer_file', None)
tokenizer_file_2 = tokenizer2.init_kwargs.get('tokenizer_file', None)
if tokenizer_file_1 is not None or tokenizer_file_2 is not None:
assert tokenizer_file_1 == tokenizer_file_2
tokenizer1.__dict__['init_kwargs'].pop('tokenizer_file', None)
tokenizer2.__dict__['init_kwargs'].pop('tokenizer_file', None)
assert tokenizer1.__dict__ == tokenizer2.__dict__
def check_hf_model_equivalence(model1, model2):
expected_model_config_dict = model1.config.to_dict()
new_model_config_dict = model2.config.to_dict()
assert expected_model_config_dict == new_model_config_dict
assert sum(p.numel() for p in model1.parameters()) == sum(p.numel() for p in model2.parameters())
assert all(type(module1) == type(module2) for module1, module2 in zip(model1.modules(), model2.modules()))
@pytest.mark.parametrize('pass_in_tokenizer', [True, False])
@pytest.mark.parametrize('modify_tokenizer', [True, False])
@pytest.mark.parametrize('num_classes', [2, 3])
def test_hf_state_dict_info(tmp_path: Path, pass_in_tokenizer: bool, modify_tokenizer: bool, num_classes: int,
tiny_bert_tokenizer, tiny_bert_config):
transformers = pytest.importorskip('transformers')
if not pass_in_tokenizer and modify_tokenizer:
pytest.skip("Invalid parametrization. Cannot modify the tokenizer if it doesn't exist.")
tiny_bert_config.num_labels = num_classes
tokenizer = tiny_bert_tokenizer if pass_in_tokenizer else None
hf_model = transformers.AutoModelForSequenceClassification.from_config(
tiny_bert_config) # type: ignore (thirdparty)
if modify_tokenizer:
assert tokenizer is not None # pyright
tokenizer.add_special_tokens({'bos_token': '[NEWSPECIAL]'})
tokenizer.add_special_tokens({'additional_special_tokens': ['[MOSAICML']})
tokenizer.add_tokens(['totallyarealtoken', 'mosaicml'])
hf_model.resize_token_embeddings(len(tokenizer))
metrics = MulticlassAccuracy(num_classes=num_classes, average='micro')
model = HuggingFaceModel(hf_model, tokenizer=tokenizer, metrics=[metrics], use_logits=True)
vocab_size = 30522 # Match bert vocab size
sequence_length = 4
size = 4
batch_size = 2
train_dataset = RandomTextClassificationDataset(size=size,
vocab_size=vocab_size,
sequence_length=sequence_length,
num_classes=num_classes,
use_keys=True)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, sampler=dist.get_sampler(train_dataset))
trainer = Trainer(model=model,
train_dataloader=train_dataloader,
max_duration='1ep',
save_folder=str(tmp_path),
save_interval='1ep',
save_filename='hf-checkpoint.pt')
trainer.save_checkpoint(str(tmp_path / 'hf-checkpoint.pt'))
loaded_checkpoint = torch.load(Path(tmp_path) / 'hf-checkpoint.pt')
hf_state = loaded_checkpoint['state']['integrations']['huggingface']
hf_model_state = hf_state['model']
hf_tokenizer_state = hf_state['tokenizer']
assert hf_model_state['config']['class'] == 'transformers.models.bert.modeling_bert.BertForSequenceClassification'
loaded_config_dict = hf_model_state['config']['content']
# JSON keys need to be converted back to ints; huggingface does not auto-convert them along this code path
if 'id2label' in loaded_config_dict:
loaded_config_dict['id2label'] = {int(k): v for k, v in loaded_config_dict['id2label'].items()}
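# e.g. {'0': 'LABEL_0', '1': 'LABEL_1'} becomes {0: 'LABEL_0', 1: 'LABEL_1'} so it matches the in-memory config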
loaded_config = transformers.AutoConfig.from_pretrained(loaded_config_dict['_name_or_path'], **loaded_config_dict)
new_model_from_loaded_config = transformers.AutoModelForSequenceClassification.from_config(loaded_config)
check_hf_model_equivalence(new_model_from_loaded_config, hf_model)
if pass_in_tokenizer:
assert tokenizer is not None # pyright
with tempfile.TemporaryDirectory() as _tmp_dir:
for filename, saved_content in hf_tokenizer_state.items():
with open(Path(_tmp_dir) / f'{filename}{saved_content["file_extension"]}', 'w') as _tmp_file:
if saved_content['file_extension'] == '.json':
json.dump(saved_content['content'], _tmp_file)
elif saved_content['file_extension'] == '.txt':
for line in saved_content['content']:
_tmp_file.write(line)
_tmp_file.write('\n')
loaded_tokenizer = transformers.AutoTokenizer.from_pretrained(_tmp_dir)
# for an unknown reason this key is missing when loading the saved tokenizer, but present with a value of None
# for the original tokenizer
loaded_tokenizer.init_kwargs['tokenizer_file'] = loaded_tokenizer.init_kwargs.get('tokenizer_file', None)
check_hf_tokenizer_equivalence(loaded_tokenizer, tokenizer)
else:
assert hf_tokenizer_state == {}
def get_lm_trainer(hf_model,
hf_tokenizer,
save_folder,
load_path: Optional[str] = None,
is_conditional_generation: bool = False,
do_eval: bool = False,
fsdp_config: Optional[Dict[str, Any]] = None):
transformers = pytest.importorskip('transformers')
metrics: List[Metric] = [LanguageCrossEntropy(ignore_index=-100)]
if not is_conditional_generation:
metrics.append(MaskedAccuracy(ignore_index=-100))
model = HuggingFaceModel(hf_model, tokenizer=hf_tokenizer, metrics=metrics, use_logits=True)
vocab_size = hf_model.config.vocab_size
sequence_length = 4
size = 4
batch_size = 4
train_dataset = RandomTextLMDataset(size=size,
vocab_size=vocab_size,
sequence_length=sequence_length,
use_keys=True,
use_token_type_ids=not is_conditional_generation,
conditional_generation=is_conditional_generation)
if not is_conditional_generation:
collator = transformers.DataCollatorForLanguageModeling(tokenizer=hf_tokenizer, mlm_probability=0.15)
else:
# Note: this could be transformers.DataCollatorForSeq2Seq(tokenizer=hf_tokenizer, model=hf_model),
# but we want to test the scenario where the input batch does not have decoder_input_ids,
# which DataCollatorForSeq2Seq automatically adds
collator = transformers.DefaultDataCollator()
train_dataloader = DataLoader(train_dataset,
batch_size=batch_size,
collate_fn=collator,
sampler=dist.get_sampler(train_dataset))
eval_dataloader = None
if do_eval:
eval_dataloader = DataLoader(train_dataset,
batch_size=batch_size,
collate_fn=collator,
sampler=dist.get_sampler(train_dataset))
trainer = Trainer(model=model,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration='1ep',
save_folder=save_folder,
save_interval='1ep',
save_filename='hf-checkpoint.pt',
load_path=load_path,
fsdp_config=fsdp_config)
return trainer
@pytest.mark.parametrize('pass_in_tokenizer', [True, False])
def test_hf_no_tokenizer_warning(caplog, pass_in_tokenizer: bool, tiny_bert_model, tiny_bert_tokenizer):
pytest.importorskip('transformers')
import logging
with caplog.at_level(logging.WARNING, logger='composer'):
_ = HuggingFaceModel(tiny_bert_model,
tokenizer=tiny_bert_tokenizer if pass_in_tokenizer else None,
metrics=[],
use_logits=True)
if pass_in_tokenizer:
assert len(caplog.messages) == 0
else:
assert caplog.messages[
0] == 'The tokenizer was not provided. This means the tokenizer config will not be saved in the checkpoint.'
@pytest.mark.parametrize('checkpoint_upload_path', [None, 's3://checkpoints-bucket/remote-checkpoint.pt'])
@pytest.mark.parametrize('local_save_filename', [None, 'local-checkpoint.pt'])
def test_hf_loading_load_save_paths(checkpoint_upload_path: Optional[str], local_save_filename: Optional[str],
tmp_path: Path, tiny_bert_model, tiny_bert_tokenizer):
pytest.importorskip('transformers')
trainer = get_lm_trainer(tiny_bert_model, tiny_bert_tokenizer, str(tmp_path))
trainer.save_checkpoint(str(tmp_path / 'hf-checkpoint.pt'))
# Just upload the checkpoint to a dummy object store outside of composer to make mocking easier
if checkpoint_upload_path is not None:
parsed_uri = urlparse(checkpoint_upload_path)
object_store = DummyObjectStore(Path(parsed_uri.netloc))
object_store.upload_object(parsed_uri.path, str(tmp_path / 'hf-checkpoint.pt'))
checkpoint_load_path = str(tmp_path /
'hf-checkpoint.pt') if checkpoint_upload_path is None else checkpoint_upload_path
local_save_checkpoint_path = None
if local_save_filename is not None:
local_save_checkpoint_path = str(tmp_path / 'hf-checkpoint-local.pt')
with patch('composer.utils.file_helpers.S3ObjectStore', DummyObjectStore):
hf_loaded_model, hf_loaded_tokenizer = HuggingFaceModel.hf_from_composer_checkpoint(
checkpoint_path=checkpoint_load_path, local_checkpoint_save_location=local_save_checkpoint_path)
check_hf_model_equivalence(hf_loaded_model, tiny_bert_model)
check_hf_tokenizer_equivalence(hf_loaded_tokenizer, tiny_bert_tokenizer)
if local_save_checkpoint_path is not None:
assert os.path.exists(local_save_checkpoint_path)
if checkpoint_upload_path is None:
# the save location should be a symlink if the load path was already a local path
assert os.path.islink(local_save_checkpoint_path)
else:
# just check that we ended up with an actual file, not a symlink
assert os.path.getsize(local_save_checkpoint_path) > 1000
@pytest.mark.parametrize('modify_tokenizer', [False, True])
def test_hf_loading_tokenizer(modify_tokenizer: bool, tmp_path: Path, tiny_bert_model, tiny_bert_tokenizer):
pytest.importorskip('transformers')
if modify_tokenizer:
assert tiny_bert_tokenizer is not None # pyright
tiny_bert_tokenizer.add_special_tokens({'bos_token': '[NEWSPECIAL]'})
tiny_bert_tokenizer.add_special_tokens({'additional_special_tokens': ['[MOSAICML']})
tiny_bert_tokenizer.add_tokens(['totallyarealtoken', 'mosaicml'])
tiny_bert_model.resize_token_embeddings(len(tiny_bert_tokenizer))
trainer = get_lm_trainer(tiny_bert_model, tiny_bert_tokenizer, str(tmp_path))
trainer.save_checkpoint(str(tmp_path / 'hf-checkpoint.pt'))
hf_loaded_model, hf_loaded_tokenizer = HuggingFaceModel.hf_from_composer_checkpoint(
checkpoint_path=str(tmp_path / 'hf-checkpoint.pt'))
check_hf_model_equivalence(hf_loaded_model, tiny_bert_model)
check_hf_tokenizer_equivalence(hf_loaded_tokenizer, tiny_bert_tokenizer)
@pytest.mark.parametrize('num_classes', [None, 2, 3])
@pytest.mark.parametrize('model_class_name',
['default', 'autoseq', 'bertseq', 'customseq', 'bertseq_string', 'autoseq_string'])
def test_hf_loading_model_classes(model_class_name: str, num_classes: Optional[int], tmp_path: Path, tiny_bert_model,
tiny_bert_tokenizer):
transformers = pytest.importorskip('transformers')
if num_classes is not None and model_class_name not in {'autoseq', 'bertseq', 'customseq'}:
pytest.skip('Invalid parametrization. num_classes is only for loading sequence classification models.')
if num_classes is None and model_class_name in {'autoseq', 'bertseq', 'customseq'}:
pytest.skip('Invalid parametrization. num_classes cannot be None for loading sequence classification models.')
trainer = get_lm_trainer(tiny_bert_model, tiny_bert_tokenizer, str(tmp_path))
trainer.save_checkpoint(str(tmp_path / 'hf-checkpoint.pt'))
class CustomSequenceClassification(transformers.BertForSequenceClassification):
def __init__(self, config):
super().__init__(config)
self.custom_attribute = 'mosaicml'
model_class_name_to_class = {
'autoseq': transformers.AutoModelForSequenceClassification,
'bertseq': transformers.BertForSequenceClassification,
'default': None,
'customseq': CustomSequenceClassification,
'bertseq_string': 'transformers.models.bert.modeling_bert.BertForSequenceClassification',
'autoseq_string': 'transformers.AutoModelForSequenceClassification'
}
model_class = model_class_name_to_class[model_class_name]
extra_model_args = {}
if num_classes is not None:
extra_model_args['num_labels'] = num_classes
hf_loaded_model, hf_loaded_tokenizer = HuggingFaceModel.hf_from_composer_checkpoint(
checkpoint_path=str(tmp_path / 'hf-checkpoint.pt'),
model_instantiation_class=model_class,
model_config_kwargs=extra_model_args)
expected_model = tiny_bert_model
if model_class_name == 'autoseq':
config = copy.deepcopy(tiny_bert_model.config)
config.update(extra_model_args)
expected_model = model_class.from_config(config)
elif model_class_name in {'bertseq', 'customseq'}:
config = copy.deepcopy(tiny_bert_model.config)
config.update(extra_model_args)
expected_model = model_class(config)
elif model_class_name == 'bertseq_string':
config = copy.deepcopy(tiny_bert_model.config)
config.update(extra_model_args)
expected_model = transformers.BertForSequenceClassification(config)
elif model_class_name == 'autoseq_string':
config = copy.deepcopy(tiny_bert_model.config)
config.update(extra_model_args)
expected_model = transformers.AutoModelForSequenceClassification.from_config(config)
if model_class_name == 'customseq':
assert hf_loaded_model.custom_attribute == expected_model.custom_attribute
check_hf_model_equivalence(hf_loaded_model, expected_model)
check_hf_tokenizer_equivalence(hf_loaded_tokenizer, tiny_bert_tokenizer)
def test_hf_loading_full_model_equivalence(tmp_path: Path, tiny_bert_model, tiny_bert_tokenizer):
pytest.importorskip('transformers')
trainer1 = get_lm_trainer(tiny_bert_model, tiny_bert_tokenizer, str(tmp_path))
trainer1.fit()
hf_loaded_model, hf_loaded_tokenizer = HuggingFaceModel.hf_from_composer_checkpoint(
checkpoint_path=str(tmp_path / 'hf-checkpoint.pt'))
trainer2 = get_lm_trainer(hf_loaded_model,
hf_loaded_tokenizer,
str(tmp_path),
load_path=str(tmp_path / 'hf-checkpoint.pt'))
# loading from the last checkpoint gets you the same model
for p1, p2 in zip(trainer1.state.model.parameters(), trainer2.state.model.parameters()):
torch.testing.assert_close(p1, p2)
@pytest.mark.parametrize('model_class_name', ['gpt', 'not_a_module', 'not_a_class'])
def test_hf_loading_errors(tiny_bert_model, tiny_bert_tokenizer, model_class_name, tmp_path):
transformers = pytest.importorskip('transformers')
trainer = get_lm_trainer(tiny_bert_model, tiny_bert_tokenizer, str(tmp_path))
trainer.save_checkpoint(str(tmp_path / 'hf-checkpoint.pt'))
# The compatibility of the model chosen and the model saved is up to huggingface code, but we test
# here that one incompatible combination of BertConfig and GPT2Model errors out
model_class_name_to_class = {
'gpt': transformers.GPT2Model,
'not_a_module': 'not_a_module.BertForSequenceClassification',
'not_a_class': 'transformers.not_a_class'
}
error_contexts = {
'gpt': pytest.raises(AttributeError),
'not_a_module': pytest.raises(ValueError),
'not_a_class': pytest.raises(ValueError)
}
with error_contexts[model_class_name]:
_, _ = HuggingFaceModel.hf_from_composer_checkpoint(str(tmp_path / 'hf-checkpoint.pt'),
model_class_name_to_class[model_class_name])
@pytest.mark.parametrize('model,tokenizer', [(configure_tiny_gpt2_model, configure_tiny_gpt2_tokenizer),
(configure_tiny_bert_model, configure_tiny_bert_tokenizer)])
def test_hf_auto_shift_labels(caplog, model, tokenizer):
pytest.importorskip('transformers')
hf_model = model()
hf_tokenizer = tokenizer()
# Confirm that shift_labels is automatically set to True for gpt2 and False for bert
if hf_model.config.model_type == 'gpt2':
import logging
hf_model.resize_token_embeddings(len(hf_tokenizer))
with caplog.at_level(logging.WARNING, logger='composer'):
model = HuggingFaceModel(hf_model, tokenizer=hf_tokenizer)
assert model.shift_labels == True
assert len(caplog.messages) == 0
# A warning should be generated if using a Causal LM and setting shift_labels to False
with caplog.at_level(logging.WARNING, logger='composer'):
model = HuggingFaceModel(hf_model, tokenizer=hf_tokenizer, shift_labels=False)
assert model.shift_labels == False
assert caplog.messages[
0] == 'The shift_labels argument was set to False but the model is an instance of a HuggingFace Causal LM. This may lead to incorrect behavior.'
if hf_model.config.model_type == 'bert':
model = HuggingFaceModel(hf_model, tokenizer=hf_tokenizer)
assert model.shift_labels == False
def test_hf_causal_shift_labels(tiny_gpt2_model, tiny_gpt2_tokenizer):
pytest.importorskip('transformers')
tiny_gpt2_model.resize_token_embeddings(len(tiny_gpt2_tokenizer))
model = HuggingFaceModel(tiny_gpt2_model, tokenizer=tiny_gpt2_tokenizer, use_logits=True)
batch = tiny_gpt2_tokenizer('a b c d e f g h i j k', return_tensors='pt')
batch['labels'] = batch['input_ids'].clone()
_ = model.eval_forward(batch)
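# with shift_labels=True the stored labels are the input ids shifted left by one position,
# and the final position is masked with -100 so it is ignored by the loss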
assert isinstance(model.labels, torch.Tensor)
assert torch.all(model.labels[..., :3] == batch['input_ids'][..., 1:4])
assert torch.all(model.labels[..., -1] == -100)
def test_encoder_decoder(tiny_t5_model, tiny_t5_tokenizer):
pytest.importorskip('transformers')
trainer = get_lm_trainer(tiny_t5_model, tiny_t5_tokenizer, None, is_conditional_generation=True, do_eval=True)
trainer.fit()
trainer.eval()
@pytest.mark.gpu
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse('1.13.0'),
reason='requires PyTorch 1.13 or higher')
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_hf_fsdp(tiny_bert_config, tiny_bert_tokenizer):
transformers = pytest.importorskip('transformers')
tiny_bert_model = transformers.AutoModelForMaskedLM.from_config(tiny_bert_config)
fsdp_config = {
'sharding_strategy': 'FULL_SHARD',
'min_params': 1e8,
'cpu_offload': False,
'mixed_precision': 'PURE',
'backward_prefetch': 'BACKWARD_PRE',
'activation_checkpointing': False,
'activation_cpu_offload': False,
'verbose': False
}
trainer = get_lm_trainer(tiny_bert_model, tiny_bert_tokenizer, None, fsdp_config=fsdp_config)
assert is_model_fsdp(trainer.state.model)
assert trainer.state.fsdp_enabled
trainer.fit()
def test_separate_eval_metrics(tiny_bert_model, tiny_bert_tokenizer):
pytest.importorskip('transformers')
hf_model = HuggingFaceModel(
tiny_bert_model,
tokenizer=tiny_bert_tokenizer,
metrics=[LanguageCrossEntropy()],
eval_metrics=[MaskedAccuracy(), InContextLearningLMAccuracy()],
)
assert hf_model.train_metrics is not None
assert hf_model.val_metrics is not None
assert hf_model.train_metrics.keys() == {'LanguageCrossEntropy'}
assert hf_model.val_metrics.keys() == {'InContextLearningLMAccuracy', 'MaskedAccuracy'}
@pytest.mark.parametrize('checkpoint_upload_folder', [None, 's3://checkpoints-bucket/'])
@pytest.mark.parametrize('local_save_filename', [None, 'local-checkpoint.pt'])
def test_write_hf_from_composer(checkpoint_upload_folder, local_save_filename, tiny_bert_model, tiny_bert_tokenizer,
tmp_path):
transformers = pytest.importorskip('transformers')
from composer.models.huggingface import write_huggingface_pretrained_from_composer_checkpoint
if checkpoint_upload_folder is None:
checkpoint_upload_folder = tmp_path
trainer = get_lm_trainer(tiny_bert_model, tiny_bert_tokenizer, str(tmp_path))
trainer.fit()
# Just upload to a dummy object store outside of composer to make mocking easier
if str(checkpoint_upload_folder).startswith('s3://'):
parsed_uri = urlparse(checkpoint_upload_folder)
object_store = DummyObjectStore(Path(parsed_uri.netloc))
object_store.upload_object(parsed_uri.path + 'hf-checkpoint.pt', str(tmp_path / 'hf-checkpoint.pt'))
with patch('composer.utils.file_helpers.S3ObjectStore', DummyObjectStore):
checkpoint_path = os.path.join(checkpoint_upload_folder, 'hf-checkpoint.pt')
write_huggingface_pretrained_from_composer_checkpoint(checkpoint_path,
tmp_path / 'hf-save-pretrained',
local_checkpoint_save_location=local_save_filename)
assert os.path.exists(tmp_path / 'hf-save-pretrained' / 'config.json')
assert os.path.exists(tmp_path / 'hf-save-pretrained' / 'pytorch_model.bin')
loaded_hf_model = transformers.AutoModelForMaskedLM.from_pretrained(tmp_path / 'hf-save-pretrained')
# set _name_or_path so that the equivalence check passes. It is expected that these are different, because one is loaded from disk, while one is loaded from the hub
loaded_hf_model.config._name_or_path = tiny_bert_model.config._name_or_path
check_hf_model_equivalence(tiny_bert_model, loaded_hf_model)
@pytest.mark.parametrize('embedding_resize', ['higher', 'lower', 'no_resize'])
@pytest.mark.parametrize('allow_embedding_resizing', [True, False])
def test_embedding_resizing(tiny_bert_model, tiny_bert_tokenizer, embedding_resize, allow_embedding_resizing, caplog):
pytest.importorskip('transformers')
import logging
from composer.models import HuggingFaceModel
original_size = tiny_bert_model.config.vocab_size
if embedding_resize == 'higher':
tiny_bert_model.resize_token_embeddings(original_size + 100)
elif embedding_resize == 'lower':
tiny_bert_model.resize_token_embeddings(original_size - 100)
error_context = pytest.raises(ValueError) if (not allow_embedding_resizing and
embedding_resize == 'lower') else nullcontext()
with caplog.at_level(logging.WARNING, logger='composer'):
with error_context:
_ = HuggingFaceModel(tiny_bert_model,
tokenizer=tiny_bert_tokenizer,
allow_embedding_resizing=allow_embedding_resizing)
if embedding_resize == 'lower':
if allow_embedding_resizing:
# when the embedding size is smaller than the tokenizer vocab size,
# the embeddings should get resized to match the tokenizer vocab size
assert tiny_bert_model.config.vocab_size == len(tiny_bert_tokenizer)
assert caplog.messages[0].startswith(
'The number of tokens in the tokenizer is greater than the number of tokens in the model')
elif embedding_resize == 'higher':
# when the embedding size is greater than the tokenizer vocab size,
# no adjustment is needed. Some embeddings will simply not be used
assert tiny_bert_model.config.vocab_size == original_size + 100
assert caplog.messages[0].startswith(
'The number of tokens in the tokenizer is less than the number of tokens in the model.')
elif embedding_resize == 'no_resize':
assert tiny_bert_model.config.vocab_size == original_size
assert len(caplog.messages) == 0
else:
raise ValueError(f'Unknown embedding_resize: {embedding_resize}')
@device('cpu', 'gpu')
@world_size(1, 2)
@pytest.mark.parametrize('use_fsdp', [True, False])
@pytest.mark.parametrize('hf_model,hf_tokenizer', [(configure_tiny_gpt2_model, configure_tiny_gpt2_tokenizer),
(configure_tiny_t5_model, configure_tiny_t5_tokenizer)])
def test_generate(device, world_size, hf_model, hf_tokenizer, use_fsdp):
if use_fsdp and version.parse(torch.__version__) < version.parse('1.13.0'):
pytest.skip('FSDP requires torch >= 1.13.0')
transformers = pytest.importorskip('transformers')
if device == 'cpu' and use_fsdp:
pytest.skip('FSDP is not supported on CPU.')
if world_size == 1 and use_fsdp:
pytest.xfail((
'Generation with world size 1 and FSDP fails with '
'`RuntimeError: The tensor has a non-zero number of elements, '
'but its data is not allocated yet. Caffe2 uses a lazy allocation, '
'so you will need to call mutable_data() or raw_mutable_data() to actually allocate memory.` '
'This issue is resolved with world size > 1 by a dummy call to forward (see HuggingFaceModel.dummy_forward_called), '
'but for some reason fails with world size 1.'))
fsdp_config = None
if use_fsdp:
fsdp_config = {
'sharding_strategy': 'FULL_SHARD',
}
hf_model = hf_model()
hf_tokenizer = hf_tokenizer()
model = HuggingFaceModel(hf_model, tokenizer=hf_tokenizer, use_logits=True)
# just instantiating Trainer to go through the normal FSDP code path
trainer = Trainer(model=model, fsdp_config=fsdp_config, device=device)
device = trainer.state.device
if isinstance(hf_tokenizer, transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast):
hf_tokenizer.padding_side = 'left'
input_dict = hf_tokenizer(['hello', 'goodbyes'], return_tensors='pt', padding=True)
for k, v in input_dict.items():
input_dict[k] = device.tensor_to_device(v)
generation1 = model.generate(**input_dict, max_new_tokens=5, pad_token_id=hf_tokenizer.pad_token_id)
generation2 = model.generate(**input_dict, max_new_tokens=3, pad_token_id=hf_tokenizer.pad_token_id)
assert generation1.shape == (2,
(input_dict['input_ids'].shape[1] if not hf_model.config.is_encoder_decoder else 1) +
5)
assert generation2.shape == (2,
(input_dict['input_ids'].shape[1] if not hf_model.config.is_encoder_decoder else 1) +
3)
decoded_generation1 = hf_tokenizer.batch_decode(generation1, skip_special_tokens=True)
decoded_generation2 = hf_tokenizer.batch_decode(generation2, skip_special_tokens=True)
assert len(decoded_generation1) == len(decoded_generation2) == 2
assert all(isinstance(decoded_generation, str) for decoded_generation in decoded_generation1)
assert all(isinstance(decoded_generation, str) for decoded_generation in decoded_generation2)
@device('cpu', 'gpu')
@world_size(1, 2)
@pytest.mark.parametrize('use_fsdp', [True, False])
@pytest.mark.parametrize('hf_model,hf_tokenizer', [(configure_tiny_gpt2_model, configure_tiny_gpt2_tokenizer),
(configure_tiny_t5_model, configure_tiny_t5_tokenizer)])
def test_eval_forward_generate(device, world_size, hf_model, hf_tokenizer, use_fsdp):
if use_fsdp and version.parse(torch.__version__) < version.parse('1.13.0'):
pytest.skip('FSDP requires torch >= 1.13.0')
transformers = pytest.importorskip('transformers')
if device == 'cpu' and use_fsdp:
pytest.skip('FSDP is not supported on CPU.')
if world_size == 1 and use_fsdp:
pytest.xfail((
'Generation with world size 1 and FSDP fails with '
'`RuntimeError: The tensor has a non-zero number of elements, '
'but its data is not allocated yet. Caffe2 uses a lazy allocation, '
'so you will need to call mutable_data() or raw_mutable_data() to actually allocate memory.` '
'This issue is resolved with world size > 1 by a dummy call to forward (see HuggingFaceModel.dummy_forward_called), '
'but for some reason fails with world size 1.'))
fsdp_config = None
if use_fsdp:
fsdp_config = {
'sharding_strategy': 'FULL_SHARD',
}
hf_model = hf_model()
hf_tokenizer = hf_tokenizer()
model = HuggingFaceModel(hf_model, tokenizer=hf_tokenizer, use_logits=True)
# just instantiating Trainer to go through the normal FSDP code path
trainer = Trainer(model=model, fsdp_config=fsdp_config, device=device)
device = trainer.state.device
if isinstance(hf_tokenizer, transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast):
hf_tokenizer.padding_side = 'left'
input_dict = hf_tokenizer(['hello', 'goodbyes'], return_tensors='pt', padding=True)
for k, v in input_dict.items():
input_dict[k] = device.tensor_to_device(v)
input_dict['mode'] = 'generate'
input_dict['generation_length'] = 5
input_dict['labels'] = [['answer1'], ['answer2']]
generation1 = model.eval_forward(input_dict, None)
input_dict['generation_length'] = 3
input_dict['labels'] = [['answer1'], ['answer2']]
generation2 = model.eval_forward(input_dict, None)
assert len(generation1) == len(generation2) == 2
assert all(isinstance(decoded_generation, str) for decoded_generation in generation1)
assert all(isinstance(decoded_generation, str) for decoded_generation in generation2)
| composer-dev | tests/models/test_hf_model.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pytest
from torch.utils.data import DataLoader
from composer.models.gpt2 import create_gpt2
from composer.trainer import Trainer
from tests.common.datasets import RandomTextLMDataset
def test_gpt2_hf_factory(tiny_gpt2_config, tiny_gpt2_tokenizer, monkeypatch):
transformers = pytest.importorskip('transformers')
monkeypatch.setattr('transformers.AutoConfig.from_pretrained', lambda x: tiny_gpt2_config)
gpt2_composer_model = create_gpt2(use_pretrained=False,
pretrained_model_name='dummy',
model_config=None,
tokenizer_name=None,
gradient_checkpointing=False)
train_dataset = RandomTextLMDataset(size=8,
vocab_size=tiny_gpt2_tokenizer.vocab_size,
sequence_length=8,
use_keys=True)
collator = transformers.DataCollatorForLanguageModeling(tokenizer=tiny_gpt2_tokenizer, mlm=False)
train_dataloader = DataLoader(train_dataset, batch_size=4, collate_fn=collator)
trainer = Trainer(model=gpt2_composer_model, train_dataloader=train_dataloader, max_duration='1ep')
trainer.fit()
assert trainer.state.train_metrics['LanguagePerplexity'].compute() > 0.0
| composer-dev | tests/models/test_gpt2.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
from composer.models.efficientnetb0.efficientnets import EfficientNet
@pytest.mark.gpu
def test_efficientb0_activate_shape():
# Running this test on cuda as convolutions are slow on CPU
random_input = torch.rand(2, 3, 224, 224).cuda()
model = EfficientNet.get_model_from_name(
'efficientnet-b0',
num_classes=1000,
drop_connect_rate=0.2,
).cuda()
# Test Stem
out = model.conv_stem(random_input)
out = model.bn1(out)
out = model.act1(out)
assert out.shape == (2, 32, 112, 112)
# Test each block; shapes are from Table 1 of the EfficientNet paper
block_act_shape = [
(2, 16, 112, 112),
(2, 24, 56, 56),
(2, 24, 56, 56),
(2, 40, 28, 28),
(2, 40, 28, 28),
(2, 80, 14, 14),
(2, 80, 14, 14),
(2, 80, 14, 14),
(2, 112, 14, 14),
(2, 112, 14, 14),
(2, 112, 14, 14),
(2, 192, 7, 7),
(2, 192, 7, 7),
(2, 192, 7, 7),
(2, 192, 7, 7),
(2, 320, 7, 7),
]
for i, block in enumerate(model.blocks):
out = block(out)
assert out.shape == block_act_shape[i]
out = model.conv_head(out)
assert out.shape == (2, 1280, 7, 7)
| composer-dev | tests/models/test_efficientnet.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
import torch
@pytest.fixture
def mmdet_detection_batch():
batch_size = 2
num_labels_per_image = 20
image_size = 224
return {
'img_metas': [{
'filename': '../../data/coco/train2017/fake_img.jpg',
'ori_filename': 'fake_image.jpg',
'img_shape': (image_size, image_size, 3),
'ori_shape': (image_size, image_size, 3),
'pad_shape': (image_size, image_size, 3),
'scale_factor': np.array([1., 1., 1., 1.], dtype=np.float32)
}] * batch_size,
'img':
torch.zeros(batch_size, 3, image_size, image_size, dtype=torch.float32),
'gt_bboxes': [torch.zeros(num_labels_per_image, 4, dtype=torch.float32)] * batch_size,
'gt_labels': [torch.zeros(num_labels_per_image, dtype=torch.int64)] * batch_size
}
@pytest.fixture
def mmdet_detection_eval_batch():
# Eval settings for mmdetection datasets have an extra list around inputs.
batch_size = 2
num_labels_per_image = 20
image_size = 224
return {
'img_metas': [[{
'filename': '../../data/coco/train2017/fake_img.jpg',
'ori_filename': 'fake_image.jpg',
'img_shape': (image_size, image_size, 3),
'ori_shape': (image_size, image_size, 3),
'pad_shape': (image_size, image_size, 3),
'scale_factor': np.array([1., 1., 1., 1.], dtype=np.float32),
}] * batch_size],
'img': [torch.zeros(batch_size, 3, image_size, image_size, dtype=torch.float32)],
'gt_bboxes': [[torch.zeros(num_labels_per_image, 4, dtype=torch.float32)] * batch_size],
'gt_labels': [[torch.zeros(num_labels_per_image, dtype=torch.int64)] * batch_size]
}
@pytest.fixture
def yolox_config():
# from https://github.com/open-mmlab/mmdetection/blob/master/configs/yolox/yolox_s_8x8_300e_coco.py
return dict(
type='YOLOX',
input_size=(640, 640),
random_size_range=(15, 25),
random_size_interval=10,
backbone=dict(type='CSPDarknet', deepen_factor=0.33, widen_factor=0.5),
neck=dict(type='YOLOXPAFPN', in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1),
bbox_head=dict(type='YOLOXHead', num_classes=80, in_channels=128, feat_channels=128),
train_cfg=dict(assigner=dict(type='SimOTAAssigner', center_radius=2.5)),
# To match the original source code, the threshold for the val phase is
# 0.01 and the threshold for the test phase is 0.001.
test_cfg=dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65)))
@pytest.fixture
def faster_rcnn_config():
# modified from https://github.com/open-mmlab/mmdetection/blob/master/configs/_base_/models/faster_rcnn_r50_fpn.py
return dict(
type='FasterRCNN',
backbone=dict(type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5),
rpn_head=dict(type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(type='StandardRoIHead',
bbox_roi_extractor=dict(type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(rpn=dict(assigner=dict(type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(nms_pre=2000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(assigner=dict(type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(nms_pre=1000, max_per_img=1000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0),
rcnn=dict(score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
))
def test_mmdet_model_forward_yolox(mmdet_detection_batch, yolox_config):
pytest.importorskip('mmdet')
from mmcv import ConfigDict
from mmdet.models import build_detector
from composer.models import MMDetModel
config = ConfigDict(yolox_config)
# Use a non-pretrained model to avoid a slow test that downloads the weights.
model = build_detector(config)
model.init_weights()
model = MMDetModel(model=model)
out = model(mmdet_detection_batch)
assert list(out.keys()) == ['loss_cls', 'loss_bbox', 'loss_obj']
def test_mmdet_model_eval_forward_yolox(mmdet_detection_eval_batch, yolox_config):
pytest.importorskip('mmdet')
from mmcv import ConfigDict
from mmdet.models import build_detector
from composer.models import MMDetModel
config = ConfigDict(yolox_config)
# Use a non-pretrained model to avoid a slow test that downloads the weights.
model = build_detector(config)
model.init_weights()
model = MMDetModel(model=model)
out = model.eval_forward(mmdet_detection_eval_batch)
assert len(out) == mmdet_detection_eval_batch['img'][0].shape[0] # batch size
assert list(out[0].keys()) == ['labels', 'boxes', 'scores']
def test_mmdet_model_forward_faster_rcnn(mmdet_detection_batch, faster_rcnn_config):
pytest.importorskip('mmdet')
from mmcv import ConfigDict
from mmdet.models import build_detector
from composer.models import MMDetModel
config = ConfigDict(faster_rcnn_config)
# Use a non-pretrained model to avoid a slow test that downloads the weights.
model = build_detector(config)
model.init_weights()
model = MMDetModel(model=model)
out = model(mmdet_detection_batch)
assert list(out.keys()) == ['loss_rpn_cls', 'loss_rpn_bbox', 'loss_cls', 'acc', 'loss_bbox']
| composer-dev | tests/models/test_mmdet_model.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import copy
import pickle
from typing import Iterable
import pytest
import torch
from torch.utils.data import DataLoader
from composer.trainer import Trainer
from tests.common.datasets import RandomClassificationDataset
from tests.common.models import SimpleConvModel, SimpleModel
@pytest.mark.parametrize('model', [SimpleConvModel, SimpleModel])
def test_composermodel_torchscriptable(model):
torch.jit.script(model())
@pytest.fixture()
def dataloader():
return DataLoader(RandomClassificationDataset())
def test_model_access_to_logger(dataloader: Iterable):
model = SimpleModel(num_features=1, num_classes=2)
assert model.logger is None
trainer = Trainer(model=model, max_duration='1ep', train_dataloader=dataloader)
assert model.logger is trainer.logger
def test_model_deepcopy(dataloader: Iterable):
model = SimpleModel(num_features=1, num_classes=2)
assert model.logger is None
trainer = Trainer(model=model, max_duration='1ep', train_dataloader=dataloader)
assert model.logger is not None
copied_model = copy.deepcopy(trainer.state.model)
assert copied_model.logger is model.logger
assert model.num_classes == copied_model.num_classes
def test_model_copy(dataloader: Iterable):
model = SimpleModel(num_features=1, num_classes=2)
assert model.logger is None
trainer = Trainer(model=model, max_duration='1ep', train_dataloader=dataloader)
assert model.logger is not None
copied_model = copy.copy(trainer.state.model)
assert copied_model.logger is model.logger
assert model.num_classes == copied_model.num_classes
def test_model_pickle(dataloader: Iterable):
model = SimpleModel(num_features=1, num_classes=2)
assert model.logger is None
trainer = Trainer(model=model, max_duration='1ep', train_dataloader=dataloader)
assert model.logger is not None
pickled_model = pickle.dumps(trainer.state.model)
restored_model = pickle.loads(pickled_model)
# after pickling the model, the restored logger should be None, since the logger cannot be serialized
assert restored_model.logger is None
assert model.num_classes == restored_model.num_classes
| composer-dev | tests/models/test_composer_model.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pytest
from torch.utils.data import DataLoader
from composer.models.bert import create_bert_classification, create_bert_mlm
from composer.trainer import Trainer
from tests.common.datasets import RandomTextClassificationDataset, RandomTextLMDataset
def test_bert_mlm_hf_factory(tiny_bert_config, tiny_bert_tokenizer, monkeypatch):
transformers = pytest.importorskip('transformers')
monkeypatch.setattr('transformers.AutoConfig.from_pretrained', lambda x: tiny_bert_config)
bert_composer_model = create_bert_mlm(use_pretrained=False,
pretrained_model_name='dummy',
model_config=None,
tokenizer_name=None,
gradient_checkpointing=False)
train_dataset = RandomTextLMDataset(size=8,
vocab_size=tiny_bert_tokenizer.vocab_size,
sequence_length=8,
use_keys=True)
collator = transformers.DataCollatorForLanguageModeling(tokenizer=tiny_bert_tokenizer,
mlm=True,
mlm_probability=0.15)
train_dataloader = DataLoader(train_dataset, batch_size=4, collate_fn=collator)
trainer = Trainer(model=bert_composer_model, train_dataloader=train_dataloader, max_duration='1ep')
trainer.fit()
assert trainer.state.train_metrics['LanguageCrossEntropy'].compute() > 0.0
def test_bert_classification_hf_factory(tiny_bert_config, tiny_bert_tokenizer, monkeypatch):
pytest.importorskip('transformers')
def config_patch(x, num_labels):
tiny_bert_config.num_labels = num_labels
return tiny_bert_config
monkeypatch.setattr('transformers.AutoConfig.from_pretrained', config_patch)
bert_composer_model = create_bert_classification(use_pretrained=False,
pretrained_model_name='dummy',
model_config=None,
tokenizer_name=None,
gradient_checkpointing=False,
num_labels=3)
train_dataset = RandomTextClassificationDataset(size=8,
vocab_size=tiny_bert_tokenizer.vocab_size,
sequence_length=8,
num_classes=3,
use_keys=True)
train_dataloader = DataLoader(train_dataset, batch_size=4)
trainer = Trainer(model=bert_composer_model, train_dataloader=train_dataloader, max_duration='1ep')
trainer.fit()
assert trainer.state.train_metrics['MulticlassAccuracy'].compute() > 0.0
| composer-dev | tests/models/test_bert.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
| composer-dev | tests/cli/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import subprocess
import sys
from typing import List
import pytest
import composer
@pytest.mark.parametrize('args', [
['composer', '--version'],
[sys.executable, '-m', 'composer', '--version'],
[sys.executable, '-m', 'composer.cli', '--version'],
[sys.executable, '-m', 'composer.cli.launcher', '--version'],
])
def test_cli_version(args: List[str]):
version_str = subprocess.check_output(args, text=True)
assert version_str == f'MosaicML Composer {composer.__version__}\n'
| composer-dev | tests/cli/test_cli.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Contains commonly used models that are shared across the test suite."""
import copy
from functools import partial
from typing import Any, Dict, Optional, Tuple, Union
import pytest
import torch
from torchmetrics import Metric, MetricCollection
from composer.metrics import CrossEntropy, MIoU
from composer.metrics.nlp import LanguageCrossEntropy, MaskedAccuracy
from composer.models import ComposerClassifier, HuggingFaceModel
class SimpleModel(ComposerClassifier):
"""Small classification model.
Args:
num_features (int): number of input features (default: 1)
num_classes (int): number of classes (default: 2)
"""
def __init__(self, num_features: int = 1, num_classes: int = 2) -> None:
self.num_features = num_features
self.num_classes = num_classes
fc1 = torch.nn.Linear(num_features, 5)
fc2 = torch.nn.Linear(5, num_classes)
net = torch.nn.Sequential(
torch.nn.AdaptiveAvgPool2d(1),
torch.nn.Flatten(),
fc1,
torch.nn.ReLU(),
fc2,
torch.nn.Softmax(dim=-1),
)
super().__init__(module=net, num_classes=num_classes)
# Important: It is crucial that the FC layers are bound to `self`
# for the optimizer surgery tests.
        # These tests attempt to perform surgery on the `fc1` layer, and we want
        # to make sure that, post-surgery, self.fc1 still refers to the same
        # parameters as the corresponding layer inside self.module.
self.fc1 = fc1
self.fc2 = fc2
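# A minimal illustrative sketch (not part of the original file) of the binding described
# in the comment above: because `fc1` is both placed inside the Sequential and bound to
# `self`, the two references share the same parameter objects, so surgery performed through
# one reference is visible through the other. Assuming the Sequential layout defined above
# (pool, flatten, fc1, relu, fc2, softmax), this would hold:
#
#     model = SimpleModel()
#     assert model.fc1.weight is model.module[2].weight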
class SimpleMLP(torch.nn.Module):
def __init__(self, num_features: int, device: str):
super().__init__()
self.fc1 = torch.nn.Linear(num_features, num_features, device=device, bias=False)
self.fc2 = torch.nn.Linear(num_features, num_features, device=device, bias=False)
self.net = torch.nn.Sequential(self.fc1, torch.nn.ReLU(), self.fc2)
def forward(self, x):
return self.net(x)
class SimpleWeightTiedModel(ComposerClassifier):
"""Small classification model with tied weights.
    Typically this model will be used to test weight tying with FSDP.
    Args:
        num_features (int): number of input features (default: 1)
device (str): the device to initialize the model (default: 'cpu')
"""
def __init__(self, num_features: int = 1, device: str = 'cpu') -> None:
self.num_features = num_features
mlp = SimpleMLP(num_features, device)
net = torch.nn.Sequential(
mlp,
torch.nn.Softmax(dim=-1),
)
super().__init__(module=net, num_classes=num_features)
self.mlp = mlp
self.net = net
self.net.param_init_fn = self.param_init_fn
self.mlp.fc1.weight = self.mlp.fc2.weight
def param_init_fn(self, module):
init_fn = partial(torch.nn.init.normal_, mean=0.0, std=0.1)
if isinstance(module, torch.nn.Linear):
init_fn(module.weight)
if module.bias is not None:
torch.nn.init.zeros_(module.bias)
class EmbeddedWeightTiedModel(ComposerClassifier):
"""A small classification model that consists of two simple MLPs,
and we tie weights across the simple MLPs.
Typically this model will be used to test weight tying w/ FSDP.
Args:
num_features (int): number of input features (default: 1)
device (str): the device to initialize the model (default: 'cpu')
"""
def __init__(self, num_features: int = 1, device: str = 'cpu') -> None:
net1 = SimpleMLP(num_features, device)
net2 = SimpleMLP(num_features, device)
net = torch.nn.Sequential(
net1,
net2,
torch.nn.Softmax(dim=-1),
)
super().__init__(module=net, num_classes=num_features)
self.module.param_init_fn = self.param_init_fn
self.net1 = net1
self.net2 = net2
self.net1.fc1.weight = self.net2.fc1.weight
def param_init_fn(self, module):
init_fn = partial(torch.nn.init.normal_, mean=0.0, std=0.1)
if isinstance(module, torch.nn.Linear):
init_fn(module.weight)
if module.bias is not None:
torch.nn.init.zeros_(module.bias)
class SimpleConvModel(ComposerClassifier):
"""Small convolutional classifier.
Args:
num_channels (int): number of input channels (default: 3)
num_classes (int): number of classes (default: 2)
"""
def __init__(self,
num_channels: int = 3,
num_classes: int = 2,
norm: Optional[str] = None,
norm_affine: bool = True) -> None:
self.num_classes = num_classes
self.num_channels = num_channels
conv_args = {'kernel_size': (3, 3), 'padding': 1, 'stride': 2}
conv1 = torch.nn.Conv2d(in_channels=num_channels, out_channels=8, **conv_args)
conv2 = torch.nn.Conv2d(in_channels=8, out_channels=4, **conv_args)
norm_layer = None
if norm is None:
norm_layer = torch.nn.Identity()
elif norm == 'batch':
norm_layer = torch.nn.BatchNorm2d(4, affine=norm_affine)
elif norm == 'instance':
norm_layer = torch.nn.InstanceNorm2d(4, affine=norm_affine)
elif norm == 'layer':
norm_layer = torch.nn.LayerNorm(4, elementwise_affine=norm_affine)
elif norm == 'group':
norm_layer = torch.nn.GroupNorm(2, 4, affine=norm_affine)
else:
raise ValueError(f'Unknown norm: {norm}')
pool = torch.nn.AdaptiveAvgPool2d(1)
flatten = torch.nn.Flatten()
fc1 = torch.nn.Linear(4, 16)
fc2 = torch.nn.Linear(16, num_classes)
net = torch.nn.Sequential(
conv1,
conv2,
norm_layer,
pool,
flatten,
fc1,
fc2,
)
super().__init__(module=net, num_classes=self.num_classes)
# bind these to class for access during surgery tests
self.conv1 = conv1
self.conv2 = conv2
class SimpleSegmentationModel(ComposerClassifier):
"""Small convolutional classifier.
Args:
num_channels (int): number of input channels (default: 3)
num_classes (int): number of classes (default: 2)
"""
def __init__(self, num_channels: int = 3, num_classes: int = 2) -> None:
self.num_classes = num_classes
self.num_channels = num_channels
conv_args = {'kernel_size': (3, 3), 'padding': 'same', 'stride': 1}
conv1 = torch.nn.Conv2d(in_channels=num_channels, out_channels=8, **conv_args)
conv2 = torch.nn.Conv2d(in_channels=8, out_channels=num_classes, **conv_args)
net = torch.nn.Sequential(
conv1,
conv2,
)
train_metrics = MetricCollection([CrossEntropy(), MIoU(num_classes)])
val_metrics = MetricCollection([CrossEntropy(), MIoU(num_classes)])
super().__init__(module=net, train_metrics=train_metrics, val_metrics=val_metrics)
# bind these to class for access during surgery tests
self.conv1 = conv1
self.conv2 = conv2
class Mean(torch.nn.Module):
def forward(self, x):
return torch.mean(x, dim=1)
class SimpleTransformerBase(torch.nn.Module):
"""Base encoding transformer model for testing"""
def __init__(self, vocab_size: int = 100, d_model: int = 16):
super().__init__()
        embedding = torch.nn.Embedding(vocab_size, d_model)
layer = torch.nn.TransformerEncoderLayer(d_model=d_model, nhead=2, dim_feedforward=d_model, dropout=0.3)
# necessary to make the model scriptable
layer.__constants__ = []
transformer = torch.nn.TransformerEncoder(layer, num_layers=2, norm=torch.nn.LayerNorm(d_model))
# necessary to make the model scriptable
transformer.__constants__ = []
self.net = torch.nn.Sequential(embedding, transformer)
self.embedding = embedding
self.transformer = transformer
def forward(self, batch: torch.Tensor) -> torch.Tensor:
return self.net(batch)
class SimpleTransformerMaskedLM(ComposerClassifier):
def __init__(self, vocab_size: int = 100):
self.vocab_size = vocab_size
transformer_base = SimpleTransformerBase(vocab_size=vocab_size, d_model=16)
lm_head = torch.nn.Linear(16, vocab_size)
net = torch.nn.Sequential(transformer_base, lm_head)
mlm_metrics = MetricCollection(LanguageCrossEntropy(ignore_index=-100), MaskedAccuracy(ignore_index=-100))
loss = torch.nn.CrossEntropyLoss()
super().__init__(module=net, train_metrics=mlm_metrics, val_metrics=mlm_metrics, loss_fn=loss)
self.transformer_base = transformer_base
self.lm_head = lm_head
def loss(self, outputs: torch.Tensor, batch: Union[Tuple[Any, torch.Tensor], Dict[str, Any]], *args,
**kwargs) -> torch.Tensor:
if isinstance(batch, tuple):
_, targets = batch
else:
targets = batch['labels']
return self._loss_fn(outputs.view(-1, self.vocab_size), targets.view(-1), *args, **kwargs)
def forward(self, batch: Union[Tuple[torch.Tensor, Any], Dict[str, Any]]) -> torch.Tensor:
if isinstance(batch, tuple):
inputs, _ = batch
else:
inputs = batch['input_ids']
outputs = self.module(inputs)
return outputs
def update_metric(self, batch: Any, outputs: Any, metric: Metric) -> None:
if isinstance(batch, tuple):
_, targets = batch
else:
targets = batch['labels']
metric.update(outputs, targets)
class SimpleTransformerClassifier(ComposerClassifier):
"""Transformer model for testing"""
def __init__(self, vocab_size: int = 10, num_classes: int = 2):
transformer_base = SimpleTransformerBase(vocab_size=vocab_size, d_model=16)
pooler = Mean()
dropout = torch.nn.Dropout(0.3)
classifier = torch.nn.Linear(16, num_classes)
net = torch.nn.Sequential(transformer_base, pooler, dropout, classifier)
super().__init__(module=net, num_classes=num_classes)
self.transformer_base = transformer_base
self.pooler = pooler
self.classifier = classifier
class ConvModel(ComposerClassifier):
"""Convolutional network featuring strided convs, a batchnorm, max pooling, and average pooling."""
def __init__(self):
conv_args = {'kernel_size': (3, 3), 'padding': 1}
conv1 = torch.nn.Conv2d(in_channels=32, out_channels=8, stride=2, bias=False, **conv_args) # stride > 1
conv2 = torch.nn.Conv2d(in_channels=8, out_channels=32, stride=2, bias=False,
**conv_args) # stride > 1 but in_channels < 16
conv3 = torch.nn.Conv2d(in_channels=32, out_channels=64, stride=1, bias=False, **conv_args) # stride = 1
bn = torch.nn.BatchNorm2d(num_features=64)
pool1 = torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=1)
pool2 = torch.nn.AdaptiveAvgPool2d(1)
flatten = torch.nn.Flatten()
linear1 = torch.nn.Linear(64, 48)
linear2 = torch.nn.Linear(48, 10)
net = torch.nn.Sequential(
conv1,
conv2,
conv3,
bn,
pool1,
pool2,
flatten,
linear1,
linear2,
)
super().__init__(module=net, num_classes=10)
# bind these to class for access during surgery tests
self.conv1 = conv1
self.conv2 = conv2
self.conv3 = conv3
self.bn = bn
self.pool1 = pool1
self.pool2 = pool2
self.flatten = flatten
self.linear1 = linear1
self.linear2 = linear2
class SimpleModelWithDropout(ComposerClassifier):
def __init__(self, num_features: int = 64, num_classes: int = 10) -> None:
fc1 = torch.nn.Linear(num_features, 512)
fc2 = torch.nn.Linear(512, num_classes)
dropout = torch.nn.Dropout(0.5)
net = torch.nn.Sequential(
torch.nn.Flatten(),
fc1,
torch.nn.ReLU(),
dropout,
fc2,
torch.nn.Softmax(dim=-1),
)
super().__init__(module=net, num_classes=num_classes)
self.fc1 = fc1
self.fc2 = fc2
self.dropout = dropout
def loss(self, outputs: torch.Tensor, batch: Tuple[Any, torch.Tensor], *args, **kwargs) -> torch.Tensor:
_, targets = batch
targets = targets.squeeze(dim=0)
return self._loss_fn(outputs, targets, *args, **kwargs)
def update_metric(self, batch: Any, outputs: Any, metric: Metric) -> None:
_, targets = batch
metric.update(outputs.squeeze(dim=0), targets.squeeze(dim=0))
def forward(self, batch: Tuple[torch.Tensor, Any]) -> torch.Tensor:
inputs, _ = batch
inputs = inputs.squeeze(dim=0)
outputs = self.module(inputs)
return outputs
# Note: These methods are an alternative to the tiny_bert fixtures in fixtures.py.
# Fixtures cannot be used natively as parametrized inputs, which we require when
# we wish to run a test across multiple models, one of which is a HuggingFace model.
# As a workaround, we inject objects into the PyTest namespace. Tests should not directly
# use pytest.{var}, but instead should import and use these helper copy methods so the
# objects in the PyTest namespace do not change.
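# Illustrative sketch (not part of the original file) of how the configure_* helpers below
# are meant to be used as parametrize inputs in place of fixtures. The test name and
# parameter name here are assumptions made for demonstration only:
#
#     @pytest.mark.parametrize('model_fn', [configure_tiny_bert_hf_model, configure_tiny_gpt2_hf_model])
#     def test_something(model_fn):
#         model = model_fn()  # each call returns a fresh deepcopy, so tests do not share state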
def configure_tiny_bert_model():
try:
return copy.deepcopy(pytest.tiny_bert_model)
except AttributeError:
pytest.skip('Composer installed without NLP support')
def configure_tiny_bert_tokenizer():
try:
return copy.deepcopy(pytest.tiny_bert_tokenizer)
except AttributeError:
pytest.skip('Composer installed without NLP support')
def configure_tiny_bert_config():
try:
return copy.deepcopy(pytest.tiny_bert_config)
except AttributeError:
pytest.skip('Composer installed without NLP support')
def configure_tiny_bert_hf_model(use_logits=True):
return HuggingFaceModel(configure_tiny_bert_model(), configure_tiny_bert_tokenizer(), use_logits)
def configure_tiny_gpt2_model():
try:
return copy.deepcopy(pytest.tiny_gpt2_model)
except AttributeError:
pytest.skip('Composer installed without NLP support')
def configure_tiny_gpt2_tokenizer():
try:
return copy.deepcopy(pytest.tiny_gpt2_tokenizer)
except AttributeError:
pytest.skip('Composer installed without NLP support')
def configure_tiny_gpt2_config():
try:
return copy.deepcopy(pytest.tiny_gpt2_config)
except AttributeError:
pytest.skip('Composer installed without NLP support')
def configure_tiny_gpt2_hf_model(use_logits=True):
return HuggingFaceModel(configure_tiny_gpt2_model(), configure_tiny_gpt2_tokenizer(), use_logits)
def configure_tiny_t5_model():
try:
return copy.deepcopy(pytest.tiny_t5_model)
except AttributeError:
pytest.skip('Composer installed without NLP support')
def configure_tiny_t5_tokenizer():
try:
return copy.deepcopy(pytest.tiny_t5_tokenizer)
except AttributeError:
pytest.skip('Composer installed without NLP support')
def configure_tiny_t5_config():
try:
return copy.deepcopy(pytest.tiny_t5_config)
except AttributeError:
pytest.skip('Composer installed without NLP support')
def configure_tiny_t5_hf_model(use_logits=True):
return HuggingFaceModel(configure_tiny_t5_model(), configure_tiny_t5_tokenizer(), use_logits)
| composer-dev | tests/common/models.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from typing import Any, Dict
from composer.core import Callback, Event, State
from composer.loggers import Logger
class EventCounterCallback(Callback):
def __init__(self) -> None:
self.event_to_num_calls: Dict[Event, int] = {}
for event in Event:
self.event_to_num_calls[event] = 0
def run_event(self, event: Event, state: State, logger: Logger):
del state, logger # unused
self.event_to_num_calls[event] += 1
def state_dict(self) -> Dict[str, Any]:
return {'events': self.event_to_num_calls}
def load_state_dict(self, state: Dict[str, Any]) -> None:
self.event_to_num_calls.update(state['events'])
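# Illustrative sketch (not part of the original file) of how the callback is typically used:
# attach it to a Trainer, then inspect how many times a given event fired. The Trainer,
# SimpleModel, and dataloader names below are assumptions borrowed from elsewhere in the suite:
#
#     counter = EventCounterCallback()
#     trainer = Trainer(model=SimpleModel(), train_dataloader=dataloader,
#                       max_duration='1ep', callbacks=[counter])
#     trainer.fit()
#     assert counter.event_to_num_calls[Event.EPOCH_END] == 1  # one epoch trained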
| composer-dev | tests/common/events.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from typing import Sequence
import pytest
import torch
from PIL import Image
from torch.utils.data import DataLoader, Dataset, IterableDataset
from torchvision.datasets import VisionDataset
from composer.utils import dist
from tests.common.models import configure_tiny_bert_tokenizer, configure_tiny_gpt2_tokenizer
class InfiniteClassificationDataset(IterableDataset):
"""Classification dataset that never ends.
Args:
shape (Sequence[int]): shape of features (default: (1, 1, 1))
num_classes (int): number of classes (default: 2)
"""
def __init__(self, shape: Sequence[int] = (1, 1, 1), num_classes: int = 2):
self.shape = shape
self.num_classes = num_classes
def __iter__(self):
while True:
yield torch.randn(*self.shape), torch.randint(0, self.num_classes, size=(1,))[0]
class RandomClassificationDataset(Dataset):
"""Classification dataset drawn from a normal distribution.
Args:
shape (Sequence[int]): shape of features (default: (1, 1, 1))
size (int): number of samples (default: 100)
num_classes (int): number of classes (default: 2)
"""
def __init__(self, shape: Sequence[int] = (1, 1, 1), size: int = 100, num_classes: int = 2):
self.size = size
self.shape = shape
self.num_classes = num_classes
self.x = None
self.y = None
def __len__(self):
return self.size
def __getitem__(self, index: int):
# Note: lazily generate data so it runs after Composer seeds everything, giving the same
# dataset across multiple calls when using the same seed.
if self.x is None:
self.x = torch.randn(self.size, *self.shape)
if self.y is None:
self.y = torch.randint(0, self.num_classes, size=(self.size,))
return self.x[index], self.y[index]
class RandomImageDataset(VisionDataset):
""" Image Classification dataset with values drawn from a normal distribution
Args:
shape (Sequence[int]): shape of features. Defaults to (32, 32, 3)
size (int): number of samples (default: 100)
num_classes (int): number of classes (default: 2)
is_PIL (bool): if true, will emit image in PIL format (default: False)
"""
def __init__(self, shape: Sequence[int] = (3, 32, 32), size: int = 100, num_classes: int = 2, is_PIL: bool = False):
self.is_PIL = is_PIL
if is_PIL: # PIL expects HWC
shape = (shape[1], shape[2], shape[0])
self.shape = shape
self.num_classes = num_classes
self.size = size
self.x = None
self.y = None
super().__init__(root='')
def __len__(self):
return self.size
def __getitem__(self, index: int):
# Note: lazily generate data so it runs after Composer seeds everything, giving the same
# dataset across multiple calls when using the same seed.
if self.x is None:
self.x = torch.randn(self.size, *self.shape)
if self.y is None:
self.y = torch.randint(0, self.num_classes, size=(self.size,))
x = self.x[index]
y = self.y[index]
if self.is_PIL:
x = x.numpy()
x = (x - x.min())
x = (x * (255 / x.max())).astype('uint8')
x = Image.fromarray(x)
if self.transform is not None:
return self.transform(x), y
else:
return x, y
class RandomSegmentationDataset(VisionDataset):
""" Image Segmentation dataset with values drawn from a normal distribution
Args:
shape (Sequence[int]): shape of features. Defaults to (32, 32, 3)
size (int): number of samples (default: 100)
num_classes (int): number of classes (default: 2)
is_PIL (bool): if true, will emit image in PIL format (default: False)
"""
def __init__(self, shape: Sequence[int] = (3, 32, 32), size: int = 100, num_classes: int = 2, is_PIL: bool = False):
self.is_PIL = is_PIL
if is_PIL: # PIL expects HWC
shape = (shape[1], shape[2], shape[0])
self.shape = shape
self.num_classes = num_classes
self.size = size
self.x = None
self.y = None
super().__init__(root='')
def __len__(self):
return self.size
def __getitem__(self, index: int):
# Note: lazily generate data so it runs after Composer seeds everything, giving the same
# dataset across multiple calls when using the same seed.
if self.x is None:
self.x = torch.randn(self.size, *self.shape)
if self.y is None:
mask_shape = self.shape[:2] if self.is_PIL else self.shape[1:]
self.y = torch.randint(0, self.num_classes, size=(self.size, *mask_shape))
x = self.x[index]
y = self.y[index]
if self.is_PIL:
x = x.numpy()
x = (x - x.min())
x = (x * (255 / x.max())).astype('uint8')
x = Image.fromarray(x)
if self.transform is not None:
return self.transform(x), y
else:
return x, y
class RandomTextClassificationDataset(Dataset):
""" Text classification dataset with values (just input token ids) drawn uniformly
Args:
vocab_size (int): vocab size to use (default: 10)
size (int): number of samples (default: 100)
num_classes (int): number of classes (default: 2)
sequence_length (int): sequence length to use, all sequences will be of this length with no padding (default: 8)
        use_keys (bool): whether to return the item in a dictionary with keys for input and output (default: False)
"""
def __init__(self,
size: int = 100,
vocab_size: int = 10,
sequence_length: int = 8,
num_classes: int = 2,
use_keys: bool = False):
self.vocab_size = vocab_size
self.sequence_length = sequence_length
self.num_classes = num_classes
self.use_keys = use_keys
self.input_key = 'input_ids'
self.label_key = 'labels'
self.size = size
self.x = None
self.y = None
super().__init__()
def __len__(self):
return self.size
def __getitem__(self, index: int):
# Note: lazily generate data so it runs after Composer seeds everything, giving the same
# dataset across multiple calls when using the same seed.
if self.x is None:
self.x = torch.randint(low=0, high=self.vocab_size, size=(self.size, self.sequence_length))
if self.y is None:
self.y = torch.randint(low=0, high=self.num_classes, size=(self.size,))
x = self.x[index]
y = self.y[index]
if self.use_keys:
return {'input_ids': x, 'labels': y}
else:
return x, y
class RandomTextLMDataset(Dataset):
""" Text LM dataset with values (just input token ids) drawn uniformly
Args:
vocab_size (int): vocab size to use (default: 10)
size (int): number of samples (default: 100)
sequence_length (int): sequence length to use, all sequences will be of this length with no padding (default: 8)
        use_keys (bool): whether to return the item in a dictionary with keys for input and output (default: False)
"""
def __init__(self,
size: int = 100,
vocab_size: int = 10,
sequence_length: int = 8,
use_keys: bool = False,
use_token_type_ids: bool = True,
conditional_generation: bool = False):
self.vocab_size = vocab_size
self.sequence_length = sequence_length
self.use_keys = use_keys
self.use_token_type_ids = use_token_type_ids
self.conditional_generation = conditional_generation
self.input_key = 'input_ids'
self.size = size
self.x = None
self.y = None
super().__init__()
def __len__(self):
return self.size
def __getitem__(self, index: int):
# Note: lazily generate data so it runs after Composer seeds everything, giving the same
# dataset across multiple calls when using the same seed.
if self.x is None:
self.x = torch.randint(low=0, high=self.vocab_size, size=(self.size, self.sequence_length))
if self.conditional_generation:
self.y = torch.randint(low=0, high=self.vocab_size, size=(self.size, 2 * self.sequence_length))
x = self.x[index]
if self.use_keys:
output = {'input_ids': x}
if self.use_token_type_ids:
output['token_type_ids'] = torch.zeros_like(x)
if self.y is not None:
output['labels'] = self.y[index]
return output
else:
return x if self.y is None else (x, self.y[index])
class SimpleDataset(Dataset):
def __init__(self, size: int = 256, batch_size: int = 256, feature_size: int = 1, num_classes: int = 2):
self.size = size
self.batch_size = batch_size
self.feature_size = feature_size
self.num_classes = num_classes
self.x = None
self.y = None
def __len__(self):
return self.size
def __getitem__(self, index: int):
# Note: lazily generate data so it runs after Composer seeds everything, giving the same
# dataset across multiple calls when using the same seed.
if self.x is None:
self.x = torch.randn(self.size * self.batch_size, self.feature_size)
if self.y is None:
self.y = torch.randint(0, self.num_classes, size=(self.size * self.batch_size,), dtype=torch.long)
return self.x[index * self.batch_size:(index + 1) *
self.batch_size], self.y[index * self.batch_size:(index + 1) * self.batch_size]
def dummy_transformer_classifier_batch(vocab_size=100, num_classes=2):
sequence_length = 32
size = 8
batch_size = 8
train_dataset = RandomTextClassificationDataset(size=size,
vocab_size=vocab_size,
sequence_length=sequence_length,
num_classes=num_classes)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, sampler=dist.get_sampler(train_dataset))
return next(iter(train_dataloader))
def dummy_tiny_bert_classification_batch(num_classes=2):
vocab_size = 30522 # Match bert vocab size
sequence_length = 4
size = 8
batch_size = 8
train_dataset = RandomTextClassificationDataset(size=size,
vocab_size=vocab_size,
sequence_length=sequence_length,
num_classes=num_classes,
use_keys=True)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, sampler=dist.get_sampler(train_dataset))
batch = next(iter(train_dataloader))
return batch
def dummy_tiny_bert_lm_batch():
vocab_size = 30522 # Match bert vocab size
sequence_length = 4
size = 8
batch_size = 8
train_dataset = RandomTextLMDataset(size=size,
vocab_size=vocab_size,
sequence_length=sequence_length,
use_keys=True)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, sampler=dist.get_sampler(train_dataset))
batch = next(iter(train_dataloader))
return batch
def dummy_hf_lm_dataloader(size: int, vocab_size: int, sequence_length: int, collate_fn=None):
batch_size = 2
dataset = RandomTextLMDataset(size=size, vocab_size=vocab_size, sequence_length=sequence_length, use_keys=True)
dataloader = DataLoader(dataset, batch_size=batch_size, sampler=dist.get_sampler(dataset), collate_fn=collate_fn)
return dataloader
def dummy_bert_lm_dataloader(sequence_length=4, size=4):
transformers = pytest.importorskip('transformers')
tokenizer = configure_tiny_bert_tokenizer()
collate_fn = transformers.data.data_collator.DataCollatorForLanguageModeling(tokenizer=tokenizer,
mlm=True,
mlm_probability=0.15)
return dummy_hf_lm_dataloader(vocab_size=30522, sequence_length=sequence_length, size=size, collate_fn=collate_fn)
def dummy_gpt_lm_dataloader(sequence_length=4, size=4):
transformers = pytest.importorskip('transformers')
tokenizer = configure_tiny_gpt2_tokenizer()
collate_fn = transformers.data.data_collator.DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
return dummy_hf_lm_dataloader(vocab_size=50257, sequence_length=sequence_length, size=size, collate_fn=collate_fn)
def dummy_text_classification_dataloader():
dataset = RandomTextClassificationDataset(size=8)
dataloader = DataLoader(dataset, batch_size=4, sampler=dist.get_sampler(dataset))
return dataloader
| composer-dev | tests/common/datasets.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import types
from typing import List, Type
from tests.common.compare import deep_compare
from tests.common.datasets import (InfiniteClassificationDataset, RandomClassificationDataset, RandomImageDataset,
RandomSegmentationDataset, RandomTextClassificationDataset, SimpleDataset)
from tests.common.events import EventCounterCallback
from tests.common.markers import device, world_size
from tests.common.models import (ConvModel, EmbeddedWeightTiedModel, SimpleConvModel, SimpleModel,
SimpleModelWithDropout, SimpleTransformerClassifier, SimpleWeightTiedModel)
from tests.common.state import assert_state_equivalent
def get_module_subclasses(module: types.ModuleType, cls: Type) -> List[Type]:
"""Get all implementations of a class in a __module__ by scanning the re-exports from __init__.py"""
return [x for x in vars(module).values() if isinstance(x, type) and issubclass(x, cls) and x is not cls]
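# Illustrative sketch (not part of the original file), assuming a module whose __init__.py
# re-exports Callback implementations:
#
#     import composer.callbacks
#     from composer.core import Callback
#     callback_types = get_module_subclasses(composer.callbacks, Callback)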
__all__ = [
'assert_state_equivalent',
'RandomClassificationDataset',
'RandomTextClassificationDataset',
'RandomImageDataset',
'RandomSegmentationDataset',
'ConvModel',
'SimpleConvModel',
'SimpleModel',
'SimpleTransformerClassifier',
'EmbeddedWeightTiedModel',
'SimpleWeightTiedModel',
'EventCounterCallback',
'deep_compare',
'device',
'world_size',
'get_module_subclasses',
'SimpleModelWithDropout',
'SimpleDataset',
'InfiniteClassificationDataset',
]
| composer-dev | tests/common/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Pytest marker helpers."""
from typing import Callable
import pytest
from composer.core import Precision
def device(*args, precision=False):
"""Decorator for device and optionally precision.
    Input choices are ('cpu', 'gpu'); if precision=True,
    ('gpu-amp', 'gpu-fp32', and 'cpu-fp32') are also accepted.
Returns the parameter "device", or if precision=True,
also returns the parameter "precision".
"""
# convert cpu-fp32 and gpu-fp32 to cpu, gpu
if not precision and any(['-' in arg for arg in args]):
raise ValueError('-fp32 and -amp tags must be removed if precision=False')
args = [arg.replace('-fp32', '') for arg in args]
if precision:
devices = {
'cpu': pytest.param('cpu', Precision.FP32, id='cpu-fp32'),
'gpu': pytest.param('gpu', Precision.FP32, id='gpu-fp32', marks=pytest.mark.gpu),
'gpu-amp': pytest.param('gpu', Precision.AMP_FP16, id='gpu-amp', marks=pytest.mark.gpu)
}
name = 'device,precision'
else:
devices = {
'cpu': pytest.param('cpu', id='cpu'),
'gpu': pytest.param('gpu', id='gpu', marks=pytest.mark.gpu),
}
name = 'device'
parameters = [devices[arg] for arg in args]
def decorator(test):
if not parameters:
return test
return pytest.mark.parametrize(name, parameters)(test)
return decorator
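# Illustrative sketch (not part of the original file) of the `device` marker above; the
# test names and bodies are assumptions for demonstration only:
#
#     @device('cpu', 'gpu')
#     def test_runs_on_device(device: str):
#         ...
#
#     @device('cpu', 'gpu', 'gpu-amp', precision=True)
#     def test_runs_with_precision(device: str, precision: Precision):
#         ...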
def world_size(*world_sizes: int, param_name: str = 'world_size'):
"""Decorator to mark tests with a given world size. This helper automatically sets the `pytest.mark.world_size`
marker.
Args:
world_sizes (int): The world sizes.
param_name (str, optional): The parameter name for the `world_size` parameter. Defaults to ``'world_size'``.
Example:
>>> @world_size(1, 2)
def test_something(world_size: int):
...
"""
parameters = []
for world_size in world_sizes:
if world_size == 1:
parameters.append(pytest.param(1))
else:
parameters.append(pytest.param(2, marks=pytest.mark.world_size(2)))
def decorator(test: Callable):
if len(parameters) == 0:
return test
return pytest.mark.parametrize(param_name, parameters)(test)
return decorator
| composer-dev | tests/common/markers.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import datetime
from typing import Any, Dict, List, Tuple, Union
import numpy as np
import torch
import torchmetrics
from composer import Time
from composer.core.time import TimeUnit
def deep_compare(item1: Any, item2: Any, atol: float = 0.0, rtol: float = 0.0):
"""Compare two items recursively. Supports dicts, lists, tuples, tensors, numpy arrays, Composer Time objects, and callables.
Args:
item1 (Any): The first item
item2 (Any): The second item
        atol (float): Atol tolerance for torch tensors and numpy arrays (default: 0.0)
rtol (float): Rtol tolerance for torch tensors and numpy arrays (default: 0.0)
"""
return _check_item(item1, item2, path='', atol=atol, rtol=rtol)
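# Illustrative sketch (not part of the original file): nested containers are compared
# recursively, and atol/rtol apply to tensor and ndarray leaves:
#
#     deep_compare({'a': [torch.ones(2), 1]}, {'a': [torch.ones(2), 1]})    # passes
#     deep_compare(torch.ones(2), torch.ones(2) + 1e-3, atol=1e-2)          # passes
#     deep_compare({'a': 1}, {'a': 2})                                      # raises AssertionError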
def _check_item(item1: Any, item2: Any, path: str, rtol: float = 0.0, atol: float = 0.0):
if item1 is None:
assert item2 is None, f'{path} differs: {item1} != {item2}'
return
if isinstance(item1, (str, float, int, bool, Time, datetime.timedelta, TimeUnit)):
assert type(item1) == type(item2)
assert item1 == item2, f'{path} differs: {item1} != {item2}'
return
if isinstance(item1, torch.Tensor):
assert isinstance(item2, torch.Tensor)
assert item1.allclose(item2, rtol=rtol, atol=atol), f'{path} differs'
return
if isinstance(item1, np.ndarray):
assert isinstance(item2, np.ndarray)
        assert np.allclose(item1, item2, rtol=rtol, atol=atol), f'{path} differs'
return
if isinstance(item1, dict):
assert isinstance(item2, dict), f'{path} differs: {item1} != {item2}'
_check_dict_recursively(item1, item2, path, atol=atol, rtol=rtol)
return
if isinstance(item1, (tuple, list)):
assert isinstance(item2, type(item1)), f'{path} differs: {item1} != {item2}'
_check_list_recursively(item1, item2, path, atol=atol, rtol=rtol)
return
if isinstance(item1, torchmetrics.Metric):
assert isinstance(item2, torchmetrics.Metric), f'{path} differs: {item1} != {item2}'
assert item1.compute() == item2.compute(), f'{path} differs: {item1.compute()} != {item2.compute()}'
return
raise NotImplementedError(f'Unsupported item type: {type(item1)}')
def _check_list_recursively(
list1: Union[Tuple[Any], List[Any]],
list2: Union[Tuple[Any], List[Any]],
path: str,
atol: float,
rtol: float,
):
assert len(list1) == len(list2), f'{path} differs: {list1} != {list2}'
for i, (item1, item2) in enumerate(zip(list1, list2)):
_check_item(item1, item2, path=f'{path}/{i}', atol=atol, rtol=rtol)
def _check_dict_recursively(dict1: Dict[str, Any], dict2: Dict[str, Any], path: str, atol: float, rtol: float):
assert len(dict1) == len(dict2), f'{path} differs: {dict1} != {dict2}'
for k, val1 in dict1.items():
val2 = dict2[k]
_check_item(val1, val2, path=f'{path}/{k}', atol=atol, rtol=rtol)
| composer-dev | tests/common/compare.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from typing import Any, Dict
from composer.core import State
from composer.utils import is_model_deepspeed
from tests.common.compare import deep_compare
def _del_wct_timestamp_fields(timestamp_state_dict: Dict[str, Any]):
del timestamp_state_dict['Timestamp']['total_wct']
del timestamp_state_dict['Timestamp']['epoch_wct']
del timestamp_state_dict['Timestamp']['batch_wct']
def assert_state_equivalent(state1: State, state2: State):
"""Assert that ``state1`` is equivalent to ``state2``, ignoring wall clock timestamp fields."""
assert state1.serialized_attributes == state2.serialized_attributes
assert is_model_deepspeed(state1.model) == is_model_deepspeed(state2.model)
# Using a loose tolerance for GPU states as GPU determinism does not work properly
is_gpu = next(state1.model.parameters()).device.type == 'cuda'
atol = 0.1 if is_gpu else 0.0
rtol = 0.1 if is_gpu else 0.0
state_dict_1 = state1.state_dict()
state_dict_2 = state2.state_dict()
# Remove any wall clock timestamp fields
_del_wct_timestamp_fields(state_dict_1['timestamp'])
_del_wct_timestamp_fields(state_dict_2['timestamp'])
# Remove run_name since we use timestamp as part of name
del state_dict_1['run_name']
del state_dict_2['run_name']
# Remove algorithm representations which are memory addresses
for i, algo_info in enumerate(state_dict_1['algorithms']):
if '0x' in algo_info[1]['repr']:
del state_dict_1['algorithms'][i]
for i, algo_info in enumerate(state_dict_2['algorithms']):
if '0x' in algo_info[1]['repr']:
del state_dict_2['algorithms'][i]
# Compare the state dicts
deep_compare(state_dict_1, state_dict_2, atol=atol, rtol=rtol)
| composer-dev | tests/common/state.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import logging
import os
import pathlib
import pytest
import tqdm.std
import composer
from composer.devices import DeviceCPU, DeviceGPU
from composer.utils import dist, reproducibility
@pytest.fixture(autouse=True)
def disable_tokenizer_parallelism():
"""This fixture prevents the below warning from appearing in tests:
huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...
To disable this warning, you can either:
- Avoid using `tokenizers` before the fork if possible
- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)
"""
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
@pytest.fixture(autouse=True)
def disable_wandb(monkeypatch: pytest.MonkeyPatch, request: pytest.FixtureRequest):
monkeypatch.setenv('WANDB_START_METHOD', 'thread')
if request.node.get_closest_marker('remote') is None:
monkeypatch.setenv('WANDB_MODE', 'offline')
else:
if not os.environ.get('WANDB_PROJECT'):
monkeypatch.setenv('WANDB_PROJECT', 'pytest')
@pytest.fixture(autouse=True, scope='session')
def configure_dist(request: pytest.FixtureRequest):
# Configure dist globally when the world size is greater than 1,
# so individual tests that do not use the trainer
# do not need to worry about manually configuring dist.
if dist.get_world_size() == 1:
return
device = None
for item in request.session.items:
device = DeviceCPU() if item.get_closest_marker('gpu') is None else DeviceGPU()
break
assert device is not None
if not dist.is_initialized():
dist.initialize_dist(device, timeout=300.0)
# Hold PyTest until all ranks have reached this barrier. Ensure that no rank starts
# any test before other ranks are ready to start it, which could be a cause of random timeouts
# (e.g. rank 1 starts the next test while rank 0 is finishing up the previous test).
dist.barrier()
@pytest.fixture(autouse=True)
def chdir_to_tmp_path(tmp_path: pathlib.Path):
os.chdir(tmp_path)
@pytest.fixture(autouse=True, scope='session')
def disable_tqdm_bars():
# Disable tqdm progress bars globally in tests
original_tqdm_init = tqdm.std.tqdm.__init__
def new_tqdm_init(*args, **kwargs):
if 'disable' not in kwargs:
kwargs['disable'] = True
return original_tqdm_init(*args, **kwargs)
# Not using pytest monkeypatch as it is a function-scoped fixture
tqdm.std.tqdm.__init__ = new_tqdm_init
@pytest.fixture(autouse=True)
def set_loglevels():
"""Ensures all log levels are set to DEBUG."""
logging.basicConfig()
logging.getLogger(composer.__name__).setLevel(logging.DEBUG)
@pytest.fixture(autouse=True)
def seed_all(rank_zero_seed: int, monkeypatch: pytest.MonkeyPatch):
"""Monkeypatch reproducibility get_random_seed to always return the rank zero seed, and set the random seed before
each test to the rank local seed."""
monkeypatch.setattr(reproducibility, 'get_random_seed', lambda: rank_zero_seed)
reproducibility.seed_all(rank_zero_seed + dist.get_global_rank())
| composer-dev | tests/fixtures/autouse_fixtures.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
| composer-dev | tests/fixtures/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""These fixtures are shared globally across the test suite."""
import copy
import time
import coolname
import pytest
import torch
from torch.utils.data import DataLoader
from composer.core import State
from composer.devices import DeviceCPU, DeviceGPU
from composer.loggers import Logger
from composer.utils import dist
from tests.common import RandomClassificationDataset, SimpleModel
from tests.conftest import _get_option
@pytest.fixture
def rank_zero_seed(pytestconfig: pytest.Config) -> int:
"""Read the rank_zero_seed from the CLI option."""
seed = _get_option(pytestconfig, 'seed', default='0')
return int(seed)
@pytest.fixture
def minimal_state(rank_zero_seed: int, request: pytest.FixtureRequest):
"""Most minimally defined state possible.
Tests should configure the state for their specific needs.
"""
device = None
for item in request.session.items:
device = DeviceCPU() if item.get_closest_marker('gpu') is None else DeviceGPU()
break
    assert device is not None
return State(
model=SimpleModel(),
run_name='minimal_run_name',
device=device,
rank_zero_seed=rank_zero_seed,
max_duration='100ep',
dataloader=DataLoader(RandomClassificationDataset()),
dataloader_label='train',
)
@pytest.fixture()
def dummy_state(
rank_zero_seed: int,
request: pytest.FixtureRequest,
) -> State:
model = SimpleModel()
if request.node.get_closest_marker('gpu') is not None:
# If using `dummy_state`, then not using the trainer, so move the model to the correct device
model = model.cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda _: 1.0)
device = None
for item in request.session.items:
device = DeviceCPU() if item.get_closest_marker('gpu') is None else DeviceGPU()
break
    assert device is not None
state = State(
model=model,
run_name='dummy_run_name',
device=device,
precision='fp32',
device_train_microbatch_size=1,
rank_zero_seed=rank_zero_seed,
optimizers=optimizer,
max_duration='10ep',
)
state.schedulers = scheduler
state.set_dataloader(DataLoader(RandomClassificationDataset()), 'train')
return state
@pytest.fixture
def empty_logger(minimal_state: State) -> Logger:
"""Logger without any output configured."""
return Logger(state=minimal_state, destinations=[])
@pytest.fixture(scope='session')
def test_session_name(configure_dist: None) -> str:
"""Generate a random name for the test session that is the same on all ranks."""
del configure_dist # unused
generated_session_name = str(int(time.time())) + '-' + coolname.generate_slug(2)
name_list = [generated_session_name]
# ensure all ranks have the same name
dist.broadcast_object_list(name_list)
return name_list[0]
@pytest.fixture
def sftp_uri():
return 'sftp://localhost'
@pytest.fixture
def s3_bucket(request: pytest.FixtureRequest):
if request.node.get_closest_marker('remote') is None:
return 'my-bucket'
else:
return _get_option(request.config, 's3_bucket')
# Note: These session scoped fixtures should not be used directly in tests, but the non session scoped fixtures
# below should be used instead. This is because the session scoped fixtures return the same object to every
# test that requests it, so tests would have side effects on each other. Instead, the non session
# scoped fixtures below perform a deepcopy before returning the fixture.
def tiny_bert_model_helper(config):
transformers = pytest.importorskip('transformers')
return transformers.AutoModelForMaskedLM.from_config(config) # type: ignore (thirdparty)
@pytest.fixture(scope='session')
def _session_tiny_bert_model(_session_tiny_bert_config): # type: ignore
return tiny_bert_model_helper(_session_tiny_bert_config)
def tiny_bert_tokenizer_helper():
transformers = pytest.importorskip('transformers')
return transformers.AutoTokenizer.from_pretrained('bert-base-uncased')
@pytest.fixture(scope='session')
def _session_tiny_bert_tokenizer(): # type: ignore
return tiny_bert_tokenizer_helper()
def tiny_bert_config_helper():
transformers = pytest.importorskip('transformers')
tiny_overrides = {
'hidden_size': 128,
'num_attention_heads': 2,
'num_hidden_layers': 2,
'intermediate_size': 512,
}
return transformers.AutoConfig.from_pretrained('bert-base-uncased', **tiny_overrides)
@pytest.fixture(scope='session')
def _session_tiny_bert_config(): # type: ignore
return tiny_bert_config_helper()
def tiny_gpt2_model_helper(config):
transformers = pytest.importorskip('transformers')
return transformers.AutoModelForCausalLM.from_config(config)
@pytest.fixture(scope='session')
def _session_tiny_gpt2_model(_session_tiny_gpt2_config): # type: ignore
return tiny_gpt2_model_helper(_session_tiny_gpt2_config)
def tiny_gpt2_config_helper():
transformers = pytest.importorskip('transformers')
tiny_overrides = {
'n_embd': 2,
'n_head': 2,
'n_layer': 2,
'vocab_size': 50258 # 50257 + 1 for pad token
}
return transformers.AutoConfig.from_pretrained('gpt2', **tiny_overrides)
@pytest.fixture(scope='session')
def _session_tiny_gpt2_config(): # type: ignore
return tiny_gpt2_config_helper()
def tiny_gpt2_tokenizer_helper():
transformers = pytest.importorskip('transformers')
hf_tokenizer = transformers.AutoTokenizer.from_pretrained('gpt2')
hf_tokenizer.add_special_tokens({'pad_token': '[PAD]'})
return hf_tokenizer
@pytest.fixture(scope='session')
def _session_tiny_gpt2_tokenizer(): # type: ignore
return tiny_gpt2_tokenizer_helper()
def tiny_t5_config_helper():
transformers = pytest.importorskip('transformers')
tiny_overrides = {'d_ff': 128, 'd_model': 64, 'num_layers': 2, 'num_decoder_layers': 2, 'num_heads': 2}
return transformers.AutoConfig.from_pretrained('t5-small', **tiny_overrides)
@pytest.fixture(scope='session')
def _session_tiny_t5_config(): # type: ignore
return tiny_t5_config_helper()
def tiny_t5_tokenizer_helper():
transformers = pytest.importorskip('transformers')
hf_tokenizer = transformers.AutoTokenizer.from_pretrained('t5-small', model_max_length=512)
return hf_tokenizer
@pytest.fixture(scope='session')
def _session_tiny_t5_tokenizer(): # type: ignore
return tiny_t5_tokenizer_helper()
def tiny_t5_model_helper(config):
transformers = pytest.importorskip('transformers')
return transformers.T5ForConditionalGeneration(config=config)
@pytest.fixture(scope='session')
def _session_tiny_t5_model(_session_tiny_t5_config): # type: ignore
return tiny_t5_model_helper(_session_tiny_t5_config)
@pytest.fixture
def tiny_bert_model(_session_tiny_bert_model):
return copy.deepcopy(_session_tiny_bert_model)
@pytest.fixture
def tiny_bert_tokenizer(_session_tiny_bert_tokenizer):
return copy.deepcopy(_session_tiny_bert_tokenizer)
@pytest.fixture
def tiny_bert_config(_session_tiny_bert_config):
return copy.deepcopy(_session_tiny_bert_config)
@pytest.fixture
def tiny_gpt2_config(_session_tiny_gpt2_config):
return copy.deepcopy(_session_tiny_gpt2_config)
@pytest.fixture
def tiny_gpt2_tokenizer(_session_tiny_gpt2_tokenizer):
return copy.deepcopy(_session_tiny_gpt2_tokenizer)
@pytest.fixture
def tiny_gpt2_model(_session_tiny_gpt2_model):
return copy.deepcopy(_session_tiny_gpt2_model)
@pytest.fixture
def tiny_t5_config(_session_tiny_t5_config):
return copy.deepcopy(_session_tiny_t5_config)
@pytest.fixture
def tiny_t5_tokenizer(_session_tiny_t5_tokenizer):
return copy.deepcopy(_session_tiny_t5_tokenizer)
@pytest.fixture
def tiny_t5_model(_session_tiny_t5_model):
return copy.deepcopy(_session_tiny_t5_model)
| composer-dev | tests/fixtures/fixtures.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
| composer-dev | tests/profiler/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from unittest.mock import MagicMock
import pytest
from composer.core import State
from composer.profiler import Profiler, ProfilerAction, SystemProfiler, TorchProfiler, cyclic_schedule
@pytest.mark.parametrize('repeat', [1, 0])
def test_cyclic_schedule(dummy_state: State, repeat: int):
# tests that get_action works correctly given the state
skip_first = 1
wait = 2
warmup = 3
active = 4
    schedule = cyclic_schedule(skip_first=skip_first, wait=wait, warmup=warmup, active=active, repeat=repeat)
assert schedule(dummy_state) == ProfilerAction.SKIP # skip first epoch
for _ in range(skip_first):
dummy_state.timestamp = dummy_state.timestamp.to_next_batch()
assert schedule(dummy_state) == ProfilerAction.SKIP
for _ in range(wait):
dummy_state.timestamp = dummy_state.timestamp.to_next_batch()
assert schedule(dummy_state) == ProfilerAction.WARMUP
for _ in range(warmup):
dummy_state.timestamp = dummy_state.timestamp.to_next_batch()
assert schedule(dummy_state) == ProfilerAction.ACTIVE
for _ in range(active + wait + warmup):
dummy_state.timestamp = dummy_state.timestamp.to_next_batch()
if repeat == 0:
assert schedule(dummy_state) == ProfilerAction.ACTIVE
else:
assert schedule(dummy_state) == ProfilerAction.SKIP
def test_profiler_init(minimal_state: State):
# Construct a profiler and assert that it created the correct callbacks from the arguments
mock_trace_handler = MagicMock()
profiler = Profiler(
trace_handlers=[mock_trace_handler],
schedule=cyclic_schedule(),
torch_prof_profile_memory=True,
sys_prof_cpu=True,
)
profiler.bind_to_state(minimal_state)
assert any(isinstance(cb, TorchProfiler) for cb in minimal_state.callbacks)
assert any(isinstance(cb, SystemProfiler) for cb in minimal_state.callbacks)
def test_marker(dummy_state: State):
mock_trace_handler = MagicMock()
profiler = Profiler(
trace_handlers=[mock_trace_handler],
schedule=cyclic_schedule(),
)
profiler.bind_to_state(dummy_state)
dummy_state.profiler = profiler
marker = profiler.marker('name',
actions=[ProfilerAction.SKIP, ProfilerAction.WARMUP, ProfilerAction.ACTIVE],
categories=['cat1'])
marker.start() # call #1
with pytest.raises(RuntimeError):
marker.start() # cannot call start twice without finishing
marker.finish() # call #2
with pytest.raises(RuntimeError):
marker.finish() # cannot call finish twice without a start before
with marker:
pass # call #3 and #4
@marker
def func_to_profile(foo: str):
assert foo == 'hi'
func_to_profile(foo='hi') # call 5 and 6
@marker()
def func_to_profile2(bar: int):
assert bar == 6
func_to_profile2(bar=6) # call 7 and 8
marker.instant()
assert mock_trace_handler.process_duration_event.call_count == 8
assert mock_trace_handler.process_instant_event.call_count == 1
| composer-dev | tests/profiler/test_profiler.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import json
import os
import pathlib
import pytest
from torch.utils.data import DataLoader
from composer.profiler import Profiler
from composer.profiler.json_trace_handler import JSONTraceHandler
from composer.profiler.profiler_schedule import cyclic_schedule
from composer.trainer import Trainer
from tests.common import RandomClassificationDataset, SimpleModel
# This test shouldn't run with the Torch profiler enabled; not providing a model or data can cause a seg fault
@pytest.mark.filterwarnings(
r'ignore:The profiler is enabled\. Using the profiler adds additional overhead when training\.:UserWarning')
def test_json_trace_profiler_handler(tmp_path: pathlib.Path):
# Construct the trainer
profiler = Profiler(
schedule=cyclic_schedule(wait=0, warmup=0, active=1000, repeat=0),
trace_handlers=[JSONTraceHandler(
folder=str(tmp_path),
merged_trace_filename='trace.json',
)],
sys_prof_cpu=False,
sys_prof_net=False,
sys_prof_disk=False,
sys_prof_memory=False,
torch_prof_record_shapes=False,
torch_prof_profile_memory=False,
torch_prof_with_stack=False,
torch_prof_with_flops=False,
)
trainer = Trainer(
model=SimpleModel(),
train_dataloader=DataLoader(RandomClassificationDataset()),
max_duration='2ep',
profiler=profiler,
)
# Train
trainer.fit()
# Validate that the trace file contains expected events
profiler_file = os.path.join(tmp_path, 'trace.json')
with open(profiler_file, 'r') as f:
trace_json = json.load(f)
has_epoch_start_event = False
has_epoch_end_event = False
for event in trace_json:
if event['name'] == 'event/epoch' and event['ph'] == 'B':
has_epoch_start_event = True
if event['name'] == 'event/epoch' and event['ph'] == 'E':
has_epoch_end_event = True
assert has_epoch_start_event
assert has_epoch_end_event
| composer-dev | tests/profiler/test_json_trace_handler.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from typing import List
import numpy as np
import pytest
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import ExponentialLR
from torch.utils.data import DataLoader
from composer import Trainer
from composer.core import Callback, PyTorchScheduler, State, TimeUnit
from composer.loggers.logger import Logger
from composer.optim import MultiStepScheduler
from composer.trainer._scale_schedule import scale_pytorch_scheduler
from tests.common.datasets import RandomClassificationDataset
from tests.common.models import SimpleModel
@pytest.fixture
def optimizer():
return torch.optim.SGD(SimpleModel().parameters(), lr=1.0)
def flatten(lst: list):
return [x for sublst in lst for x in sublst]
@pytest.mark.parametrize('ssr', [0.5, 0.75, 1.0])
@pytest.mark.filterwarnings(r'ignore:.*Detected call of \`lr_schedule.*:UserWarning')
class TestScaleSchedule():
@staticmethod
def _test(targets: List[float], scheduler: PyTorchScheduler, epochs: int, optimizer: Optimizer, ssr: float):
scale_pytorch_scheduler(scheduler, ssr)
for epoch in range(epochs):
for param_group in optimizer.param_groups:
torch.testing.assert_close(targets[epoch], param_group['lr'])
scheduler.step()
def test_scale_schedule_step_lr(self, optimizer: Optimizer, ssr: float):
epochs = int(9 * ssr)
step_size = int(3 * ssr)
gamma = 0.1
targets = flatten([[1.0 * (gamma**n)] * step_size for n in range(30)])
targets = targets[:epochs]
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1)
self._test(targets, scheduler, epochs, optimizer, ssr)
def test_scale_schedule_multistep_lr(self, optimizer: Optimizer, ssr: float):
epochs = int(9 * ssr)
milestones = np.diff([0, int(2 * ssr), int(7 * ssr), epochs])
gamma = 0.1
targets = flatten([[1.0 * (gamma**n)] * ms for n, ms in enumerate(milestones)])
targets = targets[:epochs]
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [2, 7], gamma)
self._test(targets, scheduler, epochs, optimizer, ssr)
def test_scale_schedule_exponential(self, optimizer: Optimizer, ssr: float):
epochs = int(9 * ssr)
targets = [1.0 * 0.1**(x / ssr) for x in range(epochs)]
scheduler = ExponentialLR(optimizer, gamma=0.1)
self._test(targets, scheduler, epochs, optimizer, ssr)
@pytest.mark.xfail
def test_scale_schedule_cosine(self, optimizer: Optimizer, ssr: float):
raise NotImplementedError
@pytest.mark.xfail
def test_scale_schedule_cosine_warm_restarts(self, optimizer: Optimizer, ssr: float):
raise NotImplementedError
class CheckScaleSchedule(Callback):
def __init__(self, ssr: float) -> None:
self.ssr = ssr
def fit_start(self, state: State, logger: Logger) -> None:
scheduler = state.schedulers[0]
test_steps = [int(20 * self.ssr), int(40 * self.ssr), int(60 * self.ssr)]
target_lrs = [1.0, 0.1, 0.01]
current_step = 0
for test_step, target_lr in zip(test_steps, target_lrs):
while current_step < test_step:
state.timestamp = state.timestamp.to_next_batch()
current_step += 1
scheduler.step()
assert scheduler.get_last_lr()[0] == pytest.approx(target_lr)
@pytest.mark.parametrize('ssr', [0.5, 0.75, 1.0])
class TestScaleScheduleTrainer():
@pytest.mark.filterwarnings(r'ignore:.*Detected call of \`lr_schedule.*:UserWarning')
def test_epochs_scaled(
self,
ssr: float,
):
model = SimpleModel()
optimizers = torch.optim.SGD(model.parameters(), lr=1.0)
trainer = Trainer(
model=model,
train_dataloader=DataLoader(RandomClassificationDataset()),
optimizers=optimizers,
schedulers=[MultiStepScheduler(milestones=['30ba', '50ba'], gamma=0.1)],
scale_schedule_ratio=ssr,
callbacks=[CheckScaleSchedule(ssr)],
max_duration='10ep',
)
trainer.state.train_metrics = {} # avoid metrics construction
assert trainer.state.max_duration is not None
assert trainer.state.max_duration.unit == TimeUnit.EPOCH
assert trainer.state.max_duration.value == int(10 * ssr)
trainer.fit()
| composer-dev | tests/trainer/test_scale_schedule.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
from packaging import version
from torch.utils.data import DataLoader
from composer.models import ComposerClassifier
from composer.trainer.trainer import Trainer
from composer.utils import dist
from tests.common import EmbeddedWeightTiedModel, RandomClassificationDataset, SimpleWeightTiedModel
@pytest.mark.parametrize('model', [SimpleWeightTiedModel, EmbeddedWeightTiedModel])
@pytest.mark.parametrize('mixed_precision', ['FULL', 'DEFAULT', 'PURE'])
@pytest.mark.parametrize('device', ['cpu', 'meta'])
@pytest.mark.parametrize('reentrant', [True, False])
@pytest.mark.filterwarnings('ignore::UserWarning')
@pytest.mark.gpu
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse('1.13.0'),
reason='requires PyTorch 1.13 or higher')
def test_fsdp_device_initialization(model: ComposerClassifier, mixed_precision: str, device: str, reentrant: bool):
"""test FSDP device initialization for a simple model with weight tying and a model where two modules
from separate submodules have weight tying applied. This test also covers both 'cpu' and
'meta' devices. This is because 'meta' will result in deferred initialization until FSDP is initialized
"""
num_classes = 10
model = model(num_features=num_classes, device=device)
dataset = RandomClassificationDataset(shape=(num_classes,), size=2, num_classes=num_classes)
dataloader = DataLoader(dataset, sampler=dist.get_sampler(dataset))
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
trainer = Trainer(
model=model,
optimizers=optimizer,
train_dataloader=dataloader,
fsdp_config={
'activation_checkpointing_reentrant': reentrant,
'mixed_precision': mixed_precision
},
max_duration='3ba',
)
trainer.fit()
if isinstance(model, SimpleWeightTiedModel):
with trainer.state.model.module.summon_full_params(trainer.state.model.module): # type: ignore
weight_1 = model.mlp.fc1.weight
weight_2 = model.mlp.fc2.weight
assert (id(weight_1) == id(weight_2))
assert (torch.equal(weight_1, weight_2))
if isinstance(model, EmbeddedWeightTiedModel):
with trainer.state.model.module.summon_full_params(trainer.state.model.module): # type: ignore
weight_1 = model.net1.fc1.weight
weight_2 = model.net2.fc1.weight
assert (id(weight_1) == id(weight_2))
assert (torch.equal(weight_1, weight_2))
| composer-dev | tests/trainer/test_fsdp.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import os
import pathlib
import pytest
import torch
import torch.distributed
from packaging import version
from torch.utils.data import DataLoader
import composer.core.types as types
from composer import Callback, Event
from composer.core import State
from composer.datasets.synthetic import SyntheticBatchPairDataset
from composer.loggers import Logger
from composer.trainer.trainer import Trainer
from composer.utils import dist
from tests.common import SimpleModel
def get_file_path(*, is_train: bool, tmp_path: pathlib.Path) -> str:
train_str = 'train' if is_train else 'val'
file_path = os.path.join(tmp_path, f'{train_str}_num_accesses')
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def get_batch_file_path(*, epoch: int, is_train: bool, tmp_path: pathlib.Path) -> str:
train_str = 'train' if is_train else 'val'
file_path = os.path.join(tmp_path, f'{train_str}-epoch-{epoch}-batch0.pt')
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
class TrackedDataset(types.Dataset):
"""TrackedDataset atomically writes a file every time a record is accessed.
It is thread-safe and subprocess-safe, and is useful to measure how many times a sample is accessed. Because of
atomic file writes, it is slow and should not be used in any performance measurements.
"""
def __init__(self, is_train: bool, synthetic_dataset: SyntheticBatchPairDataset, tmp_path: pathlib.Path):
self.dataset = synthetic_dataset
self.is_train = is_train
self.tmp_path = tmp_path
self.counter = 0
def __getitem__(self, idx: int):
self.counter += 1
with open(get_file_path(tmp_path=self.tmp_path, is_train=self.is_train), 'w+') as f:
f.write(str(self.counter))
return self.dataset[idx]
def __len__(self):
return len(self.dataset)
class CheckBatch0(Callback):
def __init__(self, tmp_path: pathlib.Path):
self.tmp_path = tmp_path
def run_event(self, event: Event, state: State, logger: Logger) -> None:
if event in (Event.BEFORE_FORWARD, Event.EVAL_BEFORE_FORWARD):
filepath = get_batch_file_path(
epoch=int(state.timestamp.epoch),
is_train=state.model.training,
tmp_path=self.tmp_path,
)
if os.path.exists(filepath):
return
last_input, last_target = state.batch
torch.save(
{
'last_input': last_input,
'last_target': last_target,
},
filepath,
)
@pytest.mark.parametrize(
'device,deepspeed,fsdp',
[
pytest.param('cpu', False, False, id='cpu'),
pytest.param('gpu', False, False, id='gpu', marks=pytest.mark.gpu),
# TODO: Remove filterwarnings after FSDP removes deprecated code
pytest.param('gpu', True, False, id='deepspeed', marks=pytest.mark.gpu),
pytest.param('gpu',
False,
True,
id='fsdp',
marks=[
pytest.mark.gpu,
pytest.mark.skipif(version.parse(torch.__version__) < version.parse('1.13.0'),
reason='requires PyTorch 1.13 or higher'),
pytest.mark.filterwarnings('ignore::UserWarning'),
]),
])
@pytest.mark.parametrize('world_size', [
pytest.param(1),
pytest.param(2, marks=pytest.mark.world_size(2)),
])
def test_ddp(device: str, world_size: int, deepspeed: bool, fsdp: bool, tmp_path: pathlib.Path) -> None:
"""test strategy for ddp: 1) Train a dummy model on two gps, for two epochs, using the tracked dataset. 2) The
tracked dataset should record two -- and only two -- accesses for each sample -- one for each epoch If each sample
is accessed more than this number of times, then the distributed sampler isn't working properly If each sample is
accessed less than this number of times, then either the sample pool size isn't a multiple of the batch size (and
samples are getting dropped), or not all processes are working 3) We use a callback to save the (x, y) for the first
batch in each epoch on each process.
({train, eval} * {epoch 1, epoch 2} * {ddp 1, ddp2})
We assert that each of these tensors are different to ensure that 1) random seeding works properly,
and 2) each ddp process is indeed getting different data.
"""
model = SimpleModel(num_classes=100)
train_batch_size = 10
train_subset_num_batches = 3
synthetic_dataset = SyntheticBatchPairDataset(
num_unique_samples_to_create=train_batch_size * train_subset_num_batches,
total_dataset_size=10_000,
data_shape=(model.num_features, 5, 5),
num_classes=model.num_classes,
)
train_dataset = TrackedDataset(
synthetic_dataset=synthetic_dataset,
is_train=True,
tmp_path=tmp_path,
)
train_dataloader = DataLoader(
dataset=train_dataset,
num_workers=0,
prefetch_factor=2,
persistent_workers=False,
pin_memory=False,
timeout=0.0,
batch_size=train_batch_size // dist.get_world_size(),
sampler=dist.get_sampler(
train_dataset,
drop_last=False,
shuffle=True,
),
)
eval_batch_size = 10
eval_subset_num_batches = 3
eval_dataset = SyntheticBatchPairDataset(
num_unique_samples_to_create=eval_batch_size * eval_subset_num_batches,
total_dataset_size=10_000,
data_shape=(model.num_features, 5, 5),
num_classes=model.num_classes,
)
eval_dataset = TrackedDataset(
synthetic_dataset=eval_dataset,
is_train=False,
tmp_path=tmp_path,
)
eval_dataloader = DataLoader(
dataset=eval_dataset,
batch_size=eval_batch_size // dist.get_world_size(),
sampler=dist.get_sampler(
eval_dataset,
drop_last=False,
shuffle=True,
),
)
fsdp_config = None
if fsdp:
fsdp_config = {
'sharding_strategy': 'FULL_SHARD',
'min_params': 1e8,
'cpu_offload': False,
'mixed_precision': 'PURE',
'backward_prefetch': 'BACKWARD_PRE',
'activation_checkpointing': False,
'activation_cpu_offload': False,
'verbose': False
}
max_epochs = 2
trainer = Trainer(model=model,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
device=device,
max_duration=f'{max_epochs}ep',
eval_interval='1ep',
eval_subset_num_batches=eval_subset_num_batches,
train_subset_num_batches=train_subset_num_batches,
deepspeed_config={} if deepspeed else None,
fsdp_config=fsdp_config,
callbacks=[CheckBatch0(tmp_path)])
trainer.fit()
expected_train_samples = max_epochs * train_batch_size * train_subset_num_batches
expected_val_samples = max_epochs * eval_batch_size * eval_subset_num_batches
# account for extra spin to create deterministic ordering
expected_val_samples += eval_batch_size
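    # With the values above: 2 epochs * 10 samples/batch * 3 batches = 60 train samples, and
    # 60 + 10 (the extra eval spin) = 70 eval samples, summed across all ranks.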
actual_train_samples = _read_tracked_results(tmp_path, is_train=True)
actual_val_samples = _read_tracked_results(tmp_path, is_train=False)
assert expected_train_samples == actual_train_samples
assert expected_val_samples == actual_val_samples
if not deepspeed:
_assert_inputs_different(tmp_path, max_epochs, is_train=True)
_assert_inputs_different(tmp_path, max_epochs, is_train=False)
def _read_tracked_results(path, is_train):
# get all paths across ranks
paths = [pathlib.Path(p) for p in dist.all_gather_object(str(path))]
counter = 0
for p in paths:
with open(get_file_path(is_train=is_train, tmp_path=p), 'r') as f:
counter += int(f.read())
return counter
def _assert_inputs_different(tmp_path, max_epochs, is_train):
"""Checks that each rank's dataloader input is different."""
inputs = []
targets = []
for epoch in range(max_epochs):
file_path = get_batch_file_path(
epoch=epoch if is_train else epoch + 1, # val is 1 ahead
is_train=is_train,
tmp_path=tmp_path,
)
state_dict = torch.load(file_path, map_location='cpu')
for input in inputs:
if torch.allclose(state_dict['last_input'], input):
raise ValueError(f'Tensors equal for epoch {epoch}, rank {dist.get_global_rank()}')
for target in targets:
if torch.allclose(state_dict['last_target'], target):
raise ValueError(f'Tensors equal for epoch {epoch}, rank {dist.get_global_rank()}')
inputs.append(state_dict['last_input'])
targets.append(state_dict['last_target'])
| composer-dev | tests/trainer/test_ddp.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import contextlib
from typing import Callable, Optional, Union
import pytest
from torch.utils.data import DataLoader
from composer.core import Algorithm, Event
from composer.core.evaluator import Evaluator, evaluate_periodically
from composer.core.state import State
from composer.core.time import Time, TimeUnit
from composer.trainer import Trainer
from composer.utils import dist
from tests.common import EventCounterCallback, RandomClassificationDataset, SimpleModel
def test_eval():
# Construct the trainer
dataset = RandomClassificationDataset()
trainer = Trainer(
eval_dataloader=DataLoader(
dataset=dataset,
sampler=dist.get_sampler(dataset),
),
model=SimpleModel(),
)
# Evaluate the model
trainer.eval()
# Assert that there is some accuracy
assert trainer.state.eval_metrics['eval']['MulticlassAccuracy'].compute() != 0.0
def test_eval_call():
# Construct the trainer
trainer = Trainer(model=SimpleModel(),)
# Evaluate the model
dataset = RandomClassificationDataset()
trainer.eval(eval_dataloader=DataLoader(
dataset=dataset,
sampler=dist.get_sampler(dataset),
))
# Assert that there is some accuracy
assert trainer.state.eval_metrics['eval']['MulticlassAccuracy'].compute() != 0.0
def test_eval_call_with_trainer_evaluators():
trainer_dataset = RandomClassificationDataset()
trainer_evaluator = Evaluator(label='trainer',
dataloader=DataLoader(
dataset=trainer_dataset,
sampler=dist.get_sampler(trainer_dataset),
))
eval_call_dataset = RandomClassificationDataset()
eval_call_evaluator = Evaluator(label='eval_call',
dataloader=DataLoader(dataset=eval_call_dataset,
sampler=dist.get_sampler(eval_call_dataset)))
# Construct the trainer
trainer = Trainer(model=SimpleModel(), eval_dataloader=trainer_evaluator)
# Empty eval call.
trainer.eval()
# Check trainer_evaluator is not deleted.
assert trainer_evaluator in trainer.state.evaluators
# Eval call with an evaluator passed.
trainer.eval(eval_dataloader=eval_call_evaluator)
# Evaluators passed to constructor permanently reside in trainer.state.evaluators.
# Check trainer_evaluator is NOT deleted.
assert trainer_evaluator in trainer.state.evaluators
# Evaluators passed to eval temporarily reside in trainer.state.evaluators for the duration
# of evaluation.
# Check eval_call_evaluator IS deleted.
assert eval_call_evaluator not in trainer.state.evaluators
@pytest.mark.parametrize('evaluator_on_init,subset_on_init', [[True, True], [True, False], [False, False]])
def test_trainer_eval_subset_num_batches(evaluator_on_init: bool, subset_on_init: bool):
dataset = RandomClassificationDataset()
eval_dataloader = DataLoader(
dataset=dataset,
sampler=dist.get_sampler(dataset),
)
# Construct the trainer
event_counter_callback = EventCounterCallback()
trainer = Trainer(
model=SimpleModel(),
callbacks=[event_counter_callback],
eval_dataloader=eval_dataloader if evaluator_on_init else None,
eval_subset_num_batches=1 if subset_on_init else -1,
)
# Evaluate the model
trainer.eval(
eval_dataloader=eval_dataloader if not evaluator_on_init else None,
subset_num_batches=1 if not subset_on_init else -1,
)
# Ensure that just one batch was evaluated
assert event_counter_callback.event_to_num_calls[Event.EVAL_START] == 1
assert event_counter_callback.event_to_num_calls[Event.EVAL_BATCH_START] == 1
@pytest.mark.filterwarnings(r'ignore:eval_dataloader label:UserWarning')
def test_trainer_eval_timestamp():
# Construct the trainer
event_counter_callback = EventCounterCallback()
trainer = Trainer(
model=SimpleModel(),
callbacks=[event_counter_callback],
)
# Evaluate the model
dataset = RandomClassificationDataset()
eval_dataloader = DataLoader(
dataset=dataset,
sampler=dist.get_sampler(dataset),
)
trainer.eval(eval_dataloader=eval_dataloader)
# Ensure that the eval timestamp matches the number of evaluation events
assert event_counter_callback.event_to_num_calls[Event.EVAL_BATCH_START] == trainer.state.eval_timestamp.batch
assert trainer.state.eval_timestamp.batch == trainer.state.eval_timestamp.batch_in_epoch
# Ensure that if we eval again, the eval timestamp was reset
# Reset the event counter callback
event_counter_callback.event_to_num_calls = {k: 0 for k in event_counter_callback.event_to_num_calls}
# Eval again
trainer.eval(eval_dataloader=eval_dataloader)
# Validate the same invariants
assert event_counter_callback.event_to_num_calls[Event.EVAL_BATCH_START] == trainer.state.eval_timestamp.batch
assert trainer.state.eval_timestamp.batch == trainer.state.eval_timestamp.batch_in_epoch
@pytest.mark.parametrize(('eval_interval', 'max_duration', 'eval_at_fit_end', 'expected_eval_start_calls',
'expected_eval_batch_start_calls'), [
(1, '5ep', True, 4, 4),
(Time(2, TimeUnit.EPOCH), '8ep', False, 4, 4),
(Time(10, TimeUnit.BATCH), '8ep', False, 4, 4),
(Time(0.25, TimeUnit.DURATION), '4ep', False, 4, 4),
('1ep', '4ep', True, 3, 3),
('5ba', '4ep', False, 4, 4),
('5ba', '10ba', False, 2, 2),
('0.35dur', '4ep', True, 2, 2),
('0.01dur', '100ba', False, 100, 100),
('0.10dur', '70sp', True, 9, 9),
('0.05dur', '80sp', False, 20, 20),
])
def test_eval_at_fit_end(eval_interval: Union[str, Time, int], max_duration: str, eval_at_fit_end: bool,
expected_eval_start_calls: int, expected_eval_batch_start_calls: int):
"""Test the `eval_subset_num_batches` and `eval_interval` works when specified on init."""
# Construct the trainer
train_dataset = RandomClassificationDataset(size=10)
train_dataloader = DataLoader(
dataset=train_dataset,
batch_size=2,
sampler=dist.get_sampler(train_dataset),
)
event_counter_callback = EventCounterCallback()
eval_dataset = RandomClassificationDataset(size=10)
evaluator = Evaluator(
label='eval',
dataloader=DataLoader(
dataset=eval_dataset,
sampler=dist.get_sampler(eval_dataset),
),
metric_names=['MulticlassAccuracy'],
)
evaluator.eval_interval = evaluate_periodically(
eval_interval=eval_interval,
eval_at_fit_end=eval_at_fit_end,
)
trainer = Trainer(
model=SimpleModel(),
train_dataloader=train_dataloader,
eval_dataloader=evaluator,
eval_subset_num_batches=1,
max_duration=max_duration,
callbacks=[event_counter_callback],
)
# Train (should evaluate once)
trainer.fit()
# depending on eval_at_fit_end, ensure the appropriate amount of calls are invoked
if eval_at_fit_end:
# we should have one extra call from eval_at_fit_end
assert event_counter_callback.event_to_num_calls[Event.EVAL_START] == expected_eval_start_calls + 1
assert event_counter_callback.event_to_num_calls[Event.EVAL_BATCH_START] == expected_eval_batch_start_calls + 1
else:
assert event_counter_callback.event_to_num_calls[Event.EVAL_START] == expected_eval_start_calls
assert event_counter_callback.event_to_num_calls[Event.EVAL_BATCH_START] == expected_eval_batch_start_calls
def _get_classification_dataloader():
dataset = RandomClassificationDataset()
return DataLoader(dataset, sampler=dist.get_sampler(dataset))
@pytest.mark.parametrize('eval_dataloader', [
_get_classification_dataloader(),
Evaluator(
label='eval',
dataloader=_get_classification_dataloader(),
metric_names=['MulticlassAccuracy'],
),
])
@pytest.mark.parametrize(
'eval_interval',
[ # multiple ways of specifying to evaluate once every epoch
1,
'1ep',
Time(1, TimeUnit.EPOCH),
lambda state, event: event == Event.EPOCH_END,
])
def test_eval_params_init(
eval_dataloader: Union[DataLoader, Evaluator],
eval_interval: Union[Time, str, int, Callable[[State, Event], bool]],
):
"""Test the `eval_subset_num_batches` and `eval_interval` works when specified on init."""
# Construct the trainer
train_dataset = RandomClassificationDataset()
train_dataloader = DataLoader(train_dataset, sampler=dist.get_sampler(train_dataset))
event_counter_callback = EventCounterCallback()
trainer = Trainer(
model=SimpleModel(),
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
eval_subset_num_batches=1,
max_duration='1ep',
callbacks=[event_counter_callback],
eval_interval=eval_interval,
)
# Train (should evaluate once)
trainer.fit()
# Assert that the evaluator was indeed called only once
assert event_counter_callback.event_to_num_calls[Event.EVAL_START] == 1
assert event_counter_callback.event_to_num_calls[Event.EVAL_BATCH_START] == 1
def test_eval_params_evaluator():
"""Test the `eval_subset_num_batches` and `eval_interval` works when specified as part of an evaluator."""
# Construct the trainer
train_dataset = RandomClassificationDataset()
train_dataloader = DataLoader(train_dataset, sampler=dist.get_sampler(train_dataset))
eval_interval_batches = 1
eval_subset_num_batches = 2
eval_dataset = RandomClassificationDataset()
eval_dataloader = Evaluator(
label='eval',
dataloader=DataLoader(
dataset=eval_dataset,
sampler=dist.get_sampler(eval_dataset),
),
metric_names=['MulticlassAccuracy'],
eval_interval=f'{eval_interval_batches}ba',
subset_num_batches=eval_subset_num_batches,
)
event_counter_callback = EventCounterCallback()
trainer = Trainer(
model=SimpleModel(),
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration='1ep',
callbacks=[event_counter_callback],
# These parameters should be ignored since `subset_num_batches` is specified as part of the Evaluator
eval_subset_num_batches=1,
eval_interval='1ep',
)
# Train the model (should evaluate once every batch)
trainer.fit()
# Assert that the evaluator ran once every batch
# (and not the `eval_interval` as specified for the Trainer)
assert event_counter_callback.event_to_num_calls[Event.EVAL_START] == trainer.state.timestamp.batch
assert event_counter_callback.event_to_num_calls[
Event.EVAL_BATCH_START] == eval_subset_num_batches * trainer.state.timestamp.batch
class InfiniteDataloader(DataLoader):
"""Infinite dataloader that never raises StopIteration."""
def __iter__(self):
while True:
for batch in super().__iter__():
yield batch
def __len__(self) -> Optional[int]:
return None
@pytest.mark.parametrize('eval_subset_num_batches,success', [[None, False], [-1, False], [1, True]])
def test_infinite_eval_dataloader(eval_subset_num_batches, success):
"""Test the `eval_subset_num_batches` is required with infinite dataloader."""
# Construct the trainer
train_dataset = RandomClassificationDataset()
train_dataloader = DataLoader(train_dataset, sampler=dist.get_sampler(train_dataset))
eval_dataset = RandomClassificationDataset()
eval_dataloader = InfiniteDataloader(eval_dataset, sampler=dist.get_sampler(eval_dataset))
with contextlib.nullcontext() if success else pytest.raises(ValueError):
Trainer(
model=SimpleModel(),
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration='1ep',
eval_subset_num_batches=eval_subset_num_batches,
)
class BreakBatchAlgorithm(Algorithm):
def __init__(self):
super().__init__()
def match(self, event, state):
return event == Event.EVAL_BEFORE_FORWARD
def apply(self, event, state, logger):
del event, logger # unused
state.batch = None
@pytest.mark.parametrize('add_algorithm', [True, False])
def test_eval_batch_can_be_modified(add_algorithm: bool):
train_dataset = RandomClassificationDataset(size=8)
train_dataloader = DataLoader(train_dataset, batch_size=4, sampler=dist.get_sampler(train_dataset))
eval_dataset = RandomClassificationDataset(size=8)
eval_dataloader = DataLoader(eval_dataset, batch_size=4, sampler=dist.get_sampler(eval_dataset))
with contextlib.nullcontext() if not add_algorithm else pytest.raises(TypeError):
trainer = Trainer(model=SimpleModel(),
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration='1ep',
algorithms=[BreakBatchAlgorithm()] if add_algorithm else [])
trainer.eval()
@pytest.mark.parametrize('metric_names', ['MulticlassAccuracy', ['MulticlassAccuracy']])
def test_evaluator_metric_names_string_errors(metric_names):
eval_dataset = RandomClassificationDataset(size=8)
eval_dataloader = DataLoader(eval_dataset, batch_size=4, sampler=dist.get_sampler(eval_dataset))
context = contextlib.nullcontext() if isinstance(metric_names, list) else pytest.raises(
ValueError, match='should be a list of strings')
with context:
_ = Evaluator(label='evaluator', dataloader=eval_dataloader, metric_names=metric_names)
| composer-dev | tests/trainer/test_trainer_eval.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import os
import pathlib
import textwrap
import numpy as np
import pytest
import torch
from packaging import version
from torch.utils.data import DataLoader
from composer.trainer.trainer import Trainer
from composer.utils import dist
from tests.common import RandomClassificationDataset, SimpleModel
from tests.common.markers import world_size
def get_trainer(save_folder=None,
save_filename='ba{batch}-rank{rank}.pt',
num_features=2,
num_classes=2,
fsdp_state_dict_type='full',
load_path=None,
autoresume=False,
run_name=None):
model = SimpleModel(num_features=num_features, num_classes=num_classes)
dataset = RandomClassificationDataset(shape=(num_features, 1, 1), size=128)
dataloader = DataLoader(dataset, sampler=dist.get_sampler(dataset), batch_size=32)
optim = torch.optim.Adam(params=model.parameters())
trainer = Trainer(
model=model,
optimizers=optim,
train_dataloader=dataloader,
fsdp_config={
'min_params': 16,
'state_dict_type': fsdp_state_dict_type,
'sharding_strategy': 'FULL_SHARD'
},
save_folder=save_folder,
max_duration='2ba',
save_interval='2ba',
save_filename=save_filename,
save_overwrite=False,
load_path=load_path,
progress_bar=False,
log_to_console=False,
autoresume=autoresume,
run_name=run_name,
)
return trainer
def _compare_optims_between_state_dicts(state_dict1, state_dict2):
# Check that optim params are equal between checkpoint and in memory optimizer
state_dict1_optim_params = state_dict1['optimizers']['Adam']['state']
state_dict2_optim_params = state_dict2['optimizers']['Adam']['state']
state_dict1_keys = set(state_dict1_optim_params.keys())
state_dict2_keys = set(state_dict2_optim_params.keys())
assert len(state_dict1_keys.symmetric_difference(state_dict2_keys)) == 0, textwrap.dedent(
f"""The two state dicts being compared must have the exact same set of keys,
but instead these keys belong to one, but not the other:
{state_dict1_keys.symmetric_difference(state_dict2_keys)}""")
for param_name in state_dict2_optim_params.keys():
state_dict1_param_moment_dict = state_dict1_optim_params[param_name]
state_dict2_param_moment_dict = state_dict2_optim_params[param_name]
for moment_name in state_dict2_param_moment_dict.keys():
state_dict1_moment = state_dict1_param_moment_dict[moment_name]
state_dict2_moment = state_dict2_param_moment_dict[moment_name]
assert torch.equal(
state_dict1_moment,
state_dict2_moment), f'Moment {moment_name} for parameter {param_name} not the same between state dicts'
def _compare_model_params_between_state_dicts(state_dict1, state_dict2):
# Check that model params are equal between in memory mode and checkpoint
state_dict1_model_params = state_dict1['model']
state_dict2_model_params = state_dict2['model']
state_dict1_keys = set(state_dict1_model_params.keys())
state_dict2_keys = set(state_dict2_model_params.keys())
assert len(state_dict1_keys.symmetric_difference(state_dict2_keys)) == 0, textwrap.dedent(
f"""The two state dicts being compared must have the exact same set of keys,
        but instead these keys belong to one, but not the other:
{state_dict1_keys.symmetric_difference(state_dict2_keys)}""")
for param_name in state_dict2_model_params.keys():
state_dict1_model_tensor = state_dict1_model_params[param_name]
state_dict2_model_tensor = state_dict2_model_params[param_name]
assert torch.equal(state_dict1_model_tensor,
state_dict2_model_tensor), f'Weight named {param_name} not the same between state_dicts'
@pytest.mark.gpu
@world_size(2)
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse('1.13.0'),
reason='requires PyTorch 1.13 or higher')
def test_fsdp_full_state_dict_save(world_size, tmp_path: pathlib.Path):
save_folder = tmp_path
save_filename = 'rank{rank}.pt'
num_features = 3
num_classes = 2
expected_layer_shapes = [(5, num_features), (5,), (num_classes, 5), (num_classes,)]
layer1_weights_shape, layer1_bias_shape, layer2_weights_shape, layer2_bias_shape = expected_layer_shapes
expected_total_num_params = sum([np.prod(shape) for shape in expected_layer_shapes])
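    # With num_features=3 and num_classes=2 this is 5*3 + 5 + 2*5 + 2 = 32 parameters in total.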
trainer = get_trainer(save_folder=str(save_folder),
save_filename=save_filename,
num_features=num_features,
num_classes=num_classes,
fsdp_state_dict_type='full')
trainer.fit()
rankn_checkpoint = save_folder / pathlib.Path(f'rank{dist.get_global_rank()}.pt')
# Check that rank 0 saves a checkpoint to disk, but rank 1 does not.
if dist.get_global_rank() == 0:
assert os.path.exists(rankn_checkpoint)
elif dist.get_global_rank() == 1:
assert not os.path.exists(rankn_checkpoint)
state_dict_in_memory = trainer.state.state_dict()
if dist.get_global_rank() == 0:
# Check rank 0 state dict has the full model weights.
assert set(state_dict_in_memory['model'].keys()) == {
'module.2.weight', 'module.2.bias', 'module.4.weight', 'module.4.bias'
}
assert state_dict_in_memory['model']['module.2.weight'].ndim == 2
assert state_dict_in_memory['model']['module.2.weight'].shape == layer1_weights_shape
assert state_dict_in_memory['model']['module.2.bias'].shape == layer1_bias_shape
assert state_dict_in_memory['model']['module.4.weight'].shape == layer2_weights_shape
assert state_dict_in_memory['model']['module.4.bias'].shape == layer2_bias_shape
assert sum([p.numel() for p in state_dict_in_memory['model'].values()]) == expected_total_num_params
# Check rank 0 state dict also has the full optimizer params.
optim_state_dict = state_dict_in_memory['optimizers']['Adam']['state']
assert all([
optim_moment.shape == layer1_weights_shape
for moment_name, optim_moment in optim_state_dict['module.2.weight'].items()
if moment_name != 'step'
])
assert all([
optim_moment.shape == layer2_weights_shape
for moment_name, optim_moment in optim_state_dict['module.4.weight'].items()
if moment_name != 'step'
])
assert all([
optim_moment.shape == layer1_bias_shape
for moment_name, optim_moment in optim_state_dict['module.2.bias'].items()
if moment_name != 'step'
])
assert all([
optim_moment.shape == layer2_bias_shape
for moment_name, optim_moment in optim_state_dict['module.4.bias'].items()
if moment_name != 'step'
])
# Check that checkpoint matches state dict
with open(str(rankn_checkpoint), 'rb') as f:
state_dict_from_checkpoint = torch.load(f)['state']
_compare_model_params_between_state_dicts(state_dict_from_checkpoint, state_dict_in_memory)
_compare_optims_between_state_dicts(state_dict_from_checkpoint, state_dict_in_memory)
if dist.get_global_rank() == 1:
# Check rank 1 state dict just has the flattened shards.
rank1_state_dict_keys = set(state_dict_in_memory['model'].keys())
# Assert all params flattened
assert all([k.endswith('flat_param') for k in rank1_state_dict_keys])
assert all([p.ndim == 1 for p in state_dict_in_memory['model'].values()])
# Assert total number of params is half of the total (because partitioned across 2 ranks).
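        # With these layer shapes that is 32 / 2 = 16 elements in this rank's flat_param shards.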
assert sum([p.numel() for p in state_dict_in_memory['model'].values()
]) == expected_total_num_params / dist.get_world_size()
# In FSDP for full state dicts, the optim state dicts on other ranks are empty dictionaries.
assert state_dict_in_memory['optimizers']['Adam'] == {}
@pytest.mark.gpu
@world_size(2)
@pytest.mark.parametrize('autoresume', [True, False])
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse('1.13.0'),
reason='requires PyTorch 1.13 or higher')
def test_fsdp_full_state_dict_load(world_size, tmp_path: pathlib.Path, autoresume: bool):
if autoresume:
run_name = 'my-cool-autoresume-run'
else:
run_name = None
save_folder = tmp_path
save_filename = 'rank{rank}.pt'
trainer1 = get_trainer(save_folder=str(save_folder),
save_filename=save_filename,
fsdp_state_dict_type='full',
run_name=run_name,
autoresume=autoresume)
trainer1.fit()
state_dict_from_trainer1 = trainer1.state.state_dict()
trainer1.close()
load_path = str(save_folder / pathlib.Path('rank{rank}.pt'))
trainer2 = get_trainer(save_folder=str(save_folder),
save_filename=save_filename,
fsdp_state_dict_type='full',
load_path=load_path,
run_name=run_name,
autoresume=autoresume)
state_dict_from_trainer2 = trainer2.state.state_dict()
if dist.get_global_rank() == 0:
_compare_model_params_between_state_dicts(state_dict_from_trainer1, state_dict_from_trainer2)
_compare_optims_between_state_dicts(state_dict_from_trainer1, state_dict_from_trainer2)
@pytest.mark.gpu
@world_size(2)
@pytest.mark.parametrize('state_dict_type', ['local', 'sharded'])
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse('1.13.0'),
reason='requires PyTorch 1.13 or higher')
def test_fsdp_partitioned_state_dict_save(world_size, tmp_path: pathlib.Path, state_dict_type: str):
pytest.importorskip('torch.distributed.fsdp.fully_sharded_data_parallel')
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardedTensor
save_folder = tmp_path
save_filename = 'rank{rank}.pt'
num_features = 3
num_classes = 2
expected_layer_shapes = [(5, num_features), (5,), (num_classes, 5), (num_classes,)]
expected_total_num_params = sum([np.prod(shape) for shape in expected_layer_shapes])
trainer = get_trainer(save_folder=str(save_folder),
save_filename=save_filename,
num_features=num_features,
num_classes=num_classes,
fsdp_state_dict_type=state_dict_type)
trainer.fit()
rankn_checkpoint = save_folder / pathlib.Path(f'rank{dist.get_global_rank()}.pt')
# Check that both rank 0 and rank 1 save a checkpoint.
assert os.path.exists(rankn_checkpoint)
state_dict_in_memory = trainer.state.state_dict()
if state_dict_type == 'local':
rankn_state_dict_keys = set(state_dict_in_memory['model'].keys())
# Assert all params flattened
assert all([k.endswith('flat_param') for k in rankn_state_dict_keys])
assert all([p.ndim == 1 for p in state_dict_in_memory['model'].values()])
# Assert all params of type ShardedTensor.
assert all([isinstance(p, ShardedTensor) for p in state_dict_in_memory['model'].values()])
# Assert total number of params is half of the total (because partitioned across 2 ranks). Seems to divide evenly with flattened and sharded.
assert sum([p.local_tensor().numel() for p in state_dict_in_memory['model'].values()
]) == expected_total_num_params / dist.get_world_size()
# Check optimizer is partitioned and flattened.
rank_n_optim_state_dict = state_dict_in_memory['optimizers']['Adam']['state']
# Assert all optim moments are flattened
assert all([
optim_moment.ndim == 1
for module_name in rank_n_optim_state_dict.keys()
for moment_name, optim_moment in rank_n_optim_state_dict[module_name].items()
if moment_name != 'step'
])
# Assert total number of moments in optim state divided across ranks.
moments_per_parameter = 2
assert sum([
optim_moment.numel()
for module_name in rank_n_optim_state_dict.keys()
for moment_name, optim_moment in rank_n_optim_state_dict[module_name].items()
if moment_name != 'step'
]) == (moments_per_parameter * expected_total_num_params) / dist.get_world_size()
if state_dict_type == 'sharded':
rankn_state_dict_keys = set(state_dict_in_memory['model'].keys())
        # Assert that the params are not all flattened.
assert not all([p.ndim == 1 for p in state_dict_in_memory['model'].values()])
# Assert all params of type ShardedTensor
assert all([isinstance(p, ShardedTensor) for p in state_dict_in_memory['model'].values()])
        # Assert the per-rank number of params is less than the total (because partitioned across 2 ranks). The shards do
        # not divide evenly when sharded and unflattened, so we just check that the params per rank is less than the total.
assert sum([p.local_tensor().numel() for p in state_dict_in_memory['model'].values()
]) < expected_total_num_params
# Check optimizer is partitioned, but unflattened.
rank_n_optim_state_dict = state_dict_in_memory['optimizers']['Adam']['state']
        # Assert that the optim moments are not all flattened.
assert not all([
optim_moment.ndim == 1
for module_name in rank_n_optim_state_dict.keys()
for moment_name, optim_moment in rank_n_optim_state_dict[module_name].items()
if moment_name != 'step'
])
        # Assert the per-rank number of optim params is less than the total (because partitioned across 2 ranks). The shards do
        # not divide evenly when sharded and unflattened, so we just check that the optim params per rank is less than the total.
moments_per_parameter = 2
assert sum([
optim_moment.local_tensor().numel()
for module_name in rank_n_optim_state_dict.keys()
for moment_name, optim_moment in rank_n_optim_state_dict[module_name].items()
if moment_name != 'step'
]) < (moments_per_parameter * expected_total_num_params)
# Check state dicts same between the in memory state and the on disk checkpoint for both ranks.
with open(str(rankn_checkpoint), 'rb') as f:
state_dict_from_checkpoint = torch.load(f)['state']
_compare_model_params_between_state_dicts(state_dict_from_checkpoint, state_dict_in_memory)
_compare_optims_between_state_dicts(state_dict_from_checkpoint, state_dict_in_memory)
@pytest.mark.gpu
@world_size(2)
@pytest.mark.parametrize('state_dict_type', ['local', 'sharded'])
@pytest.mark.parametrize('autoresume', [True, False])
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse('1.13.0'),
reason='requires PyTorch 1.13 or higher')
def test_fsdp_partitioned_state_dict_load(world_size, tmp_path: pathlib.Path, state_dict_type: str, autoresume: bool):
if autoresume:
run_name = 'my-autoresume-run'
else:
run_name = None
save_folder = tmp_path
save_filename = 'rank{rank}.pt'
trainer1 = get_trainer(save_folder=str(save_folder),
save_filename=save_filename,
fsdp_state_dict_type=state_dict_type,
run_name=run_name,
autoresume=autoresume)
trainer1.fit()
state_dict_from_trainer1 = trainer1.state.state_dict()
trainer1.close()
load_path = str(save_folder / pathlib.Path('rank{rank}.pt'))
trainer2 = get_trainer(
save_folder=str(save_folder),
save_filename=save_filename,
fsdp_state_dict_type=state_dict_type,
load_path=load_path,
autoresume=autoresume,
run_name=run_name,
)
state_dict_from_trainer2 = trainer2.state.state_dict()
# Compare saved state and loaded state for both ranks.
_compare_model_params_between_state_dicts(state_dict_from_trainer1, state_dict_from_trainer2)
_compare_optims_between_state_dicts(state_dict_from_trainer1, state_dict_from_trainer2)
| composer-dev | tests/trainer/test_sharded_checkpoint.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
| composer-dev | tests/trainer/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from typing import List, Optional
import pytest
import torch
import torch.nn as nn
from torch import Tensor
from torch.utils.data import DataLoader
from composer.core import State
from composer.devices import DeviceCPU, DeviceGPU
from composer.trainer.dist_strategy import ddp_sync_context, prepare_ddp_module
from composer.utils import dist
from tests.common.datasets import RandomClassificationDataset
class MinimalConditionalModel(nn.Module):
def __init__(self):
super().__init__()
self.choice1 = nn.Linear(1, 1, bias=False)
self.choice2 = nn.Linear(1, 1, bias=False)
self.choice3 = nn.Linear(1, 1, bias=False)
nn.init.constant_(self.choice1.weight, 0)
nn.init.constant_(self.choice2.weight, 0)
nn.init.constant_(self.choice3.weight, 0)
def forward(self, input: int):
if input == 1:
return self.choice1(Tensor([1]))
if input == 2:
return self.choice2(Tensor([1]))
if input == 3:
return self.choice3(Tensor([1]))
raise Exception('Invalid input:', input)
def loss(self, output: Tensor, target: Tensor):
return (output - target) * (output - target)
@pytest.mark.parametrize('ddp_sync_strategy,expected_grads', [
pytest.param('single_auto_sync', ([-1, None, None], [-1, -1.5, None], [-1, -1.5, None]), id='single_auto_sync'),
pytest.param('multi_auto_sync', ([-1.5, None, None], [-1.5, -1.5, None], [-1.5, -1.5, None]), id='multi_auto_sync'),
pytest.param('forced_sync', ([-1, None, None], [-1, -1, None], [-1.5, -1.5, None]), id='forced_sync'),
])
@pytest.mark.world_size(2)
def test_ddp_sync_strategy(
ddp_sync_strategy: str,
expected_grads: List[List[Optional[float]]],
rank_zero_seed: int,
request: pytest.FixtureRequest,
):
original_model = MinimalConditionalModel()
# ddp = DDP(backend="gloo", find_unused_parameters=True, sync_strategy=ddp_sync_strategy, timeout=5.)
optimizer = torch.optim.SGD(original_model.parameters(), 0.1)
device = None
for item in request.session.items:
device = DeviceCPU() if item.get_closest_marker('gpu') is None else DeviceGPU()
break
    assert device is not None
state = State(
model=original_model,
rank_zero_seed=rank_zero_seed,
run_name='run_name',
device=device,
optimizers=optimizer,
device_train_microbatch_size=1,
max_duration='1ep',
dataloader=DataLoader(RandomClassificationDataset()),
dataloader_label='train',
precision='fp32',
)
batches = [[(1, Tensor([1])), (1, Tensor([2]))], [(2, Tensor([1])), (2, Tensor([2]))]]
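    # Each rank consumes two one-sample microbatches; the integer input selects which linear layer is used,
    # so some parameters receive no gradient on some ranks (hence find_unused_parameters=True below).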
state.model = prepare_ddp_module(state.model, find_unused_parameters=True)
optimizer.zero_grad()
for microbatch_idx in range(2):
with ddp_sync_context(state, microbatch_idx == 1, sync_strategy=ddp_sync_strategy):
input, target = batches[microbatch_idx][dist.get_local_rank()]
output = state.model.forward(input)
loss = original_model.loss(output, target)
loss.mul_(1 / 2)
loss.backward()
if dist.get_global_rank() == 0:
grads = [p.grad.item() if p.grad else None for p in original_model.parameters()]
for expected, actual in zip(expected_grads[microbatch_idx], grads):
assert expected == actual
if dist.get_global_rank() == 0:
grads = [p.grad.item() if p.grad else None for p in original_model.parameters()]
for expected, actual in zip(expected_grads[-1], grads):
assert expected == actual
| composer-dev | tests/trainer/test_ddp_sync_strategy.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import os
import pathlib
import pytest
import torch
from torch.utils.data import DataLoader
from composer.core import Callback, Event, State
from composer.loggers import Logger
from composer.trainer.trainer import Trainer
from tests.common import EventCounterCallback, RandomClassificationDataset, SimpleModel
def _assert_predict_events_called_expected_number_of_times(
event_counter: EventCounterCallback,
num_predict_steps: int,
num_predicts: int = 1,
):
event_to_num_expected_invocations = {
Event.PREDICT_START: num_predicts,
Event.PREDICT_BATCH_START: num_predict_steps,
Event.PREDICT_BEFORE_FORWARD: num_predict_steps,
Event.PREDICT_AFTER_FORWARD: num_predict_steps,
Event.PREDICT_BATCH_END: num_predict_steps,
Event.PREDICT_END: num_predicts,
}
for event, expected in event_to_num_expected_invocations.items():
actual = event_counter.event_to_num_calls[event]
assert expected == actual, f'Event {event} expected to be called {expected} times, but instead it was called {actual} times'
class PredictionSaver(Callback):
def __init__(self, folder: str):
self.folder = folder
os.makedirs(self.folder, exist_ok=True)
def predict_batch_end(self, state: State, logger: Logger) -> None:
name = f'batch_{int(state.predict_timestamp.batch)}.pt'
filepath = os.path.join(self.folder, name)
torch.save(state.outputs, filepath)
# Also upload the files
logger.upload_file(remote_file_name=name, file_path=filepath)
class TestTrainerPredict():
@pytest.mark.parametrize('subset_num_batches', [-1, 1])
def test_predict(self, subset_num_batches: int):
# Create the trainer and train
event_counter_callback = EventCounterCallback()
trainer = Trainer(
model=SimpleModel(),
train_dataloader=DataLoader(dataset=RandomClassificationDataset()),
max_duration='1ba',
callbacks=[event_counter_callback],
)
trainer.fit()
        # Remove the dataloader from the state (to ensure that the predict dl is being used)
trainer.state.set_dataloader(None)
# Run predict()
predict_dl = DataLoader(dataset=RandomClassificationDataset())
trainer.predict(predict_dl, subset_num_batches)
# Validate that the predict events were called the correct number of times
num_predict_batches = subset_num_batches if subset_num_batches >= 0 else len(predict_dl)
_assert_predict_events_called_expected_number_of_times(event_counter_callback, num_predict_batches)
def test_timestamps(self):
# Construct the trainer
event_counter_callback = EventCounterCallback()
trainer = Trainer(
model=SimpleModel(),
callbacks=[event_counter_callback],
)
# Predict on the model
predict_dataloader = DataLoader(dataset=RandomClassificationDataset())
trainer.predict(predict_dataloader)
# Ensure that the predict timestamp matches the number of prediction events
assert event_counter_callback.event_to_num_calls[
Event.PREDICT_BATCH_START] == trainer.state.predict_timestamp.batch
assert trainer.state.predict_timestamp.batch == trainer.state.predict_timestamp.batch_in_epoch
# Ensure that if we predict again, the predict timestamp was reset
# Reset the event counter callback
event_counter_callback.event_to_num_calls = {k: 0 for k in event_counter_callback.event_to_num_calls}
# Predict again
trainer.predict(predict_dataloader)
# Validate the same invariants
assert event_counter_callback.event_to_num_calls[
Event.PREDICT_BATCH_START] == trainer.state.predict_timestamp.batch
assert trainer.state.predict_timestamp.batch == trainer.state.predict_timestamp.batch_in_epoch
@pytest.mark.parametrize('return_outputs', [True, False])
@pytest.mark.parametrize('device', ['cpu', pytest.param('gpu', marks=pytest.mark.gpu)])
def test_return_outputs(self, return_outputs: bool, tmp_path: pathlib.Path, device: str):
# Construct the trainer
folder = str(tmp_path / 'prediction_outputs')
prediction_saver_callback = PredictionSaver(folder)
trainer = Trainer(
model=SimpleModel(),
device=device,
callbacks=[prediction_saver_callback],
)
# Predict on the model
predict_dataloader = DataLoader(dataset=RandomClassificationDataset())
outputs = trainer.predict(predict_dataloader, subset_num_batches=1, return_outputs=return_outputs)
if return_outputs:
assert len(outputs) > 0
else:
assert len(outputs) == 0
for output in outputs:
assert output.device.type == 'cpu'
loaded_output = torch.load(os.path.join(folder, 'batch_1.pt'), map_location='cpu')
assert loaded_output.shape == (1, 2)
| composer-dev | tests/trainer/test_predict.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from typing import Any
import pytest
import torch
from torch.utils.data import DataLoader
from composer import DataSpec, Trainer
from tests.common import RandomClassificationDataset, SimpleModel
N = 128
class TestDefaultGetNumSamples:
@pytest.fixture
def dataspec(self):
dataloader = DataLoader(RandomClassificationDataset())
return DataSpec(dataloader=dataloader)
# yapf: disable
@pytest.mark.parametrize('batch', [
{'a': torch.rand(N, 8), 'b': torch.rand(N, 64)}, # dict
[{'a': torch.rand(N, 8)}, {'c': torch.rand(N, 64)}], # list of dict
(torch.rand(N, 8), torch.rand(N, 64)), # tuple
[torch.rand(N, 8), torch.rand(N, 64)], # list
torch.rand(N, 8), # tensor
torch.rand(N, 8, 4, 2), # 4-dim tensor
])
# yapf: enable
def test_num_samples_infer(self, batch: Any, dataspec: DataSpec):
assert dataspec._default_get_num_samples_in_batch(batch) == N
def test_batch_dict_mismatch(self, dataspec: DataSpec):
N = 128
batch = {'a': torch.rand(N, 8), 'b': torch.rand(N * 2, 64)}
with pytest.raises(NotImplementedError, match='multiple Tensors'):
dataspec._default_get_num_samples_in_batch(batch)
def test_unable_to_infer(self, dataspec: DataSpec):
N = 128
batch = [torch.rand(N, 8), 'I am a string.']
with pytest.raises(ValueError, match='Unable to determine'):
dataspec._default_get_num_samples_in_batch(batch)
def test_small_batch_at_end_warning():
batch_size = 4
dataset_size = 17
eval_batch_size = 2
eval_dataset_size = 25
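    # 17 train samples with batch_size 4 (and 25 eval samples with batch_size 2) leave a final batch of a
    # single sample, which is presumably what produces the 'Cannot split tensor' warning asserted below.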
model = SimpleModel()
trainer = Trainer(
model=model,
        eval_interval='2ba',
train_dataloader=DataLoader(RandomClassificationDataset(size=dataset_size), batch_size=batch_size),
eval_dataloader=DataLoader(RandomClassificationDataset(size=eval_dataset_size), batch_size=eval_batch_size),
        max_duration='8ba',
)
with pytest.warns(UserWarning, match='Cannot split tensor of length.*'):
trainer.fit()
| composer-dev | tests/trainer/test_dataspec.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import collections.abc
import contextlib
import copy
import datetime
import os
import pathlib
import time
from typing import Any, Dict, List, Optional, Union
import pytest
import torch
from packaging import version
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader
from composer import Callback, Evaluator, Trainer
from composer.algorithms import CutOut, LabelSmoothing
from composer.core import Event, Precision, State, Time, TimeUnit
from composer.datasets.ffcv_utils import write_ffcv_dataset
from composer.datasets.imagenet import build_ffcv_imagenet_dataloader
from composer.devices import Device
from composer.loggers import InMemoryLogger, Logger, RemoteUploaderDownloader
from composer.loss import soft_cross_entropy
from composer.models import ComposerModel
from composer.optim import ExponentialScheduler
from composer.trainer.trainer import _generate_run_name
from composer.utils import dist, is_model_deepspeed, is_model_fsdp, map_collection, reproducibility
from tests.common import (InfiniteClassificationDataset, RandomClassificationDataset, RandomImageDataset,
SimpleConvModel, SimpleModel, device, world_size)
from tests.common.events import EventCounterCallback
from tests.test_state import assert_state_equivalent
class SleepyCallback(Callback):
def __init__(self, sleep_duration: datetime.timedelta, event: Event) -> None:
self.sleep_duration = sleep_duration
self.event = event
def run_event(self, event: Event, state: State, logger: Logger) -> None:
if event == self.event:
time.sleep(self.sleep_duration.total_seconds())
class TestTrainerInit():
@pytest.fixture
def model(self):
return SimpleModel()
def test_minimal_init(self, model: ComposerModel):
Trainer(model=model)
@world_size(1, 2)
def test_model_ddp_wrapped(self, model: ComposerModel, world_size: int):
trainer = Trainer(model=model)
should_be_ddp_wrapped = dist.get_world_size() > 1
assert isinstance(trainer.state.model, DistributedDataParallel) == should_be_ddp_wrapped
def test_invalid_device(self, model: ComposerModel):
with pytest.raises(ValueError, match='magic_device'):
Trainer(model=model, device='magic_device')
@world_size(1, 2)
@device('gpu', 'cpu')
def test_gpu_logging(self, model: ComposerModel, world_size: int, device: str):
in_mem_logger = InMemoryLogger()
Trainer(model=model, loggers=[in_mem_logger])
expected_key, expected_value = f'num_{device}s_per_node', world_size
assert expected_key in in_mem_logger.hyperparameters
assert in_mem_logger.hyperparameters[expected_key] == expected_value
@device('gpu', 'cpu')
def test_optimizer_params_on_device(
self,
model: ComposerModel,
device: str,
):
# Train a model
train_dataset = RandomClassificationDataset()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
max_duration = '2ba'
trainer = Trainer(
model=model,
max_duration=max_duration,
train_dataloader=DataLoader(train_dataset, sampler=dist.get_sampler(train_dataset)),
optimizers=optimizer,
)
trainer.fit()
# Assert that the parameters are on the correct devices
parameters = trainer.state.optimizers[0].param_groups[0]['params']
target_device = 'cuda' if device == 'gpu' else 'cpu'
assert all(param.device.type == target_device for param in parameters)
def _assert_optimizer_is_on_device(optimizer: torch.optim.Optimizer):
for state in optimizer.state.values():
for v in state.values():
if isinstance(v, torch.Tensor):
assert v.device.type == 'cuda'
def _get_classification_dataloader():
dataset = RandomClassificationDataset(size=2)
return DataLoader(dataset, sampler=dist.get_sampler(dataset))
class TestTrainerInitOrFit:
"""Validate that certain parameters can be passed in on `Trainer.__init__()` or `Trainer.fit()`"""
@pytest.fixture
def train_dataloader(self):
dataset = RandomClassificationDataset(size=10)
return DataLoader(dataset=dataset, batch_size=2, sampler=dist.get_sampler(dataset))
@pytest.fixture
def model(self):
return SimpleModel()
@pytest.fixture
def max_duration(self):
return Time(1, TimeUnit.EPOCH)
@pytest.mark.parametrize('train_subset_num_batches', [-1, 1])
def test_train_dataloader(
self,
train_dataloader: DataLoader,
model: ComposerModel,
max_duration: Time[int],
train_subset_num_batches: int,
):
# Copy the model so the fit_trainer can start with the same parameter values as the init_trainer
copied_model = copy.deepcopy(model)
# Train once with the train_dataloader params on Trainer.__init__()
init_trainer = Trainer(
model=model,
max_duration=max_duration,
train_dataloader=train_dataloader,
train_subset_num_batches=train_subset_num_batches,
)
init_trainer.fit()
# Train again with the train_dataloader params specified on Trainer.fit()
fit_trainer = Trainer(
model=copied_model,
max_duration=max_duration,
)
fit_trainer.fit(
train_dataloader=train_dataloader,
train_subset_num_batches=train_subset_num_batches,
)
# Assert that the states are equivalent
assert_state_equivalent(init_trainer.state, fit_trainer.state)
@pytest.mark.parametrize('max_duration', [1, '1ep', '1ba', Time(1, TimeUnit.EPOCH)])
def test_max_duration(
self,
train_dataloader: DataLoader,
model: ComposerModel,
max_duration: Time[int],
):
# Copy the model so the fit_trainer can start with the same parameter values as the init_trainer
copied_model = copy.deepcopy(model)
# Train once with the max_duration param on Trainer.__init__()
init_trainer = Trainer(
model=model,
max_duration=max_duration,
train_dataloader=train_dataloader,
)
init_trainer.fit()
# Train again with the max_duration param specified on Trainer.fit()
fit_trainer = Trainer(
model=copied_model,
train_dataloader=train_dataloader,
)
fit_trainer.fit(duration=max_duration)
# Assert that the states are equivalent
assert_state_equivalent(init_trainer.state, fit_trainer.state)
@pytest.mark.parametrize('max_duration', [1, '1ep', '1ba', '1sp'])
@pytest.mark.parametrize('train_subset_num_batches', [-1, 1])
def test_infinite_train_loader(self, model: ComposerModel, max_duration: Union[int, str],
train_subset_num_batches: int):
should_raise = (isinstance(max_duration, int) or
max_duration.endswith('ep')) and (train_subset_num_batches is None or
train_subset_num_batches == -1)
context = pytest.raises(
ValueError,
match='max_duration cannot be specified in epochs') if should_raise else contextlib.nullcontext()
with context:
train_loader = DataLoader(InfiniteClassificationDataset(), batch_size=4)
trainer = Trainer(model=model,
train_dataloader=train_loader,
max_duration=max_duration,
train_subset_num_batches=train_subset_num_batches)
trainer.fit()
@pytest.mark.parametrize('reset_time', [True, False])
@pytest.mark.parametrize('new_duration', [
Time.from_timestring('1ep'),
Time.from_timestring('1ba'),
Time.from_timestring('2ep'),
None,
])
def test_reset_time(
self,
train_dataloader: DataLoader,
model: ComposerModel,
max_duration: Time[int],
new_duration: Time,
reset_time: bool,
):
# Train once
trainer = Trainer(
model=model,
max_duration=max_duration,
train_dataloader=train_dataloader,
)
trainer.fit()
# Get the timestamp
first_timestamp = trainer.state.timestamp
# It should error if the time is not being reset. Otherwise, it should be reset and train OK.
error_msg = 'Please provide the `duration` or specify `reset_time=True`'
ctx = pytest.raises(ValueError,
match=error_msg) if not new_duration and not reset_time else contextlib.nullcontext()
with ctx:
# Train again for the same amount of time
trainer.fit(
duration=new_duration,
train_dataloader=train_dataloader,
reset_time=reset_time,
)
# If the fit did not error (new_duration is specified), then assert that the time
# matches what is expected
if new_duration is not None:
if reset_time:
assert trainer.state.timestamp.get(new_duration.unit) == new_duration
else:
first_timestamp_in_new_unit = getattr(first_timestamp, new_duration.unit.name.lower())
assert trainer.state.timestamp.get(new_duration.unit) == first_timestamp_in_new_unit + new_duration
@pytest.mark.parametrize('scale_schedule_ratio', [1.0, 2.0])
@pytest.mark.parametrize('step_schedulers_every_batch', [None, True, False])
def test_schedulers(
self,
train_dataloader: DataLoader,
model: ComposerModel,
max_duration: Time[int],
scale_schedule_ratio: float,
step_schedulers_every_batch: Optional[bool],
):
# Copy the model so the fit_trainer can start with the same parameter values as the init_trainer
copied_model = copy.deepcopy(model)
# Train once with the scheduler params on Trainer.__init__()
scheduler = ExponentialScheduler(2.0)
init_trainer = Trainer(
model=model,
max_duration=max_duration,
train_dataloader=train_dataloader,
schedulers=scheduler,
scale_schedule_ratio=scale_schedule_ratio,
step_schedulers_every_batch=step_schedulers_every_batch,
)
init_trainer.fit()
# Train again with the scheduler params specified on Trainer.fit()
fit_trainer = Trainer(
model=copied_model,
max_duration=max_duration,
train_dataloader=train_dataloader,
)
fit_trainer.fit(
schedulers=scheduler,
scale_schedule_ratio=scale_schedule_ratio,
step_schedulers_every_batch=step_schedulers_every_batch,
)
# Assert that the states are equivalent
assert_state_equivalent(init_trainer.state, fit_trainer.state)
@pytest.mark.parametrize('eval_subset_num_batches', [-1, 1])
@pytest.mark.parametrize('eval_interval', ['1ep', '1ba'])
@pytest.mark.parametrize(
'eval_dataloader',
[
_get_classification_dataloader(), # a normal dataloader
Evaluator(
label='eval',
dataloader=_get_classification_dataloader(),
metric_names=['MulticlassAccuracy'],
), # an evaluator
[ # multiple evaluators
Evaluator(
label='eval1',
dataloader=_get_classification_dataloader(),
metric_names=['MulticlassAccuracy'],
),
Evaluator(
label='eval2',
dataloader=_get_classification_dataloader(),
metric_names=['MulticlassAccuracy'],
),
],
],
)
def test_eval_dataloader(
self,
train_dataloader: DataLoader,
model: ComposerModel,
max_duration: Time[int],
eval_subset_num_batches: int,
eval_interval: str,
eval_dataloader: Union[Evaluator, DataLoader, List[Evaluator]],
):
# Copy the model so the fit_trainer can start with the same parameter values as the init_trainer
copied_model = copy.deepcopy(model)
# Train once with the eval_dataloader params on Trainer.__init__()
init_event_counter_callback = EventCounterCallback() # track the number of times eval is called
init_trainer = Trainer(
model=model,
max_duration=max_duration,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
callbacks=[init_event_counter_callback],
eval_subset_num_batches=eval_subset_num_batches,
eval_interval=eval_interval,
)
init_trainer.fit()
# Train again with the eval_dataloader params specified on Trainer.fit()
fit_event_counter_callback = EventCounterCallback() # track the number of times eval is called
fit_trainer = Trainer(
model=copied_model,
max_duration=max_duration,
train_dataloader=train_dataloader,
callbacks=[fit_event_counter_callback],
)
fit_trainer.fit(
eval_dataloader=eval_dataloader,
eval_subset_num_batches=eval_subset_num_batches,
eval_interval=eval_interval,
)
# Assert that the states are equivalent
assert_state_equivalent(init_trainer.state, fit_trainer.state)
def test_microbatch(
self,
train_dataloader: DataLoader,
model: ComposerModel,
max_duration: Time[int],
):
device_train_microbatch_size = 1
# Copy the model so the fit_trainer can start with the same parameter values as the init_trainer
copied_model = copy.deepcopy(model)
# Train once with the device_train_microbatch_size param on Trainer.__init__()
init_event_counter_callback = EventCounterCallback() # track the number of times microbatches are trained
init_trainer = Trainer(
model=model,
max_duration=max_duration,
train_dataloader=train_dataloader,
device_train_microbatch_size=device_train_microbatch_size,
callbacks=[init_event_counter_callback],
)
init_trainer.fit()
# Train again with the device_train_microbatch_size param specified on Trainer.fit()
fit_event_counter_callback = EventCounterCallback() # track the number of times microbatches are trained
fit_trainer = Trainer(
model=copied_model,
max_duration=max_duration,
train_dataloader=train_dataloader,
callbacks=[fit_event_counter_callback],
)
fit_trainer.fit(device_train_microbatch_size=device_train_microbatch_size)
# Assert that the states are equivalent
assert_state_equivalent(init_trainer.state, fit_trainer.state)
@pytest.mark.gpu
@pytest.mark.filterwarnings(
"ignore:Setting `device_train_microbatch_size='auto'` is an experimental feature which may cause uncaught Cuda Out of Memory errors. In this case, please manually set device_train_microbatch_size explicitly to an integer instead."
)
@pytest.mark.parametrize('dataloader_in_init', [True, False])
def test_auto_microbatch(
self,
train_dataloader: DataLoader,
model: ComposerModel,
max_duration: Time[int],
dataloader_in_init: bool,
):
# Copy the model so the fit_trainer can start with the same parameter values as the init_trainer
copied_model = copy.deepcopy(model)
# Train once with the device_train_microbatch_size=1
init_event_counter_callback = EventCounterCallback() # track the number of times microbatches are trained
init_trainer = Trainer(
model=model,
max_duration=max_duration,
train_dataloader=train_dataloader if dataloader_in_init else None,
device_train_microbatch_size='auto',
callbacks=[init_event_counter_callback],
)
init_trainer.fit(train_dataloader=train_dataloader if not dataloader_in_init else None)
# Train again with the device_train_microbatch_size='auto'
fit_event_counter_callback = EventCounterCallback() # track the number of times microbatches are trained
fit_trainer = Trainer(
model=copied_model,
max_duration=max_duration,
train_dataloader=train_dataloader if dataloader_in_init else None,
callbacks=[fit_event_counter_callback],
)
fit_trainer.fit(
train_dataloader=train_dataloader if not dataloader_in_init else None,
device_train_microbatch_size='auto',
)
# Assert that the states are equivalent
assert_state_equivalent(init_trainer.state, fit_trainer.state)
@pytest.mark.gpu
@pytest.mark.parametrize('precision', [Precision.FP32, Precision.AMP_BF16, Precision.AMP_FP16])
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_deepspeed(
self,
model: ComposerModel,
precision: Precision,
max_duration: Time[int],
train_dataloader: DataLoader,
):
trainer = Trainer(
model=model,
precision=precision,
deepspeed_config={},
max_duration=max_duration,
train_dataloader=train_dataloader,
)
assert is_model_deepspeed(trainer.state.model)
assert trainer.state.deepspeed_enabled
trainer.fit()
@pytest.mark.gpu
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse('1.13.0'),
reason='requires PyTorch 1.13 or higher')
@pytest.mark.parametrize('precision', [Precision.FP32, Precision.AMP_BF16, Precision.AMP_FP16])
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_fsdp(
self,
model: ComposerModel,
precision: Precision,
max_duration: Time[int],
train_dataloader: DataLoader,
):
if precision == Precision.FP32: # FSDP FULL_SHARD doesn't support FP32
return
fsdp_config = {
'sharding_strategy': 'FULL_SHARD',
'min_params': 1e8,
'cpu_offload': False,
'mixed_precision': 'PURE',
'backward_prefetch': 'BACKWARD_PRE',
'activation_checkpointing': False,
'activation_cpu_offload': False,
'verbose': False
}
        # None of the remaining precisions is expected to raise with FSDP on GPU, so the
        # context manager below is a no-op placeholder.
ctx = contextlib.nullcontext()
should_error = False
with ctx:
trainer = Trainer(
model=model,
precision=precision,
fsdp_config=fsdp_config,
max_duration=max_duration,
train_dataloader=train_dataloader,
)
if not should_error:
assert is_model_fsdp(trainer.state.model)
assert trainer.state.fsdp_enabled
trainer.fit()
@pytest.mark.gpu
def test_device(
self,
model: ComposerModel,
max_duration: Time[int],
train_dataloader: DataLoader,
):
trainer = Trainer(model=model, device='gpu', max_duration=max_duration, train_dataloader=train_dataloader)
# Run fit to ensure there are no device mismatches
trainer.fit()
# Finally assert the devices are correct
assert all(p.device.type == 'cuda' for p in trainer.state.model.parameters())
map_collection(trainer.state.optimizers, _assert_optimizer_is_on_device)
@pytest.mark.gpu
def test_device_with_checkpoint(
self,
model: ComposerModel,
tmp_path: pathlib.Path,
max_duration: Time[int],
train_dataloader: DataLoader,
):
copied_model = copy.deepcopy(model)
trainer = Trainer(model=model, device='gpu', max_duration=max_duration, train_dataloader=train_dataloader)
checkpoint_path = str(tmp_path / 'checkpoint.pt')
trainer.save_checkpoint(checkpoint_path)
trainer_2 = Trainer(model=copied_model,
load_path=checkpoint_path,
max_duration=max_duration,
train_dataloader=train_dataloader)
# Run fit to ensure there are no device mismatches
trainer_2.fit(reset_time=True)
# And ensure the device on the new trainer is correct
assert all(p.device.type == 'cuda' for p in trainer_2.state.model.parameters())
map_collection(trainer_2.state.optimizers, _assert_optimizer_is_on_device)
@pytest.mark.parametrize('precision', [Precision.FP32, Precision.AMP_BF16, Precision.AMP_FP16])
@pytest.mark.parametrize('device', ['cpu', pytest.param('gpu', marks=pytest.mark.gpu)])
def test_precision(
self,
model: ComposerModel,
precision: Precision,
device: str,
train_dataloader: DataLoader,
max_duration: Time[int],
):
# Copy the model so the fit_trainer can start with the same parameter values as the init_trainer
copied_model = copy.deepcopy(model)
should_error = False
ctx = contextlib.nullcontext()
if device == 'cpu' and precision != Precision.FP32:
ctx = pytest.raises(ValueError, match='not supported for CPU training.')
should_error = True
with ctx:
# Train once with the precision param on Trainer.__init__()
init_trainer = Trainer(
model=model,
max_duration=max_duration,
train_dataloader=train_dataloader,
precision=precision,
device=device,
)
if not should_error:
init_trainer.fit()
# Train again with the precision param specified on Trainer.fit()
fit_trainer = Trainer(
model=copied_model,
max_duration=max_duration,
train_dataloader=train_dataloader,
device=device,
)
with ctx:
fit_trainer.fit(precision=precision)
# Assert that the states are equivalent, if we did train
if not should_error:
assert_state_equivalent(init_trainer.state, fit_trainer.state)
def test_dataloader_active_iterator_error(self, model: ComposerModel):
dataset = RandomClassificationDataset()
dataloader = DataLoader(
dataset=dataset,
persistent_workers=True,
num_workers=1,
sampler=dist.get_sampler(dataset),
)
# spin one sample
_ = next(dataloader.__iter__())
# Assert the error is raised if the dataloader is specified in init
with pytest.raises(ValueError, match='active iterator'):
Trainer(
model=model,
train_dataloader=dataloader,
)
# Or if the dataloader is specified on fit
with pytest.raises(ValueError, match='active iterator'):
trainer = Trainer(model=model)
trainer.fit(train_dataloader=dataloader)
def test_multiple_calls_to_fit(
self,
train_dataloader: DataLoader,
model: ComposerModel,
max_duration: Time[int],
):
"""Test that the trainer supports multiple calls to fit."""
        # Note that callbacks are tested separately in tests/callbacks/test_callbacks.py
        # to ensure that they support multiple calls of Event.INIT and Event.FIT
trainer = Trainer(
model=model,
max_duration=max_duration,
train_dataloader=train_dataloader,
)
# Train once
trainer.fit()
# Train again.
trainer.fit(duration=max_duration)
assert trainer.state.timestamp.get(max_duration.unit) == 2 * max_duration
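    # Worked example of the duration arithmetic asserted above (values assumed): with
    # max_duration='2ep', the first fit() trains to 2ep and the second fit(duration=max_duration)
    # trains for 2 more, so trainer.state.timestamp.get(TimeUnit.EPOCH) == 2 * max_duration == 4ep.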
@pytest.mark.parametrize('eval_interval', ['1ba', '1ep'])
def test_eval_is_excluded_from_wct_tracking(
self,
train_dataloader: DataLoader,
model: ComposerModel,
eval_interval: str,
):
# Construct the trainer with a callback that sleeps during evaluation
sleep_duration = datetime.timedelta(seconds=0.05)
sleepy_callback = SleepyCallback(
sleep_duration=sleep_duration,
event=Event.EVAL_AFTER_FORWARD,
)
event_counter_callback = EventCounterCallback()
dataset = RandomClassificationDataset()
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
train_subset_num_batches=2, # make training fast
eval_dataloader=DataLoader(
dataset=dataset,
batch_size=2,
sampler=dist.get_sampler(dataset),
),
callbacks=[sleepy_callback, event_counter_callback],
eval_interval=eval_interval,
max_duration='2ep',
eval_subset_num_batches=1,
)
# Train
trainer.fit()
# Validate that eval was called
expected_num_evals = 4 if eval_interval == '1ba' else 2
assert event_counter_callback.event_to_num_calls[Event.EVAL_START] == expected_num_evals
# Validate the timestamps.
# Training duration should be less than the sleeping
assert trainer.state.timestamp.total_wct < sleep_duration * expected_num_evals
# The last evaluation duration should be at least as much as the sleeping
assert trainer.state.eval_timestamp.total_wct > sleep_duration
@pytest.mark.world_size(2)
def test_wct_consistency_across_ranks(
self,
train_dataloader: DataLoader,
model: ComposerModel,
):
"""Test that the wct is the same across multiple ranks"""
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
max_duration='1ba',
)
trainer.fit()
# First check that the timestamp is non-zero
timestamp = trainer.state.timestamp
assert timestamp.total_wct.total_seconds() > 0
assert timestamp.epoch_wct.total_seconds() > 0
assert timestamp.batch_wct.total_seconds() > 0
# Validate it is the same across ranks
my_timestamp_tensor = torch.tensor([
timestamp.total_wct.total_seconds(),
timestamp.epoch_wct.total_seconds(),
timestamp.batch_wct.total_seconds(),
],
dtype=torch.float64)
rank_zero_timestamp_tensor = torch.tensor([
timestamp.total_wct.total_seconds(),
timestamp.epoch_wct.total_seconds(),
timestamp.batch_wct.total_seconds(),
],
dtype=torch.float64)
dist.broadcast(rank_zero_timestamp_tensor, src=0)
assert torch.all(my_timestamp_tensor == rank_zero_timestamp_tensor)
@pytest.mark.parametrize('unit', [TimeUnit.EPOCH, TimeUnit.BATCH, TimeUnit.SAMPLE])
def test_training_duration_unit(
self,
train_dataloader: DataLoader,
model: ComposerModel,
unit: TimeUnit,
):
"""Test that the time is correctly set, and events fire correctly, with multiple calls to fit,
regardless of the time unit"""
# Construct the trainer
event_counter_callback = EventCounterCallback()
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
callbacks=[event_counter_callback],
)
# Get the batch size
batch_size = train_dataloader.batch_size
assert batch_size is not None
# Get the dataloader length
dataloader_len = trainer.state.dataloader_len
assert dataloader_len is not None
dataloader_len = int(dataloader_len)
# Get the dataset size
assert train_dataloader.dataset is not None
assert isinstance(train_dataloader.dataset, collections.abc.Sized)
num_samples_per_epoch = len(train_dataloader.dataset)
assert num_samples_per_epoch % batch_size == 0, 'This test assumes no drop_last'
# Determine the duration (given the unit) and the number of calls to .fit()
# to train 1 epoch
if unit == TimeUnit.SAMPLE:
duration = Time.from_sample(batch_size)
num_steps_per_epoch = num_samples_per_epoch // batch_size
elif unit == TimeUnit.BATCH:
duration = Time.from_batch(1)
num_steps_per_epoch = dataloader_len
elif unit == TimeUnit.EPOCH:
duration = Time.from_epoch(1)
num_steps_per_epoch = 1
else:
raise ValueError(f'Unsupported unit: {unit}')
current_epoch_time = datetime.timedelta(seconds=0)
# Train for one epoch, incrementally in steps of size `duration`
for i in range(num_steps_per_epoch):
# Train for `duration`
trainer.fit(duration=duration)
# Determine the number of batches trained
if unit in (TimeUnit.SAMPLE, TimeUnit.BATCH):
num_batches_trained = i + 1
else:
num_batches_trained = dataloader_len
# Validate the time
assert trainer.state.timestamp.batch == num_batches_trained
assert trainer.state.timestamp.sample == num_batches_trained * batch_size
assert trainer.state.timestamp.token == 0 # tokens not tracked
assert trainer.state.timestamp.token_in_epoch == 0 # tokens not tracked
assert trainer.state.timestamp.total_wct > current_epoch_time
# Validate the event counter callback
assert event_counter_callback.event_to_num_calls[Event.EPOCH_START] == 1
assert event_counter_callback.event_to_num_calls[Event.BATCH_START] == num_batches_trained
assert event_counter_callback.event_to_num_calls[Event.BATCH_END] == num_batches_trained
assert event_counter_callback.event_to_num_calls[Event.BATCH_CHECKPOINT] == num_batches_trained
if num_batches_trained < num_steps_per_epoch:
# Not yet finished the epoch
assert trainer.state.timestamp.epoch == 0
assert trainer.state.timestamp.batch_in_epoch == num_batches_trained
assert trainer.state.timestamp.sample_in_epoch == num_batches_trained * batch_size
assert event_counter_callback.event_to_num_calls[Event.EPOCH_END] == 0
assert event_counter_callback.event_to_num_calls[Event.EPOCH_CHECKPOINT] == 0
assert trainer.state.timestamp.epoch_wct > current_epoch_time
assert trainer.state.timestamp.epoch_wct == trainer.state.timestamp.total_wct
if i > 0:
assert trainer.state.timestamp.epoch_wct > trainer.state.timestamp.batch_wct
else:
assert trainer.state.timestamp.epoch_wct == trainer.state.timestamp.batch_wct
else:
# Finished the epoch
assert trainer.state.timestamp.epoch == 1
assert trainer.state.timestamp.batch_in_epoch == 0
assert trainer.state.timestamp.sample_in_epoch == 0
assert event_counter_callback.event_to_num_calls[Event.EPOCH_END] == 1
assert event_counter_callback.event_to_num_calls[Event.EPOCH_CHECKPOINT] == 1
assert trainer.state.timestamp.epoch_wct == datetime.timedelta(seconds=0)
assert trainer.state.timestamp.batch_wct == datetime.timedelta(seconds=0)
current_epoch_time = trainer.state.timestamp.total_wct
# Train for a second epoch
# Validate that batch_in_epoch / sample_in_epoch are reset properly
for i in range(num_steps_per_epoch):
# Train for `duration`
trainer.fit(duration=duration)
# Determine the number of batches trained in the epoch
if unit in (TimeUnit.SAMPLE, TimeUnit.BATCH):
num_batches_trained = i + 1
else:
num_batches_trained = dataloader_len
# Validate the time
assert trainer.state.timestamp.batch == dataloader_len + num_batches_trained
assert trainer.state.timestamp.sample == num_samples_per_epoch + num_batches_trained * batch_size
assert trainer.state.timestamp.token == 0 # tokens not tracked
assert trainer.state.timestamp.token_in_epoch == 0 # tokens not tracked
assert trainer.state.timestamp.total_wct > trainer.state.timestamp.batch_wct
assert trainer.state.timestamp.total_wct > trainer.state.timestamp.epoch_wct
# Validate the event counter callback
assert event_counter_callback.event_to_num_calls[Event.EPOCH_START] == 2
assert event_counter_callback.event_to_num_calls[Event.BATCH_START] == dataloader_len + num_batches_trained
assert event_counter_callback.event_to_num_calls[Event.BATCH_END] == dataloader_len + num_batches_trained
assert event_counter_callback.event_to_num_calls[
Event.BATCH_CHECKPOINT] == dataloader_len + num_batches_trained
if num_batches_trained < num_steps_per_epoch:
# Not yet finished the epoch
assert trainer.state.timestamp.epoch == 1
assert trainer.state.timestamp.batch_in_epoch == num_batches_trained
assert trainer.state.timestamp.sample_in_epoch == num_batches_trained * batch_size
assert event_counter_callback.event_to_num_calls[Event.EPOCH_END] == 1
assert event_counter_callback.event_to_num_calls[Event.EPOCH_CHECKPOINT] == 1
else:
# Finished the epoch
assert trainer.state.timestamp.epoch == 2
assert trainer.state.timestamp.batch_in_epoch == 0
assert trainer.state.timestamp.sample_in_epoch == 0
assert event_counter_callback.event_to_num_calls[Event.EPOCH_END] == 2
assert event_counter_callback.event_to_num_calls[Event.EPOCH_CHECKPOINT] == 2
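    # Worked example of the bookkeeping above (values assumed): with batch_size=2 and a
    # 10-sample dataset, unit=TimeUnit.SAMPLE gives duration=Time.from_sample(2) and
    # num_steps_per_epoch=5, so after the third incremental fit() call the timestamp reads
    # batch=3, sample=6, batch_in_epoch=3, and epoch=0.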
@pytest.mark.vision
class TestFFCVDataloaders:
train_file = None
val_file = None
tmp_path = None
@pytest.fixture(autouse=True)
def create_dataset(self, tmp_path_factory: pytest.TempPathFactory):
dataset_train = RandomImageDataset(size=16, is_PIL=True)
self.tmp_path = tmp_path_factory.mktemp('ffcv')
output_train_file = str(self.tmp_path / 'train.ffcv')
write_ffcv_dataset(dataset_train, write_path=output_train_file, num_workers=1, write_mode='proportion')
dataset_val = RandomImageDataset(size=16, is_PIL=True)
output_val_file = str(self.tmp_path / 'val.ffcv')
write_ffcv_dataset(dataset_val, write_path=output_val_file, num_workers=1, write_mode='proportion')
self.train_file = output_train_file
self.val_file = output_val_file
def _get_dataloader(self, is_train):
assert self.tmp_path is not None
assert self.train_file is not None
assert self.val_file is not None
datadir = os.path.join(self.tmp_path, self.train_file if is_train else self.val_file)
return build_ffcv_imagenet_dataloader(
datadir=str(datadir),
global_batch_size=4,
is_train=is_train,
num_workers=0,
)
@pytest.fixture
def config(self):
try:
import ffcv
except ImportError as e:
raise ImportError(('Composer was installed without ffcv support. '
'To use ffcv with Composer, please install ffcv in your environment.')) from e
train_dataloader = self._get_dataloader(is_train=True)
val_dataloader = self._get_dataloader(is_train=False)
assert isinstance(train_dataloader, ffcv.Loader)
assert isinstance(val_dataloader, ffcv.Loader)
return {
'model': SimpleConvModel(),
'train_dataloader': train_dataloader,
'eval_dataloader': val_dataloader,
'max_duration': '2ep',
}
"""
Tests that training completes with ffcv dataloaders.
"""
@device('gpu-amp', precision=True)
def test_ffcv(self, config, device, precision):
config['device'] = device
config['precision'] = precision
trainer = Trainer(**config)
trainer.fit()
@world_size(1, 2)
@device('cpu', 'gpu', 'gpu-amp', precision=True)
class TestTrainerEquivalence():
default_threshold = {'atol': 0, 'rtol': 0}
reference_model = None
reference_folder = None
def assert_models_equal(self, model_1, model_2, threshold=None):
if threshold is None:
threshold = self.default_threshold
assert model_1 is not model_2, 'Same model should not be compared.'
for param1, param2 in zip(model_1.parameters(), model_2.parameters()):
torch.testing.assert_close(param1, param2, **threshold)
@pytest.fixture
def config(self, device: Device, precision: Precision, world_size: int, rank_zero_seed: int):
"""Returns the reference config."""
train_dataset = RandomClassificationDataset(size=16)
eval_dataset = RandomClassificationDataset(size=16)
return {
'model':
SimpleModel(),
'train_dataloader':
DataLoader(
dataset=train_dataset,
batch_size=4,
sampler=dist.get_sampler(train_dataset),
),
'eval_dataloader':
DataLoader(
dataset=eval_dataset,
sampler=dist.get_sampler(eval_dataset),
),
'max_duration':
'2ep',
'seed':
rank_zero_seed,
'device':
device,
'precision':
precision,
'loggers': [], # no progress bar
}
@pytest.fixture(autouse=True)
def create_reference_model(self, config, tmp_path_factory: pytest.TempPathFactory, *args):
"""Trains the reference model, and saves checkpoints."""
config = copy.deepcopy(config) # ensure the reference model is not passed to tests
save_folder = tmp_path_factory.mktemp('{device}-{precision}'.format(**config))
config.update({'save_interval': '1ep', 'save_folder': str(save_folder), 'save_filename': 'ep{epoch}.pt'})
trainer = Trainer(**config)
trainer.fit()
self.reference_model = trainer.state.model
self.reference_folder = save_folder
def test_determinism(self, config, *args):
trainer = Trainer(**config)
trainer.fit()
self.assert_models_equal(trainer.state.model, self.reference_model)
def test_microbatch_size(self, config, precision, *args):
        # Microbatching requires a non-zero tolerance.
        # Precision.AMP_FP16 requires an even higher tolerance.
threshold = {
'atol': 1e-04 if precision == Precision.AMP_FP16 else 1e-05,
'rtol': 1e-02 if precision == Precision.AMP_FP16 else 1e-04,
}
config.update({
'device_train_microbatch_size': 2,
})
trainer = Trainer(**config)
trainer.fit()
self.assert_models_equal(trainer.state.model, self.reference_model, threshold=threshold)
def test_max_duration(self, config, *args):
num_batches = 2 * len(config['train_dataloader']) # convert 2ep to batches
config['max_duration'] = f'{num_batches}ba'
trainer = Trainer(**config)
trainer.fit()
self.assert_models_equal(trainer.state.model, self.reference_model)
def test_checkpoint(self, config, *args):
# load from epoch 1 checkpoint and finish training
assert self.reference_folder is not None
checkpoint_file = os.path.join(self.reference_folder, 'ep1.pt')
config['load_path'] = checkpoint_file
trainer = Trainer(**config)
assert trainer.state.timestamp.epoch == '1ep' # ensure checkpoint state loaded
trainer.fit()
self.assert_models_equal(trainer.state.model, self.reference_model)
def test_tuple_loss_trainer(self, config, *args):
def tuple_loss(outputs, targets, *args, **kwargs):
loss1 = 0.25 * soft_cross_entropy(outputs, targets, *args, **kwargs)
loss2 = 0.75 * soft_cross_entropy(outputs, targets, *args, **kwargs)
return (loss1, loss2)
config['model']._loss_fn = tuple_loss
trainer = Trainer(**config)
trainer.fit()
self.assert_models_equal(trainer.state.model, self.reference_model)
def test_dict_loss_trainer(self, config, *args):
def dict_loss(outputs, targets, *args, **kwargs):
losses = {}
losses['cross_entropy1'] = 0.25 * soft_cross_entropy(outputs, targets, *args, **kwargs)
losses['cross_entropy2'] = 0.75 * soft_cross_entropy(outputs, targets, *args, **kwargs)
return losses
config['model']._loss_fn = dict_loss
trainer = Trainer(**config)
trainer.fit()
self.assert_models_equal(trainer.state.model, self.reference_model)
def test_dict_loss_total_trainer(self, config, *args):
def dict_loss_total(outputs, targets, *args, **kwargs):
losses = {}
losses['cross_entropy1'] = 2 * soft_cross_entropy(outputs, targets, *args, **kwargs)
losses['cross_entropy2'] = 3 * soft_cross_entropy(outputs, targets, *args, **kwargs)
losses['total'] = soft_cross_entropy(outputs, targets, *args, **kwargs)
return losses
config['model']._loss_fn = dict_loss_total
trainer = Trainer(**config)
trainer.fit()
self.assert_models_equal(trainer.state.model, self.reference_model)
def test_algorithm_different(self, config, *args):
# as a control, we train with an algorithm and
# expect the test to fail
config['algorithms'] = [LabelSmoothing(0.1)]
trainer = Trainer(**config)
trainer.fit()
with pytest.raises(AssertionError):
self.assert_models_equal(trainer.state.model, self.reference_model)
def test_model_init(self, config, *args):
        # As a control test, we reinitialize the model weights and
        # expect the resulting trained model to differ from the reference.
config['model'] = SimpleModel()
trainer = Trainer(**config)
trainer.fit()
with pytest.raises(AssertionError):
self.assert_models_equal(trainer.state.model, self.reference_model)
class AssertDataAugmented(Callback):
"""Helper callback that asserts test whether the augmented batch was passed to the model during the forward pass.
The original batch is passed through the model and we assert that the outputs are not the same. This is to be used
in conjunction with an algorithm that augments the data during AFTER_DATALOADER event.
Assumes only one microbatch is used.
"""
def __init__(self, dataset):
self.dataset = dataset
def after_forward(self, state, logger):
if state.device_train_microbatch_size != state.train_dataloader.batch_size: # type: ignore
raise ValueError('This check assumes device_train_microbatch_size == batch_size')
batch_idx = state.timestamp.batch_in_epoch.value
batch_size = len(state.batch[0])
original_batch = self.dataset[batch_idx:batch_idx + batch_size]
original_outputs = state.model(original_batch)
assert not torch.allclose(original_outputs[0], state.outputs[0])
class TestTrainerEvents():
@pytest.fixture
def config(self, rank_zero_seed: int):
dataset = RandomImageDataset(size=16)
return {
'model': SimpleConvModel(),
'train_dataloader': DataLoader(
dataset=dataset,
batch_size=4,
sampler=dist.get_sampler(dataset),
),
'eval_dataloader': None,
'max_duration': '1ep',
'loggers': [],
'seed': rank_zero_seed,
}
def test_data_augmented(self, config):
config['algorithms'] = [CutOut()]
# we give the callback access to the dataset to test
# that the images have been augmented.
config['callbacks'] = [
AssertDataAugmented(dataset=config['train_dataloader'].dataset),
]
trainer = Trainer(**config)
trainer.fit()
def test_data_not_augmented(self, config):
config['callbacks'] = [
AssertDataAugmented(dataset=config['train_dataloader'].dataset),
]
trainer = Trainer(**config)
with pytest.raises(AssertionError):
trainer.fit()
@pytest.mark.world_size(2)
def test_state_run_name():
# seeding with the global rank to ensure that each rank has a different seed
reproducibility.seed_all(dist.get_global_rank())
run_name = _generate_run_name()
# The run name should be the same on every rank -- it is set via a distributed reduction
# Manually verify that all ranks have the same run name
run_names = dist.all_gather_object(run_name)
assert len(run_names) == 2 # 2 ranks
assert all(run_name == run_names[0] for run_name in run_names)
class TestAutoresumeCompatibility:
def get_logger(self,
tmp_path: pathlib.Path,
num_concurrent_uploads: int = 1,
file_path_format_string: Optional[str] = None):
"""Returns an object store logger that saves locally."""
remote_dir = str(tmp_path / 'object_store')
os.makedirs(remote_dir, exist_ok=True)
return RemoteUploaderDownloader(bucket_uri='libcloud://.',
backend_kwargs={
'provider': 'local',
'container': '.',
'provider_kwargs': {
'key': remote_dir,
},
},
num_concurrent_uploads=num_concurrent_uploads,
use_procs=False,
upload_staging_folder=str(tmp_path / 'staging_folder'),
**({
'file_path_format_string': file_path_format_string
} if file_path_format_string is not None else {}))
@pytest.fixture
def config(self):
"""Returns the reference config."""
train_dataset = RandomClassificationDataset()
eval_dataset = RandomClassificationDataset()
return {
'model':
SimpleModel(),
'train_dataloader':
DataLoader(
dataset=train_dataset,
batch_size=4,
sampler=dist.get_sampler(train_dataset),
),
'eval_dataloader':
DataLoader(
dataset=eval_dataset,
sampler=dist.get_sampler(eval_dataset),
),
'max_duration':
'2ep',
'autoresume':
True,
'loggers': [],
}
def test_autoresume_and_concurrent_uploads_error(self, tmp_path: pathlib.Path, config: Dict[str, Any]):
pytest.importorskip('libcloud')
config.update({
'run_name': 'autoresume_concurrent_uploads_run',
'save_folder': str(tmp_path / 'checkpoints'),
'loggers': [self.get_logger(tmp_path, num_concurrent_uploads=2),
self.get_logger(tmp_path)]
})
# Test that trainer errors out if autoresume is set, and RemoteUploaderDownloader does multiple concurrent uploads.
# The root cause of this is that it is possible for an updated symlink file to be uploaded before the corresponding
# checkpoint has finished uploading, and then the run dies, leaving the symlink contents pointing to a checkpoint that
# does not exist
with pytest.raises(ValueError,
match='Multiple concurrent uploads is not currently supported when using autoresume'):
_ = Trainer(**config)
def test_latest_and_object_format_string_error(self, tmp_path: pathlib.Path, config: Dict[str, Any]):
pytest.importorskip('libcloud')
config.update({
'run_name':
'latest_format_string_run',
'save_folder':
str(tmp_path / 'checkpoints'),
'loggers': [
self.get_logger(tmp_path, file_path_format_string='test/{remote_file_name}'),
self.get_logger(tmp_path)
]
})
# Test that trainer errors out if save_latest_filename is set, and RemoteUploaderDownloader file_path_format_string
# is not default. The root cause of this is that the symlink file contents are created outside of the RemoteUploaderDownloader
# and do not take into account its path formatting
with pytest.raises(
ValueError,
match='Specifying a `file_path_format_string` to a `RemoteUploaderDownloader` is not currently supported'
):
_ = Trainer(**config)
# Ensure that if save_latest_filename is not set, it does not error
config.update({'save_latest_filename': None, 'autoresume': False})
_ = Trainer(**config)
def test_autoresume_and_default_remote_uploader_downloader(self, tmp_path: pathlib.Path, config: Dict[str, Any]):
pytest.importorskip('libcloud')
config.update({
'run_name': 'autoresume_default_remote_ud_run',
'save_folder': str(tmp_path / 'checkpoints'),
'loggers': [self.get_logger(tmp_path), self.get_logger(tmp_path)]
})
# Just test that the default args for everything do not hit the above errors
_ = Trainer(**config)
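# A minimal, hedged sketch (not exercised by any test above) of an autoresume-friendly setup:
# a single RemoteUploaderDownloader with the default file_path_format_string and
# num_concurrent_uploads=1 avoids both error paths checked in TestAutoresumeCompatibility.
# The run name, folders, and local libcloud backend below are illustrative placeholders.
def _example_autoresume_setup(model: ComposerModel, train_dataloader: DataLoader, tmp_path: pathlib.Path) -> Trainer:
    remote_dir = str(tmp_path / 'object_store')
    os.makedirs(remote_dir, exist_ok=True)
    logger = RemoteUploaderDownloader(
        bucket_uri='libcloud://.',
        backend_kwargs={
            'provider': 'local',
            'container': '.',
            'provider_kwargs': {
                'key': remote_dir,
            },
        },
        num_concurrent_uploads=1,  # required when autoresume=True
        use_procs=False,
    )
    return Trainer(
        model=model,
        train_dataloader=train_dataloader,
        max_duration='1ep',
        run_name='example-autoresume-run',
        save_folder=str(tmp_path / 'checkpoints'),
        autoresume=True,
        loggers=[logger],
    )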
| composer-dev | tests/trainer/test_trainer.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import contextlib
import copy
import os
import pathlib
import shutil
import tarfile
import tempfile
import time
from glob import glob
from typing import Any, Dict, List, Optional, Union
from unittest.mock import MagicMock
import pytest
import torch
import torch.distributed
from pytest import MonkeyPatch
from torch.utils.data import DataLoader
from composer.algorithms import NoOpModel
from composer.callbacks import CheckpointSaver
from composer.core import Callback, Time, TimeUnit
from composer.loggers import RemoteUploaderDownloader, remote_uploader_downloader
from composer.optim import ExponentialScheduler
from composer.trainer import trainer
from composer.trainer.trainer import Trainer
from composer.utils import dist, is_tar
from composer.utils.checkpoint import glob_filter
from composer.utils.object_store.object_store import ObjectStore
from composer.utils.object_store.s3_object_store import S3ObjectStore
from tests.common import (RandomClassificationDataset, RandomImageDataset, SimpleConvModel, SimpleModel, deep_compare,
device)
from tests.common.markers import world_size
class DummyStatefulCallback(Callback):
def __init__(self) -> None:
super().__init__()
self.random_value = time.time_ns()
def state_dict(self) -> Dict[str, Any]:
return {
'random_value': self.random_value,
}
def load_state_dict(self, state: Dict[str, Any]) -> None:
self.random_value = state['random_value']
def _load_checkpoint(filename: Union[str, pathlib.Path]):
filename = str(filename).format(rank=0)
if not is_tar(filename):
return torch.load(filename, map_location='cpu')
with tempfile.TemporaryDirectory() as tmp_dir:
with tarfile.open(filename) as tarball:
tarball.extractall(tmp_dir)
states_path = os.path.join(tmp_dir, 'composer_states.pt')
return torch.load(states_path, map_location='cpu')
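# A small, hedged helper (not used by the tests below) illustrating how a checkpoint loaded via
# `_load_checkpoint` can be inspected; the top-level 'state'/'rng' layout is assumed from the
# structure exercised in `_assert_checkpoints_equivalent` further down.
def _summarize_checkpoint(filename: Union[str, pathlib.Path]) -> Dict[str, str]:
    """Returns the type name of each top-level section of a Composer checkpoint for quick inspection."""
    checkpoint = _load_checkpoint(filename)
    return {key: type(value).__name__ for key, value in checkpoint.items()}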
@pytest.mark.parametrize(
'remove_field_paths,filter_params',
[
[[['state', 'model', 'classifier', 'weights'], ['state', 'model', 'classifier', 'bias']],
['state/model/classifier/weights', 'state/model/classifier/bias']],
[[
['state', 'model', 'classifier', 'weights'],
['state', 'model', 'classifier', 'bias'],
], ['state/model/classifier/*']],
[
[['state', 'timestep']],
['state/timestep'],
],
[
[['state', 'list_element', 0]],
['state/list_element/0'],
],
[
[['state', 'list_element', 0, 'nested_list_element']],
['state/list_element/0/nested_list_element'],
],
[
            # The index 0 is repeated because removing a list element shifts the remaining indices
[['state', 'list_element', 0], ['state', 'list_element', 0]],
['state/list_element/0', 'state/list_element/1'],
],
[
[
['state', 'model', 'classifier', 'weights'],
['state', 'model', 'layer1', 'weights'],
['state', 'model', 'layer2', 'weights'],
],
['state/model/*/weights'],
],
[
[['state', 'model', 'layer1', 'weights'], ['state', 'model', 'layer2', 'weights']],
['state/model/layer*/weights'],
],
],
)
def test_ignore_params(remove_field_paths: List[List[str]], filter_params: List[str]):
# Set up base dictionary
base_dict = {
'state': {
'run_name': 'my_first_run',
'timestep': 7,
'list_element': [{
'nested_list_element': 'hello'
}, 'world'],
'model': {
'layer1': {
'weights': 6,
'bias': 2
},
'layer2': {
'weights': 7,
'bias': 1
},
'classifier': {
'weights': 5,
'bias': 3
}
}
},
'rng': 0,
}
# Remove classifier params
new_dict = copy.deepcopy(base_dict)
for remove_field_path in remove_field_paths:
temp_dict = base_dict
for step in remove_field_path[:-1]:
temp_dict = temp_dict[step]
del temp_dict[remove_field_path[-1]]
glob_filter(filter_params)(new_dict)
assert base_dict == new_dict
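# Hedged illustration of `glob_filter` on its own: the returned callable mutates a state dict in
# place, dropping any leaves whose slash-joined path matches one of the glob patterns. The
# dictionary shape here is made up for the example.
def _example_glob_filter() -> Dict[str, Any]:
    state = {'state': {'model': {'classifier': {'weights': 1, 'bias': 2}, 'layer1': {'weights': 3}}}}
    filtered = copy.deepcopy(state)
    glob_filter(['state/model/classifier/*'])(filtered)  # removes classifier weights and bias
    return filtered  # {'state': {'model': {'classifier': {}, 'layer1': {'weights': 3}}}}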
@pytest.mark.parametrize('folder,filename',
[('{run_name}/my_checkpoints', 'ep{epoch}-rank{rank}.pt'),
(pathlib.Path('{run_name}/my_checkpoints'), pathlib.Path('ep{epoch}-rank{rank}.pt'))])
def test_checkpoint_saver_folder_filename_path(folder: Union[str, pathlib.Path], filename: Union[str, pathlib.Path]):
checkpoint_saver = CheckpointSaver(folder=folder, filename=filename)
assert checkpoint_saver.folder == str(folder)
assert checkpoint_saver.filename.filename == str(filename)
@pytest.mark.parametrize(
'remote_file_name,latest_filename,latest_remote_file_name',
[('{run_name}/my_checkpoints/ep{epoch}-ba{batch}-rank{rank}.pt', 'latest-rank{rank}.pt',
'{run_name}/checkpoints/latest-rank{rank}.pt'),
(pathlib.Path('{run_name}/my_checkpoints/ep{epoch}-ba{batch}-rank{rank}.pt'), pathlib.Path('latest-rank{rank}.pt'),
pathlib.Path('{run_name}/checkpoints/latest-rank{rank}.pt'))])
def test_checkpoint_filenames(remote_file_name: Optional[Union[str, pathlib.Path]],
latest_filename: Optional[Union[str, pathlib.Path]],
latest_remote_file_name: Optional[Union[str, pathlib.Path]]):
checkpoint_saver = CheckpointSaver(remote_file_name=remote_file_name,
latest_filename=latest_filename,
latest_remote_file_name=latest_remote_file_name)
assert checkpoint_saver.remote_file_name is not None
assert checkpoint_saver.latest_filename is not None
assert checkpoint_saver.latest_remote_file_name is not None
assert checkpoint_saver.remote_file_name.filename == str(remote_file_name)
assert checkpoint_saver.latest_filename.filename == str(latest_filename)
assert checkpoint_saver.latest_remote_file_name.filename == str(latest_remote_file_name)
@pytest.mark.parametrize('remote_file_name,latest_filename,latest_remote_file_name', [(None, None, None)])
def test_checkpoint_filenames_none(remote_file_name: Optional[Union[str, pathlib.Path]],
latest_filename: Optional[Union[str, pathlib.Path]],
latest_remote_file_name: Optional[Union[str, pathlib.Path]]):
checkpoint_saver = CheckpointSaver(remote_file_name=remote_file_name,
latest_filename=latest_filename,
latest_remote_file_name=latest_remote_file_name)
    assert checkpoint_saver.remote_file_name is None
    assert checkpoint_saver.latest_filename is None
    assert checkpoint_saver.latest_remote_file_name is None
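# Hedged note on the filename templating checked above: the '{epoch}', '{batch}', and '{rank}'
# placeholders are filled in at save time, so (with made-up values) 'ep{epoch}-rank{rank}.pt'
# becomes 'ep2-rank0.pt' for epoch 2 on rank 0.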
class TestCheckpointSaving:
def get_trainer(self, **kwargs):
model = SimpleConvModel()
return Trainer(model=model, **kwargs)
@pytest.mark.parametrize('add_remote_ud', [True, False])
def test_s3_uri_creates_remote_ud(self, add_remote_ud: bool, monkeypatch: MonkeyPatch):
mock_validate_credentials = MagicMock()
monkeypatch.setattr(remote_uploader_downloader, '_validate_credentials', mock_validate_credentials)
if add_remote_ud:
with pytest.warns(UserWarning):
trainer = self.get_trainer(save_folder='s3://bucket_name/{run_name}/checkpoints',
loggers=[
RemoteUploaderDownloader('s3://bucket_name',
file_path_format_string='{remote_file_name}')
])
else:
trainer = self.get_trainer(save_folder='s3://bucket_name/{run_name}/checkpoints')
remote_uds = [
logger_dest for logger_dest in trainer.logger.destinations
if isinstance(logger_dest, RemoteUploaderDownloader)
]
assert len(remote_uds) == 1
remote_ud = remote_uds[0]
assert remote_ud.remote_backend_name == 's3'
assert remote_ud.remote_bucket_name == 'bucket_name'
@pytest.mark.parametrize('uri', ['wandb://foo/bar', 'gcs://foo/bar', 'sftp://foo/bar"'])
def test_other_uris_error_out(self, uri: str):
with pytest.raises(NotImplementedError):
self.get_trainer(save_folder=uri)
@pytest.mark.parametrize('local_path', ['foo/bar/baz'])
def test_local_paths_work(self, local_path: str):
self.get_trainer(save_folder=local_path)
@pytest.mark.parametrize('save_folder,expected_path',
[('s3://bucket_name/{run_name}/my_checkpoints', '{run_name}/my_checkpoints'),
('{run_name}/my_checkpoints', '{run_name}/my_checkpoints'), ('s3://bucket_name', '')])
def test_checkpoint_saver_properly_constructed(self, save_folder: str, expected_path: str,
monkeypatch: MonkeyPatch):
mock_validate_credentials = MagicMock()
monkeypatch.setattr(remote_uploader_downloader, '_validate_credentials', mock_validate_credentials)
mock_checkpoint_saver = MagicMock()
monkeypatch.setattr(trainer, 'CheckpointSaver', mock_checkpoint_saver)
self.get_trainer(save_folder=save_folder)
expected_prefix = expected_path + '/' if expected_path != '' else expected_path
rest_of_checkpoint_saver_kwargs = {
'filename': 'ep{epoch}-ba{batch}-rank{rank}.pt',
'remote_file_name': expected_prefix + 'ep{epoch}-ba{batch}-rank{rank}.pt',
'latest_filename': 'latest-rank{rank}.pt',
'latest_remote_file_name': expected_prefix + 'latest-rank{rank}.pt',
'overwrite': False,
'weights_only': False,
'save_interval': '1ep',
'num_checkpoints_to_keep': -1
}
expected_folder = expected_path.rstrip('/') if expected_path != '' else '.'
mock_checkpoint_saver.assert_called_once_with(folder=expected_folder, **rest_of_checkpoint_saver_kwargs)
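# Worked example of the save_folder handling verified above (values from the parametrization):
# save_folder='s3://bucket_name/{run_name}/my_checkpoints' results in checkpoints written locally
# under '{run_name}/my_checkpoints' while a RemoteUploaderDownloader uploads them to the
# 'bucket_name' bucket with the same prefix, e.g. '{run_name}/my_checkpoints/ep1-ba5-rank0.pt'.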
class TestCheckpointLoading:
def _assert_weights_equivalent(self, m1: torch.nn.Module, m2: torch.nn.Module):
for p1, p2 in zip(m1.parameters(), m2.parameters()):
torch.testing.assert_close(p1, p2)
def get_trainer(self, **kwargs):
model = SimpleConvModel()
optimizer = torch.optim.Adam(model.parameters())
train_dataset = RandomImageDataset()
eval_dataset = RandomImageDataset()
train_batch_size = 2
return Trainer(
model=model,
train_dataloader=DataLoader(
dataset=train_dataset,
batch_size=train_batch_size,
sampler=dist.get_sampler(train_dataset),
),
eval_dataloader=DataLoader(
dataset=eval_dataset,
batch_size=4,
sampler=dist.get_sampler(eval_dataset),
),
device_train_microbatch_size=train_batch_size // 2,
precision='fp32',
train_subset_num_batches=5,
eval_subset_num_batches=1,
save_interval='1ep',
eval_interval='1ep',
save_filename='ep{epoch}.pt',
max_duration='2ep',
optimizers=optimizer,
schedulers=ExponentialScheduler(gamma=0.9),
callbacks=[DummyStatefulCallback()],
**kwargs,
)
def get_logger(self, tmp_path: pathlib.Path):
"""Returns an object store logger that saves locally."""
remote_dir = str(tmp_path / 'object_store')
os.makedirs(remote_dir, exist_ok=True)
return RemoteUploaderDownloader(
bucket_uri='libcloud://.',
backend_kwargs={
'provider': 'local',
'container': '.',
'provider_kwargs': {
'key': remote_dir,
},
},
num_concurrent_uploads=1,
use_procs=False,
upload_staging_folder=str(tmp_path / 'staging_folder'),
)
@pytest.mark.parametrize('load_path,load_object_store',
[('s3://my-bucket/my-run-name/my-checkpoints', None),
('s3://my-bucket/my-run-name/my-checkpoints', S3ObjectStore(bucket='my-bucket')),
('my-run-name/my-checkpoints', S3ObjectStore(bucket='my-bucket'))])
def test_load_from_uri(self, load_path: str, load_object_store: Optional[ObjectStore], monkeypatch: MonkeyPatch):
mock_validate_credentials = MagicMock()
monkeypatch.setattr(remote_uploader_downloader, '_validate_credentials', mock_validate_credentials)
mock_load_checkpoint = MagicMock()
monkeypatch.setattr(trainer.checkpoint, 'load_checkpoint', mock_load_checkpoint)
self.get_trainer(load_path=load_path, load_object_store=load_object_store)
mock_load_checkpoint.assert_called_once()
(_, call_kwargs), = mock_load_checkpoint.call_args_list
assert call_kwargs['path'] == 'my-run-name/my-checkpoints'
assert isinstance(call_kwargs['object_store'], S3ObjectStore)
assert call_kwargs['object_store'].bucket == 'my-bucket'
@pytest.mark.parametrize('load_path', [
'sftp://my-bucket/my-run-name/my-checkpoints', 'wandb://my-bucket/my-run-name/my-checkpoints',
'gcs://my-bucket/my-run-name/my-checkpoints'
])
def test_other_backends_error(self, load_path: str, monkeypatch: MonkeyPatch):
mock_validate_credentials = MagicMock()
monkeypatch.setattr(remote_uploader_downloader, '_validate_credentials', mock_validate_credentials)
with pytest.raises(NotImplementedError):
self.get_trainer(load_path=load_path)
@device('cpu', 'gpu')
@pytest.mark.parametrize('load_weights_only', [True, False])
def test_load_weights(self, device, load_weights_only):
trainer_1 = self.get_trainer(save_folder='first', device=device)
trainer_1.fit()
trainer_1.close()
last_checkpoint = os.path.join('first', 'ep2.pt')
trainer_2 = self.get_trainer(
load_path=last_checkpoint,
load_weights_only=load_weights_only,
load_strict_model_weights=load_weights_only,
)
# check weights loaded properly
self._assert_weights_equivalent(
trainer_1.state.model,
trainer_2.state.model,
)
# check callbacks state
stateful_callbacks_equal = self._stateful_callbacks_equal(
trainer_1.state.callbacks,
trainer_2.state.callbacks,
)
if load_weights_only:
# callback state should not have been loaded
assert not stateful_callbacks_equal
else:
assert stateful_callbacks_equal
def _stateful_callbacks_equal(self, callbacks1, callbacks2):
cb1 = next((cb for cb in callbacks1 if isinstance(cb, DummyStatefulCallback)))
cb2 = next((cb for cb in callbacks2 if isinstance(cb, DummyStatefulCallback)))
return cb1.random_value == cb2.random_value
def test_load_weights_object_store(self, tmp_path):
pytest.importorskip('libcloud')
trainer_1 = self.get_trainer(
save_folder='{run_name}/checkpoints',
loggers=[self.get_logger(tmp_path)],
run_name='electric-zebra',
)
trainer_1.fit()
trainer_1.close()
trainer_2 = self.get_trainer(
loggers=[self.get_logger(tmp_path)],
run_name='electric-zebra',
load_path='electric-zebra/checkpoints/latest-rank0.pt',
load_object_store=self.get_logger(tmp_path),
)
# check weights loaded properly
self._assert_weights_equivalent(
trainer_1.state.model,
trainer_2.state.model,
)
@world_size(1, 2)
@device('cpu', 'gpu')
@pytest.mark.parametrize('use_object_store', [True, False])
@pytest.mark.parametrize('delete_local', [True, False])
def test_autoresume(self, device: str, tmp_path: pathlib.Path, use_object_store: bool, delete_local: bool,
world_size: int):
if delete_local and not use_object_store:
pytest.skip('Invalid test setting.')
if use_object_store:
pytest.importorskip('libcloud')
trainer_1 = self.get_trainer(
save_folder='first',
device=device,
run_name='big-chungus',
loggers=[self.get_logger(tmp_path)] if use_object_store else [],
)
# trains the model, saving the checkpoint files
trainer_1.fit()
trainer_1.close()
if delete_local:
# delete files locally, forcing trainer to look in object store
shutil.rmtree('first')
trainer_2 = self.get_trainer(
save_folder='first',
device=device,
run_name='big-chungus',
autoresume=True,
load_path='ignore_me.pt', # this should be ignored
loggers=[self.get_logger(tmp_path)] if use_object_store else [],
)
self._assert_weights_equivalent(
trainer_1.state.model,
trainer_2.state.model,
)
assert trainer_1.state.run_name == trainer_2.state.run_name
def test_different_run_names(self):
trainer_1 = self.get_trainer(
save_folder='first/',
seed=12345,
)
trainer_1.fit()
trainer_1.close()
trainer_2 = self.get_trainer(
load_path=os.path.join('first', 'ep2.pt'),
seed=12345,
)
assert trainer_1.state.run_name != trainer_2.state.run_name
@device('cpu', 'gpu')
@pytest.mark.parametrize('save_overwrite', [True, False])
def test_save_overwrite(self, device, save_overwrite):
trainer_1 = self.get_trainer(
save_folder='first',
device=device,
)
trainer_1.fit()
trainer_1.close()
ctx = None
if save_overwrite:
ctx = contextlib.nullcontext()
else:
ctx = pytest.raises(FileExistsError)
with ctx: # expect FileExistsError if save_overwrite=False
trainer_2 = self.get_trainer(
save_folder='first',
save_overwrite=save_overwrite,
load_path=os.path.join('first', 'ep1.pt'),
device=device,
)
trainer_2.fit(duration='1ba')
# loading from the last checkpoint should work regardless
# of save_overwrite, as new checkpoints are later in time.
trainer_3 = self.get_trainer(
save_folder='first',
save_overwrite=save_overwrite,
load_path=os.path.join('first', 'ep2.pt'),
device=device,
)
trainer_3.fit(duration='1ba')
def test_autoload_algorithm_old_checkpoint(self):
trainer_1 = self.get_trainer(
save_folder='first',
algorithms=[NoOpModel()],
)
trainer_1.fit()
trainer_1.close()
trainer_2 = self.get_trainer(
load_path=os.path.join('first', 'ep1.pt'),
algorithms=[NoOpModel()],
)
trainer_2.fit(duration='1ba')
# Monkeypatch algorithm to have different signature
old_init, old_repr = NoOpModel.__init__, NoOpModel.__repr__
NoOpModel.__init__ = lambda self, x: None # type: ignore
NoOpModel.__repr__ = lambda self: 'NoOpModel(3)'
with pytest.warns(UserWarning, match='required_on_load algorithm.*'), pytest.raises(
ValueError, match='loaded state dict contains a parameter group.*'):
trainer_3 = self.get_trainer(load_path=os.path.join('first', 'ep1.pt'),)
trainer_3.fit(duration='1ba')
# Restore algorithm
NoOpModel.__init__, NoOpModel.__repr__ = old_init, old_repr
class TestCheckpointResumption:
def get_trainer(self, **kwargs):
model = SimpleModel()
optimizer = torch.optim.Adam(model.parameters())
train_dataset = RandomClassificationDataset(size=24)
eval_dataset = RandomClassificationDataset(size=12)
train_batch_size = 2
return Trainer(
model=model,
train_dataloader=DataLoader(
dataset=train_dataset,
batch_size=train_batch_size,
sampler=dist.get_sampler(train_dataset),
),
eval_dataloader=DataLoader(
dataset=eval_dataset,
batch_size=2,
sampler=dist.get_sampler(eval_dataset),
),
device_train_microbatch_size=train_batch_size // 2,
precision='fp32',
train_subset_num_batches=5,
max_duration='2ep',
optimizers=optimizer,
schedulers=ExponentialScheduler(gamma=0.9),
callbacks=[DummyStatefulCallback()],
**kwargs,
)
@pytest.mark.parametrize('world_size', [
pytest.param(1),
pytest.param(2, marks=pytest.mark.world_size(2)),
])
@pytest.mark.parametrize('device,deepspeed_zero_stage', [
pytest.param('cpu', None, id='cpu-ddp'),
pytest.param('gpu', None, id='gpu-ddp', marks=pytest.mark.gpu),
pytest.param('gpu', 0, id='deepspeed-zero0', marks=pytest.mark.gpu),
pytest.param('gpu', 1, id='deepspeed-zero1', marks=pytest.mark.gpu),
pytest.param('gpu', 2, id='deepspeed-zero2', marks=pytest.mark.gpu),
])
@pytest.mark.parametrize(
'seed,save_interval,save_filename,resume_file,final_checkpoint',
[
[None, '1ep', 'ep{epoch}-rank{rank}.pt', 'ep1-rank{rank}.pt', 'latest-rank{rank}.pt'
], # test randomized seed saving and symlinking
[42, '1ep', 'ep{epoch}-rank{rank}.pt', 'ep1-rank{rank}.pt', 'ep2-rank{rank}.pt'], # test save at epoch end
[42, '1ep', 'ep{epoch}-rank{rank}.tgz', 'ep1-rank{rank}.tgz', 'ep2-rank{rank}.tgz'
], # test tarball with compression
[42, '2ba', 'ba{batch}-rank{rank}.pt', 'ba4-rank{rank}.pt', 'ba8-rank{rank}.pt'
], # test save batch in partial epoch
[42, '1ba', 'ba{batch}-rank{rank}.pt', 'ba5-rank{rank}.pt', 'ba8-rank{rank}.pt'
], # test save batch at epoch end
[42, '2ba', 'ba{batch}-rank{rank}.pt', 'ba6-rank{rank}.pt', 'ba8-rank{rank}.pt'
], # test save batch after complete epoch
],
)
def test_resumption(
self,
device: str,
world_size: int,
deepspeed_zero_stage: Optional[int],
save_interval: str,
save_filename: str,
resume_file: str,
final_checkpoint: str,
seed: Optional[int],
tmp_path: pathlib.Path,
):
# all ranks use rank 0 folder
tmp_paths = dist.all_gather_object(os.path.abspath(tmp_path))
save_folder = pathlib.Path(tmp_paths[0])
if deepspeed_zero_stage:
deepspeed_config = {'zero_optimization': {'stage': deepspeed_zero_stage}}
# save_checkpoint appends .tar for deepspeed
if not is_tar(resume_file):
resume_file += '.tar'
if not is_tar(final_checkpoint):
final_checkpoint += '.tar'
else:
deepspeed_config = None
trainer_1 = self.get_trainer(
save_folder=os.path.join(save_folder, 'first'),
save_filename=save_filename,
save_interval=save_interval,
eval_interval=save_interval,
deepspeed_config=deepspeed_config,
seed=seed,
device=device,
)
trainer_1.fit()
trainer_1.close()
self._assert_expected_num_checkpoints(
save_folder=os.path.join(save_folder, 'first'),
save_interval=save_interval,
num_epochs=2, # set in get_trainer()
num_batches_per_epoch=5, # set in get_trainer()
is_deepspeed=deepspeed_config is not None,
)
if not deepspeed_config:
# for DDP training, only rank 0 saves
resume_file = resume_file.format(rank=0)
resume_file = os.path.join(save_folder, 'first', resume_file)
trainer_2 = self.get_trainer(
save_folder=os.path.join(save_folder, 'second'),
save_filename=save_filename,
save_interval=save_interval,
eval_interval=save_interval,
deepspeed_config=deepspeed_config,
seed=seed,
device=device,
load_path=resume_file, # <-- resume training from file
)
trainer_2.fit()
trainer_2.close()
self._assert_checkpoints_equivalent(
save_folder / 'first' / final_checkpoint,
save_folder / 'second' / final_checkpoint,
)
def _assert_checkpoints_equivalent(self, file1, file2):
checkpoint_1 = _load_checkpoint(file1)
checkpoint_2 = _load_checkpoint(file2)
# Remove the wall clock time
del checkpoint_1['state']['timestamp']['Timestamp']['total_wct']
del checkpoint_1['state']['timestamp']['Timestamp']['epoch_wct']
del checkpoint_1['state']['timestamp']['Timestamp']['batch_wct']
del checkpoint_2['state']['timestamp']['Timestamp']['total_wct']
del checkpoint_2['state']['timestamp']['Timestamp']['epoch_wct']
del checkpoint_2['state']['timestamp']['Timestamp']['batch_wct']
# Remove run_name, since it's a function of time
del checkpoint_1['state']['run_name']
del checkpoint_2['state']['run_name']
deep_compare(checkpoint_1, checkpoint_2)
# deepspeed checkpoints do not have model or optimizer
# so either model, optimizer should be in all checkpoints or in none
keys_in = (
'model' in checkpoint_1['state'],
'optimizers' in checkpoint_1['state'],
'model' in checkpoint_2['state'],
'optimizers' in checkpoint_2['state'],
)
assert all(keys_in) or not any(keys_in)
def _assert_expected_num_checkpoints(
self,
save_folder: str,
save_interval: str,
num_epochs: int,
num_batches_per_epoch: int,
is_deepspeed: bool,
):
interval = Time.from_timestring(save_interval)
if interval.unit == TimeUnit.EPOCH:
expected_num_files = ((num_epochs - 1) // interval.value) + 1
else:
expected_num_files = ((num_batches_per_epoch * num_epochs - 1) // interval.value) + 1
expected_num_files += 1 # account for symlink
if is_deepspeed:
# each rank saves
expected_num_files *= dist.get_world_size()
files = os.listdir(save_folder)
assert len(files) == expected_num_files
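    # Worked example of the count above (assumed values): save_interval='2ba' with num_epochs=2 and
    # num_batches_per_epoch=5 gives ((5 * 2 - 1) // 2) + 1 = 5 checkpoint files plus 1 symlink,
    # i.e. 6 files per rank (multiplied by the world size when DeepSpeed saves on every rank).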
@pytest.mark.parametrize('world_size', [
pytest.param(1),
pytest.param(2, marks=pytest.mark.world_size(2)),
])
@pytest.mark.parametrize('device,deepspeed_enabled,zero_stage', [
pytest.param('cpu', False, None, id='cpu-ddp'),
pytest.param('gpu', False, None, id='gpu-ddp', marks=pytest.mark.gpu),
pytest.param('gpu', True, 0, id='deepspeed-zero0', marks=pytest.mark.gpu),
pytest.param('gpu', True, 1, id='deepspeed-zero1', marks=pytest.mark.gpu),
pytest.param('gpu', True, 2, id='deepspeed-zero2', marks=pytest.mark.gpu),
])
def test_rotate_checkpoints(
world_size,
device,
deepspeed_enabled,
zero_stage,
tmp_path: pathlib.Path,
):
num_keep = 5
# all ranks use rank 0 folder
tmp_paths = dist.all_gather_object(os.path.abspath(tmp_path))
save_folder = tmp_paths[0]
    deepspeed_config = None
    if deepspeed_enabled:
        deepspeed_config = {'zero_optimization': {'stage': zero_stage}}
train_dataset = RandomImageDataset()
trainer = Trainer(
model=SimpleConvModel(),
train_dataloader=DataLoader(
dataset=train_dataset,
sampler=dist.get_sampler(train_dataset),
),
save_folder=str(save_folder),
save_filename='checkpoint_{rank}_{batch}.pt',
save_interval='1ba',
max_duration='10ba',
save_num_checkpoints_to_keep=num_keep,
device=device,
        deepspeed_config=deepspeed_config,
)
trainer.fit()
dist.barrier() # ensure all checkpoints rotated across ranks
# deepspeed saves 1 file per rank
expected_num = num_keep if not deepspeed_enabled else num_keep * world_size
files = glob(os.path.join(save_folder, 'checkpoint_*'))
assert len(files) == expected_num
dist.barrier() # all ranks finish before cleaning up tmpdir
| composer-dev | tests/trainer/test_checkpoint.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
# disabling general type issues because of monkeypatching
# pyright: reportGeneralTypeIssues=none
"""Fixtures available in doctests.
The script is run before any doctests are executed,
so all imports and variables are available in any doctest.
The output of this setup script does not show up in the documentation.
"""
import logging
logging.basicConfig(level=logging.WARN)
import os
import sys
import tempfile
from typing import Any
from typing import Callable as Callable
from urllib.parse import urlparse
import numpy as np
import pytest
import torch
import torch.optim
import torch.utils.data
from PIL import Image
from torch.optim.lr_scheduler import CosineAnnealingLR
import composer
import composer.loggers
import composer.loggers.remote_uploader_downloader
import composer.trainer
import composer.trainer.trainer
import composer.utils
import composer.utils.checkpoint
import composer.utils.file_helpers
from composer import Trainer
from composer.core import Algorithm as Algorithm
from composer.core import Callback as Callback
from composer.core import DataSpec as DataSpec
from composer.core import Engine as Engine
from composer.core import Evaluator as Evaluator
from composer.core import Event as Event
from composer.core import State as State
from composer.core import Time as Time
from composer.core import Timestamp as Timestamp
from composer.core import TimeUnit as TimeUnit
from composer.core import types as types
from composer.datasets.synthetic import SyntheticBatchPairDataset
from composer.devices import DeviceCPU
from composer.loggers import InMemoryLogger as InMemoryLogger
from composer.loggers import Logger as Logger
from composer.loggers import RemoteUploaderDownloader
from composer.models import ComposerModel as ComposerModel
from composer.optim.scheduler import ConstantScheduler
from composer.utils import LibcloudObjectStore
from composer.utils import ensure_tuple as ensure_tuple
try:
import wandb
_WANDB_INSTALLED = True
del wandb # unused
except ImportError:
_WANDB_INSTALLED = False
try:
import comet_ml
_COMETML_INSTALLED = True
del comet_ml # unused
except ImportError:
_COMETML_INSTALLED = False
try:
import libcloud
_LIBCLOUD_INSTALLED = True
del libcloud # unused
except ImportError:
_LIBCLOUD_INSTALLED = False
# Need to insert the repo root at the beginning of the path, since there may be other modules named `tests`
# Assuming that docs generation is running from the `docs` directory
_docs_dir = os.path.abspath('.')
_repo_root = os.path.dirname(_docs_dir)
if sys.path[0] != _repo_root:
sys.path.insert(0, _repo_root)
from tests.common import SimpleModel
from tests.common.datasets import RandomTextClassificationDataset
# Disable wandb
os.environ['WANDB_MODE'] = 'disabled'
# Change the cwd to be the tempfile, so we don't pollute the documentation source folder
tmpdir = tempfile.mkdtemp()
cwd = os.path.abspath('.')
os.chdir(tmpdir)
num_channels = 3
num_classes = 10
data_shape = (num_channels, 5, 5)
Model = SimpleModel
model = SimpleModel(num_channels, num_classes)
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
scheduler = CosineAnnealingLR(optimizer, T_max=1)
dataset = SyntheticBatchPairDataset(
total_dataset_size=100,
data_shape=data_shape,
num_classes=num_classes,
num_unique_samples_to_create=10,
)
train_dataset = dataset
eval_dataset = dataset
batch_size = 10
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size,
num_workers=0,
pin_memory=False,
drop_last=True,
)
eval_dataloader = torch.utils.data.DataLoader(
eval_dataset,
batch_size=batch_size,
num_workers=0,
pin_memory=False,
drop_last=False,
)
state = State(
rank_zero_seed=0,
model=model,
run_name='run_name',
device=DeviceCPU(),
optimizers=optimizer,
device_train_microbatch_size=batch_size,
dataloader=train_dataloader,
dataloader_label='train',
max_duration='1ep',
precision='fp32',
)
logger = Logger(state)
engine = Engine(state, logger)
image = Image.fromarray(np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8))
# error: "randn" is not a known member of module (reportGeneralTypeIssues)
X_example = torch.randn(batch_size, num_channels, 32, 32) # type: ignore
# error: "randn" is not a known member of module (reportGeneralTypeIssues)
logits = torch.randn(batch_size, num_classes) # type: ignore
# error: "randint" is not a known member of module (reportGeneralTypeIssues)
y_example = torch.randint(num_classes, (batch_size,)) # type: ignore
def loss_fun(output, target, reduction='none'):
"""Dummy loss function."""
return torch.ones_like(target)
# Patch Trainer __init__ function to replace arguments while preserving type
_original_trainer_init = Trainer.__init__
def _new_trainer_init(self, fake_ellipses: None = None, **kwargs: Any):
if 'model' not in kwargs:
kwargs['model'] = model
if 'optimizers' not in kwargs:
kwargs['optimizers'] = torch.optim.SGD(kwargs['model'].parameters(), lr=0.01)
if 'schedulers' not in kwargs:
kwargs['schedulers'] = ConstantScheduler()
if 'max_duration' not in kwargs:
kwargs['max_duration'] = '1ep'
if 'train_dataloader' not in kwargs:
kwargs['train_dataloader'] = train_dataloader
if 'eval_dataloader' not in kwargs:
kwargs['eval_dataloader'] = eval_dataloader
if 'progress_bar' not in kwargs:
kwargs['progress_bar'] = False # hide tqdm logging
if 'log_to_console' not in kwargs:
kwargs['log_to_console'] = False # hide console logging
if 'save_folder' in kwargs and urlparse(kwargs['save_folder']).scheme == 'gs':
os.environ['GCS_KEY'] = 'foo'
os.environ['GCS_SECRET'] = 'foo'
if 'load_path' in kwargs and urlparse(kwargs['load_path']).scheme in ['s3', 'oci', 'gs']:
if urlparse(kwargs['load_path']).scheme == 'gs':
os.environ['GCS_KEY'] = 'foo'
os.environ['GCS_SECRET'] = 'foo'
kwargs['load_path'] = urlparse(kwargs['load_path']).path.lstrip('/')
kwargs['load_object_store'] = LibcloudObjectStore()
_original_trainer_init(self, **kwargs)
Trainer.__init__ = _new_trainer_init
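# With the patch above applied, a doctest can construct a Trainer with no arguments at all and the
# defaults injected in _new_trainer_init are used, e.g. (illustrative only):
#   trainer = Trainer()
#   trainer.fit()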
# Do not attempt to validate cloud credentials
def _do_not_validate(*args, **kwargs) -> None:
pass
composer.loggers.remote_uploader_downloader._validate_credentials = _do_not_validate # type: ignore
# Patch RemoteUploaderDownloader __init__ function to replace arguments while preserving type
_original_RemoteUploaderDownloader_init = RemoteUploaderDownloader.__init__
def _new_RemoteUploaderDownloader_init(self, fake_ellipses: None = None, **kwargs: Any):
os.makedirs('./object_store', exist_ok=True)
kwargs.update(use_procs=False,
num_concurrent_uploads=1,
bucket_uri='libcloud://.',
backend_kwargs={
'provider': 'local',
'container': '.',
'provider_kwargs': {
'key': os.path.abspath('./object_store'),
},
})
_original_RemoteUploaderDownloader_init(self, **kwargs)
RemoteUploaderDownloader.__init__ = _new_RemoteUploaderDownloader_init # type: ignore
# Patch ObjectStore __init__ function to replace arguments while preserving type
_original_libcloudObjectStore_init = LibcloudObjectStore.__init__
def _new_libcloudObjectStore_init(self, fake_ellipses: None = None, **kwargs: Any):
os.makedirs('./object_store', exist_ok=True)
kwargs.update(
provider='local',
container='.',
provider_kwargs={
'key': os.path.abspath('./object_store'),
},
)
_original_libcloudObjectStore_init(self, **kwargs)
LibcloudObjectStore.__init__ = _new_libcloudObjectStore_init # type: ignore
# Note: These methods are an alternative to the tiny_bert fixtures in fixtures.py.
# Fixtures cannot be used natively as parametrized inputs, which we require when
# we wish to run a test across multiple models, one of which is a HuggingFace BERT Tiny.
# As a workaround, we inject objects into the PyTest namespace. Tests should not directly
# use pytest.{var}, but instead should import and use the helper copy methods configure_{var}
# (in tests.common.models) so the objects in the PyTest namespace do not change.
try:
import transformers
del transformers
TRANSFORMERS_INSTALLED = True
except ImportError:
TRANSFORMERS_INSTALLED = False
if TRANSFORMERS_INSTALLED:
from tests.fixtures.fixtures import (tiny_bert_config_helper, tiny_bert_model_helper, tiny_bert_tokenizer_helper,
tiny_gpt2_config_helper, tiny_gpt2_model_helper, tiny_gpt2_tokenizer_helper)
pytest.tiny_bert_config = tiny_bert_config_helper() # type: ignore
pytest.tiny_bert_model = tiny_bert_model_helper(pytest.tiny_bert_config) # type: ignore
pytest.tiny_bert_tokenizer = tiny_bert_tokenizer_helper() # type: ignore
pytest.tiny_gpt2_config = tiny_gpt2_config_helper() # type: ignore
pytest.tiny_gpt2_model = tiny_gpt2_model_helper(pytest.tiny_gpt2_config) # type: ignore
pytest.tiny_gpt2_tokenizer = tiny_gpt2_tokenizer_helper() # type: ignore
| composer-dev | docs/source/doctest_fixtures.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Configuration file for the Sphinx documentation builder.
This file only contains a selection of the most common options. For a full
list see the documentation:
https://www.sphinx-doc.org/en/master/usage/configuration.html
-- Path setup --------------------------------------------------------------
If extensions (or modules to document with autodoc) are in another directory,
add these directories to sys.path here. If the directory is relative to the
documentation root, use os.path.abspath to make it absolute, like shown here.
"""
import ast
import importlib
import inspect
import json
import os
import shutil
import sys
import tempfile
import types
import warnings
from typing import Any, Dict, List, Tuple, Type
import sphinx.application
import sphinx.ext.autodoc
import sphinx.util.logging
import torch
import torch.nn
from docutils import nodes
from docutils.nodes import Element
from git.repo.base import Repo
from pypandoc.pandoc_download import download_pandoc
from sphinx.ext.autodoc import ClassDocumenter, _
from sphinx.writers.html5 import HTML5Translator
import composer
if not shutil.which('pandoc'):
# Install pandoc if it is not installed.
# Pandoc is required by nbconvert but it is not included in the pypandoc pip package
with tempfile.TemporaryDirectory() as tmpdir:
# if root on linux, use the "/bin" folder, since "~/bin" = "/root/bin" is not in the path by default
# similar on osx -- use /Applications instead of "~/Applications" = "/root/Applications"
target_folder = None
if os.getuid() == 0:
if sys.platform == 'linux':
target_folder = '/bin'
elif sys.platform == 'darwin':
target_folder = '/Applications/pandoc'
# Not handling windows; nobody uses root on windows lol
download_pandoc(version='2.19.2', download_folder=tmpdir, targetfolder=target_folder, delete_installer=True)
sys.path.insert(0, os.path.abspath('..'))
log = sphinx.util.logging.getLogger(__name__)
# -- Project information -----------------------------------------------------
project = 'MosaicML'
copyright = '2022, MosaicML, Inc.'
author = 'MosaicML'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.extlinks',
'sphinx.ext.coverage',
'sphinx.ext.napoleon',
'sphinxcontrib.katex',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinxemoji.sphinxemoji',
'sphinxext.opengraph',
'sphinx_copybutton',
'myst_parser',
'sphinxarg.ext',
'sphinx.ext.doctest',
'sphinx_panels',
'sphinxcontrib.images',
'nbsphinx',
]
def _get_commit_sha() -> str:
"""Determines the commit sha.
Returns:
str: The git commit sha, as a string.
"""
repo_root = os.path.join(os.path.dirname(__file__), '..', '..')
repo = Repo(repo_root)
if repo.is_dirty():
warning_msg = 'The git repo is dirty. The commit sha for source code links will be incorrect.'
if os.environ.get('CI', '0') == '0':
# If developing locally, warn.
warnings.warn(warning_msg)
else:
# If on CI, error.
raise RuntimeError(warning_msg)
_commit_sha = repo.commit().hexsha
return _commit_sha
_COMMIT_SHA = _get_commit_sha()
# Don't show notebook output in the docs
nbsphinx_execute = 'never'
notebook_path = 'mosaicml/composer/blob/' + _COMMIT_SHA + '/{{ env.doc2path(env.docname, base=None) }}'
# Include an "Open in Colab" link at the beginning of all notebooks
nbsphinx_prolog = f"""
.. tip::
This tutorial is available as a `Jupyter notebook <https://github.com/{notebook_path}>`_.
.. image:: https://colab.research.google.com/assets/colab-badge.svg
:target: https://colab.research.google.com/github/{notebook_path}
:alt: Open in Colab
"""
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_suffix = ['.rst', '.md']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
'_build', 'Thumbs.db', '.DS_Store', 'examples/imagenet/README.md', 'examples/segmentation/README.md'
]
napoleon_custom_sections = [('Returns', 'params_style')]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'furo'
# Make sure the target is unique
autosectionlabel_prefix_document = True
autosummary_imported_members = False
autosectionlabel_maxdepth = 5
autosummary_generate = True
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_title = ' Composer'
# Customize CSS
html_css_files = ['css/custom.css', 'https://cdn.jsdelivr.net/npm/@docsearch/css@3']
html_js_files = ['js/posthog.js']
# Mosaic logo
# html_logo = 'https://storage.googleapis.com/docs.mosaicml.com/images/logo-dark-bg.png'
html_theme_options = {
'light_logo': 'logo-light-mode.png',
'dark_logo': 'logo-dark-mode.png',
'light_css_variables': {
'color-brand-primary': '#343434',
'color-brand-content': '#343434',
},
'dark_css_variables': {
'color-brand-primary': '#f9f9f9',
'color-brand-content': '#f9f9f9',
},
}
# Favicon
html_favicon = 'https://mosaic-ml-staging.cdn.prismic.io/mosaic-ml-staging/b1f1a2a0-2b54-4b43-9b76-bfa2e24d6fdf_favicon.svg'
# Don't unfold our common type aliases
autodoc_type_aliases = {
'Batch': 'composer.core.types.Batch',
}
autodoc_default_options = {
# Exclude the ``hparams_registry`` attribute from the generated class documentation.
'exclude-members': 'hparams_registry'
}
autodoc_inherit_docstrings = False
# Monkeypatch torch.nn.Module.forward to hide its docstring. Because of how forward is defined in the
# base class, sphinx does not realize that subclass forward() methods override an inherited method,
# so we clear the inherited docstring instead of documenting it.
torch.nn.Module.forward.__doc__ = None
pygments_style = 'manni'
pygments_dark_style = 'monokai'
html_permalinks = True
html_permalinks_icon = '#'
images_config = {
'download': False,
}
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://numpy.org/doc/stable/', None),
'torch': ('https://pytorch.org/docs/stable/', None),
'torchvision': ('https://pytorch.org/vision/stable/', None),
'torchtext': ('https://pytorch.org/text/stable/', None),
'torchmetrics': ('https://torchmetrics.readthedocs.io/en/latest/', None),
'libcloud': ('https://libcloud.readthedocs.io/en/stable/', None),
'PIL': ('https://pillow.readthedocs.io/en/stable', None),
'coolname': ('https://coolname.readthedocs.io/en/latest/', None),
'datasets': ('https://huggingface.co/docs/datasets/master/en/', None),
'transformers': ('https://huggingface.co/docs/transformers/master/en/', None),
'boto3': ('https://boto3.amazonaws.com/v1/documentation/api/latest/', None),
'botocore': ('https://botocore.amazonaws.com/v1/documentation/api/latest', None),
}
nitpicky = False  # If set to True, warn about broken cross-references
nitpick_ignore = [
('py:class', 'type'),
('py:class', 'optional'),
('py:meth', 'wandb.init'),
('py:attr', 'wandb.run.config'),
('py:attr', 'wandb.run.tags'),
('py:meth', 'torch.save'),
('py:meth', 'torch.load'),
('py:class', 'T_nnModule'),
]
python_use_unqualified_type_names = True
autodoc_typehints = 'none'
def skip_redundant_namedtuple_attributes(
app: sphinx.application.Sphinx,
what: str,
name: str,
obj: Any,
skip: bool,
options: sphinx.ext.autodoc.Options,
):
"""Hide the default, duplicate attributes for named tuples."""
del app, what, name, skip, options
if '_tuplegetter' in obj.__class__.__name__:
return True
return None
with open(os.path.join(os.path.dirname(__file__), 'doctest_fixtures.py'), 'r') as f:
doctest_global_setup = f.read()
with open(os.path.join(os.path.dirname(__file__), 'doctest_cleanup.py'), 'r') as f:
doctest_global_cleanup = f.read()
def rstjinja(app, docname, source):
"""Render our pages as a jinja template for fancy templating goodness."""
# Make sure we're outputting HTML
if app.builder.format != 'html':
return
src = source[0]
rendered = app.builder.templates.render_string(src, app.config.html_context)
source[0] = rendered
def get_algorithms_metadata() -> Dict[str, Dict[str, str]]:
"""Get the metadata for algorithms from the ``metadata.json`` files."""
EXCLUDE = ['no_op_model']
root = os.path.join(os.path.dirname(__file__), '..', '..', 'composer', 'algorithms')
algorithms = next(os.walk(root))[1]
algorithms = [algo for algo in algorithms if algo not in EXCLUDE]
metadata = {}
for name in algorithms:
json_path = os.path.join(root, name, 'metadata.json')
if os.path.isfile(json_path):
with open(json_path, 'r') as f:
data = json.load(f)
for key, value in data.items():
if key in metadata:
raise ValueError(f'Duplicate keys in metadata: {key}')
metadata[key] = value
if not metadata:
raise ValueError(f'No metadata found, {root} not correctly configured.')
return metadata
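# Illustrative shape of one ``metadata.json`` entry (field names inferred from how the metadata
# is consumed by the table scripts in docs/source/tables/; not an authoritative schema):
#
#   {"blurpool": {"class_name": "BlurPool", "functional": "apply_blurpool",
#                 "tldr": "...", "attribution": "...", "link": "...", "domains": ["cv"]}}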
html_context = {'metadata': get_algorithms_metadata()}
# ClassDocumenter.add_directive_header uses ClassDocumenter.add_line to
# write the class documentation.
# We'll monkeypatch the add_line method and intercept lines that begin
# with "Bases:".
# In order to minimize the risk of accidentally intercepting a wrong line,
# we'll apply this patch inside of the add_directive_header method.
# From https://stackoverflow.com/questions/46279030/how-can-i-prevent-sphinx-from-listing-object-as-a-base-class
add_line = ClassDocumenter.add_line
line_to_delete = _('Bases: %s') % u':py:class:`object`'
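# For reference (illustrative), with the default gettext translation ``line_to_delete`` is the
# literal line "Bases: :py:class:`object`", i.e. the header sphinx emits for classes whose only
# base is ``object``; the patched add_line defined below drops exactly that line.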
def _auto_rst_for_module(module: types.ModuleType, exclude_members: List[Any]) -> str:
"""Generate the content of an rst file documenting a module.
Includes the module docstring, followed by tables for the functions,
classes, and exceptions
Args:
module: The module object to document
exclude_members: A list of Python objects to exclude from the
documentation. Objects that are not imported in
``module`` are ignored.
Returns:
The rst content for the module
"""
name = module.__name__
lines = []
functions: List[Tuple[str, types.FunctionType]] = []
exceptions: List[Tuple[str, Type[BaseException]]] = []
classes: List[Tuple[str, Type[object]]] = []
methods: List[Tuple[str, types.MethodType]] = []
attributes: List[Tuple[str, object]] = []
# add title and module docstring
lines.append(f'{name}')
lines.append(f'{"=" * len(name)}\n')
lines.append(f'.. automodule:: {name}\n')
# set prefix so that we can use short names in the autosummaries
lines.append(f'.. currentmodule:: {name}')
try:
all_members = list(module.__all__)
except AttributeError:
all_members = list(vars(module).keys())
for item_name, val in vars(module).items():
if val in exclude_members:
continue
if item_name.startswith('_'):
# Skip private members
continue
if item_name not in all_members:
# Skip members not in ``__all__``
continue
if isinstance(val, types.ModuleType):
# Skip modules; those are documented by autosummary
continue
if isinstance(val, types.FunctionType):
functions.append((item_name, val))
elif isinstance(val, types.MethodType):
methods.append((item_name, val))
elif isinstance(val, type) and issubclass(val, BaseException):
exceptions.append((item_name, val))
elif isinstance(val, type):
assert issubclass(val, object)
classes.append((item_name, val))
else:
attributes.append((item_name, val))
continue
# Sort by the reimported name
functions.sort(key=lambda x: x[0])
exceptions.sort(key=lambda x: x[0])
classes.sort(key=lambda x: x[0])
attributes.sort(key=lambda x: x[0])
for category, category_name in ((functions, 'Functions'), (classes, 'Classes'), (exceptions, 'Exceptions')):
sphinx_lines = []
for item_name, _ in category:
sphinx_lines.append(f' {item_name}')
if len(sphinx_lines) > 0:
lines.append(f'\n.. rubric:: {category_name}\n')
lines.append('.. autosummary::')
lines.append(' :toctree: generated')
lines.append(' :nosignatures:')
if category_name in ('Classes', 'Hparams'):
lines.append(' :template: classtemplate.rst')
elif category_name == 'Functions':
lines.append(' :template: functemplate.rst')
lines.append('')
lines.extend(sphinx_lines)
lines.append('')
lines.append('.. This file autogenerated by docs/source/conf.py\n')
return '\n'.join(lines)
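# For illustration, ``_auto_rst_for_module(composer.utils.dist, exclude_members=[])`` produces
# rst along these lines (the exact entries depend on the module's ``__all__``):
#
#   composer.utils.dist
#   ===================
#
#   .. automodule:: composer.utils.dist
#
#   .. currentmodule:: composer.utils.dist
#
#   .. rubric:: Functions
#
#   .. autosummary::
#       :toctree: generated
#       :nosignatures:
#       :template: functemplate.rst
#
#       barrier
#       get_world_size
#
#   .. This file autogenerated by docs/source/conf.py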
def _modules_to_rst() -> List[types.ModuleType]:
"""Return the list of modules for which to generate API reference rst files."""
# adding composer.functional to the below list yields:
# AttributeError: module 'composer' has no attribute 'functional'
import composer.functional as cf
document_modules: List[types.ModuleType] = [
composer,
cf,
composer.utils.dist,
composer.utils.reproducibility,
composer.core.types,
]
exclude_modules: List[types.ModuleType] = [composer.trainer, composer._version]
for name in composer.__dict__:
obj = composer.__dict__[name]
if isinstance(obj, types.ModuleType) and obj not in exclude_modules:
document_modules.append(obj)
return document_modules
def _generate_rst_files_for_modules() -> None:
"""Generate .rst files for each module to include in the API reference.
These files contain the module docstring followed by tables listing all
the functions, classes, etc.
"""
docs_dir = os.path.abspath(os.path.dirname(__file__))
module_rst_save_dir = os.path.join(docs_dir, 'api_reference')
# gather up modules to generate rst files for
document_modules = _modules_to_rst()
# rip out types that are duplicated in top-level composer module
composer_imported_types = []
for name in composer.__all__:
obj = composer.__dict__[name]
if not isinstance(obj, types.ModuleType):
composer_imported_types.append(obj)
document_modules = sorted(document_modules, key=lambda x: x.__name__)
os.makedirs(module_rst_save_dir, exist_ok=True)
for module in document_modules:
saveas = os.path.join(module_rst_save_dir, module.__name__ + '.rst')
print(f'Generating rst file {saveas} for module: {module.__name__}')
# avoid duplicate entries in docs. We add torch's _LRScheduler to
# types, so we get a ``WARNING: duplicate object description`` if we
# don't exclude it
exclude_members = [torch.optim.lr_scheduler._LRScheduler]
if module is not composer:
exclude_members += composer_imported_types
content = _auto_rst_for_module(module, exclude_members=exclude_members)
with open(saveas, 'w') as f:
f.write(content)
def _add_line_no_object_base(self, text, *args, **kwargs):
if text.strip() == line_to_delete:
return
add_line(self, text, *args, **kwargs)
add_directive_header = ClassDocumenter.add_directive_header
def _add_directive_header_no_object_base(self, *args, **kwargs):
"""Hide that all classes inherit from the base class ``object``."""
self.add_line = _add_line_no_object_base.__get__(self)
result = add_directive_header(self, *args, **kwargs)
del self.add_line
return result
ClassDocumenter.add_directive_header = _add_directive_header_no_object_base
def _recursive_getattr(obj: Any, path: str):
parts = path.split('.')
try:
obj = getattr(obj, parts[0])
except AttributeError:
return None
path = '.'.join(parts[1:])
if path == '':
return obj
else:
return _recursive_getattr(obj, path)
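# Example (illustrative): ``_recursive_getattr(composer, 'utils.dist.get_world_size')`` walks the
# dotted path one attribute at a time and returns the ``get_world_size`` function, while a path
# with a missing segment, e.g. ``'utils.does_not_exist'``, returns ``None`` instead of raising.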
def _determine_lineno_of_attribute(module: types.ModuleType, attribute: str):
# inspect.getsource() does not work with module-level attributes
# instead, parse the module manually using ast, and determine where
# the expression was defined
source = inspect.getsource(module)
filename = inspect.getsourcefile(module)
assert filename is not None, f'filename for module {module} could not be found'
ast_tree = ast.parse(source, filename)
for stmt in ast_tree.body:
if isinstance(stmt, ast.Assign):
if any(isinstance(x, ast.Name) and x.id == attribute for x in stmt.targets):
return stmt.lineno
return None
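# Example (illustrative): for a module whose source reads
#
#   1: """Docstring."""
#   2:
#   3: DEFAULT_PRECISION = 'fp32'
#
# ``_determine_lineno_of_attribute(module, 'DEFAULT_PRECISION')`` returns 3, and it returns
# ``None`` for names that are never assigned at module level.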
class PatchedHTMLTranslator(HTML5Translator):
"""Open all external links in a new tab."""
# Adapted from https://stackoverflow.com/a/61669375
def visit_reference(self, node: Element) -> None:
atts = {'class': 'reference'}
if node.get('internal') or 'refuri' not in node:
atts['class'] += ' internal'
else:
atts['class'] += ' external'
# ---------------------------------------------------------
# Customize behavior (open in new tab, secure linking site)
if 'refid' not in node and (not any(node['refuri'].startswith(x)
for x in ('/', 'https://docs.mosaicml.com', '#'))):
# If there's a refid, or the refuri starts with a non-external uri scheme, then it's an internal
# (hardcoded) link, so don't open that in a new tab
# Otherwise, it's really an external link. Open it in a new tab.
atts['target'] = '_blank'
atts['rel'] = 'noopener noreferrer'
# ---------------------------------------------------------
if 'refuri' in node:
atts['href'] = node['refuri'] or '#'
if self.settings.cloak_email_addresses and atts['href'].startswith('mailto:'):
atts['href'] = self.cloak_mailto(atts['href'])
self.in_mailto = True
else:
assert 'refid' in node, \
'References must have "refuri" or "refid" attribute.'
atts['href'] = '#' + node['refid']
if not isinstance(node.parent, nodes.TextElement):
assert len(node) == 1 and isinstance(node[0], nodes.image)
atts['class'] += ' image-reference'
if 'reftitle' in node:
atts['title'] = node['reftitle']
if 'target' in node:
atts['target'] = node['target']
self.body.append(self.starttag(node, 'a', '', **atts))
if node.get('secnumber'):
self.body.append(('%s' + self.secnumber_suffix) % '.'.join(map(str, node['secnumber'])))
def setup(app: sphinx.application.Sphinx):
"""Setup hook."""
_generate_rst_files_for_modules()
app.connect('autodoc-skip-member', skip_redundant_namedtuple_attributes)
app.connect('source-read', rstjinja)
app.set_translator('html', PatchedHTMLTranslator)
| composer-dev | docs/source/conf.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Cleanup script that is executed at the end of each doctest."""
import os
import shutil
# variables are defined in doctest_fixtures.py
# pyright: reportUndefinedVariable=none
# tmpdir and cwd were defined in doctest_fixtures.py
os.chdir(cwd)
try:
shutil.rmtree(tmpdir)
except OSError:
pass
| composer-dev | docs/source/doctest_cleanup.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Helper function to generate the README table."""
import os
from pathlib import Path
from . import utils
HEADER = ['Task', 'Dataset', 'Name', 'Quality', 'Metric', 'TTT', 'Hparams']
ATTRIBUTES = ['_task', '_dataset', '_name', '_quality', '_metric', '_ttt', '_hparams']
folder_path = Path(__file__).parent
models = utils.list_dirs(folder_path)
if not len(models):
raise ValueError(f'Found 0 models in {folder_path}')
print(f'Found {len(models)} models')
metadata = utils.get_metadata(
names=models,
attributes=ATTRIBUTES,
module_basepath='composer.models',
)
# add extra keys
for name, md in metadata.items():
md['_github_link'] = f'{name}/'
md['_hparams_path'] = os.path.join('composer', 'yamls', 'models', md['_hparams'])
md['_hparams_link'] = f"../yamls/models/{md['_hparams']}"
# define row format
row = [
'{_task}',
'{_dataset}',
'[{_name}]({_github_link})',
'{_quality}',
'{_metric}',
'{_ttt}',
'[{_hparams_path}]({_hparams_link})',
]
table_md = utils.build_markdown_table(
header=HEADER,
metadata=metadata,
sorted_keys=sorted(metadata.keys()),
row_format=row,
)
| composer-dev | docs/source/tables/update_model_tables.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Table helpers for composer docs."""
| composer-dev | docs/source/tables/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Helper functions for auto-generating tables from metadata."""
import importlib
import os
import shutil
import tempfile
def list_dirs(folder):
"""Lists all dirs for a given folder.
Args:
folder (str): The folder to list dirs for.
"""
return sorted(
child.name for child in folder.iterdir() if child.is_dir() and ('__pycache__' not in str(child.absolute())))
def assert_attributes_exist(name, module_dict, attributes):
"""Assert that module has the provided attributes.
Args:
name (str): The class name.
module_dict (dict): The dict form of the class.
attributes (list): The attributes to check for.
"""
for attribute in attributes:
assert attribute in module_dict, \
f'{name} should define {attribute} in its __init__.py file.'
def get_metadata(names, attributes, module_basepath):
"""Returns a nested dict of metadata with names as keys.
Checks that all attributes exist in module given by module_basepath.name.
Args:
names (str): The module names.
attributes (list): The attributes to check for.
module_basepath (str): The import path of the module.
Example::
>>> get_metadata(
names=['blurpool', 'label_smoothing'],
attributes=['_name', '_tldr'],
module_basepath='composer.algorithms'
)
{'blurpool': {'_name': ..., '_tldr': ...},
'label_smoothing': {'_name': ..., '_tldr': ...}}
"""
metadata = {}
for name in names:
module = importlib.import_module(f'{module_basepath}.{name}')
if hasattr(module, '_metadata'):
for subname in getattr(module, '_metadata'):
submodule_dict = getattr(module, '_metadata')[subname]
assert_attributes_exist(f'{name}/{subname}', submodule_dict, attributes)
metadata[subname] = {a: submodule_dict[a] for a in attributes}
# check for attributes with empty strings
for attribute in attributes:
if not metadata[subname][attribute]:
print(f'WARNING: {subname} has empty metadata {attribute}')
else:
module_dict = module.__dict__
assert_attributes_exist(name, module_dict, attributes)
metadata[name] = {a: module_dict[a] for a in attributes}
# check for attributes with empty strings
for attribute in attributes:
if not metadata[name][attribute]:
print(f'WARNING: {name} has empty metadata {attribute}')
return metadata
def build_markdown_table(header, metadata, sorted_keys, row_format):
"""Builds a markdown table, formatting `row_format` with the `metadata`.
Entries in the table are ordered by `sorted_keys`.
Args:
header (list): list of header strings
metadata (dict): nested dict of metadata
sorted_keys (list): order of rows in table
row_format (list): list with the same length as ``header``. Elements are either a
format string or a single-argument callable that returns a format string.
Returns:
table_md (str): the table as a markdown-formatted string
"""
table_md = _print_row(header)
table_md += _print_row(['-' * len(h) for h in header])
for name in sorted_keys:
d = metadata[name]
# single-argument callable that returns a string is used for conditional formats
# e.g. to only print link if provided, define
# lambda d: '[Link]({_link})' if d[_link] else ''
row = [r(d).format(**d) if callable(r) else r.format(**d) for r in row_format]
table_md += _print_row(row)
return table_md
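# Example with illustrative values: building a two-row table.
#
#   header = ['Name', 'tl;dr']
#   metadata = {'blurpool': {'_name': 'BlurPool', '_tldr': 'Anti-aliased downsampling'},
#               'ema': {'_name': 'EMA', '_tldr': 'Exponential moving average of weights'}}
#   row_format = ['{_name}', '{_tldr}']
#   build_markdown_table(header, metadata, sorted(metadata.keys()), row_format)
#
# returns a markdown string with one '|'-joined line per entry, preceded by the header and a
# separator row.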
def _print_row(row):
return '|'.join(row) + '\n'
def index_tag_in_lines(lines, tag):
"""Returns line number where tag is found.
Args:
lines (list): List of lines to check.
tag (str): Tag to find.
"""
for index, line in enumerate(lines):
if tag in line:
return index
raise ValueError(f'{tag} not found.')
def update_table_in_file(table, source_file):
"""Updates the table content based on a source file.
Given a ``source_file``, updates the table. Searches
the file for 'Table Start' and 'Table End' tags, and replaces
the content between those tags.
The original file is retained with the ``.md.bkp`` suffix.
Args:
table (list): list of strings
source_file (pathlib.Path): path to the source file
"""
with open(source_file, 'r') as source, \
tempfile.NamedTemporaryFile('w', delete=False) as temp:
source_lines = source.readlines()
table_start = index_tag_in_lines(source_lines, tag='Table Start')
table_end = index_tag_in_lines(source_lines, tag='Table End')
print(f'Found table_start tag at line no: {table_start}')
print(f'Found table_end tag at line no: {table_end}')
assert table_end > table_start, 'Table End must be after Table Start'
table_written = False
for line_no, line in enumerate(source_lines):
if line_no <= table_start or line_no >= table_end:
temp.write(line)
elif not table_written: # write table once
temp.writelines(table)
table_written = True
backup_file = source_file.with_suffix('.md.bkp')
os.rename(source_file, backup_file)
print(f'Original file backed up at: {backup_file}')
shutil.copy(temp.name, source_file)
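# For illustration, the source file is expected to contain marker lines such as
# ``<!-- Table Start -->`` and ``<!-- Table End -->`` (any line containing the tag text works);
# everything between the two markers is replaced with ``table``.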
| composer-dev | docs/source/tables/utils.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Script to generate graphs for repo README.md.
After generating images, upload to public hosting and update the README URLs.
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
models = [{
'name': 'GPT-2 125M',
'baseline': 255,
'performance': 150
}, {
'name': 'ResNet-50',
'baseline': 116,
'performance': 15
}, {
'name': 'DeepLab-v3',
'baseline': 110,
'performance': 36
}]
def generate_graph(filename, light_mode=True):
"""Generate Graphs.
Args:
filename (str): Name of the output SVG image file
light_mode (bool, optional): Render in light mode. Defaults to True.
"""
font_color = 'black' if light_mode else 'white'
mpl.rcParams['text.color'] = font_color
mpl.rcParams['axes.labelcolor'] = font_color
mpl.rcParams['xtick.color'] = font_color
mpl.rcParams['ytick.color'] = font_color
labels = [model['name'] for model in models]
baselines = [model['baseline'] for model in models]
performances = [model['performance'] for model in models]
x = np.arange(len(labels))
width = 0.35
fig, ax = plt.subplots()
ax.grid(which='major', axis='y')
ax.set_axisbelow(True)
rects1 = ax.bar(x - width / 2, baselines, width, label='Vanilla PyTorch', color=['#CCCCCC'])
rects2 = ax.bar(x + width / 2, performances, width, label='Composer', color=['#EA4335'])
ax.set_title('Cost comparison: Vanilla PyTorch vs. Composer')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), ncol=5, frameon=False)
ax.bar_label(rects1, padding=3, fmt='$%g')
ax.bar_label(rects2, padding=3, fmt='$%g')
def format_cost(x, pos=None):
return f'${int(x)}'
ax.get_yaxis().set_major_formatter(format_cost)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_color('black' if light_mode else 'white')
fig.tight_layout()
plt.savefig(filename, transparent=True)
generate_graph('lightmode.svg', light_mode=True)
generate_graph('darkmode.svg', light_mode=False)
| composer-dev | docs/source/tables/generate_cost_graphs.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Helper function to generate the method overview rst."""
import json
import os
from pathlib import Path
import utils
import composer
EXCLUDE_METHODS = ['no_op_model', 'utils']
folder_path = os.path.join(os.path.dirname(composer.__file__), 'algorithms')
methods = utils.list_dirs(Path(folder_path))
methods = [m for m in methods if m not in EXCLUDE_METHODS]
if not len(methods):
raise ValueError(f'Found 0 methods in {folder_path}')
print(f'Found {len(methods)} methods with metadata.')
badges = {'nlp': ':badge:`NLP,badge-success`', 'cv': ':badge:`CV,badge-primary`'}
overview_path = os.path.join(os.path.dirname(__file__), '..', 'method_cards', 'methods_overview.rst')
print(f'Writing methods overview to {overview_path}')
with open(overview_path, 'w') as overview_file:
overview_file.write("""
|:black_joker:| Methods Overview
================================
.. panels::
:card: shadow
""")
metadata = {}
for name in methods:
json_path = os.path.join(folder_path, name, 'metadata.json')
with open(json_path, 'r') as f:
metadata[name] = json.load(f)[name]
badge_string = ' '.join([badges[domain] for domain in metadata[name]['domains']])
overview_file.write(f"""
---
.. link-button:: {name}.html
:text: {metadata[name]['class_name']}
:classes: card-title
{badge_string}
++++++++++++++
{metadata[name]['tldr']}
""")
print(f'Table written to {overview_path}')
| composer-dev | docs/source/tables/update_methods_overview.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Helper function to generate the README table."""
import json
import os
from pathlib import Path
import utils
import composer
from composer import functional as CF
EXCLUDE_METHODS = ['no_op_model', 'utils']
HEADER = ['Name', 'Functional', 'Attribution', 'tl;dr']
ATTRIBUTES = ['class_name', 'functional', 'tldr', 'attribution', 'link']
GITHUB_BASE = 'https://github.com/mosaicml/composer/tree/dev/composer/algorithms/'
folder_path = os.path.join(os.path.dirname(composer.__file__), 'algorithms')
methods = utils.list_dirs(Path(folder_path))
methods = [m for m in methods if m not in EXCLUDE_METHODS]
if not len(methods):
raise ValueError(f'Found 0 methods in {folder_path}')
print(f'Found {len(methods)} methods with metadata.')
metadata = {}
for name in methods:
json_path = os.path.join(folder_path, name, 'metadata.json')
with open(json_path, 'r') as f:
metadata[name] = json.load(f)[name]
# test functional method is importable
method_functional = metadata[name]['functional']
if method_functional and not hasattr(CF, method_functional):
raise ImportError(f'Unable to import functional form {method_functional} for {name}')
metadata[name]['functional'] = f'`cf.{method_functional}`'
metadata[name]['github_link'] = GITHUB_BASE + name
# define row format
row = [
'[{class_name}]({github_link})',
'{functional}',
lambda d: '[{attribution}]({link})' if d['link'] else '{attribution}',
'{tldr}',
]
table_md = utils.build_markdown_table(
header=HEADER,
metadata=metadata,
sorted_keys=sorted(metadata.keys()),
row_format=row,
)
table_path = os.path.join(os.path.dirname(__file__), 'algorithms_table.md')
with open(table_path, 'w') as f:
f.write(table_md)
print(f'Table written to {table_path}')
| composer-dev | docs/source/tables/update_alg_tables.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Profiling Example.
For a walk-through of this example, please see the `profiling guide </trainer/performance_tutorials/profiling>`_.
"""
# [imports-start]
import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from composer import Trainer
from composer.models import mnist_model
from composer.profiler import JSONTraceHandler, cyclic_schedule
from composer.profiler.profiler import Profiler
# [imports-end]
# [dataloader-start]
# Specify Dataset and Instantiate DataLoader
batch_size = 2048
data_directory = '~/datasets'
mnist_transforms = transforms.Compose([transforms.ToTensor()])
train_dataset = datasets.MNIST(data_directory, train=True, download=True, transform=mnist_transforms)
train_dataloader = DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=False,
drop_last=True,
pin_memory=True,
persistent_workers=True,
num_workers=8,
)
# [dataloader-end]
# Instantiate Model
model = mnist_model(num_classes=10)
# [trainer-start]
# Instantiate the trainer
composer_trace_dir = 'composer_profiler'
torch_trace_dir = 'torch_profiler'
trainer = Trainer(model=model,
train_dataloader=train_dataloader,
eval_dataloader=train_dataloader,
max_duration=2,
device='gpu' if torch.cuda.is_available() else 'cpu',
eval_interval=0,
precision='amp' if torch.cuda.is_available() else 'fp32',
train_subset_num_batches=16,
profiler=Profiler(
trace_handlers=[JSONTraceHandler(folder=composer_trace_dir, overwrite=True)],
schedule=cyclic_schedule(
wait=0,
warmup=1,
active=4,
repeat=1,
),
torch_prof_folder=torch_trace_dir,
torch_prof_overwrite=True,
))
# [trainer-end]
# [fit-start]
# Run training
trainer.fit()
# [fit-end]
| composer-dev | examples/profiler_demo.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Save and Load Checkpoints with `Weights and Biases <https://wandb.ai/>`."""
import shutil
import torch.utils.data
from torch.optim import SGD
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor
from composer import Trainer
from composer.loggers import WandBLogger
from composer.models.classify_mnist import mnist_model
# Configure the WandBLogger to log artifacts, and set the project name
# The project name must be deterministic, so we can restore from it
wandb_logger = WandBLogger(
log_artifacts=True,
project='my-wandb-project-name',
)
# Configure the trainer -- here, we train a simple MNIST classifier
print('Starting the first training run\n')
model = mnist_model(num_classes=10)
optimizer = SGD(model.parameters(), lr=0.01)
train_dataloader = torch.utils.data.DataLoader(
dataset=MNIST('~/datasets', train=True, download=True, transform=ToTensor()),
batch_size=2048,
)
eval_dataloader = torch.utils.data.DataLoader(
dataset=MNIST('~/datasets', train=True, download=True, transform=ToTensor()),
batch_size=2048,
)
trainer = Trainer(
model=model,
max_duration='1ep',
optimizers=optimizer,
# Train Data Configuration
train_dataloader=train_dataloader,
train_subset_num_batches=5, # For this example, limit each epoch to 5 batches
# Evaluation Configuration
eval_dataloader=eval_dataloader,
eval_subset_num_batches=5, # For this example, limit evaluation to 5 batches
# Checkpoint Saving Configuration
loggers=wandb_logger, # Log checkpoints via the WandB Logger
save_folder='checkpoints', # This is the folder that checkpoints are saved to locally and remotely.
save_interval='1ep',
save_filename='epoch{epoch}.pt', # Name checkpoints like epoch1.pt, epoch2.pt, etc...
save_num_checkpoints_to_keep=0, # Do not keep any checkpoints locally after they have been uploaded to W & B
)
# Train!
trainer.fit()
# Remove the temporary folder to ensure that the checkpoint is downloaded from the cloud
shutil.rmtree('checkpoints', ignore_errors=True)
# Close the existing trainer to trigger W & B to mark the run as "finished", and be ready for the next training run
trainer.close()
# Construct a new trainer that loads from the previous checkpoint
print('\nStarting the second training run\n')
trainer = Trainer(
model=model,
max_duration='2ep', # Train to 2 epochs in total
optimizers=optimizer,
# Train Data Configuration
train_dataloader=train_dataloader,
train_subset_num_batches=5, # For this example, limit each epoch to 5 batches
# Evaluation Configuration
eval_dataloader=eval_dataloader,
eval_subset_num_batches=5, # For this example, limit evaluation to 5 batches
# Checkpoint Loading Configuration
load_object_store=wandb_logger,
# Load the checkpoint using the save_folder plus the save_filename -- WandB requires that the load_path include the "latest" tag
load_path='checkpoints/epoch1.pt:latest',
# (Optional) Checkpoint Saving Configuration to continue to save new checkpoints
loggers=wandb_logger, # Log checkpoints via the WandB Logger
save_folder='checkpoints', # The trainer requires that checkpoints must be saved locally before being uploaded
save_interval='1ep',
save_filename='epoch{epoch}.pt', # Name checkpoints like epoch1.pt, epoch2.pt, etc...
save_num_checkpoints_to_keep=0, # Do not keep any checkpoints locally after they have been uploaded to W & B
)
# Verify that we loaded the checkpoint. This should print 1ep, since we already trained for 1 epoch.
print(f'\nResuming training at epoch {trainer.state.timestamp.epoch}\n')
# Train for another epoch!
trainer.fit()
| composer-dev | examples/checkpoint_with_wandb.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
# Written by Gihyun Park, Junyeol Lee, and Jiwon Seo
"""Example for training with an algorithm on a custom model."""
import torch
import torch.nn as nn
import torch.utils.data
from torchvision import datasets, transforms
import composer.models
import composer.optim
from composer import Trainer
# Example algorithms to train with
from composer.algorithms import GyroDropout
# Your custom model
class VGG13Model(composer.models.ComposerClassifier):
"""Your custom model."""
def __init__(self, num_hidden: int, num_classes: int) -> None:
module = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=3, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(64, 128, kernel_size=3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 128, kernel_size=3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(128, 256, kernel_size=3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(256, 512, kernel_size=3, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, kernel_size=3, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(512, 512, kernel_size=3, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, kernel_size=3, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Flatten(1),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(512, num_classes),
)
super().__init__(module=module, num_classes=num_classes)
# Your custom train dataloader
train_dataloader = torch.utils.data.DataLoader(
dataset=datasets.CIFAR10('/datasets/',
train=True,
transform=transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]),
download=True),
drop_last=False,
shuffle=True,
batch_size=256,
)
# Your custom eval dataloader
eval_dataloader = torch.utils.data.DataLoader(
dataset=datasets.CIFAR10('/datasets/',
train=False,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])),
drop_last=False,
shuffle=False,
batch_size=256,
)
model = VGG13Model(num_hidden=64, num_classes=10).to('cuda')
optimizer = composer.optim.DecoupledSGDW(
model.parameters(),
lr=0.05,
momentum=0.9,
weight_decay=0.0005,
)
# Initialize Trainer with custom model, custom train and eval datasets, and algorithms to train with
trainer = Trainer(model=model,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration='100ep',
optimizers=optimizer,
algorithms=[GyroDropout()])
trainer.fit()
| composer-dev | examples/gyro_dropout_example.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Example for training with an algorithm on a custom model."""
import torch
import torch.utils.data
from torchvision import datasets, transforms
import composer.models
from composer import Trainer
# Example algorithms to train with
from composer.algorithms import CutOut, LabelSmoothing
# Your custom model
class SimpleModel(composer.models.ComposerClassifier):
"""Your custom model."""
def __init__(self, num_hidden: int, num_classes: int):
module = torch.nn.Sequential(
torch.nn.Flatten(start_dim=1),
torch.nn.Linear(28 * 28, num_hidden),
torch.nn.Linear(num_hidden, num_classes),
)
self.num_classes = num_classes
super().__init__(module=module, num_classes=num_classes)
# Your custom train dataloader
train_dataloader = torch.utils.data.DataLoader(
dataset=datasets.MNIST('/datasets/', train=True, transform=transforms.ToTensor(), download=True),
drop_last=False,
shuffle=True,
batch_size=256,
)
# Your custom eval dataloader
eval_dataloader = torch.utils.data.DataLoader(
dataset=datasets.MNIST('/datasets/', train=False, transform=transforms.ToTensor()),
drop_last=False,
shuffle=False,
batch_size=256,
)
# Initialize Trainer with custom model, custom train and eval datasets, and algorithms to train with
trainer = Trainer(model=SimpleModel(num_hidden=128, num_classes=10),
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration='3ep',
algorithms=[CutOut(num_holes=1, length=0.5), LabelSmoothing(0.1)])
trainer.fit()
| composer-dev | examples/custom_models.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Example script to train a DeepLabv3+ model on ADE20k for semantic segmentation."""
import argparse
import logging
import os
import torch
import torchvision
from torch.utils.data import DataLoader
from torchmetrics import MetricCollection
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from composer import DataSpec, Time, Trainer
from composer.algorithms import EMA, SAM, ChannelsLast, MixUp
from composer.callbacks import CheckpointSaver, ImageVisualizer, LRMonitor, SpeedMonitor
from composer.datasets.ade20k import (ADE20k, PadToSize, PhotometricDistoration, RandomCropPair, RandomHFlipPair,
RandomResizePair)
from composer.datasets.utils import NormalizationFn, pil_image_collate
from composer.loggers import WandBLogger
from composer.loss import DiceLoss, soft_cross_entropy
from composer.metrics import CrossEntropy, MIoU
from composer.models import ComposerClassifier
from composer.models.deeplabv3.model import deeplabv3
from composer.optim import CosineAnnealingScheduler, DecoupledSGDW
from composer.utils import dist
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser()
# Dataloader command-line arguments
parser.add_argument('data_dir', help='Path to the directory containing the ImageNet-1k dataset', type=str)
parser.add_argument('--download',
help='Use to download ADE20k from the internet and put it in the `data_dir`',
action='store_true')
parser.add_argument('--train_resize_size', help='Training image resize size', type=int, default=512)
parser.add_argument('--eval_resize_size', help='Evaluation image resize size', type=int, default=512)
parser.add_argument('--train_batch_size', help='Train dataloader per-device batch size', type=int, default=128)
parser.add_argument('--eval_batch_size', help='Validation dataloader per-device batch size', type=int, default=128)
# Model command-line arguments
parser.add_argument('--backbone_arch',
help='Architecture to use for the backbone.',
default='resnet101',
choices=['resnet50', 'resnet101'])
parser.add_argument('--sync_bn',
help='Use sync BatchNorm. Recommended if the per device microbatch size is below 16',
action='store_true')
parser.add_argument('--cross_entropy_weight', help='Weight to scale the cross entropy loss', type=float, default=0.375)
parser.add_argument('--dice_weight', help='Weight to scale the dice loss', type=float, default=1.125)
# Optimizer command-line arguments
parser.add_argument('--learning_rate', help='Optimizer learning rate', type=float, default=0.08)
parser.add_argument('--momentum', help='Optimizer momentum', type=float, default=0.9)
parser.add_argument('--weight_decay', help='Optimizer weight decay', type=float, default=5.0e-5)
# Save checkpoint command-line arguments
parser.add_argument('--save_checkpoint_dir',
help='Directory in which to save model checkpoints',
type=str,
default='checkpoints/{run_name}')
parser.add_argument('--checkpoint_interval',
help='Frequency to save checkpoints',
type=Time.from_timestring,
default='1ep')
# Load checkpoint command-line arguments, assumes resuming from a previous training run (as opposed to fine-tuning)
parser.add_argument('--load_checkpoint_path', help='Path to the checkpoint to load', type=str)
# Recipes command-line argument
parser.add_argument('--recipe_name',
help='Algorithmic recipes to be applied to the trainer',
choices=['mild', 'medium', 'hot'])
# Logger command-line arguments
# Note: Only Weights and Biases to minimize arguments. Other loggers can be used by adjusting the script
parser.add_argument('--wandb_logger', help='Whether or not to log results to Weights and Biases', action='store_true')
parser.add_argument('--wandb_entity', help='WandB entity name', type=str)
parser.add_argument('--wandb_project', help='WandB project name', type=str)
parser.add_argument('--image_viz', help='Whether or not to log images using ImageVisualizer', action='store_true')
# Trainer arguments
parser.add_argument('--device_train_microbatch_size',
help='Size of train microbatch size if running on GPU',
default='auto')
parser.add_argument('--run_name', help='Name of the training run used for checkpointing and logging', type=str)
parser.add_argument('--seed', help='Random seed', type=int, default=17)
parser.add_argument('--max_duration',
help='Duration to train specified as a Time string',
type=Time.from_timestring,
default='128ep')
args = parser.parse_args()
IMAGENET_CHANNEL_MEAN = (int(0.485 * 255), int(0.456 * 255), int(0.406 * 255))
IMAGENET_CHANNEL_STD = (int(0.229 * 255), int(0.224 * 255), int(0.225 * 255))
ADE20K_URL = 'http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip'
ADE20K_FILE = 'ADEChallengeData2016.zip'
def _main():
# Divide batch size by number of devices
if dist.get_world_size() > 1:
args.train_batch_size = args.train_batch_size // dist.get_world_size()
args.eval_batch_size = args.eval_batch_size // dist.get_world_size()
# Train dataset code
logging.info('Building train dataloader')
if args.download:
torchvision.datasets.utils.download_and_extract_archive(url=ADE20K_URL,
download_root=args.data_dir,
filename=ADE20K_FILE,
remove_finished=True)
# Adjust the data_dir to include the extracted directory
args.data_dir = os.path.join(args.data_dir, 'ADEChallengeData2016')
# Training transforms applied to both the image and target
train_both_transforms = torch.nn.Sequential(
RandomResizePair(
min_scale=0.5,
max_scale=2.0,
base_size=(args.train_resize_size, args.train_resize_size),
),
RandomCropPair(
crop_size=(args.train_resize_size, args.train_resize_size),
class_max_percent=0.75,
num_retry=10,
),
RandomHFlipPair(),
)
# Training transforms applied to the image only
train_image_transforms = torch.nn.Sequential(
PhotometricDistoration(
brightness=32. / 255,
contrast=0.5,
saturation=0.5,
hue=18. / 255,
),
PadToSize(
size=(args.train_resize_size, args.train_resize_size),
fill=IMAGENET_CHANNEL_MEAN,
),
)
# Training transforms applied to the target only
train_target_transforms = PadToSize(size=(args.train_resize_size, args.train_resize_size), fill=0)
# Create ADE20k train dataset
train_dataset = ADE20k(
datadir=args.data_dir,
split='training',
image_transforms=train_image_transforms,
target_transforms=train_target_transforms,
both_transforms=train_both_transforms,
)
# Create ADE20k train dataloader
train_sampler = None
if dist.get_world_size():
# Nifty function to instantiate a PyTorch DistributedSampler based on your hardware setup
train_sampler = dist.get_sampler(train_dataset, drop_last=True, shuffle=True)
train_dataloader = DataLoader(
train_dataset,
batch_size=args.train_batch_size,
num_workers=8,
pin_memory=True,
drop_last=True, # Prevents using a smaller batch at the end of an epoch
sampler=train_sampler,
collate_fn=pil_image_collate,
persistent_workers=True,
)
# DataSpec enables image normalization to be performed on-GPU, marginally relieving dataloader bottleneck
train_dataspec = DataSpec(dataloader=train_dataloader,
device_transforms=NormalizationFn(mean=IMAGENET_CHANNEL_MEAN,
std=IMAGENET_CHANNEL_STD,
ignore_background=True))
logging.info('Built train dataloader\n')
# Validation dataset code
logging.info('Building evaluation dataloader')
# Validation image and target transformations
image_transforms = transforms.Resize(size=(args.eval_resize_size, args.eval_resize_size),
interpolation=InterpolationMode.BILINEAR)
target_transforms = transforms.Resize(size=(args.eval_resize_size, args.eval_resize_size),
interpolation=InterpolationMode.NEAREST)
# Create ADE20k validation dataset
val_dataset = ADE20k(datadir=args.data_dir,
split='validation',
both_transforms=None,
image_transforms=image_transforms,
target_transforms=target_transforms)
# Create ADE20k validation dataloader
val_sampler = None
if dist.get_world_size():
# Nifty function to instantiate a PyTorch DistributedSampler based on your hardware
val_sampler = dist.get_sampler(val_dataset, drop_last=False, shuffle=False)
val_dataloader = DataLoader(
val_dataset,
batch_size=args.eval_batch_size,
num_workers=8,
pin_memory=True,
drop_last=False,
sampler=val_sampler,
collate_fn=pil_image_collate,
persistent_workers=True,
)
# DataSpec enables image normalization to be performed on-GPU, marginally relieving dataloader bottleneck
val_dataspec = DataSpec(dataloader=val_dataloader,
device_transforms=NormalizationFn(mean=IMAGENET_CHANNEL_MEAN,
std=IMAGENET_CHANNEL_STD,
ignore_background=True))
logging.info('Built validation dataloader\n')
logging.info('Building Composer DeepLabv3+ model')
# Create a DeepLabv3+ model
model = deeplabv3(
num_classes=150,
backbone_arch=args.backbone_arch,
backbone_weights='IMAGENET1K_V2',
sync_bn=args.sync_bn,
use_plus=True,
)
# Initialize the classifier head only since the backbone uses pre-trained weights
def weight_init(module: torch.nn.Module):
if isinstance(module, (torch.nn.Linear, torch.nn.Conv2d)):
torch.nn.init.kaiming_normal_(module.weight)
if isinstance(module, torch.nn.BatchNorm2d):
torch.nn.init.ones_(module.weight)
torch.nn.init.zeros_(module.bias)
model.classifier.apply(weight_init) # type: ignore Does not recognize classifier as a torch.nn.Module
# Loss function to use during training
# This ignores index -1 since the NormalizationFn transformation sets the background class to -1
dice_loss_fn = DiceLoss(softmax=True, batch=True, ignore_absent_classes=True)
def combo_loss(output, target):
loss = {}
loss['cross_entropy'] = soft_cross_entropy(output, target, ignore_index=-1)
loss['dice'] = dice_loss_fn(output, target)
loss['total'] = args.cross_entropy_weight * loss['cross_entropy'] + args.dice_weight * loss['dice']
return loss
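# Worked example with the default weights (cross_entropy_weight=0.375, dice_weight=1.125):
# a batch with cross entropy 0.8 and dice loss 0.4 gives total = 0.375 * 0.8 + 1.125 * 0.4 = 0.75.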
# Training and Validation metrics to log throughout training
train_metrics = MetricCollection([CrossEntropy(ignore_index=-1), MIoU(num_classes=150, ignore_index=-1)])
val_metrics = MetricCollection([CrossEntropy(ignore_index=-1), MIoU(num_classes=150, ignore_index=-1)])
# Create a ComposerClassifier using the model, loss function, and metrics
composer_model = ComposerClassifier(module=model,
train_metrics=train_metrics,
val_metrics=val_metrics,
loss_fn=combo_loss)
logging.info('Built Composer DeepLabv3+ model\n')
logging.info('Building optimizer and learning rate scheduler')
# Optimizer
optimizer = DecoupledSGDW(composer_model.parameters(),
lr=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay)
# Only use a LR schedule if no recipe is specified or if the hot recipe was specified
lr_scheduler = None
if args.recipe_name is None or args.recipe_name == 'hot':
lr_scheduler = CosineAnnealingScheduler()
logging.info('Built optimizer and learning rate scheduler')
logging.info('Building callbacks: SpeedMonitor, LRMonitor, and CheckpointSaver')
speed_monitor = SpeedMonitor(window_size=50) # Measures throughput as samples/sec and tracks total training time
lr_monitor = LRMonitor() # Logs the learning rate
# Callback for checkpointing
checkpoint_saver = CheckpointSaver(folder=args.save_checkpoint_dir, save_interval=args.checkpoint_interval)
logging.info('Built callbacks: SpeedMonitor, LRMonitor, and CheckpointSaver\n')
# Recipes for training DeepLabv3+ on ImageNet in order of increasing training time and accuracy
# To learn about individual methods, check out "Methods Overview" in our documentation: https://docs.mosaicml.com/
logging.info('Building algorithm recipes')
if args.recipe_name == 'mild':
algorithms = [
ChannelsLast(),
EMA(half_life='1000ba', update_interval='10ba'),
]
elif args.recipe_name == 'medium':
algorithms = [
ChannelsLast(),
EMA(half_life='1000ba', update_interval='10ba'),
SAM(rho=0.3, interval=2),
MixUp(alpha=0.2),
]
elif args.recipe_name == 'hot':
algorithms = [
ChannelsLast(),
EMA(half_life='2000ba', update_interval='1ba'),
SAM(rho=0.3, interval=1),
MixUp(alpha=0.5),
]
else:
algorithms = None
logging.info('Built algorithm recipes\n')
# Weight and Biases logger if specified in commandline
logger = None
if args.wandb_logger:
logging.info('Building Weights and Biases logger')
if args.wandb_entity is None:
raise ValueError('Please specify --wandb_entity argument')
if args.wandb_project is None:
raise ValueError('Please specify --wandb_project argument')
logger = WandBLogger(entity=args.wandb_entity, project=args.wandb_project)
logging.info('Built Weights and Biases logger')
callbacks = [speed_monitor, lr_monitor, checkpoint_saver]
if args.image_viz:
callbacks.append(ImageVisualizer(mode='segmentation'))
# Create the Trainer!
logging.info('Building Trainer')
device = 'gpu' if torch.cuda.is_available() else 'cpu'
precision = 'amp' if device == 'gpu' else 'fp32' # Mixed precision for fast training when using a GPU
device_train_microbatch_size = 'auto' if device == 'gpu' else args.device_train_microbatch_size # If on GPU, use 'auto' gradient accumulation
trainer = Trainer(run_name=args.run_name,
model=composer_model,
train_dataloader=train_dataspec,
eval_dataloader=val_dataspec,
eval_interval='1ep',
optimizers=optimizer,
schedulers=lr_scheduler,
algorithms=algorithms,
loggers=logger,
max_duration=args.max_duration,
callbacks=callbacks,
load_path=args.load_checkpoint_path,
device=device,
precision=precision,
device_train_microbatch_size=device_train_microbatch_size,
seed=args.seed)
logging.info('Built Trainer\n')
# Start training!
logging.info('Train!')
trainer.fit()
if __name__ == '__main__':
_main()
| composer-dev | examples/segmentation/train_deeplabv3_ade20k.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Example script to train a ResNet model on ImageNet."""
import argparse
import logging
import os
import torch
from torch.utils.data import DataLoader
from torchmetrics import MetricCollection
from torchmetrics.classification import MulticlassAccuracy
from torchvision import transforms
from torchvision.datasets import ImageFolder
from torchvision.models import resnet
from composer import DataSpec, Time, Trainer
from composer.algorithms import (EMA, SAM, BlurPool, ChannelsLast, ColOut, LabelSmoothing, MixUp, ProgressiveResizing,
RandAugment, StochasticDepth)
from composer.callbacks import CheckpointSaver, LRMonitor, SpeedMonitor
from composer.datasets.utils import NormalizationFn, pil_image_collate
from composer.loggers import WandBLogger
from composer.loss import binary_cross_entropy_with_logits, soft_cross_entropy
from composer.metrics import CrossEntropy
from composer.models.tasks import ComposerClassifier
from composer.optim import CosineAnnealingWithWarmupScheduler, DecoupledSGDW
from composer.utils import dist
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser()
# Dataloader arguments
parser.add_argument('data_dir', help='Path to the directory containing the ImageNet-1k dataset', type=str)
parser.add_argument('--train_crop_size', help='Training image crop size', type=int, default=224)
parser.add_argument('--eval_resize_size', help='Evaluation image resize size', type=int, default=256)
parser.add_argument('--eval_crop_size', help='Evaluation image crop size', type=int, default=224)
parser.add_argument('--train_batch_size', help='Train dataloader per-device batch size', type=int, default=2048)
parser.add_argument('--eval_batch_size', help='Validation dataloader per-device batch size', type=int, default=2048)
# Model arguments
parser.add_argument('--model_name',
help='Name of the resnet model to train',
default='resnet50',
choices=['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'])
parser.add_argument('--loss_name',
help='Name of the loss function to use for training',
default='cross_entropy',
choices=['cross_entropy', 'binary_cross_entropy'])
# Optimizer arguments
parser.add_argument('--learning_rate', help='Optimizer learning rate', type=float, default=2.048)
parser.add_argument('--momentum', help='Optimizer momentum', type=float, default=0.875)
parser.add_argument('--weight_decay', help='Optimizer weight decay', type=float, default=5.0e-4)
# LR scheduler arguments
parser.add_argument('--t_warmup',
help='Duration of learning rate warmup specified as a Time string',
type=Time.from_timestring,
default='8ep')
parser.add_argument('--t_max',
help='Duration to cosine decay the learning rate specified as a Time string',
type=Time.from_timestring,
default='1dur')
# Save checkpoint arguments
parser.add_argument('--save_checkpoint_dir',
help='Directory in which to save model checkpoints',
type=str,
default='checkpoints/{run_name}')
parser.add_argument('--checkpoint_interval', help='Frequency to save checkpoints', type=str, default='1ep')
# Load checkpoint arguments, assumes resuming the previous training run instead of fine-tuning
parser.add_argument('--load_checkpoint_path', help='Path to the checkpoint to load', type=str)
# Recipes
parser.add_argument('--recipe_name',
help='Either "mild", "medium" or "spicy" in order of increasing training time and accuracy',
type=str,
choices=['mild', 'medium', 'spicy'])
# Logger parameters: progress bar logging is used by default
# Only the Weights & Biases option is exposed to keep the number of arguments down; other loggers can be substituted in the script
parser.add_argument('--wandb_logger', help='Whether or not to log results to Weights and Biases', action='store_true')
parser.add_argument('--wandb_entity', help='WandB entity name', type=str)
parser.add_argument('--wandb_project', help='WandB project name', type=str)
parser.add_argument('--wandb_run_name', help='WandB run name', type=str)
# Trainer arguments
parser.add_argument('--run_name', help='Name of the training run used for checkpointing and other logging', type=str)
parser.add_argument('--seed', help='Random seed', type=int, default=17)
parser.add_argument('--max_duration',
help='Duration to train specified as a Time string',
type=Time.from_timestring,
default='90ep')
parser.add_argument('--eval_interval',
help='How frequently to run evaluation on the validation set specified as a Time string',
type=Time.from_timestring,
default='1ep')
args = parser.parse_args()
def _main():
# Divide batch sizes by number of devices if running multi-gpu training
if dist.get_world_size():
args.train_batch_size = args.train_batch_size // dist.get_world_size()
args.eval_batch_size = args.eval_batch_size // dist.get_world_size()
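    # Illustrative arithmetic (not part of the original script): with 8 devices and the
    # default --train_batch_size of 2048, each device loads 2048 // 8 = 256 samples per
    # step, so the global batch size seen by the optimizer remains 2048.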
# Scale by 255 since the collate `pil_image_collate` results in images in range 0-255
# If using ToTensor() and the default collate, remove the scaling by 255
IMAGENET_CHANNEL_MEAN = (0.485 * 255, 0.456 * 255, 0.406 * 255)
IMAGENET_CHANNEL_STD = (0.229 * 255, 0.224 * 255, 0.225 * 255)
# Train dataset
logging.info('Building train dataloader')
train_transforms = transforms.Compose([
transforms.RandomResizedCrop(args.train_crop_size, scale=(0.08, 1.0), ratio=(0.75, 4.0 / 3.0)),
transforms.RandomHorizontalFlip(),
])
train_dataset = ImageFolder(os.path.join(args.data_dir, 'train'), train_transforms)
# Nifty function to instantiate a PyTorch DistributedSampler based on your hardware setup
train_sampler = dist.get_sampler(train_dataset, drop_last=True, shuffle=True)
train_dataloader = DataLoader(
train_dataset,
batch_size=args.train_batch_size,
num_workers=8,
pin_memory=True,
drop_last=True,
sampler=train_sampler,
collate_fn=pil_image_collate,
persistent_workers=True, # Reduce overhead of creating new workers at the expense of using slightly more RAM
)
# DataSpec allows for on-gpu transformations, marginally relieving dataloader bottleneck
train_dataspec = DataSpec(dataloader=train_dataloader,
device_transforms=NormalizationFn(mean=IMAGENET_CHANNEL_MEAN, std=IMAGENET_CHANNEL_STD))
logging.info('Built train dataloader\n')
# Validation dataset
logging.info('Building evaluation dataloader')
eval_transforms = transforms.Compose([
transforms.Resize(args.eval_resize_size),
transforms.CenterCrop(args.eval_crop_size),
])
eval_dataset = ImageFolder(os.path.join(args.data_dir, 'val'), eval_transforms)
    # Nifty function to instantiate a PyTorch DistributedSampler based on your hardware setup
eval_sampler = dist.get_sampler(eval_dataset, drop_last=False, shuffle=False)
eval_dataloader = DataLoader(
eval_dataset,
batch_size=args.eval_batch_size,
num_workers=8,
pin_memory=True,
drop_last=False,
sampler=eval_sampler,
collate_fn=pil_image_collate,
persistent_workers=True, # Reduce overhead of creating new workers at the expense of using slightly more RAM
)
eval_dataspec = DataSpec(dataloader=eval_dataloader,
device_transforms=NormalizationFn(mean=IMAGENET_CHANNEL_MEAN, std=IMAGENET_CHANNEL_STD))
logging.info('Built evaluation dataloader\n')
# Instantiate torchvision ResNet model
logging.info('Building Composer model')
model_fn = getattr(resnet, args.model_name)
model = model_fn(num_classes=1000, groups=1, width_per_group=64)
# Specify model initialization
def weight_init(w: torch.nn.Module):
if isinstance(w, torch.nn.Linear) or isinstance(w, torch.nn.Conv2d):
torch.nn.init.kaiming_normal_(w.weight)
if isinstance(w, torch.nn.BatchNorm2d):
w.weight.data = torch.rand(w.weight.data.shape)
w.bias.data = torch.zeros_like(w.bias.data)
# When using binary cross entropy, set the classification layer bias to -log(num_classes)
# to ensure the initial probabilities are approximately 1 / num_classes
if args.loss_name == 'binary_cross_entropy' and isinstance(w, torch.nn.Linear):
w.bias.data = torch.ones(w.bias.shape) * -torch.log(torch.tensor(w.bias.shape[0]))
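    # Quick sanity check of the bias initialization above (illustrative only): with
    # b = -log(N) and N = 1000 classes, sigmoid(b) = 1 / (1 + N) ~= 1e-3, so under
    # binary cross entropy with logits every class starts near the uniform 1 / num_classes.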
model.apply(weight_init)
# Performance metrics to log other than training loss
train_metrics = MulticlassAccuracy(num_classes=1000, average='micro')
val_metrics = MetricCollection([CrossEntropy(), MulticlassAccuracy(num_classes=1000, average='micro')])
# Cross entropy loss that can handle both index and one-hot targets
if args.loss_name == 'binary_cross_entropy':
loss_fn = binary_cross_entropy_with_logits
else:
loss_fn = soft_cross_entropy
# Wrapper function to convert a classification PyTorch model into a Composer model
composer_model = ComposerClassifier(model, train_metrics=train_metrics, val_metrics=val_metrics, loss_fn=loss_fn)
logging.info('Built Composer model\n')
# Optimizer
logging.info('Building optimizer and learning rate scheduler')
optimizer = DecoupledSGDW(composer_model.parameters(),
lr=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay)
# Learning rate scheduler: LR warmup for 8 epochs, then cosine decay for the rest of training
lr_scheduler = CosineAnnealingWithWarmupScheduler(t_warmup=args.t_warmup, t_max=args.t_max)
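    # Rough shape of the schedule (a sketch; see the Composer docs for exact semantics):
    # the LR ramps linearly from 0 to base_lr over t_warmup, then decays following
    # ~0.5 * (1 + cos(pi * tau)) * base_lr, where tau is the fraction of the remaining
    # t_max ('1dur' = the full training duration) that has elapsed.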
logging.info('Built optimizer and learning rate scheduler\n')
# Callbacks for logging
logging.info('Building SpeedMonitor, LRMonitor, and CheckpointSaver callbacks')
speed_monitor = SpeedMonitor(window_size=50) # Measures throughput as samples/sec and tracks total training time
lr_monitor = LRMonitor() # Logs the learning rate
# Callback for checkpointing
checkpoint_saver = CheckpointSaver(folder=args.save_checkpoint_dir, save_interval=args.checkpoint_interval)
logging.info('Built SpeedMonitor, LRMonitor, and CheckpointSaver callbacks\n')
# Recipes for training ResNet architectures on ImageNet in order of increasing training time and accuracy
# To learn about individual methods, check out "Methods Overview" in our documentation: https://docs.mosaicml.com/
logging.info('Building algorithm recipes')
if args.recipe_name == 'mild':
algorithms = [
BlurPool(),
ChannelsLast(),
EMA(half_life='100ba', update_interval='20ba'),
ProgressiveResizing(initial_scale=0.5, delay_fraction=0.4, finetune_fraction=0.2),
LabelSmoothing(smoothing=0.08),
]
elif args.recipe_name == 'medium':
algorithms = [
BlurPool(),
ChannelsLast(),
EMA(half_life='100ba', update_interval='20ba'),
ProgressiveResizing(initial_scale=0.5, delay_fraction=0.4, finetune_fraction=0.2),
LabelSmoothing(smoothing=0.1),
MixUp(alpha=0.2),
SAM(rho=0.5, interval=10),
]
elif args.recipe_name == 'spicy':
algorithms = [
BlurPool(),
ChannelsLast(),
EMA(half_life='100ba', update_interval='20ba'),
ProgressiveResizing(initial_scale=0.6, delay_fraction=0.2, finetune_fraction=0.2),
LabelSmoothing(smoothing=0.13),
MixUp(alpha=0.25),
SAM(rho=0.5, interval=5),
ColOut(p_col=0.05, p_row=0.05),
RandAugment(depth=1, severity=9),
StochasticDepth(target_layer_name='ResNetBottleneck',
stochastic_method='sample',
drop_distribution='linear',
drop_rate=0.1)
]
else:
algorithms = None
logging.info('Built algorithm recipes\n')
logger = None
if args.wandb_logger:
if args.wandb_entity is None:
raise ValueError('Please specify --wandb_entity argument')
if args.wandb_project is None:
raise ValueError('Please specify --wandb_project argument')
if args.wandb_run_name is None:
raise ValueError('Please specify --wandb_run_name argument')
logger = WandBLogger(entity=args.wandb_entity, project=args.wandb_project, name=args.wandb_run_name)
# Create the Trainer!
logging.info('Building Trainer')
device = 'gpu' if torch.cuda.is_available() else 'cpu'
precision = 'amp' if device == 'gpu' else 'fp32' # Mixed precision for fast training when using a GPU
trainer = Trainer(run_name=args.run_name,
model=composer_model,
train_dataloader=train_dataspec,
eval_dataloader=eval_dataspec,
eval_interval=args.eval_interval,
optimizers=optimizer,
schedulers=lr_scheduler,
algorithms=algorithms,
loggers=logger,
max_duration=args.max_duration,
callbacks=[speed_monitor, lr_monitor, checkpoint_saver],
load_path=args.load_checkpoint_path,
device=device,
precision=precision,
device_train_microbatch_size='auto',
seed=args.seed)
logging.info('Built Trainer\n')
# Start training!
logging.info('Train!')
trainer.fit()
if __name__ == '__main__':
_main()
| composer-dev | examples/imagenet/train_resnet_imagenet1k.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Helper utilities to create FFCV datasets."""
import logging
import os
import sys
import textwrap
from argparse import ArgumentParser
from io import BytesIO
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch.utils.data import Subset
from torchvision import transforms
from torchvision.datasets import CIFAR10, ImageFolder
from tqdm import tqdm
from composer.datasets.ffcv_utils import write_ffcv_dataset
log = logging.getLogger(__name__)
def _get_parser():
parser = ArgumentParser(description='Utility for converting datasets to ffcv format.')
parser.add_argument('--dataset',
type=str,
default='cifar10',
choices=['cifar10', 'imagenet1k'],
help=textwrap.dedent("""\
Dataset to use. Default: cifar10"""))
parser.add_argument('--remote',
type=str,
help=textwrap.dedent("""\
                            Remote directory (S3 or local filesystem) where dataset is stored. Example: s3://my-s3-bucket-name"""
))
parser.add_argument('--local',
type=str,
default=None,
help=textwrap.dedent("""\
Local filesystem directory where dataset is cached during operation. Default: None"""))
parser.add_argument('--split',
type=str,
default='train',
choices=['train', 'val'],
help=textwrap.dedent("""\
Split to use. Default: train"""))
parser.add_argument('--datadir',
type=str,
default=None,
help=textwrap.dedent("""\
Location of the dataset. Default: None"""))
parser.add_argument('--download',
type=bool,
default=False,
help=textwrap.dedent("""\
Download the dataset if possible. Default: False"""))
parser.add_argument('--write_path',
type=str,
default=None,
help=textwrap.dedent("""\
File path to use for writing the dataset. Default: /tmp/<dataset>_<split>.ffcv"""))
parser.add_argument('--write_mode',
type=str,
default='proportion',
choices=['raw', 'jpg', 'smart', 'proportion'],
help=textwrap.dedent("""\
                            Write mode to use. raw stores uint8 values, jpg stores jpeg-compressed images, smart
                            compresses based on image size, and proportion compresses a random fraction of
                            images given by compress_probability. Default: proportion"""))
    parser.add_argument('--max_resolution', type=int, default=500, help='Max resolution for images.')
parser.add_argument('--num_workers', type=int, default=64, help='Number of workers to use.')
parser.add_argument('--chunk_size', type=int, default=100, help='Chunk size to use.')
parser.add_argument('--jpeg_quality', type=int, default=90, help='Quality of jpeg.')
parser.add_argument('--subset', type=int, default=-1, help='Only use a subset of dataset.')
parser.add_argument('--compress_probability',
type=float,
required=False,
default=0.50,
help='Compress the given fraction of images to jpeg while writing the ffcv dataset.')
return parser
def _parse_args():
parser = _get_parser()
args = parser.parse_args()
if args.datadir is not None:
log.info(f'Will read from local directory: {args.datadir}.')
else:
if args.local is None:
args.local = f'/tmp/mds-cache/mds-{args.dataset}/'
if args.remote.startswith('s3://'):
log.info(f'Will read from remote: {args.remote}.')
else:
log.info(f'Will read from local: {args.remote}.')
if args.write_path is None:
args.write_path = f'/tmp/{args.dataset}_{args.split}.ffcv'
if os.path.exists(args.write_path):
log.error(f'Destination already exists: {args.write_path}')
sys.exit(-1)
return args
def _main():
args = _parse_args()
if args.dataset == 'cifar10':
dataset = CIFAR10(root=args.datadir, train=(args.split == 'train'), download=args.download)
elif args.dataset == 'imagenet1k':
dataset = ImageFolder(os.path.join(args.datadir, args.split))
else:
        raise ValueError(f'Unsupported dataset: {args.dataset}. Check out the list of supported datasets with -h')
if args.subset > 0:
dataset = Subset(dataset, range(args.subset))
write_ffcv_dataset(dataset=dataset,
write_path=args.write_path,
max_resolution=args.max_resolution,
num_workers=args.num_workers,
write_mode=args.write_mode,
compress_probability=args.compress_probability,
jpeg_quality=args.jpeg_quality,
chunk_size=args.chunk_size)
if __name__ == '__main__':
sys.exit(_main())
| composer-dev | scripts/ffcv/create_ffcv_datasets.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Reads in the Docker build matrix and generates a GHA job matrix."""
import json
from argparse import ArgumentParser, FileType, Namespace
from uuid import uuid4
import yaml
def _parse_args() -> Namespace:
"""Parse command-line arguments.
Returns:
Namespace: Command-line arguments.
"""
args = ArgumentParser(description='Process a Docker matrix YAML file.')
args.add_argument('yaml_file', type=FileType('r'), help='The YAML file to be processed.')
args.add_argument('-b',
'--build_args',
action='append',
required=False,
help='List of build args to override globally')
return args.parse_args()
def main(args: Namespace):
"""Reads in the Docker build matrix and generates a GHA job matrix."""
image_configs = yaml.safe_load(args.yaml_file)
for image_config in image_configs:
# Convert tags list to a CSV string
image_config['TAGS'] = ','.join(image_config['TAGS'])
# Generate a random UUID for staging
image_config['UUID'] = str(uuid4())
# Apply build args override
if args.build_args is not None:
for build_arg in args.build_args:
                arg, val = build_arg.split('=', 1)  # split on the first '=' only so values may contain '='
if arg in image_config.keys():
image_config[arg] = val
json_string = json.dumps(image_configs)
print(f"""matrix={{"include": {json_string}}}""")
if __name__ == '__main__':
main(_parse_args())
| composer-dev | .github/bin/gen_docker_matrix.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Run pytest using MCP."""
import argparse
import time
from mcli.sdk import RunConfig, RunStatus, create_run, follow_run_logs, stop_run, wait_for_run_status
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--name', type=str, default='mcp-pytest', help='Base name of run')
parser.add_argument('--cluster', type=str, default='r1z4', help='Cluster to use')
parser.add_argument('--gpu_type', type=str, default='a100_40gb', help='Type of GPU to use')
    parser.add_argument('--gpu_num', type=int, default=2, help='Number of GPUs to use')
parser.add_argument('--image', type=str, default='mosaicml/pytorch:latest', help='Docker image to use')
parser.add_argument('--git_branch', type=str, help='Git branch to check out')
parser.add_argument('--git_commit', type=str, help='Git commit to check out. Overrides git_branch if specified')
parser.add_argument('--pip_package_name', type=str, help='Name of pip package to install before running tests')
parser.add_argument('--pr_number',
type=int,
help='PR number to check out. Overrides git_branch/git_commit if specified')
parser.add_argument('--pytest_markers', type=str, help='Markers to pass to pytest')
parser.add_argument('--pytest_command', type=str, help='Command to run pytest')
parser.add_argument('--timeout', type=int, default=1800, help='Timeout for run (in seconds)')
args = parser.parse_args()
name = args.name
git_integration = {
'integration_type': 'git_repo',
'git_repo': 'mosaicml/composer',
'ssh_clone': 'False',
}
if args.git_branch is not None and args.git_commit is None:
name += f'-branch-{args.git_branch}'
git_integration['git_branch'] = args.git_branch
if args.git_commit is not None:
name += f'-commit-{args.git_commit}'
git_integration['git_commit'] = args.git_commit
command = 'cd composer'
# Checkout a specific PR if specified
if args.pr_number is not None:
name += f'-pr-{args.pr_number}'
command += f'''
git fetch origin pull/{args.pr_number}/head:pr_branch
git checkout pr_branch
'''
# Shorten name if too long
if len(name) > 56:
name = name[:56]
command += f'''
export COMPOSER_PACKAGE_NAME='{args.pip_package_name}'
pip install --upgrade --user .[all]
export COMMON_ARGS="-v --durations=20 -m '{args.pytest_markers}'"
make test PYTEST='{args.pytest_command}' EXTRA_ARGS="$COMMON_ARGS --codeblocks"
make test-dist PYTEST='{args.pytest_command}' EXTRA_ARGS="$COMMON_ARGS" WORLD_SIZE=2
python -m coverage combine
python -m coverage report
'''
config = RunConfig(
name=name,
cluster=args.cluster,
gpu_type=args.gpu_type,
gpu_num=args.gpu_num,
image=args.image,
integrations=[git_integration],
command=command,
)
# Create run
run = create_run(config)
print(f'[GHA] Run created: {run.name}')
# Wait until run starts before fetching logs
run = wait_for_run_status(run, status='running')
start_time = time.time()
print('[GHA] Run started. Following logs...')
# Print logs
for line in follow_run_logs(run):
print(line, end='')
# Check if args.timeout seconds have elapsed
if time.time() - start_time > args.timeout:
print(f'[GHA] Run timed out and did not complete in {args.timeout/60} minutes.')
run = stop_run(run)
print('[GHA] Run stopped.')
break
print('[GHA] Run completed. Waiting for run to finish...')
run = wait_for_run_status(run, status='completed')
# Fail if command exited with non-zero exit code or timed out
assert run.status == RunStatus.COMPLETED
| composer-dev | .github/mcp/mcp_pytest.py |
#!/usr/bin/python
import os
import sys
import argparse
import numpy as np
from skimage import color, io
import scipy.ndimage.interpolation as sni
import caffe
def parse_args(argv):
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('flist', type=str,
help='file containing list of images to process')
parser.add_argument('output', type=str,
help='output directory')
parser.add_argument('-p', '--proto', type=str,
default='../models/colorization_deploy_v2.prototxt',
help='prototxt file of the net model')
parser.add_argument('-m', '--model', type=str,
default='../models/colorization_release_v2.caffemodel',
help='caffemodel file of the net model')
parser.add_argument('-c', '--cluster', type=str,
default='../resources/pts_in_hull.npy',
help='cluster centers (pts in hull)')
parser.add_argument('-g', '--gpu', type=int,
default=0,
help='gpu id')
args = parser.parse_args(args=argv)
return args
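# Example invocation (illustrative, relying on the default model paths above):
#   python batch_process.py file_list.txt ./output_dir -g 0
# where file_list.txt contains one image path per line.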
# Prepare network
def prepare_net(proto, model, cluster):
net = caffe.Net(proto, model, caffe.TEST)
in_shape = net.blobs['data_l'].data.shape[2:] # get input shape
out_shape = net.blobs['class8_ab'].data.shape[2:] # get output shape
print 'Input dimensions: %s' % str(in_shape)
print 'Output dimensions: %s' % str(out_shape)
pts_in_hull = np.load(cluster) # load cluster centers
net.params['class8_ab'][0].data[:,:,0,0] = pts_in_hull.transpose((1,0)) # populate cluster centers as 1x1 convolution kernel
print 'Annealed-Mean Parameters populated'
return net, in_shape, out_shape
# Prepare image for network
def prepare_img(fpath, in_shape):
# load the original image
img_rgb = caffe.io.load_image(fpath)
img_lab = color.rgb2lab(img_rgb) # convert image to lab color space
img_l = img_lab[:,:,0] # pull out L channel
orig_shape = img_rgb.shape[:2] # original image size
# resize image to network input size
img_rs = caffe.io.resize_image(img_rgb, in_shape) # resize image to network input size
img_lab_rs = color.rgb2lab(img_rs)
img_l_rs = img_lab_rs[:,:,0]
return img_l_rs, img_l, orig_shape
# Process image
def process(net, in_shape, out_shape, fpath):
img_l_rs, img_l, orig_shape = prepare_img(fpath, in_shape)
net.blobs['data_l'].data[0,0,:,:] = img_l_rs-50 # subtract 50 for mean-centering
net.forward() # run network
ab_dec = net.blobs['class8_ab'].data[0,:,:,:].transpose((1,2,0)) # this is our result
shape = (1.*orig_shape[0]/out_shape[0], 1.*orig_shape[1]/out_shape[1])
ab_dec_us = sni.zoom(ab_dec,(shape[0],shape[1],1)) # upsample to match size of original image L
img_lab_out = np.concatenate((img_l[:,:,np.newaxis],ab_dec_us),axis=2) # concatenate with original image L
img_rgb_out = np.clip(color.lab2rgb(img_lab_out),0,1) # convert back to rgb
return img_rgb_out
# Save image
def save_img(img, fpath, out_dir):
fname_in = os.path.basename(fpath)
fpath_out = os.path.join(out_dir, fname_in)
io.imsave(fpath_out, img)
# Main
def main(argv):
# Parse arguments
args = parse_args(argv)
print args
# Prepare caffe and net
caffe.set_mode_gpu()
caffe.set_device(args.gpu)
net, in_shape, out_shape = prepare_net(args.proto, args.model, args.cluster)
# Process files
with open(args.flist) as flist:
for fpath in flist:
fpath = fpath.rstrip('\n')
print 'Processing file %s ...' % fpath
img = process(net, in_shape, out_shape, fpath)
save_img(img, fpath, args.output)
print 'Done!'
if __name__ == "__main__":
main(sys.argv[1:])
| colorization-master | demo/batch_process.py |
#!/usr/bin/env python
import datetime
import os
import sys
def extract_datetime_from_line(line, year):
# Expected format: I0210 13:39:22.381027 25210 solver.cpp:204] Iteration 100, lr = 0.00992565
line = line.strip().split()
month = int(line[0][1:3])
day = int(line[0][3:])
timestamp = line[1]
pos = timestamp.rfind('.')
ts = [int(x) for x in timestamp[:pos].split(':')]
hour = ts[0]
minute = ts[1]
second = ts[2]
microsecond = int(timestamp[pos + 1:])
dt = datetime.datetime(year, month, day, hour, minute, second, microsecond)
return dt
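# Worked example (format as noted above): the line
#   "I0210 13:39:22.381027 25210 solver.cpp:204] Iteration 100, lr = 0.00992565"
# parses to month=2, day=10 and, assuming year=2016, datetime(2016, 2, 10, 13, 39, 22, 381027).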
def get_log_created_year(input_file):
"""Get year from log file system timestamp
"""
log_created_time = os.path.getctime(input_file)
log_created_year = datetime.datetime.fromtimestamp(log_created_time).year
return log_created_year
def get_start_time(line_iterable, year):
"""Find start time from group of lines
"""
start_datetime = None
for line in line_iterable:
line = line.strip()
if line.find('Solving') != -1:
start_datetime = extract_datetime_from_line(line, year)
break
return start_datetime
def extract_seconds(input_file, output_file):
with open(input_file, 'r') as f:
lines = f.readlines()
log_created_year = get_log_created_year(input_file)
start_datetime = get_start_time(lines, log_created_year)
assert start_datetime, 'Start time not found'
out = open(output_file, 'w')
for line in lines:
line = line.strip()
if line.find('Iteration') != -1:
dt = extract_datetime_from_line(line, log_created_year)
elapsed_seconds = (dt - start_datetime).total_seconds()
out.write('%f\n' % elapsed_seconds)
out.close()
if __name__ == '__main__':
if len(sys.argv) < 3:
print('Usage: ./extract_seconds input_file output_file')
exit(1)
extract_seconds(sys.argv[1], sys.argv[2])
| colorization-master | caffe-colorization/tools/extra/extract_seconds.py |
#!/usr/bin/env python
"""
Parse training log
Evolved from parse_log.sh
"""
import os
import re
import extract_seconds
import argparse
import csv
from collections import OrderedDict
def parse_log(path_to_log):
"""Parse log file
Returns (train_dict_list, train_dict_names, test_dict_list, test_dict_names)
train_dict_list and test_dict_list are lists of dicts that define the table
rows
train_dict_names and test_dict_names are ordered tuples of the column names
for the two dict_lists
"""
regex_iteration = re.compile('Iteration (\d+)')
regex_train_output = re.compile('Train net output #(\d+): (\S+) = ([\.\deE+-]+)')
regex_test_output = re.compile('Test net output #(\d+): (\S+) = ([\.\deE+-]+)')
regex_learning_rate = re.compile('lr = ([-+]?[0-9]*\.?[0-9]+([eE]?[-+]?[0-9]+)?)')
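    # Illustrative matches (log lines paraphrased from typical Caffe output):
    #   "Iteration 100, lr = 0.01"          -> regex_iteration group(1) == '100',
    #                                          regex_learning_rate group(1) == '0.01'
    #   "Train net output #0: loss = 1.5"   -> regex_train_output groups ('0', 'loss', '1.5')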
# Pick out lines of interest
iteration = -1
learning_rate = float('NaN')
train_dict_list = []
test_dict_list = []
train_row = None
test_row = None
logfile_year = extract_seconds.get_log_created_year(path_to_log)
with open(path_to_log) as f:
start_time = extract_seconds.get_start_time(f, logfile_year)
for line in f:
iteration_match = regex_iteration.search(line)
if iteration_match:
iteration = float(iteration_match.group(1))
if iteration == -1:
# Only start parsing for other stuff if we've found the first
# iteration
continue
time = extract_seconds.extract_datetime_from_line(line,
logfile_year)
seconds = (time - start_time).total_seconds()
learning_rate_match = regex_learning_rate.search(line)
if learning_rate_match:
learning_rate = float(learning_rate_match.group(1))
train_dict_list, train_row = parse_line_for_net_output(
regex_train_output, train_row, train_dict_list,
line, iteration, seconds, learning_rate
)
test_dict_list, test_row = parse_line_for_net_output(
regex_test_output, test_row, test_dict_list,
line, iteration, seconds, learning_rate
)
fix_initial_nan_learning_rate(train_dict_list)
fix_initial_nan_learning_rate(test_dict_list)
return train_dict_list, test_dict_list
def parse_line_for_net_output(regex_obj, row, row_dict_list,
line, iteration, seconds, learning_rate):
"""Parse a single line for training or test output
    Returns a tuple with (row_dict_list, row)
row: may be either a new row or an augmented version of the current row
row_dict_list: may be either the current row_dict_list or an augmented
version of the current row_dict_list
"""
output_match = regex_obj.search(line)
if output_match:
if not row or row['NumIters'] != iteration:
# Push the last row and start a new one
if row:
# If we're on a new iteration, push the last row
# This will probably only happen for the first row; otherwise
# the full row checking logic below will push and clear full
# rows
row_dict_list.append(row)
row = OrderedDict([
('NumIters', iteration),
('Seconds', seconds),
('LearningRate', learning_rate)
])
# output_num is not used; may be used in the future
# output_num = output_match.group(1)
output_name = output_match.group(2)
output_val = output_match.group(3)
row[output_name] = float(output_val)
if row and len(row_dict_list) >= 1 and len(row) == len(row_dict_list[0]):
# The row is full, based on the fact that it has the same number of
# columns as the first row; append it to the list
row_dict_list.append(row)
row = None
return row_dict_list, row
def fix_initial_nan_learning_rate(dict_list):
"""Correct initial value of learning rate
Learning rate is normally not printed until after the initial test and
training step, which means the initial testing and training rows have
LearningRate = NaN. Fix this by copying over the LearningRate from the
second row, if it exists.
"""
if len(dict_list) > 1:
dict_list[0]['LearningRate'] = dict_list[1]['LearningRate']
def save_csv_files(logfile_path, output_dir, train_dict_list, test_dict_list,
delimiter=',', verbose=False):
"""Save CSV files to output_dir
If the input log file is, e.g., caffe.INFO, the names will be
caffe.INFO.train and caffe.INFO.test
"""
log_basename = os.path.basename(logfile_path)
train_filename = os.path.join(output_dir, log_basename + '.train')
write_csv(train_filename, train_dict_list, delimiter, verbose)
test_filename = os.path.join(output_dir, log_basename + '.test')
write_csv(test_filename, test_dict_list, delimiter, verbose)
def write_csv(output_filename, dict_list, delimiter, verbose=False):
"""Write a CSV file
"""
if not dict_list:
if verbose:
print('Not writing %s; no lines to write' % output_filename)
return
dialect = csv.excel
dialect.delimiter = delimiter
with open(output_filename, 'w') as f:
dict_writer = csv.DictWriter(f, fieldnames=dict_list[0].keys(),
dialect=dialect)
dict_writer.writeheader()
dict_writer.writerows(dict_list)
if verbose:
print 'Wrote %s' % output_filename
def parse_args():
description = ('Parse a Caffe training log into two CSV files '
'containing training and testing information')
parser = argparse.ArgumentParser(description=description)
parser.add_argument('logfile_path',
help='Path to log file')
parser.add_argument('output_dir',
help='Directory in which to place output CSV files')
parser.add_argument('--verbose',
action='store_true',
help='Print some extra info (e.g., output filenames)')
parser.add_argument('--delimiter',
default=',',
help=('Column delimiter in output files '
'(default: \'%(default)s\')'))
args = parser.parse_args()
return args
def main():
args = parse_args()
train_dict_list, test_dict_list = parse_log(args.logfile_path)
save_csv_files(args.logfile_path, args.output_dir, train_dict_list,
test_dict_list, delimiter=args.delimiter)
if __name__ == '__main__':
main()
| colorization-master | caffe-colorization/tools/extra/parse_log.py |
#!/usr/bin/env python
"""Net summarization tool.
This tool summarizes the structure of a net in a concise but comprehensive
tabular listing, taking a prototxt file as input.
Use this tool to check at a glance that the computation you've specified is the
computation you expect.
"""
from caffe.proto import caffe_pb2
from google import protobuf
import re
import argparse
# ANSI codes for coloring blobs (used cyclically)
COLORS = ['92', '93', '94', '95', '97', '96', '42', '43;30', '100',
'444', '103;30', '107;30']
DISCONNECTED_COLOR = '41'
def read_net(filename):
net = caffe_pb2.NetParameter()
with open(filename) as f:
protobuf.text_format.Parse(f.read(), net)
return net
def format_param(param):
out = []
if len(param.name) > 0:
out.append(param.name)
if param.lr_mult != 1:
out.append('x{}'.format(param.lr_mult))
if param.decay_mult != 1:
out.append('Dx{}'.format(param.decay_mult))
return ' '.join(out)
def printed_len(s):
return len(re.sub(r'\033\[[\d;]+m', '', s))
def print_table(table, max_width):
"""Print a simple nicely-aligned table.
table must be a list of (equal-length) lists. Columns are space-separated,
and as narrow as possible, but no wider than max_width. Text may overflow
columns; note that unlike string.format, this will not affect subsequent
columns, if possible."""
max_widths = [max_width] * len(table[0])
column_widths = [max(printed_len(row[j]) + 1 for row in table)
for j in range(len(table[0]))]
column_widths = [min(w, max_w) for w, max_w in zip(column_widths, max_widths)]
for row in table:
row_str = ''
right_col = 0
for cell, width in zip(row, column_widths):
right_col += width
row_str += cell + ' '
row_str += ' ' * max(right_col - printed_len(row_str), 0)
print row_str
def summarize_net(net):
disconnected_tops = set()
for lr in net.layer:
disconnected_tops |= set(lr.top)
disconnected_tops -= set(lr.bottom)
table = []
colors = {}
for lr in net.layer:
tops = []
for ind, top in enumerate(lr.top):
color = colors.setdefault(top, COLORS[len(colors) % len(COLORS)])
if top in disconnected_tops:
top = '\033[1;4m' + top
if len(lr.loss_weight) > 0:
top = '{} * {}'.format(lr.loss_weight[ind], top)
tops.append('\033[{}m{}\033[0m'.format(color, top))
top_str = ', '.join(tops)
bottoms = []
for bottom in lr.bottom:
color = colors.get(bottom, DISCONNECTED_COLOR)
bottoms.append('\033[{}m{}\033[0m'.format(color, bottom))
bottom_str = ', '.join(bottoms)
if lr.type == 'Python':
type_str = lr.python_param.module + '.' + lr.python_param.layer
else:
type_str = lr.type
# Summarize conv/pool parameters.
# TODO support rectangular/ND parameters
conv_param = lr.convolution_param
if (lr.type in ['Convolution', 'Deconvolution']
and len(conv_param.kernel_size) == 1):
arg_str = str(conv_param.kernel_size[0])
if len(conv_param.stride) > 0 and conv_param.stride[0] != 1:
arg_str += '/' + str(conv_param.stride[0])
if len(conv_param.pad) > 0 and conv_param.pad[0] != 0:
arg_str += '+' + str(conv_param.pad[0])
arg_str += ' ' + str(conv_param.num_output)
if conv_param.group != 1:
arg_str += '/' + str(conv_param.group)
elif lr.type == 'Pooling':
arg_str = str(lr.pooling_param.kernel_size)
if lr.pooling_param.stride != 1:
arg_str += '/' + str(lr.pooling_param.stride)
if lr.pooling_param.pad != 0:
arg_str += '+' + str(lr.pooling_param.pad)
else:
arg_str = ''
if len(lr.param) > 0:
param_strs = map(format_param, lr.param)
if max(map(len, param_strs)) > 0:
param_str = '({})'.format(', '.join(param_strs))
else:
param_str = ''
else:
param_str = ''
table.append([lr.name, type_str, param_str, bottom_str, '->', top_str,
arg_str])
return table
def main():
parser = argparse.ArgumentParser(description="Print a concise summary of net computation.")
parser.add_argument('filename', help='net prototxt file to summarize')
parser.add_argument('-w', '--max-width', help='maximum field width',
type=int, default=30)
args = parser.parse_args()
net = read_net(args.filename)
table = summarize_net(net)
print_table(table, max_width=args.max_width)
if __name__ == '__main__':
main()
| colorization-master | caffe-colorization/tools/extra/summarize.py |
#!/usr/bin/env python
from mincepie import mapreducer, launcher
import gflags
import os
import cv2
from PIL import Image
# gflags
gflags.DEFINE_string('image_lib', 'opencv',
'OpenCV or PIL, case insensitive. The default value is the faster OpenCV.')
gflags.DEFINE_string('input_folder', '',
'The folder that contains all input images, organized in synsets.')
gflags.DEFINE_integer('output_side_length', 256,
'Expected side length of the output image.')
gflags.DEFINE_string('output_folder', '',
'The folder that we write output resized and cropped images to')
FLAGS = gflags.FLAGS
class OpenCVResizeCrop:
def resize_and_crop_image(self, input_file, output_file, output_side_length = 256):
        '''Takes an image name, resizes it, and crops the center square
'''
img = cv2.imread(input_file)
height, width, depth = img.shape
new_height = output_side_length
new_width = output_side_length
if height > width:
new_height = output_side_length * height / width
else:
new_width = output_side_length * width / height
resized_img = cv2.resize(img, (new_width, new_height))
height_offset = (new_height - output_side_length) / 2
width_offset = (new_width - output_side_length) / 2
cropped_img = resized_img[height_offset:height_offset + output_side_length,
width_offset:width_offset + output_side_length]
cv2.imwrite(output_file, cropped_img)
class PILResizeCrop:
## http://united-coders.com/christian-harms/image-resizing-tips-every-coder-should-know/
def resize_and_crop_image(self, input_file, output_file, output_side_length = 256, fit = True):
'''Downsample the image.
'''
img = Image.open(input_file)
box = (output_side_length, output_side_length)
#preresize image with factor 2, 4, 8 and fast algorithm
factor = 1
while img.size[0]/factor > 2*box[0] and img.size[1]*2/factor > 2*box[1]:
factor *=2
if factor > 1:
img.thumbnail((img.size[0]/factor, img.size[1]/factor), Image.NEAREST)
#calculate the cropping box and get the cropped part
if fit:
x1 = y1 = 0
x2, y2 = img.size
wRatio = 1.0 * x2/box[0]
hRatio = 1.0 * y2/box[1]
if hRatio > wRatio:
y1 = int(y2/2-box[1]*wRatio/2)
y2 = int(y2/2+box[1]*wRatio/2)
else:
x1 = int(x2/2-box[0]*hRatio/2)
x2 = int(x2/2+box[0]*hRatio/2)
img = img.crop((x1,y1,x2,y2))
#Resize the image with best quality algorithm ANTI-ALIAS
img.thumbnail(box, Image.ANTIALIAS)
#save it into a file-like object
with open(output_file, 'wb') as out:
img.save(out, 'JPEG', quality=75)
class ResizeCropImagesMapper(mapreducer.BasicMapper):
'''The ImageNet Compute mapper.
The input value would be the file listing images' paths relative to input_folder.
'''
def map(self, key, value):
if type(value) is not str:
value = str(value)
files = [value]
image_lib = FLAGS.image_lib.lower()
if image_lib == 'pil':
resize_crop = PILResizeCrop()
else:
resize_crop = OpenCVResizeCrop()
for i, line in enumerate(files):
try:
line = line.replace(FLAGS.input_folder, '').strip()
line = line.split()
image_file_name = line[0]
input_file = os.path.join(FLAGS.input_folder, image_file_name)
output_file = os.path.join(FLAGS.output_folder, image_file_name)
output_dir = output_file[:output_file.rfind('/')]
if not os.path.exists(output_dir):
os.makedirs(output_dir)
feat = resize_crop.resize_and_crop_image(input_file, output_file,
FLAGS.output_side_length)
except Exception, e:
# we ignore the exception (maybe the image is corrupted?)
print line, Exception, e
yield value, FLAGS.output_folder
mapreducer.REGISTER_DEFAULT_MAPPER(ResizeCropImagesMapper)
mapreducer.REGISTER_DEFAULT_READER(mapreducer.FileReader)
mapreducer.REGISTER_DEFAULT_WRITER(mapreducer.FileWriter)
if __name__ == '__main__':
launcher.launch()
| colorization-master | caffe-colorization/tools/extra/resize_and_crop_images.py |
#!/usr/bin/env python
"""
classify.py is an out-of-the-box image classifer callable from the command line.
By default it configures and runs the Caffe reference ImageNet model.
"""
import numpy as np
import os
import sys
import argparse
import glob
import time
import caffe
def main(argv):
pycaffe_dir = os.path.dirname(__file__)
parser = argparse.ArgumentParser()
# Required arguments: input and output files.
parser.add_argument(
"input_file",
help="Input image, directory, or npy."
)
parser.add_argument(
"output_file",
help="Output npy filename."
)
# Optional arguments.
parser.add_argument(
"--model_def",
default=os.path.join(pycaffe_dir,
"../models/bvlc_reference_caffenet/deploy.prototxt"),
help="Model definition file."
)
parser.add_argument(
"--pretrained_model",
default=os.path.join(pycaffe_dir,
"../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel"),
help="Trained model weights file."
)
parser.add_argument(
"--gpu",
action='store_true',
help="Switch for gpu computation."
)
parser.add_argument(
"--center_only",
action='store_true',
help="Switch for prediction from center crop alone instead of " +
"averaging predictions across crops (default)."
)
parser.add_argument(
"--images_dim",
default='256,256',
help="Canonical 'height,width' dimensions of input images."
)
parser.add_argument(
"--mean_file",
default=os.path.join(pycaffe_dir,
'caffe/imagenet/ilsvrc_2012_mean.npy'),
help="Data set image mean of [Channels x Height x Width] dimensions " +
"(numpy array). Set to '' for no mean subtraction."
)
parser.add_argument(
"--input_scale",
type=float,
help="Multiply input features by this scale to finish preprocessing."
)
parser.add_argument(
"--raw_scale",
type=float,
default=255.0,
help="Multiply raw input by this scale before preprocessing."
)
parser.add_argument(
"--channel_swap",
default='2,1,0',
help="Order to permute input channels. The default converts " +
"RGB -> BGR since BGR is the Caffe default by way of OpenCV."
)
parser.add_argument(
"--ext",
default='jpg',
help="Image file extension to take as input when a directory " +
"is given as the input file."
)
args = parser.parse_args()
image_dims = [int(s) for s in args.images_dim.split(',')]
mean, channel_swap = None, None
if args.mean_file:
mean = np.load(args.mean_file)
if args.channel_swap:
channel_swap = [int(s) for s in args.channel_swap.split(',')]
if args.gpu:
caffe.set_mode_gpu()
print("GPU mode")
else:
caffe.set_mode_cpu()
print("CPU mode")
# Make classifier.
classifier = caffe.Classifier(args.model_def, args.pretrained_model,
image_dims=image_dims, mean=mean,
input_scale=args.input_scale, raw_scale=args.raw_scale,
channel_swap=channel_swap)
# Load numpy array (.npy), directory glob (*.jpg), or image file.
args.input_file = os.path.expanduser(args.input_file)
if args.input_file.endswith('npy'):
print("Loading file: %s" % args.input_file)
inputs = np.load(args.input_file)
elif os.path.isdir(args.input_file):
print("Loading folder: %s" % args.input_file)
inputs =[caffe.io.load_image(im_f)
for im_f in glob.glob(args.input_file + '/*.' + args.ext)]
else:
print("Loading file: %s" % args.input_file)
inputs = [caffe.io.load_image(args.input_file)]
print("Classifying %d inputs." % len(inputs))
# Classify.
start = time.time()
predictions = classifier.predict(inputs, not args.center_only)
print("Done in %.2f s." % (time.time() - start))
# Save
print("Saving results into %s" % args.output_file)
np.save(args.output_file, predictions)
if __name__ == '__main__':
main(sys.argv)
| colorization-master | caffe-colorization/python/classify.py |
import numpy as np
import caffe
import sys
def f(a, b):
sm = 0
for i in range(a.shape[0]):
        sm += ((a[i]-b[i]) / (a[i]+b[i])).mean()
return sm
A = np.random.random((16, 500, 500, 3))
B = np.random.random((16, 500, 500, 3))
a, b = caffe.Array(A.shape), caffe.Array(B.shape)
a[...] = A
b[...] = B
from time import time
t0, sm = time(), 0
for i in range(16):
sm += f(A,B)
print( "Numpy T", time()-t0, 'SM', sm )
t0, sm = time(), 0
for i in range(16):
sm += f(a,b)
print( "Array(C) T", time()-t0, 'SM', sm )
del a, b
caffe.set_mode_gpu()
a, b = caffe.Array(A), caffe.Array(B)
t0, sm = time(), 0
for i in range(16):
sm += f(a,b)
print( "Array(G) T", time()-t0, 'SM', sm )
| colorization-master | caffe-colorization/python/test_array.py |
#!/usr/bin/env python
"""
Draw a graph of the net architecture.
"""
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from google.protobuf import text_format
import caffe
import caffe.draw
from caffe.proto import caffe_pb2
def parse_args():
"""Parse input arguments
"""
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('input_net_proto_file',
help='Input network prototxt file')
parser.add_argument('output_image_file',
help='Output image file')
parser.add_argument('--rankdir',
help=('One of TB (top-bottom, i.e., vertical), '
'RL (right-left, i.e., horizontal), or another '
'valid dot option; see '
'http://www.graphviz.org/doc/info/'
'attrs.html#k:rankdir'),
default='LR')
args = parser.parse_args()
return args
def main():
args = parse_args()
net = caffe_pb2.NetParameter()
text_format.Merge(open(args.input_net_proto_file).read(), net)
print('Drawing net to %s' % args.output_image_file)
caffe.draw.draw_net_to_file(net, args.output_image_file, args.rankdir)
if __name__ == '__main__':
main()
| colorization-master | caffe-colorization/python/draw_net.py |
#!/usr/bin/env python
"""
detector.py is an out-of-the-box windowed detector
callable from the command line.
By default it configures and runs the Caffe reference ImageNet model.
Note that this model was trained for image classification and not detection,
and finetuning for detection can be expected to improve results.
The selective_search_ijcv_with_python code required for the selective search
proposal mode is available at
https://github.com/sergeyk/selective_search_ijcv_with_python
TODO:
- batch up image filenames as well: don't want to load all of them into memory
- come up with a batching scheme that preserved order / keeps a unique ID
"""
import numpy as np
import pandas as pd
import os
import argparse
import time
import caffe
CROP_MODES = ['list', 'selective_search']
COORD_COLS = ['ymin', 'xmin', 'ymax', 'xmax']
def main(argv):
pycaffe_dir = os.path.dirname(__file__)
parser = argparse.ArgumentParser()
# Required arguments: input and output.
parser.add_argument(
"input_file",
help="Input txt/csv filename. If .txt, must be list of filenames.\
If .csv, must be comma-separated file with header\
'filename, xmin, ymin, xmax, ymax'"
)
parser.add_argument(
"output_file",
help="Output h5/csv filename. Format depends on extension."
)
# Optional arguments.
parser.add_argument(
"--model_def",
default=os.path.join(pycaffe_dir,
"../models/bvlc_reference_caffenet/deploy.prototxt"),
help="Model definition file."
)
parser.add_argument(
"--pretrained_model",
default=os.path.join(pycaffe_dir,
"../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel"),
help="Trained model weights file."
)
parser.add_argument(
"--crop_mode",
default="selective_search",
choices=CROP_MODES,
help="How to generate windows for detection."
)
parser.add_argument(
"--gpu",
action='store_true',
help="Switch for gpu computation."
)
parser.add_argument(
"--mean_file",
default=os.path.join(pycaffe_dir,
'caffe/imagenet/ilsvrc_2012_mean.npy'),
help="Data set image mean of H x W x K dimensions (numpy array). " +
"Set to '' for no mean subtraction."
)
parser.add_argument(
"--input_scale",
type=float,
help="Multiply input features by this scale to finish preprocessing."
)
parser.add_argument(
"--raw_scale",
type=float,
default=255.0,
help="Multiply raw input by this scale before preprocessing."
)
parser.add_argument(
"--channel_swap",
default='2,1,0',
help="Order to permute input channels. The default converts " +
"RGB -> BGR since BGR is the Caffe default by way of OpenCV."
)
parser.add_argument(
"--context_pad",
type=int,
default='16',
help="Amount of surrounding context to collect in input window."
)
args = parser.parse_args()
mean, channel_swap = None, None
if args.mean_file:
mean = np.load(args.mean_file)
if mean.shape[1:] != (1, 1):
mean = mean.mean(1).mean(1)
if args.channel_swap:
channel_swap = [int(s) for s in args.channel_swap.split(',')]
if args.gpu:
caffe.set_mode_gpu()
print("GPU mode")
else:
caffe.set_mode_cpu()
print("CPU mode")
# Make detector.
detector = caffe.Detector(args.model_def, args.pretrained_model, mean=mean,
input_scale=args.input_scale, raw_scale=args.raw_scale,
channel_swap=channel_swap,
context_pad=args.context_pad)
# Load input.
t = time.time()
print("Loading input...")
if args.input_file.lower().endswith('txt'):
with open(args.input_file) as f:
inputs = [_.strip() for _ in f.readlines()]
elif args.input_file.lower().endswith('csv'):
inputs = pd.read_csv(args.input_file, sep=',', dtype={'filename': str})
inputs.set_index('filename', inplace=True)
else:
raise Exception("Unknown input file type: not in txt or csv.")
# Detect.
if args.crop_mode == 'list':
# Unpack sequence of (image filename, windows).
images_windows = [
(ix, inputs.iloc[np.where(inputs.index == ix)][COORD_COLS].values)
for ix in inputs.index.unique()
]
detections = detector.detect_windows(images_windows)
else:
detections = detector.detect_selective_search(inputs)
print("Processed {} windows in {:.3f} s.".format(len(detections),
time.time() - t))
# Collect into dataframe with labeled fields.
df = pd.DataFrame(detections)
df.set_index('filename', inplace=True)
df[COORD_COLS] = pd.DataFrame(
data=np.vstack(df['window']), index=df.index, columns=COORD_COLS)
del(df['window'])
# Save results.
t = time.time()
if args.output_file.lower().endswith('csv'):
# csv
        # Enumerate the class probabilities, deriving the number of classes
        # from the length of the first prediction vector.
        num_output = len(df['prediction'].iloc[0])
        class_cols = ['class{}'.format(x) for x in range(num_output)]
        df[class_cols] = pd.DataFrame(
            data=np.vstack(df['prediction']), index=df.index, columns=class_cols)
        df.to_csv(args.output_file, cols=COORD_COLS + class_cols)
else:
# h5
df.to_hdf(args.output_file, 'df', mode='w')
print("Saved to {} in {:.3f} s.".format(args.output_file,
time.time() - t))
if __name__ == "__main__":
import sys
main(sys.argv)
| colorization-master | caffe-colorization/python/detect.py |
#!/usr/bin/env python
"""
Do windowed detection by classifying a number of images/crops at once,
optionally using the selective search window proposal method.
This implementation follows ideas in
Ross Girshick, Jeff Donahue, Trevor Darrell, Jitendra Malik.
Rich feature hierarchies for accurate object detection and semantic
segmentation.
http://arxiv.org/abs/1311.2524
The selective_search_ijcv_with_python code required for the selective search
proposal mode is available at
https://github.com/sergeyk/selective_search_ijcv_with_python
"""
import numpy as np
import os
import caffe
class Detector(caffe.Net):
"""
Detector extends Net for windowed detection by a list of crops or
selective search proposals.
Parameters
----------
mean, input_scale, raw_scale, channel_swap : params for preprocessing
options.
context_pad : amount of surrounding context to take s.t. a `context_pad`
sized border of pixels in the network input image is context, as in
R-CNN feature extraction.
"""
def __init__(self, model_file, pretrained_file, mean=None,
input_scale=None, raw_scale=None, channel_swap=None,
context_pad=None):
caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST)
# configure pre-processing
in_ = self.inputs[0]
self.transformer = caffe.io.Transformer(
{in_: self.blobs[in_].data.shape})
self.transformer.set_transpose(in_, (2, 0, 1))
if mean is not None:
self.transformer.set_mean(in_, mean)
if input_scale is not None:
self.transformer.set_input_scale(in_, input_scale)
if raw_scale is not None:
self.transformer.set_raw_scale(in_, raw_scale)
if channel_swap is not None:
self.transformer.set_channel_swap(in_, channel_swap)
self.configure_crop(context_pad)
def detect_windows(self, images_windows):
"""
Do windowed detection over given images and windows. Windows are
extracted then warped to the input dimensions of the net.
Parameters
----------
images_windows: (image filename, window list) iterable.
context_crop: size of context border to crop in pixels.
Returns
-------
detections: list of {filename: image filename, window: crop coordinates,
predictions: prediction vector} dicts.
"""
# Extract windows.
window_inputs = []
for image_fname, windows in images_windows:
image = caffe.io.load_image(image_fname).astype(np.float32)
for window in windows:
window_inputs.append(self.crop(image, window))
# Run through the net (warping windows to input dimensions).
in_ = self.inputs[0]
caffe_in = np.zeros((len(window_inputs), window_inputs[0].shape[2])
+ self.blobs[in_].data.shape[2:],
dtype=np.float32)
for ix, window_in in enumerate(window_inputs):
caffe_in[ix] = self.transformer.preprocess(in_, window_in)
out = self.forward_all(**{in_: caffe_in})
predictions = out[self.outputs[0]].squeeze(axis=(2, 3))
# Package predictions with images and windows.
detections = []
ix = 0
for image_fname, windows in images_windows:
for window in windows:
detections.append({
'window': window,
'prediction': predictions[ix],
'filename': image_fname
})
ix += 1
return detections
def detect_selective_search(self, image_fnames):
"""
Do windowed detection over Selective Search proposals by extracting
the crop and warping to the input dimensions of the net.
Parameters
----------
image_fnames: list
Returns
-------
detections: list of {filename: image filename, window: crop coordinates,
predictions: prediction vector} dicts.
"""
import selective_search_ijcv_with_python as selective_search
# Make absolute paths so MATLAB can find the files.
image_fnames = [os.path.abspath(f) for f in image_fnames]
windows_list = selective_search.get_windows(
image_fnames,
cmd='selective_search_rcnn'
)
# Run windowed detection on the selective search list.
return self.detect_windows(zip(image_fnames, windows_list))
def crop(self, im, window):
"""
Crop a window from the image for detection. Include surrounding context
according to the `context_pad` configuration.
Parameters
----------
im: H x W x K image ndarray to crop.
window: bounding box coordinates as ymin, xmin, ymax, xmax.
Returns
-------
crop: cropped window.
"""
# Crop window from the image.
crop = im[window[0]:window[2], window[1]:window[3]]
if self.context_pad:
box = window.copy()
crop_size = self.blobs[self.inputs[0]].width # assumes square
scale = crop_size / (1. * crop_size - self.context_pad * 2)
# Crop a box + surrounding context.
half_h = (box[2] - box[0] + 1) / 2.
half_w = (box[3] - box[1] + 1) / 2.
center = (box[0] + half_h, box[1] + half_w)
scaled_dims = scale * np.array((-half_h, -half_w, half_h, half_w))
box = np.round(np.tile(center, 2) + scaled_dims)
full_h = box[2] - box[0] + 1
full_w = box[3] - box[1] + 1
scale_h = crop_size / full_h
scale_w = crop_size / full_w
pad_y = round(max(0, -box[0]) * scale_h) # amount out-of-bounds
pad_x = round(max(0, -box[1]) * scale_w)
# Clip box to image dimensions.
im_h, im_w = im.shape[:2]
box = np.clip(box, 0., [im_h, im_w, im_h, im_w])
clip_h = box[2] - box[0] + 1
clip_w = box[3] - box[1] + 1
assert(clip_h > 0 and clip_w > 0)
crop_h = round(clip_h * scale_h)
crop_w = round(clip_w * scale_w)
if pad_y + crop_h > crop_size:
crop_h = crop_size - pad_y
if pad_x + crop_w > crop_size:
crop_w = crop_size - pad_x
# collect with context padding and place in input
# with mean padding
context_crop = im[box[0]:box[2], box[1]:box[3]]
context_crop = caffe.io.resize_image(context_crop, (crop_h, crop_w))
crop = np.ones(self.crop_dims, dtype=np.float32) * self.crop_mean
crop[pad_y:(pad_y + crop_h), pad_x:(pad_x + crop_w)] = context_crop
return crop
def configure_crop(self, context_pad):
"""
Configure crop dimensions and amount of context for cropping.
If context is included, make the special input mean for context padding.
Parameters
----------
context_pad : amount of context for cropping.
"""
# crop dimensions
in_ = self.inputs[0]
tpose = self.transformer.transpose[in_]
inv_tpose = [tpose[t] for t in tpose]
self.crop_dims = np.array(self.blobs[in_].data.shape[1:])[inv_tpose]
#.transpose(inv_tpose)
# context padding
self.context_pad = context_pad
if self.context_pad:
in_ = self.inputs[0]
transpose = self.transformer.transpose.get(in_)
channel_order = self.transformer.channel_swap.get(in_)
raw_scale = self.transformer.raw_scale.get(in_)
# Padding context crops needs the mean in unprocessed input space.
mean = self.transformer.mean.get(in_)
if mean is not None:
inv_transpose = [transpose[t] for t in transpose]
crop_mean = mean.copy().transpose(inv_transpose)
if channel_order is not None:
channel_order_inverse = [channel_order.index(i)
for i in range(crop_mean.shape[2])]
crop_mean = crop_mean[:, :, channel_order_inverse]
if raw_scale is not None:
crop_mean /= raw_scale
self.crop_mean = crop_mean
else:
self.crop_mean = np.zeros(self.crop_dims, dtype=np.float32)
| colorization-master | caffe-colorization/python/caffe/detector.py |
from __future__ import division
import caffe
import numpy as np
def transplant(new_net, net):
for p in net.params:
if p not in new_net.params:
print 'dropping', p
continue
for i in range(len(net.params[p])):
if net.params[p][i].data.shape != new_net.params[p][i].data.shape:
print 'coercing', p, i, 'from', net.params[p][i].data.shape, 'to', new_net.params[p][i].data.shape
else:
print 'copying', p, i
new_net.params[p][i].data.flat = net.params[p][i].data.flat
def expand_score(new_net, new_layer, net, layer):
old_cl = net.params[layer][0].num
new_net.params[new_layer][0].data[:old_cl][...] = net.params[layer][0].data
new_net.params[new_layer][1].data[0,0,0,:old_cl][...] = net.params[layer][1].data
def upsample_filt(size):
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
return (1 - abs(og[0] - center) / factor) * \
(1 - abs(og[1] - center) / factor)
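# Worked example (illustrative): upsample_filt(4) gives factor=2 and center=1.5, so the
# 1-D weights are [0.25, 0.75, 0.75, 0.25] and the filter is their outer product, the
# standard bilinear kernel used by interp() below to initialize upsampling deconvolutions.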
def interp(net, layers):
for l in layers:
m, k, h, w = net.params[l][0].data.shape
if m != k and k != 1:
print 'input + output channels need to be the same or |output| == 1'
raise
if h != w:
print 'filters need to be square'
raise
filt = upsample_filt(h)
net.params[l][0].data[range(m), range(k), :, :] = filt
def upsample_filt2(size1,size2):
size = np.maximum(size1,size2)
factor = (size + 1) // 2
if size1 % 2 == 1:
center1 = factor - 1
else:
center1 = factor - 0.5
if size2 % 2 == 1:
center2 = factor - 1
else:
center2 = factor - 0.5
og = np.ogrid[:size1, :size2]
return (1 - abs(og[0] - center1) / factor) * \
(1 - abs(og[1] - center2) / factor)
def interp2(net, layers):
for l in layers:
m, k, h, w = net.params[l][0].data.shape
if m != k and k != 1:
print 'input + output channels need to be the same or |output| == 1'
raise
filt = upsample_filt2(h,w)
net.params[l][0].data[range(m), range(k), :, :] = filt
| colorization-master | caffe-colorization/python/caffe/surgery.py |
#!/usr/bin/env python
"""
Classifier is an image classifier specialization of Net.
"""
import numpy as np
import caffe
class Classifier(caffe.Net):
"""
Classifier extends Net for image class prediction
by scaling, center cropping, or oversampling.
Parameters
----------
image_dims : dimensions to scale input for cropping/sampling.
Default is to scale to net input size for whole-image crop.
mean, input_scale, raw_scale, channel_swap: params for
preprocessing options.
"""
def __init__(self, model_file, pretrained_file, image_dims=None,
mean=None, input_scale=None, raw_scale=None,
channel_swap=None):
caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST)
# configure pre-processing
in_ = self.inputs[0]
self.transformer = caffe.io.Transformer(
{in_: self.blobs[in_].data.shape})
self.transformer.set_transpose(in_, (2, 0, 1))
if mean is not None:
self.transformer.set_mean(in_, mean)
if input_scale is not None:
self.transformer.set_input_scale(in_, input_scale)
if raw_scale is not None:
self.transformer.set_raw_scale(in_, raw_scale)
if channel_swap is not None:
self.transformer.set_channel_swap(in_, channel_swap)
self.crop_dims = np.array(self.blobs[in_].data.shape[2:])
if not image_dims:
image_dims = self.crop_dims
self.image_dims = image_dims
def predict(self, inputs, oversample=True):
"""
Predict classification probabilities of inputs.
Parameters
----------
inputs : iterable of (H x W x K) input ndarrays.
oversample : boolean
average predictions across center, corners, and mirrors
when True (default). Center-only prediction when False.
Returns
-------
predictions: (N x C) ndarray of class probabilities for N images and C
classes.
"""
# Scale to standardize input dimensions.
input_ = np.zeros((len(inputs),
self.image_dims[0],
self.image_dims[1],
inputs[0].shape[2]),
dtype=np.float32)
for ix, in_ in enumerate(inputs):
input_[ix] = caffe.io.resize_image(in_, self.image_dims)
if oversample:
# Generate center, corner, and mirrored crops.
input_ = caffe.io.oversample(input_, self.crop_dims)
else:
# Take center crop.
center = np.array(self.image_dims) / 2.0
crop = np.tile(center, (1, 2))[0] + np.concatenate([
-self.crop_dims / 2.0,
self.crop_dims / 2.0
])
crop = crop.astype(int)  # slice indices must be integers
input_ = input_[:, crop[0]:crop[2], crop[1]:crop[3], :]
# Classify
caffe_in = np.zeros(np.array(input_.shape)[[0, 3, 1, 2]],
dtype=np.float32)
for ix, in_ in enumerate(input_):
caffe_in[ix] = self.transformer.preprocess(self.inputs[0], in_)
out = self.forward_all(**{self.inputs[0]: caffe_in})
predictions = out[self.outputs[0]]
# For oversampling, average predictions across crops.
if oversample:
predictions = predictions.reshape((len(predictions) // 10, 10, -1))
predictions = predictions.mean(1)
return predictions
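# Illustrative usage sketch (not part of the original module). The prototxt,
# caffemodel, and image paths below are hypothetical placeholders.
def _example_classifier_usage():
    mean = np.array([104.0, 117.0, 123.0])  # assumed BGR mean pixel
    net = Classifier('deploy.prototxt', 'weights.caffemodel',
                     mean=mean, channel_swap=(2, 1, 0),
                     raw_scale=255, image_dims=(256, 256))
    image = caffe.io.load_image('example.jpg')
    probabilities = net.predict([image])  # (1 x C) class probabilities
    return probabilities[0].argmax()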
| colorization-master | caffe-colorization/python/caffe/classifier.py |
from __future__ import print_function
import caffe
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class Solver:
def __init__(self, prototxt, final_file=None, snap_file=None, solver='Adam', log_file=None, **kwargs):
self.running = False
self.force_snapshot = False
self.last_interrupt = 0
self.final_file, self.snap_file = final_file, snap_file
if final_file is None:
print("Are you sure you dont want to save the model?")
solver_str = 'train_net: "%s"\n'%prototxt
if solver is not None:
# Get the cases right
if solver.upper() == "ADAM": solver = "Adam"
if solver.upper() == "ADADELTA": solver = "AdaDelta"
if solver.upper() == "ADAGRAD": solver = "AdaGrad"
if solver.upper() == "NESTEROV": solver = "Nesterov"
if solver.upper() == "RMSPROP": solver = "RMSProp"
if solver.upper() == "SGD": solver = "SGD"
solver_str += 'type: "%s"\n'%solver
if solver == "RMSProp":
if 'rms_decay' not in kwargs:
kwargs['rms_decay'] = 0.9
if solver == "SGD":
if 'momentum' not in kwargs:
kwargs['momentum'] = 0.9
if solver == "Adam":
if 'momentum' not in kwargs:
kwargs['momentum'] = 0.9
if 'momentum2' not in kwargs:
kwargs['momentum2'] = 0.99
if 'base_lr' not in kwargs: kwargs['base_lr'] = 0.001
if 'lr_policy' not in kwargs: kwargs['lr_policy'] = 'fixed'
for i in kwargs:
if isinstance(kwargs[i], str):
solver_str += '%s: "%s"\n'%(i, kwargs[i])
elif isinstance(kwargs[i], int):
solver_str += '%s: %d\n'%(i, kwargs[i])
else:
solver_str += '%s: %f\n'%(i, kwargs[i])
self.solver = caffe.get_solver_from_string(solver_str)
self.solver.add_callback(self.on_start, self.on_gradient)
self.log = None
if log_file:
self.log = open(log_file, 'w')
def on_start(self):
pass
def on_gradient(self):
pass
def on_display(self):
pass
def after_step(self):
pass
def stop(self):
print("Solver shutting down ...")
self.running = False
def try_stop(self):
from time import time
if time() - self.last_interrupt < 5:
return self.stop()
print("Snapshoting (To exit interrupt twice within 5 sec)")
self.force_snapshot = True
self.last_interrupt = time()
def run(self, nit, show_debug=False, print_interval=1, snap_interval=60):
import signal
from time import time
import numpy as np
self.running = True
avg_weight = 0.95
# Register interrupt
signal.signal(signal.SIGINT, lambda *a:self.try_stop())
# Train
s = self.solver
s.net.save(self.snap_file)
loss_blobs = [b for b in s.net.blob_loss_weights
if s.net.blob_loss_weights[b]!=0]
loss_weight = np.array([s.net.blob_loss_weights[b] for b in loss_blobs])
t0, t1, last_it = 0, 0, 0
n = np.zeros(len(loss_blobs))
sl = np.zeros(len(loss_blobs))
for it in range(nit):
s.step(1)
self.after_step()
l = np.array([np.sum(s.net.blobs[b].data) for b in loss_blobs])
n = avg_weight*n+1
sl = avg_weight*sl+l
ll = sl / n
if time()-t0 > print_interval:
print('[%s% 5d%s it | %s% 3d%s it / sec]\tl: %s%10.5g%s \tal: %s%10.5g%s [%s]'%(bcolors.BOLD, it, bcolors.ENDC, bcolors.BOLD, (it-last_it)/print_interval, bcolors.ENDC, bcolors.OKGREEN, l.dot(loss_weight), bcolors.ENDC, bcolors.OKGREEN, ll.dot(loss_weight), bcolors.ENDC, ' '.join(['%s = %7.2g'%(b,v) for b,v in zip(loss_blobs,ll)])))
if self.log is not None:
print('[% 5d it | % 3d it / sec]\tl: %10.5g [%s]'%(it, (it-last_it)/print_interval, l.dot(loss_weight), ' '.join(['%s = %7.2g'%(b,v) for b,v in zip(loss_blobs,l)])),
' \tal: %10.5g [%s]'%(ll.dot(loss_weight), ' '.join(['%s = %7.2g'%(b,v) for b,v in zip(loss_blobs,ll)])),file=self.log)
self.log.flush()
if show_debug:
print(' '*10+'\t',' '.join(['% 5s'%b[:5] for b in s.net.blobs]))
print(' '*6+'data\t', ' '.join(['%5.1g'%np.sum(np.abs(s.net.blobs[b].data)) for b in s.net.blobs]))
print(' '*6+'diff\t', ' '.join(['%5.1g'%np.sum(np.abs(s.net.blobs[b].diff)) for b in s.net.blobs]))
lrs = list(zip(s.net.layers, s.net._layer_names))
print(' '*10+'\t',' '.join(['% 11s'%n[:5] for l,n in lrs if len(l.blobs)>0]))
print(' '*6+'data\t', ' '.join(['%5.1g/%5.1g'%(np.sum(np.abs(l.blobs[0].data)),np.sum(np.abs(l.blobs[-1].data))) for l,n in lrs if len(l.blobs)>0]))
print(' '*6+'diff\t', ' '.join(['%5.1g/%5.1g'%(np.sum(np.abs(l.blobs[0].diff)),np.sum(np.abs(l.blobs[-1].diff))) for l,n in lrs if len(l.blobs)>0]))
print()
self.on_display()
import sys
sys.stdout.flush()
last_it = it
t0 = time()
if self.snap_file is not None and (time()-t1 > snap_interval or self.force_snapshot):
# Snapshot
s.net.save(self.snap_file)
self.force_snapshot = False
t1 = time()
if not self.running:
break
if self.final_file is not None:
s.net.save(self.final_file)
signal.signal(signal.SIGINT, signal.SIG_DFL)
return s.net
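# Illustrative usage sketch (not part of the original module). The prototxt and
# output file names are hypothetical placeholders; extra keyword arguments such
# as base_lr are written into the generated solver string as shown above.
def _example_solver_usage():
    s = Solver('train.prototxt', final_file='final.caffemodel',
               snap_file='snap.caffemodel', solver='Adam',
               base_lr=1e-4, log_file='train.log')
    return s.run(1000, show_debug=False, print_interval=5)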
| colorization-master | caffe-colorization/python/caffe/solver.py |
from __future__ import print_function
from time import time
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def time_net(net, NIT=100, top=5):
import numpy as np
import caffe
if hasattr(caffe, 'wait_for_cuda'):
wait_for_cuda = caffe.wait_for_cuda
else:
print( 'wait_for_cuda function recommended for accurate timing (https://github.com/philkr/caffe/tree/wait_for_cuda)' )
def wait_for_cuda(): pass
L = net._layer_names
fwd = {n:[] for n in L}
bck = {n:[] for n in L}
T0 = time()
for i in range(NIT):
for j,n in enumerate(L):
t0 = time()
net._forward(j,j)
wait_for_cuda()
fwd[n].append(time()-t0)
for j,n in list(enumerate(L))[::-1]:
t0 = time()
net._backward(j,j)
wait_for_cuda()
bck[n].append(time()-t0)
top = min(top, len(fwd)+len(bck))
T = sorted([np.mean(v) for v in fwd.values()] + [np.mean(v) for v in bck.values()])[-top]
T0 = time()-T0
print("%s%0.1f%s it / sec [%s%0.1f%s ms / it]"%(bcolors.BOLD+bcolors().FAIL, NIT / T0, bcolors.ENDC, bcolors.BOLD+bcolors().FAIL, 1000*T0 / NIT, bcolors.ENDC))
for n in L:
cf, cb = bcolors.OKGREEN, bcolors.OKGREEN
if np.mean(fwd[n]) >= T: cf = bcolors.BOLD+bcolors().FAIL
if np.mean(bck[n]) >= T: cb = bcolors.BOLD+bcolors().FAIL
print(' %30s \t %s%0.2f \261 %0.1f%s ms \t %s%0.2f \261 %0.1f%s ms'%(n, cf, 1000*np.mean(fwd[n]), 1000*np.std(fwd[n]), bcolors.ENDC, cb, 1000*np.mean(bck[n]), 1000*np.std(bck[n]), bcolors.ENDC))
if __name__ == "__main__":
import argparse
from os import path
import caffe
parser = argparse.ArgumentParser(description="Visualize decompositions on sintel")
parser.add_argument('input_dir', help='input directory')
parser.add_argument('-n', type=int, default=100, help='Number of iterations')
parser.add_argument('-t', type=int, default=5, help='Highlight the top t times')
parser.add_argument('-gpu', type=int, help='What GPU do we test on')
args = parser.parse_args()
caffe.set_mode_gpu()
if args.gpu is not None:
caffe.set_device(args.gpu)
if path.isfile(args.input_dir):
net = caffe.Net(args.input_dir, caffe.TRAIN)
else:
net = caffe.Net(args.input_dir+'trainval.prototxt', caffe.TRAIN)
time_net(net)
| colorization-master | caffe-colorization/python/caffe/timer.py |
"""Python net specification.
This module provides a way to write nets directly in Python, using a natural,
functional style. See examples/pycaffe/caffenet.py for an example.
Currently this works as a thin wrapper around the Python protobuf interface,
with layers and parameters automatically generated for the "layers" and
"params" pseudo-modules, which are actually objects using __getattr__ magic
to generate protobuf messages.
Note that when using to_proto or Top.to_proto, names of intermediate blobs will
be automatically generated. To explicitly specify blob names, use the NetSpec
class -- assign to its attributes directly to name layers, and call
NetSpec.to_proto to serialize all assigned layers.
This interface is expected to continue to evolve as Caffe gains new capabilities
for specifying nets. In particular, the automatically generated layer names
are not guaranteed to be forward-compatible.
"""
from collections import OrderedDict, Counter
from .proto import caffe_pb2
from google import protobuf
import six
def param_name_dict():
"""Find out the correspondence between layer names and parameter names."""
layer = caffe_pb2.LayerParameter()
# get all parameter names (typically underscore case) and corresponding
# type names (typically camel case), which contain the layer names
# (note that not all parameters correspond to layers, but we'll ignore that)
param_names = [s for s in dir(layer) if s.endswith('_param')]
param_type_names = [type(getattr(layer, s)).__name__ for s in param_names]
# strip the final '_param' or 'Parameter'
param_names = [s[:-len('_param')] for s in param_names]
param_type_names = [s[:-len('Parameter')] for s in param_type_names]
return dict(zip(param_type_names, param_names))
def to_proto(*tops):
"""Generate a NetParameter that contains all layers needed to compute
all arguments."""
layers = OrderedDict()
autonames = Counter()
for top in tops:
top.fn._to_proto(layers, {}, autonames)
net = caffe_pb2.NetParameter()
net.layer.extend(layers.values())
return net
def assign_proto(proto, name, val):
"""Assign a Python object to a protobuf message, based on the Python
type (in recursive fashion). Lists become repeated fields/messages, dicts
become messages, and other types are assigned directly. For convenience,
repeated fields whose values are not lists are converted to single-element
lists; e.g., `my_repeated_int_field=3` is converted to
`my_repeated_int_field=[3]`."""
is_repeated_field = hasattr(getattr(proto, name), 'extend')
if is_repeated_field and not isinstance(val, list):
val = [val]
if isinstance(val, list):
if isinstance(val[0], dict):
for item in val:
proto_item = getattr(proto, name).add()
for k, v in six.iteritems(item):
assign_proto(proto_item, k, v)
else:
getattr(proto, name).extend(val)
elif isinstance(val, dict):
for k, v in six.iteritems(val):
assign_proto(getattr(proto, name), k, v)
else:
setattr(proto, name, val)
class Top(object):
"""A Top specifies a single output blob (which could be one of several
produced by a layer.)"""
def __init__(self, fn, n):
self.fn = fn
self.n = n
def to_proto(self):
"""Generate a NetParameter that contains all layers needed to compute
this top."""
return to_proto(self)
def _to_proto(self, layers, names, autonames):
return self.fn._to_proto(layers, names, autonames)
class Function(object):
"""A Function specifies a layer, its parameters, and its inputs (which
are Tops from other layers)."""
def __init__(self, type_name, inputs, params):
self.type_name = type_name
self.inputs = inputs
self.params = params
self.ntop = self.params.get('ntop', 1)
# use del to make sure kwargs are not double-processed as layer params
if 'ntop' in self.params:
del self.params['ntop']
self.in_place = self.params.get('in_place', False)
if 'in_place' in self.params:
del self.params['in_place']
self.tops = tuple(Top(self, n) for n in range(self.ntop))
def _get_name(self, names, autonames):
if self not in names and self.ntop > 0:
names[self] = self._get_top_name(self.tops[0], names, autonames)
elif self not in names:
autonames[self.type_name] += 1
names[self] = self.type_name + str(autonames[self.type_name])
return names[self]
def _get_top_name(self, top, names, autonames):
if top not in names:
autonames[top.fn.type_name] += 1
names[top] = top.fn.type_name + str(autonames[top.fn.type_name])
return names[top]
def _to_proto(self, layers, names, autonames):
if self in layers:
return
bottom_names = []
for inp in self.inputs:
inp._to_proto(layers, names, autonames)
bottom_names.append(layers[inp.fn].top[inp.n])
layer = caffe_pb2.LayerParameter()
layer.type = self.type_name
layer.bottom.extend(bottom_names)
if self.in_place:
layer.top.extend(layer.bottom)
else:
for top in self.tops:
layer.top.append(self._get_top_name(top, names, autonames))
layer.name = self._get_name(names, autonames)
for k, v in six.iteritems(self.params):
# special case to handle generic *params
if k.endswith('param'):
assign_proto(layer, k, v)
else:
try:
assign_proto(getattr(layer,
_param_names[self.type_name] + '_param'), k, v)
except (AttributeError, KeyError):
assign_proto(layer, k, v)
layers[self] = layer
class NetSpec(object):
"""A NetSpec contains a set of Tops (assigned directly as attributes).
Calling NetSpec.to_proto generates a NetParameter containing all of the
layers needed to produce all of the assigned Tops, using the assigned
names."""
def __init__(self):
super(NetSpec, self).__setattr__('tops', OrderedDict())
def __setattr__(self, name, value):
self.tops[name] = value
def __getattr__(self, name):
return self.tops[name]
def __setitem__(self, key, value):
self.__setattr__(key, value)
def __getitem__(self, item):
return self.__getattr__(item)
def to_proto(self):
names = {v: k for k, v in six.iteritems(self.tops)}
autonames = Counter()
layers = OrderedDict()
for name, top in six.iteritems(self.tops):
top._to_proto(layers, names, autonames)
net = caffe_pb2.NetParameter()
net.layer.extend(layers.values())
return net
class Layers(object):
"""A Layers object is a pseudo-module which generates functions that specify
layers; e.g., Layers().Convolution(bottom, kernel_size=3) will produce a Top
specifying a 3x3 convolution applied to bottom."""
def __getattr__(self, name):
def layer_fn(*args, **kwargs):
fn = Function(name, args, kwargs)
if fn.ntop == 0:
return fn
elif fn.ntop == 1:
return fn.tops[0]
else:
return fn.tops
return layer_fn
class Parameters(object):
"""A Parameters object is a pseudo-module which generates constants used
in layer parameters; e.g., Parameters().Pooling.MAX is the value used
to specify max pooling."""
def __getattr__(self, name):
class Param:
def __getattr__(self, param_name):
return getattr(getattr(caffe_pb2, name + 'Parameter'), param_name)
return Param()
_param_names = param_name_dict()
layers = Layers()
params = Parameters()
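# Illustrative sketch (not part of the original module): a small net written with
# the functional interface above; blob names and layer parameters are arbitrary.
def _example_net_spec():
    n = NetSpec()
    n.data = layers.DummyData(shape=dict(dim=[1, 3, 32, 32]))
    n.conv1 = layers.Convolution(n.data, kernel_size=3, num_output=8, pad=1)
    n.pool1 = layers.Pooling(n.conv1, kernel_size=2, stride=2,
                             pool=params.Pooling.MAX)
    n.ip1 = layers.InnerProduct(n.pool1, num_output=10)
    return n.to_proto()  # NetParameter with named layers and blobs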
| colorization-master | caffe-colorization/python/caffe/net_spec.py |
import numpy as np
import skimage.io
from scipy.ndimage import zoom
from skimage.transform import resize
try:
# Python3 will most likely not be able to load protobuf
from caffe.proto import caffe_pb2
except:
import sys
if sys.version_info >= (3, 0):
print("Failed to include caffe_pb2, things might go wrong!")
else:
raise
## proto / datum / ndarray conversion
def blobproto_to_array(blob, return_diff=False):
"""
Convert a blob proto to an array. By default, return the data; if
return_diff is True, return the diff instead.
"""
# Read the data into an array
if return_diff:
data = np.array(blob.diff)
else:
data = np.array(blob.data)
# Reshape the array
if blob.HasField('num') or blob.HasField('channels') or blob.HasField('height') or blob.HasField('width'):
# Use legacy 4D shape
return data.reshape(blob.num, blob.channels, blob.height, blob.width)
else:
return data.reshape(blob.shape.dim)
def array_to_blobproto(arr, diff=None):
"""Converts a N-dimensional array to blob proto. If diff is given, also
convert the diff. You need to make sure that arr and diff have the same
shape, and this function does not do sanity check.
"""
blob = caffe_pb2.BlobProto()
blob.shape.dim.extend(arr.shape)
blob.data.extend(arr.astype(float).flat)
if diff is not None:
blob.diff.extend(diff.astype(float).flat)
return blob
def arraylist_to_blobprotovecor_str(arraylist):
"""Converts a list of arrays to a serialized blobprotovec, which could be
then passed to a network for processing.
"""
vec = caffe_pb2.BlobProtoVector()
vec.blobs.extend([array_to_blobproto(arr) for arr in arraylist])
return vec.SerializeToString()
def blobprotovector_str_to_arraylist(str):
"""Converts a serialized blobprotovec to a list of arrays.
"""
vec = caffe_pb2.BlobProtoVector()
vec.ParseFromString(str)
return [blobproto_to_array(blob) for blob in vec.blobs]
def array_to_datum(arr, label=None):
"""Converts a 3-dimensional array to datum. If the array has dtype uint8,
the output data will be encoded as a string. Otherwise, the output data
will be stored in float format.
"""
if arr.ndim != 3:
raise ValueError('Incorrect array shape.')
datum = caffe_pb2.Datum()
datum.channels, datum.height, datum.width = arr.shape
if arr.dtype == np.uint8:
datum.data = arr.tostring()
else:
datum.float_data.extend(arr.flat)
if label is not None:
datum.label = label
return datum
def datum_to_array(datum):
"""Converts a datum to an array. Note that the label is not returned,
as one can easily get it by calling datum.label.
"""
if len(datum.data):
return np.fromstring(datum.data, dtype=np.uint8).reshape(
datum.channels, datum.height, datum.width)
else:
return np.array(datum.float_data).astype(float).reshape(
datum.channels, datum.height, datum.width)
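# Illustrative round trip (not part of the original file):
# >>> arr = np.arange(12, dtype=np.uint8).reshape(3, 2, 2)
# >>> datum = array_to_datum(arr, label=1)
# >>> np.array_equal(datum_to_array(datum), arr)
# True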
## Pre-processing
class Transformer:
"""
Transform input for feeding into a Net.
Note: this is mostly for illustrative purposes and it is likely better
to define your own input preprocessing routine for your needs.
Parameters
----------
net : a Net for which the input should be prepared
"""
def __init__(self, inputs):
self.inputs = inputs
self.transpose = {}
self.channel_swap = {}
self.raw_scale = {}
self.mean = {}
self.input_scale = {}
def __check_input(self, in_):
if in_ not in self.inputs:
raise Exception('{} is not one of the net inputs: {}'.format(
in_, self.inputs))
def preprocess(self, in_, data):
"""
Format input for Caffe:
- convert to single
- resize to input dimensions (preserving number of channels)
- transpose dimensions to K x H x W
- reorder channels (for instance color to BGR)
- scale raw input (e.g. from [0, 1] to [0, 255] for ImageNet models)
- subtract mean
- scale feature
Parameters
----------
in_ : name of input blob to preprocess for
data : (H' x W' x K) ndarray
Returns
-------
caffe_in : (K x H x W) ndarray for input to a Net
"""
self.__check_input(in_)
caffe_in = data.astype(np.float32, copy=False)
transpose = self.transpose.get(in_)
channel_swap = self.channel_swap.get(in_)
raw_scale = self.raw_scale.get(in_)
mean = self.mean.get(in_)
input_scale = self.input_scale.get(in_)
in_dims = self.inputs[in_][2:]
if caffe_in.shape[:2] != in_dims:
caffe_in = resize_image(caffe_in, in_dims)
if transpose is not None:
caffe_in = caffe_in.transpose(transpose)
if channel_swap is not None:
caffe_in = caffe_in[channel_swap, :, :]
if raw_scale is not None:
caffe_in *= raw_scale
if mean is not None:
caffe_in -= mean
if input_scale is not None:
caffe_in *= input_scale
return caffe_in
def deprocess(self, in_, data):
"""
Invert Caffe formatting; see preprocess().
"""
self.__check_input(in_)
decaf_in = data.copy().squeeze()
transpose = self.transpose.get(in_)
channel_swap = self.channel_swap.get(in_)
raw_scale = self.raw_scale.get(in_)
mean = self.mean.get(in_)
input_scale = self.input_scale.get(in_)
if input_scale is not None:
decaf_in /= input_scale
if mean is not None:
decaf_in += mean
if raw_scale is not None:
decaf_in /= raw_scale
if channel_swap is not None:
decaf_in = decaf_in[np.argsort(channel_swap), :, :]
if transpose is not None:
decaf_in = decaf_in.transpose(np.argsort(transpose))
return decaf_in
def set_transpose(self, in_, order):
"""
Set the order of dimensions for transposing the input, e.g. (2, 0, 1)
maps an H x W x K array to the K x H x W layout that Caffe expects.
Parameters
----------
in_ : which input to assign this channel order
order : the order to transpose the dimensions
"""
self.__check_input(in_)
if len(order) != len(self.inputs[in_]) - 1:
raise Exception('Transpose order needs to have the same number of '
'dimensions as the input.')
self.transpose[in_] = order
def set_channel_swap(self, in_, order):
"""
Set the input channel order for e.g. RGB to BGR conversion
as needed for the reference ImageNet model.
N.B. this assumes the channels are the first dimension AFTER transpose.
Parameters
----------
in_ : which input to assign this channel order
order : the order to take the channels.
(2,1,0) maps RGB to BGR for example.
"""
self.__check_input(in_)
if len(order) != self.inputs[in_][1]:
raise Exception('Channel swap needs to have the same number of '
'dimensions as the input channels.')
self.channel_swap[in_] = order
def set_raw_scale(self, in_, scale):
"""
Set the scale of raw features s.t. the input blob = input * scale.
While Python represents images in [0, 1], certain Caffe models
like CaffeNet and AlexNet represent images in [0, 255] so the raw_scale
of these models must be 255.
Parameters
----------
in_ : which input to assign this scale factor
scale : scale coefficient
"""
self.__check_input(in_)
self.raw_scale[in_] = scale
def set_mean(self, in_, mean):
"""
Set the mean to subtract for centering the data.
Parameters
----------
in_ : which input to assign this mean.
mean : mean ndarray (input dimensional or broadcastable)
"""
self.__check_input(in_)
ms = mean.shape
if mean.ndim == 1:
# broadcast channels
if ms[0] != self.inputs[in_][1]:
raise ValueError('Mean channels incompatible with input.')
mean = mean[:, np.newaxis, np.newaxis]
else:
# elementwise mean
if len(ms) == 2:
ms = (1,) + ms
if len(ms) != 3:
raise ValueError('Mean shape invalid')
if ms != self.inputs[in_][1:]:
raise ValueError('Mean shape incompatible with input shape.')
self.mean[in_] = mean
def set_input_scale(self, in_, scale):
"""
Set the scale of preprocessed inputs s.t. the blob = blob * scale.
N.B. input_scale is done AFTER mean subtraction and other preprocessing
while raw_scale is done BEFORE.
Parameters
----------
in_ : which input to assign this scale factor
scale : scale coefficient
"""
self.__check_input(in_)
self.input_scale[in_] = scale
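# Illustrative usage sketch (not part of the original file); the blob name 'data'
# and the 227x227 input shape are arbitrary examples.
# >>> transformer = Transformer({'data': (1, 3, 227, 227)})
# >>> transformer.set_transpose('data', (2, 0, 1))      # H x W x K -> K x H x W
# >>> transformer.set_channel_swap('data', (2, 1, 0))   # RGB -> BGR
# >>> transformer.set_raw_scale('data', 255)            # [0, 1] -> [0, 255]
# >>> transformer.set_mean('data', np.array([104.0, 117.0, 123.0]))
# >>> image = load_image('example.jpg')  # H x W x 3 float image in [0, 1]
# >>> blob = transformer.preprocess('data', image)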
## Image IO
def load_image(filename, color=True):
"""
Load an image converting from grayscale or alpha as needed.
Parameters
----------
filename : string
color : boolean
flag for color format. True (default) loads as RGB while False
loads as intensity (if image is already grayscale).
Returns
-------
image : an image with type np.float32 in range [0, 1]
of size (H x W x 3) in RGB or
of size (H x W x 1) in grayscale.
"""
img = skimage.img_as_float(skimage.io.imread(filename, as_grey=not color)).astype(np.float32)
if img.ndim == 2:
img = img[:, :, np.newaxis]
if color:
img = np.tile(img, (1, 1, 3))
elif img.shape[2] == 4:
img = img[:, :, :3]
return img
def resize_image(im, new_dims, interp_order=1):
"""
Resize an image array with interpolation.
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
if im.shape[-1] == 1 or im.shape[-1] == 3:
im_min, im_max = im.min(), im.max()
if im_max > im_min:
# skimage is fast but only understands {1,3} channel images
# in [0, 1].
im_std = (im - im_min) / (im_max - im_min)
resized_std = resize(im_std, new_dims, order=interp_order)
resized_im = resized_std * (im_max - im_min) + im_min
else:
# the image is a constant -- avoid divide by 0
ret = np.empty((new_dims[0], new_dims[1], im.shape[-1]),
dtype=np.float32)
ret.fill(im_min)
return ret
else:
# ndimage interpolates anything but more slowly.
scale = tuple(np.array(new_dims, dtype=float) / np.array(im.shape[:2]))
resized_im = zoom(im, scale + (1,), order=interp_order)
return resized_im.astype(np.float32)
def oversample(images, crop_dims):
"""
Crop images into the four corners, center, and their mirrored versions.
Parameters
----------
image : iterable of (H x W x K) ndarrays
crop_dims : (height, width) tuple for the crops.
Returns
-------
crops : (10*N x H x W x K) ndarray of crops for number of inputs N.
"""
# Dimensions and center.
im_shape = np.array(images[0].shape)
crop_dims = np.array(crop_dims)
im_center = im_shape[:2] / 2.0
# Make crop coordinates
h_indices = (0, im_shape[0] - crop_dims[0])
w_indices = (0, im_shape[1] - crop_dims[1])
crops_ix = np.empty((5, 4), dtype=int)
curr = 0
for i in h_indices:
for j in w_indices:
crops_ix[curr] = (i, j, i + crop_dims[0], j + crop_dims[1])
curr += 1
crops_ix[4] = np.tile(im_center, (1, 2)) + np.concatenate([
-crop_dims / 2.0,
crop_dims / 2.0
])
crops_ix = np.tile(crops_ix, (2, 1))
# Extract crops
crops = np.empty((10 * len(images), crop_dims[0], crop_dims[1],
im_shape[-1]), dtype=np.float32)
ix = 0
for im in images:
for crop in crops_ix:
crops[ix] = im[crop[0]:crop[2], crop[1]:crop[3], :]
ix += 1
crops[ix-5:ix] = crops[ix-5:ix, :, ::-1, :] # flip for mirrors
return crops
| colorization-master | caffe-colorization/python/caffe/io.py |
"""
Wrap the internal caffe C++ module (_caffe.so) with a clean, Pythonic
interface.
"""
from collections import OrderedDict
try:
from itertools import izip_longest
except:
from itertools import zip_longest as izip_longest
import numpy as np
from ._caffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, \
RMSPropSolver, AdaDeltaSolver, AdamSolver
import caffe.io
import six
# We directly update methods from Net here (rather than using composition or
# inheritance) so that nets created by caffe (e.g., by SGDSolver) will
# automatically have the improved interface.
@property
def _Net_blobs(self):
"""
An OrderedDict (bottom to top, i.e., input to output) of network
blobs indexed by name
"""
if not hasattr(self, '_blobs_dict'):
self._blobs_dict = OrderedDict(zip(self._blob_names, self._blobs))
return self._blobs_dict
@property
def _Net_blob_loss_weights(self):
"""
An OrderedDict (bottom to top, i.e., input to output) of network
blob loss weights indexed by name
"""
if not hasattr(self, '_blob_loss_weights_dict'):
self._blob_loss_weights_dict = OrderedDict(zip(self._blob_names,
self._blob_loss_weights))
return self._blob_loss_weights_dict
@property
def _Net_params(self):
"""
An OrderedDict (bottom to top, i.e., input to output) of network
parameters indexed by name; each is a list of multiple blobs (e.g.,
weights and biases)
"""
if not hasattr(self, '_params_dict'):
self._params_dict = OrderedDict([(name, lr.blobs)
for name, lr in zip(
self._layer_names, self.layers)
if len(lr.blobs) > 0])
return self._params_dict
@property
def _Net_inputs(self):
if not hasattr(self, '_input_list'):
keys = list(self.blobs.keys())
self._input_list = [keys[i] for i in self._inputs]
return self._input_list
@property
def _Net_outputs(self):
if not hasattr(self, '_output_list'):
keys = list(self.blobs.keys())
self._output_list = [keys[i] for i in self._outputs]
return self._output_list
def _Net_forward(self, blobs=None, start=None, end=None, **kwargs):
"""
Forward pass: prepare inputs and run the net forward.
Parameters
----------
blobs : list of blobs to return in addition to output blobs.
kwargs : Keys are input blob names and values are blob ndarrays.
For formatting inputs for Caffe, see caffe.io.Transformer.preprocess().
If None, input is taken from data layers.
start : optional name of layer at which to begin the forward pass
end : optional name of layer at which to finish the forward pass
(inclusive)
Returns
-------
outs : {blob name: blob ndarray} dict.
"""
if blobs is None:
blobs = []
if start is not None:
start_ind = list(self._layer_names).index(start)
else:
start_ind = 0
if end is not None:
end_ind = list(self._layer_names).index(end)
outputs = set([end] + blobs)
else:
end_ind = len(self.layers) - 1
outputs = set(self.outputs + blobs)
if kwargs:
if set(kwargs.keys()) != set(self.inputs):
raise Exception('Input blob arguments do not match net inputs.')
# Set input according to defined shapes and make arrays single and
# C-contiguous as Caffe expects.
for in_, blob in six.iteritems(kwargs):
if blob.shape[0] != self.blobs[in_].shape[0]:
raise Exception('Input is not batch sized')
self.blobs[in_].data[...] = blob
self._forward(start_ind, end_ind)
# Unpack blobs to extract
return {out: self.blobs[out].data for out in outputs}
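# Illustrative usage sketch (not part of the original file); the prototxt,
# weights file, and blob names ('data', 'prob') are hypothetical placeholders.
# >>> net = Net('deploy.prototxt', 'weights.caffemodel', caffe.TEST)
# >>> batch = np.zeros(net.blobs['data'].data.shape, dtype=np.float32)
# >>> outs = net.forward(data=batch)   # {output blob name: ndarray}
# >>> outs['prob'].shape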
def _Net_backward(self, diffs=None, start=None, end=None, **kwargs):
"""
Backward pass: prepare diffs and run the net backward.
Parameters
----------
diffs : list of diffs to return in addition to bottom diffs.
kwargs : Keys are output blob names and values are diff ndarrays.
If None, top diffs are taken from forward loss.
start : optional name of layer at which to begin the backward pass
end : optional name of layer at which to finish the backward pass
(inclusive)
Returns
-------
outs: {blob name: diff ndarray} dict.
"""
if diffs is None:
diffs = []
if start is not None:
start_ind = list(self._layer_names).index(start)
else:
start_ind = len(self.layers) - 1
if end is not None:
end_ind = list(self._layer_names).index(end)
outputs = set([end] + diffs)
else:
end_ind = 0
outputs = set(self.inputs + diffs)
if kwargs:
if set(kwargs.keys()) != set(self.outputs):
raise Exception('Top diff arguments do not match net outputs.')
# Set top diffs according to defined shapes and make arrays single and
# C-contiguous as Caffe expects.
for top, diff in six.iteritems(kwargs):
if diff.shape[0] != self.blobs[top].shape[0]:
raise Exception('Diff is not batch sized')
self.blobs[top].diff[...] = diff
self._backward(start_ind, end_ind)
# Unpack diffs to extract
return {out: self.blobs[out].diff for out in outputs}
def _Net_forward_all(self, blobs=None, **kwargs):
"""
Run net forward in batches.
Parameters
----------
blobs : list of blobs to extract as in forward()
kwargs : Keys are input blob names and values are blob ndarrays.
Refer to forward().
Returns
-------
all_outs : {blob name: list of blobs} dict.
"""
# Collect outputs from batches
all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
for batch in self._batch(kwargs):
outs = self.forward(blobs=blobs, **batch)
for out, out_blob in six.iteritems(outs):
all_outs[out].extend(out_blob.copy())
# Package in ndarray.
for out in all_outs:
all_outs[out] = np.asarray(all_outs[out])
# Discard padding.
pad = len(six.next(six.itervalues(all_outs))) - len(six.next(six.itervalues(kwargs)))
if pad:
for out in all_outs:
all_outs[out] = all_outs[out][:-pad]
return all_outs
def _Net_forward_backward_all(self, blobs=None, diffs=None, **kwargs):
"""
Run net forward + backward in batches.
Parameters
----------
blobs: list of blobs to extract as in forward()
diffs: list of diffs to extract as in backward()
kwargs: Keys are input (for forward) and output (for backward) blob names
and values are ndarrays. Refer to forward() and backward().
Prefilled variants are called for lack of input or output blobs.
Returns
-------
all_blobs: {blob name: blob ndarray} dict.
all_diffs: {blob name: diff ndarray} dict.
"""
# Batch blobs and diffs.
all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
all_diffs = {diff: [] for diff in set(self.inputs + (diffs or []))}
forward_batches = self._batch({in_: kwargs[in_]
for in_ in self.inputs if in_ in kwargs})
backward_batches = self._batch({out: kwargs[out]
for out in self.outputs if out in kwargs})
# Collect outputs from batches (and heed lack of forward/backward batches).
for fb, bb in izip_longest(forward_batches, backward_batches, fillvalue={}):
batch_blobs = self.forward(blobs=blobs, **fb)
batch_diffs = self.backward(diffs=diffs, **bb)
for out, out_blobs in six.iteritems(batch_blobs):
all_outs[out].extend(out_blobs.copy())
for diff, out_diffs in six.iteritems(batch_diffs):
all_diffs[diff].extend(out_diffs.copy())
# Package in ndarray.
for out, diff in zip(all_outs, all_diffs):
all_outs[out] = np.asarray(all_outs[out])
all_diffs[diff] = np.asarray(all_diffs[diff])
# Discard padding at the end and package in ndarray.
pad = len(six.next(six.itervalues(all_outs))) - len(six.next(six.itervalues(kwargs)))
if pad:
for out, diff in zip(all_outs, all_diffs):
all_outs[out] = all_outs[out][:-pad]
all_diffs[diff] = all_diffs[diff][:-pad]
return all_outs, all_diffs
def _Net_set_input_arrays(self, data, labels):
"""
Set input arrays of the in-memory MemoryDataLayer.
(Note: this is only for networks declared with the memory data layer.)
"""
if labels.ndim == 1:
labels = np.ascontiguousarray(labels[:, np.newaxis, np.newaxis,
np.newaxis])
return self._set_input_arrays(data, labels)
def _Net_batch(self, blobs):
"""
Batch blob lists according to net's batch size.
Parameters
----------
blobs: Keys are blob names and values are lists of blobs (of any length).
Naturally, all the lists should have the same length.
Yields
------
batch: {blob name: list of blobs} dict for a single batch.
"""
num = len(six.next(six.itervalues(blobs)))
batch_size = six.next(six.itervalues(self.blobs)).shape[0]
remainder = num % batch_size
num_batches = num // batch_size
# Yield full batches.
for b in range(num_batches):
i = b * batch_size
yield {name: blobs[name][i:i + batch_size] for name in blobs}
# Yield last padded batch, if any.
if remainder > 0:
padded_batch = {}
for name in blobs:
padding = np.zeros((batch_size - remainder,)
+ blobs[name].shape[1:])
padded_batch[name] = np.concatenate([blobs[name][-remainder:],
padding])
yield padded_batch
class _Net_IdNameWrapper:
"""
A simple wrapper that allows the ids property to be accessed as a dict
indexed by names. Used for top and bottom names
"""
def __init__(self, net, func):
self.net, self.func = net, func
def __getitem__(self, name):
# Map the layer name to id
ids = self.func(self.net, list(self.net._layer_names).index(name))
# Map the blob id to name
id_to_name = list(self.net.blobs)
return [id_to_name[i] for i in ids]
# Attach methods to Net.
Net.blobs = _Net_blobs
Net.blob_loss_weights = _Net_blob_loss_weights
Net.params = _Net_params
Net.forward = _Net_forward
Net.backward = _Net_backward
Net.forward_all = _Net_forward_all
Net.forward_backward_all = _Net_forward_backward_all
Net.set_input_arrays = _Net_set_input_arrays
Net._batch = _Net_batch
Net.inputs = _Net_inputs
Net.outputs = _Net_outputs
Net.top_names = property(lambda n: _Net_IdNameWrapper(n, Net._top_ids))
Net.bottom_names = property(lambda n: _Net_IdNameWrapper(n, Net._bottom_ids))
| colorization-master | caffe-colorization/python/caffe/pycaffe.py |
from ._caffe import *
from .pycaffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, RMSPropSolver, AdaDeltaSolver, AdamSolver
from .proto.caffe_pb2 import TRAIN, TEST
from .classifier import Classifier
from .detector import Detector
from . import io
from .net_spec import layers, params, NetSpec, to_proto
| colorization-master | caffe-colorization/python/caffe/__init__.py |
"""
Determine spatial relationships between layers to relate their coordinates.
Coordinates are mapped from input-to-output (forward), but can
be mapped output-to-input (backward) by the inverse mapping too.
This helps crop and align feature maps among other uses.
"""
from __future__ import division
import numpy as np
from caffe import layers as L
PASS_THROUGH_LAYERS = ['AbsVal', 'BatchNorm', 'Bias', 'BNLL', 'Dropout',
'Eltwise', 'ELU', 'Log', 'LRN', 'Exp', 'MVN', 'Power',
'ReLU', 'PReLU', 'Scale', 'Sigmoid', 'Split', 'TanH',
'Threshold']
def conv_params(fn):
"""
Extract the spatial parameters that determine the coordinate mapping:
kernel size, stride, padding, and dilation.
Implementation detail: Convolution, Deconvolution, and Im2col layers
define these in the convolution_param message, while Pooling has its
own fields in pooling_param. This method deals with these details to
extract canonical parameters.
"""
params = fn.params.get('convolution_param', fn.params)
axis = params.get('axis', 1)
ks = np.array(params['kernel_size'], ndmin=1)
dilation = np.array(params.get('dilation', 1), ndmin=1)
assert len({'pad_h', 'pad_w', 'kernel_h', 'kernel_w', 'stride_h',
'stride_w'} & set(fn.params)) == 0, \
'cropping does not support legacy _h/_w params'
return (axis, np.array(params.get('stride', 1), ndmin=1),
(ks - 1) * dilation + 1,
np.array(params.get('pad', 0), ndmin=1))
def crop_params(fn):
"""
Extract the crop layer parameters with defaults.
"""
params = fn.params.get('crop_param', fn.params)
axis = params.get('axis', 2) # default to spatial crop for N, C, H, W
offset = np.array(params.get('offset', 0), ndmin=1)
return (axis, offset)
class UndefinedMapException(Exception):
"""
Exception raised for layers that do not have a defined coordinate mapping.
"""
pass
def coord_map(fn):
"""
Define the coordinate mapping by its
- axis
- scale: output coord[i * scale] <- input_coord[i]
- shift: output coord[i] <- output_coord[i + shift]
s.t. the identity mapping, as for pointwise layers like ReLu, is defined by
(None, 1, 0) since it is independent of axis and does not transform coords.
"""
if fn.type_name in ['Convolution', 'Pooling', 'Im2col']:
axis, stride, ks, pad = conv_params(fn)
return axis, 1 / stride, (pad - (ks - 1) / 2) / stride
elif fn.type_name == 'Deconvolution':
axis, stride, ks, pad = conv_params(fn)
return axis, stride, (ks - 1) / 2 - pad
elif fn.type_name in PASS_THROUGH_LAYERS:
return None, 1, 0
elif fn.type_name == 'Crop':
axis, offset = crop_params(fn)
axis -= 1 # -1 for last non-coordinate dim.
return axis, 1, - offset
else:
raise UndefinedMapException
class AxisMismatchException(Exception):
"""
Exception raised for mappings with incompatible axes.
"""
pass
def compose(base_map, next_map):
"""
Compose a base coord map with scale a1, shift b1 with a further coord map
with scale a2, shift b2. The scales multiply and the further shift, b2,
is scaled by base coord scale a1.
"""
ax1, a1, b1 = base_map
ax2, a2, b2 = next_map
if ax1 is None:
ax = ax2
elif ax2 is None or ax1 == ax2:
ax = ax1
else:
raise AxisMismatchException
return ax, a1 * a2, a1 * b2 + b1
def inverse(coord_map):
"""
Invert a coord map by de-scaling and un-shifting;
this gives the backward mapping for the gradient.
"""
ax, a, b = coord_map
return ax, 1 / a, -b / a
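# Worked example (illustrative, not part of the original file): a 2x2, stride-2
# Pooling layer maps coordinates with (scale, shift) = (1/2, -1/4); a
# Deconvolution layer with kernel_size 4, stride 2, pad 1 maps with (2, 1/2).
# compose() of the two gives scale 1/2 * 2 = 1 and shift 1/2 * 1/2 - 1/4 = 0,
# i.e. the identity, so no crop offset is needed between such a pair.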
def coord_map_from_to(top_from, top_to):
"""
Determine the coordinate mapping between a top (from) and a top (to).
Walk the graph to find a common ancestor while composing the coord maps for
from and to until they meet. As a last step the from map is inverted.
"""
# We need to find a common ancestor of top_from and top_to.
# We'll assume that all ancestors are equivalent here (otherwise the graph
# is in an inconsistent state, which we could check for in the future).
# For now use a brute-force algorithm.
def collect_bottoms(top):
"""
Collect the bottoms to walk for the coordinate mapping.
The general rule is that all the bottoms of a layer can be mapped, as
most layers have the same coordinate mapping for each bottom.
Crop layer is a notable exception. Only the first/cropped bottom is
mappable; the second/dimensions bottom is excluded from the walk.
"""
bottoms = top.fn.inputs
if top.fn.type_name == 'Crop':
bottoms = bottoms[:1]
return bottoms
# walk back from top_from, keeping the coord map as we go
from_maps = {top_from: (None, 1, 0)}
frontier = {top_from}
while frontier:
top = frontier.pop()
try:
bottoms = collect_bottoms(top)
for bottom in bottoms:
from_maps[bottom] = compose(from_maps[top], coord_map(top.fn))
frontier.add(bottom)
except UndefinedMapException:
pass
# now walk back from top_to until we hit a common blob
to_maps = {top_to: (None, 1, 0)}
frontier = {top_to}
while frontier:
top = frontier.pop()
if top in from_maps:
return compose(to_maps[top], inverse(from_maps[top]))
try:
bottoms = collect_bottoms(top)
for bottom in bottoms:
to_maps[bottom] = compose(to_maps[top], coord_map(top.fn))
frontier.add(bottom)
except UndefinedMapException:
continue
# if we got here, we did not find a blob in common
raise RuntimeError('Could not compute map between tops; are they '
'connected by spatial layers?')
def crop(top_from, top_to):
"""
Define a Crop layer to crop a top (from) to another top (to) by
determining the coordinate mapping between the two and net spec'ing
the axis and shift parameters of the crop.
"""
ax, a, b = coord_map_from_to(top_from, top_to)
assert (a == 1).all(), 'scale mismatch on crop (a = {})'.format(a)
assert (b <= 0).all(), 'cannot crop negative offset (b = {})'.format(b)
assert (np.round(b) == b).all(), 'cannot crop noninteger offset ' \
'(b = {})'.format(b)
return L.Crop(top_from, top_to,
crop_param=dict(axis=ax + 1, # +1 for first cropping dim.
offset=list(-np.round(b).astype(int))))
| colorization-master | caffe-colorization/python/caffe/coord_map.py |
"""
Caffe network visualization: draw the NetParameter protobuffer.
.. note::
This requires pydot>=1.0.2, which is not included in requirements.txt since
it requires graphviz and other prerequisites outside the scope of the
Caffe.
"""
from caffe.proto import caffe_pb2
"""
pydot is not supported under python 3 and pydot2 doesn't work properly.
pydotplus works nicely (pip install pydotplus)
"""
try:
# Try to load pydotplus
import pydotplus as pydot
except ImportError:
import pydot
# Internal layer and blob styles.
LAYER_STYLE_DEFAULT = {'shape': 'record',
'fillcolor': '#6495ED',
'style': 'filled'}
NEURON_LAYER_STYLE = {'shape': 'record',
'fillcolor': '#90EE90',
'style': 'filled'}
BLOB_STYLE = {'shape': 'octagon',
'fillcolor': '#E0E0E0',
'style': 'filled'}
def get_pooling_types_dict():
"""Get dictionary mapping pooling type number to type name
"""
desc = caffe_pb2.PoolingParameter.PoolMethod.DESCRIPTOR
d = {}
for k, v in desc.values_by_name.items():
d[v.number] = k
return d
def get_edge_label(layer):
"""Define edge label based on layer type.
"""
if layer.type == 'Data':
edge_label = 'Batch ' + str(layer.data_param.batch_size)
elif layer.type == 'Convolution' or layer.type == 'Deconvolution':
edge_label = str(layer.convolution_param.num_output)
elif layer.type == 'InnerProduct':
edge_label = str(layer.inner_product_param.num_output)
else:
edge_label = '""'
return edge_label
def get_layer_label(layer, rankdir):
"""Define node label based on layer type.
Parameters
----------
layer : ?
rankdir : {'LR', 'TB', 'BT'}
Direction of graph layout.
Returns
-------
string :
A label for the current layer
"""
if rankdir in ('TB', 'BT'):
# If graph orientation is vertical, horizontal space is free and
# vertical space is not; separate words with spaces
separator = ' '
else:
# If graph orientation is horizontal, vertical space is free and
# horizontal space is not; separate words with newlines
separator = '\\n'
if layer.type == 'Convolution' or layer.type == 'Deconvolution':
# Outer double quotes needed or else colon characters don't parse
# properly
node_label = '"%s%s(%s)%skernel size: %d%sstride: %d%spad: %d"' %\
(layer.name,
separator,
layer.type,
separator,
layer.convolution_param.kernel_size[0] if len(layer.convolution_param.kernel_size._values) else 1,
separator,
layer.convolution_param.stride[0] if len(layer.convolution_param.stride._values) else 1,
separator,
layer.convolution_param.pad[0] if len(layer.convolution_param.pad._values) else 0)
elif layer.type == 'Pooling':
pooling_types_dict = get_pooling_types_dict()
node_label = '"%s%s(%s %s)%skernel size: %d%sstride: %d%spad: %d"' %\
(layer.name,
separator,
pooling_types_dict[layer.pooling_param.pool],
layer.type,
separator,
layer.pooling_param.kernel_size,
separator,
layer.pooling_param.stride,
separator,
layer.pooling_param.pad)
else:
node_label = '"%s%s(%s)"' % (layer.name, separator, layer.type)
return node_label
def choose_color_by_layertype(layertype):
"""Define colors for nodes based on the layer type.
"""
color = '#6495ED' # Default
if layertype == 'Convolution' or layertype == 'Deconvolution':
color = '#FF5050'
elif layertype == 'Pooling':
color = '#FF9900'
elif layertype == 'InnerProduct':
color = '#CC33FF'
return color
def get_pydot_graph(caffe_net, rankdir, label_edges=True):
"""Create a data structure which represents the `caffe_net`.
Parameters
----------
caffe_net : object
rankdir : {'LR', 'TB', 'BT'}
Direction of graph layout.
label_edges : boolean, optional
Label the edges (default is True).
Returns
-------
pydot graph object
"""
pydot_graph = pydot.Dot(caffe_net.name,
graph_type='digraph',
rankdir=rankdir)
pydot_nodes = {}
pydot_edges = []
for layer in caffe_net.layer:
node_label = get_layer_label(layer, rankdir)
node_name = "%s_%s" % (layer.name, layer.type)
if (len(layer.bottom) == 1 and len(layer.top) == 1 and
layer.bottom[0] == layer.top[0]):
# We have an in-place neuron layer.
pydot_nodes[node_name] = pydot.Node(node_label,
**NEURON_LAYER_STYLE)
else:
layer_style = LAYER_STYLE_DEFAULT
layer_style['fillcolor'] = choose_color_by_layertype(layer.type)
pydot_nodes[node_name] = pydot.Node(node_label, **layer_style)
for bottom_blob in layer.bottom:
pydot_nodes[bottom_blob + '_blob'] = pydot.Node('%s' % bottom_blob,
**BLOB_STYLE)
edge_label = '""'
pydot_edges.append({'src': bottom_blob + '_blob',
'dst': node_name,
'label': edge_label})
for top_blob in layer.top:
pydot_nodes[top_blob + '_blob'] = pydot.Node('%s' % (top_blob))
if label_edges:
edge_label = get_edge_label(layer)
else:
edge_label = '""'
pydot_edges.append({'src': node_name,
'dst': top_blob + '_blob',
'label': edge_label})
# Now, add the nodes and edges to the graph.
for node in pydot_nodes.values():
pydot_graph.add_node(node)
for edge in pydot_edges:
pydot_graph.add_edge(
pydot.Edge(pydot_nodes[edge['src']],
pydot_nodes[edge['dst']],
label=edge['label']))
return pydot_graph
def draw_net(caffe_net, rankdir, ext='png'):
"""Draws a caffe net and returns the image string encoded using the given
extension.
Parameters
----------
caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer.
ext : string, optional
The image extension (the default is 'png').
Returns
-------
string :
The encoded image data of the rendered graph in the given format.
"""
return get_pydot_graph(caffe_net, rankdir).create(format=ext)
def draw_net_to_file(caffe_net, filename, rankdir='LR'):
"""Draws a caffe net, and saves it to file using the format given as the
file extension. Use '.raw' to output raw text that you can manually feed
to graphviz to draw graphs.
Parameters
----------
caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer.
filename : string
The path to a file where the networks visualization will be stored.
rankdir : {'LR', 'TB', 'BT'}
Direction of graph layout.
"""
ext = filename[filename.rfind('.')+1:]
with open(filename, 'wb') as fid:
fid.write(draw_net(caffe_net, rankdir, ext))
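# Illustrative usage sketch (not part of the original module); the prototxt and
# output paths are hypothetical placeholders.
def _example_draw_usage():
    from google.protobuf import text_format  # standard protobuf text parser
    net = caffe_pb2.NetParameter()
    with open('train_val.prototxt') as fid:
        text_format.Merge(fid.read(), net)
    draw_net_to_file(net, 'net.png', rankdir='LR')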
| colorization-master | caffe-colorization/python/caffe/draw.py |
from __future__ import division
import caffe
import numpy as np
import os
import sys
from datetime import datetime
from PIL import Image
def fast_hist(a, b, n):
k = (a >= 0) & (a < n)
return np.bincount(n * a[k].astype(int) + b[k], minlength=n**2).reshape(n, n)
def compute_hist(net, save_dir, dataset, layer='score', gt='label', loss_layer='loss'):
n_cl = net.blobs[layer].channels
if save_dir:
if(not os.path.exists(save_dir)):
os.mkdir(save_dir)
hist = np.zeros((n_cl, n_cl))
loss = 0
for idx in dataset:
net.forward()
hist += fast_hist(net.blobs[gt].data[0, 0].flatten(),
net.blobs[layer].data[0].argmax(0).flatten(),
n_cl)
if save_dir:
im = Image.fromarray(net.blobs[layer].data[0].argmax(0).astype(np.uint8), mode='P')
im.save(os.path.join(save_dir, idx + '.png'))
# compute the loss as well
loss += net.blobs[loss_layer].data.flat[0]
return hist, loss / len(dataset)
def seg_tests(solver, save_format, dataset, layer='score', gt='label', loss_layer='loss', f=-1):
print '>>>', datetime.now(), 'Begin seg tests'
solver.test_nets[0].share_with(solver.net)
do_seg_tests(solver.test_nets[0], solver.iter, save_format, dataset, layer, gt, loss_layer=loss_layer, f=f)
def do_seg_tests(net, iter, save_format, dataset, layer='score', gt='label', loss_layer='loss', f=-1):
if(np.array(f).flatten()[0]==-1):
PRINT_TO_FILE = False # print to string
else:
PRINT_TO_FILE = True
n_cl = net.blobs[layer].channels
if save_format:
save_format = save_format.format(iter)
hist, loss = compute_hist(net, save_format, dataset, layer, gt, loss_layer=loss_layer)
# mean loss
print '>>>', datetime.now(), 'Iteration', iter, 'loss', loss
# overall accuracy
acc = np.diag(hist).sum() / hist.sum()
print '>>>', datetime.now(), 'Iteration', iter, 'overall accuracy', acc
# per-class accuracy
acc = np.diag(hist) / hist.sum(1)
print '>>>', datetime.now(), 'Iteration', iter, 'mean accuracy', np.nanmean(acc)
# per-class IU
iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
print '>>>', datetime.now(), 'Iteration', iter, 'mean IU', np.nanmean(iu)
freq = hist.sum(1) / hist.sum()
print '>>>', datetime.now(), 'Iteration', iter, 'fwavacc', \
(freq[freq > 0] * iu[freq > 0]).sum()
if(PRINT_TO_FILE):
f.write('Iteration: %i\n'%(iter))
f.write('Loss: %.4f\n'%loss)
acc = np.diag(hist).sum() / hist.sum()
f.write('Overall Accuracy: %.4f\n'%(acc))
# f.write('Iteration' + iter + 'overall accuracy' + acc)
# per-class accuracy
acc = np.diag(hist) / hist.sum(1)
f.write('Mean Accuracy: %.4f\n'%(np.nanmean(acc)))
# per-class IU
iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
f.write('Mean IU: %.4f\n'%(np.nanmean(iu)))
# f.write('Iteration' + iter + 'mean IU' + np.nanmean(iu))
freq = hist.sum(1) / hist.sum()
f.write('fwavacc: %.4f\n'%(freq[freq > 0] * iu[freq > 0]).sum())
f.write('\n')
| colorization-master | caffe-colorization/python/caffe/score.py |
import unittest
import tempfile
import os
import numpy as np
import six
import caffe
def simple_net_file(num_output):
"""Make a simple net prototxt, based on test_net.cpp, returning the name
of the (temporary) file."""
f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
f.write("""name: 'testnet' force_backward: true
layer { type: 'DummyData' name: 'data' top: 'data' top: 'label'
dummy_data_param { num: 5 channels: 2 height: 3 width: 4
num: 5 channels: 1 height: 1 width: 1
data_filler { type: 'gaussian' std: 1 }
data_filler { type: 'constant' } } }
layer { type: 'Convolution' name: 'conv' bottom: 'data' top: 'conv'
convolution_param { num_output: 11 kernel_size: 2 pad: 3
weight_filler { type: 'gaussian' std: 1 }
bias_filler { type: 'constant' value: 2 } }
param { decay_mult: 1 } param { decay_mult: 0 }
}
layer { type: 'InnerProduct' name: 'ip' bottom: 'conv' top: 'ip'
inner_product_param { num_output: """ + str(num_output) + """
weight_filler { type: 'gaussian' std: 2.5 }
bias_filler { type: 'constant' value: -3 } } }
layer { type: 'SoftmaxWithLoss' name: 'loss' bottom: 'ip' bottom: 'label'
top: 'loss' }""")
f.close()
return f.name
class TestNet(unittest.TestCase):
def setUp(self):
self.num_output = 13
net_file = simple_net_file(self.num_output)
self.net = caffe.Net(net_file, caffe.TRAIN)
# fill in valid labels
self.net.blobs['label'].data[...] = \
np.random.randint(self.num_output,
size=self.net.blobs['label'].data.shape)
os.remove(net_file)
def test_memory(self):
"""Check that holding onto blob data beyond the life of a Net is OK"""
params = sum(map(list, six.itervalues(self.net.params)), [])
blobs = self.net.blobs.values()
del self.net
# now sum everything (forcing all memory to be read)
total = 0
for p in params:
total += p.data.sum() + p.diff.sum()
for bl in blobs:
total += bl.data.sum() + bl.diff.sum()
def test_forward_backward(self):
self.net.forward()
self.net.backward()
def test_inputs_outputs(self):
self.assertEqual(self.net.inputs, [])
self.assertEqual(self.net.outputs, ['loss'])
def test_save_and_read(self):
f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
f.close()
self.net.save(f.name)
net_file = simple_net_file(self.num_output)
net2 = caffe.Net(net_file, f.name, caffe.TRAIN)
os.remove(net_file)
os.remove(f.name)
for name in self.net.params:
for i in range(len(self.net.params[name])):
self.assertEqual(abs(self.net.params[name][i].data
- net2.params[name][i].data).sum(), 0)
| colorization-master | caffe-colorization/python/caffe/test/test_net.py |
import unittest
import tempfile
import caffe
from caffe import layers as L
from caffe import params as P
def lenet(batch_size):
n = caffe.NetSpec()
n.data, n.label = L.DummyData(shape=[dict(dim=[batch_size, 1, 28, 28]),
dict(dim=[batch_size, 1, 1, 1])],
transform_param=dict(scale=1./255), ntop=2)
n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=20,
weight_filler=dict(type='xavier'))
n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50,
weight_filler=dict(type='xavier'))
n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
n.ip1 = L.InnerProduct(n.pool2, num_output=500,
weight_filler=dict(type='xavier'))
n.relu1 = L.ReLU(n.ip1, in_place=True)
n.ip2 = L.InnerProduct(n.relu1, num_output=10,
weight_filler=dict(type='xavier'))
n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
return n.to_proto()
def anon_lenet(batch_size):
data, label = L.DummyData(shape=[dict(dim=[batch_size, 1, 28, 28]),
dict(dim=[batch_size, 1, 1, 1])],
transform_param=dict(scale=1./255), ntop=2)
conv1 = L.Convolution(data, kernel_size=5, num_output=20,
weight_filler=dict(type='xavier'))
pool1 = L.Pooling(conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
conv2 = L.Convolution(pool1, kernel_size=5, num_output=50,
weight_filler=dict(type='xavier'))
pool2 = L.Pooling(conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
ip1 = L.InnerProduct(pool2, num_output=500,
weight_filler=dict(type='xavier'))
relu1 = L.ReLU(ip1, in_place=True)
ip2 = L.InnerProduct(relu1, num_output=10,
weight_filler=dict(type='xavier'))
loss = L.SoftmaxWithLoss(ip2, label)
return loss.to_proto()
def silent_net():
n = caffe.NetSpec()
n.data, n.data2 = L.DummyData(shape=dict(dim=3), ntop=2)
n.silence_data = L.Silence(n.data, ntop=0)
n.silence_data2 = L.Silence(n.data2, ntop=0)
return n.to_proto()
class TestNetSpec(unittest.TestCase):
def load_net(self, net_proto):
f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
f.write(str(net_proto))
f.close()
return caffe.Net(f.name, caffe.TEST)
def test_lenet(self):
"""Construct and build the Caffe version of LeNet."""
net_proto = lenet(50)
# check that relu is in-place
self.assertEqual(net_proto.layer[6].bottom,
net_proto.layer[6].top)
net = self.load_net(net_proto)
# check that all layers are present
self.assertEqual(len(net.layers), 9)
# now the check the version with automatically-generated layer names
net_proto = anon_lenet(50)
self.assertEqual(net_proto.layer[6].bottom,
net_proto.layer[6].top)
net = self.load_net(net_proto)
self.assertEqual(len(net.layers), 9)
def test_zero_tops(self):
"""Test net construction for top-less layers."""
net_proto = silent_net()
net = self.load_net(net_proto)
self.assertEqual(len(net.forward()), 0)
| colorization-master | caffe-colorization/python/caffe/test/test_net_spec.py |
import unittest
import caffe
class TestLayerTypeList(unittest.TestCase):
def test_standard_types(self):
# check a few standard built-in layer types
for type_name in ['Data', 'Convolution', 'InnerProduct']:
self.assertIn(type_name, caffe.layer_type_list(),
'%s not in layer_type_list()' % type_name)
| colorization-master | caffe-colorization/python/caffe/test/test_layer_type_list.py |
import unittest
import tempfile
import os
import six
import caffe
class SimpleLayer(caffe.Layer):
"""A layer that just multiplies by ten"""
def setup(self, bottom, top):
pass
def reshape(self, bottom, top):
top[0].reshape(*bottom[0].data.shape)
def forward(self, bottom, top):
top[0].data[...] = 10 * bottom[0].data
def backward(self, top, propagate_down, bottom):
bottom[0].diff[...] = 10 * top[0].diff
class ExceptionLayer(caffe.Layer):
"""A layer for checking exceptions from Python"""
def setup(self, bottom, top):
raise RuntimeError
class ParameterLayer(caffe.Layer):
"""A layer that just multiplies by ten"""
def setup(self, bottom, top):
self.blobs.add_blob(1)
self.blobs[0].data[0] = 0
def reshape(self, bottom, top):
top[0].reshape(*bottom[0].data.shape)
def forward(self, bottom, top):
pass
def backward(self, top, propagate_down, bottom):
self.blobs[0].diff[0] = 1
def python_net_file():
with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
f.write("""name: 'pythonnet' force_backward: true
input: 'data' input_shape { dim: 10 dim: 9 dim: 8 }
layer { type: 'Python' name: 'one' bottom: 'data' top: 'one'
python_param { module: 'test_python_layer' layer: 'SimpleLayer' } }
layer { type: 'Python' name: 'two' bottom: 'one' top: 'two'
python_param { module: 'test_python_layer' layer: 'SimpleLayer' } }
layer { type: 'Python' name: 'three' bottom: 'two' top: 'three'
python_param { module: 'test_python_layer' layer: 'SimpleLayer' } }""")
return f.name
def exception_net_file():
with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
f.write("""name: 'pythonnet' force_backward: true
input: 'data' input_shape { dim: 10 dim: 9 dim: 8 }
layer { type: 'Python' name: 'layer' bottom: 'data' top: 'top'
python_param { module: 'test_python_layer' layer: 'ExceptionLayer' } }
""")
return f.name
def parameter_net_file():
with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
f.write("""name: 'pythonnet' force_backward: true
input: 'data' input_shape { dim: 10 dim: 9 dim: 8 }
layer { type: 'Python' name: 'layer' bottom: 'data' top: 'top'
python_param { module: 'test_python_layer' layer: 'ParameterLayer' } }
""")
return f.name
@unittest.skipIf('Python' not in caffe.layer_type_list(),
'Caffe built without Python layer support')
class TestPythonLayer(unittest.TestCase):
def setUp(self):
net_file = python_net_file()
self.net = caffe.Net(net_file, caffe.TRAIN)
os.remove(net_file)
def test_forward(self):
x = 8
self.net.blobs['data'].data[...] = x
self.net.forward()
for y in self.net.blobs['three'].data.flat:
self.assertEqual(y, 10**3 * x)
def test_backward(self):
x = 7
self.net.blobs['three'].diff[...] = x
self.net.backward()
for y in self.net.blobs['data'].diff.flat:
self.assertEqual(y, 10**3 * x)
def test_reshape(self):
s = 4
self.net.blobs['data'].reshape(s, s, s, s)
self.net.forward()
for blob in six.itervalues(self.net.blobs):
for d in blob.data.shape:
self.assertEqual(s, d)
def test_exception(self):
net_file = exception_net_file()
self.assertRaises(RuntimeError, caffe.Net, net_file, caffe.TEST)
os.remove(net_file)
def test_parameter(self):
net_file = parameter_net_file()
net = caffe.Net(net_file, caffe.TRAIN)
# Test forward and backward
net.forward()
net.backward()
layer = net.layers[list(net._layer_names).index('layer')]
self.assertEqual(layer.blobs[0].data[0], 0)
self.assertEqual(layer.blobs[0].diff[0], 1)
layer.blobs[0].data[0] += layer.blobs[0].diff[0]
self.assertEqual(layer.blobs[0].data[0], 1)
# Test saving and loading
h, caffemodel_file = tempfile.mkstemp()
net.save(caffemodel_file)
layer.blobs[0].data[0] = -1
self.assertEqual(layer.blobs[0].data[0], -1)
net.copy_from(caffemodel_file)
self.assertEqual(layer.blobs[0].data[0], 1)
os.remove(caffemodel_file)
# Test weight sharing
net2 = caffe.Net(net_file, caffe.TRAIN)
net2.share_with(net)
        layer = net2.layers[list(net2._layer_names).index('layer')]
self.assertEqual(layer.blobs[0].data[0], 1)
os.remove(net_file)
| colorization-master | caffe-colorization/python/caffe/test/test_python_layer.py |
import numpy as np
import unittest
import caffe
class TestBlobProtoToArray(unittest.TestCase):
def test_old_format(self):
data = np.zeros((10,10))
blob = caffe.proto.caffe_pb2.BlobProto()
blob.data.extend(list(data.flatten()))
shape = (1,1,10,10)
blob.num, blob.channels, blob.height, blob.width = shape
arr = caffe.io.blobproto_to_array(blob)
self.assertEqual(arr.shape, shape)
def test_new_format(self):
data = np.zeros((10,10))
blob = caffe.proto.caffe_pb2.BlobProto()
blob.data.extend(list(data.flatten()))
blob.shape.dim.extend(list(data.shape))
arr = caffe.io.blobproto_to_array(blob)
self.assertEqual(arr.shape, data.shape)
def test_no_shape(self):
data = np.zeros((10,10))
blob = caffe.proto.caffe_pb2.BlobProto()
blob.data.extend(list(data.flatten()))
with self.assertRaises(ValueError):
caffe.io.blobproto_to_array(blob)
def test_scalar(self):
data = np.ones((1)) * 123
blob = caffe.proto.caffe_pb2.BlobProto()
blob.data.extend(list(data.flatten()))
arr = caffe.io.blobproto_to_array(blob)
self.assertEqual(arr, 123)
class TestArrayToDatum(unittest.TestCase):
def test_label_none_size(self):
# Set label
d1 = caffe.io.array_to_datum(
np.ones((10,10,3)), label=1)
# Don't set label
d2 = caffe.io.array_to_datum(
np.ones((10,10,3)))
# Not setting the label should result in a smaller object
self.assertGreater(
len(d1.SerializeToString()),
len(d2.SerializeToString()))
| colorization-master | caffe-colorization/python/caffe/test/test_io.py |
import unittest
import tempfile
import os
import six
import caffe
class SimpleParamLayer(caffe.Layer):
"""A layer that just multiplies by the numeric value of its param string"""
def setup(self, bottom, top):
try:
self.value = float(self.param_str)
except ValueError:
raise ValueError("Parameter string must be a legible float")
def reshape(self, bottom, top):
top[0].reshape(*bottom[0].data.shape)
def forward(self, bottom, top):
top[0].data[...] = self.value * bottom[0].data
def backward(self, top, propagate_down, bottom):
bottom[0].diff[...] = self.value * top[0].diff
def python_param_net_file():
with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
f.write("""name: 'pythonnet' force_backward: true
input: 'data' input_shape { dim: 10 dim: 9 dim: 8 }
layer { type: 'Python' name: 'mul10' bottom: 'data' top: 'mul10'
python_param { module: 'test_python_layer_with_param_str'
layer: 'SimpleParamLayer' param_str: '10' } }
layer { type: 'Python' name: 'mul2' bottom: 'mul10' top: 'mul2'
python_param { module: 'test_python_layer_with_param_str'
layer: 'SimpleParamLayer' param_str: '2' } }""")
return f.name
@unittest.skipIf('Python' not in caffe.layer_type_list(),
'Caffe built without Python layer support')
class TestLayerWithParam(unittest.TestCase):
def setUp(self):
net_file = python_param_net_file()
self.net = caffe.Net(net_file, caffe.TRAIN)
os.remove(net_file)
def test_forward(self):
x = 8
self.net.blobs['data'].data[...] = x
self.net.forward()
for y in self.net.blobs['mul2'].data.flat:
self.assertEqual(y, 2 * 10 * x)
def test_backward(self):
x = 7
self.net.blobs['mul2'].diff[...] = x
self.net.backward()
for y in self.net.blobs['data'].diff.flat:
self.assertEqual(y, 2 * 10 * x)
| colorization-master | caffe-colorization/python/caffe/test/test_python_layer_with_param_str.py |
import unittest
import tempfile
import os
import numpy as np
import six
import caffe
from test_net import simple_net_file
class TestSolver(unittest.TestCase):
def setUp(self):
self.num_output = 13
net_f = simple_net_file(self.num_output)
f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
f.write("""net: '""" + net_f + """'
test_iter: 10 test_interval: 10 base_lr: 0.01 momentum: 0.9
weight_decay: 0.0005 lr_policy: 'inv' gamma: 0.0001 power: 0.75
display: 100 max_iter: 100 snapshot_after_train: false
snapshot_prefix: "model" """)
f.close()
self.solver = caffe.SGDSolver(f.name)
# also make sure get_solver runs
caffe.get_solver(f.name)
caffe.set_mode_cpu()
# fill in valid labels
self.solver.net.blobs['label'].data[...] = \
np.random.randint(self.num_output,
size=self.solver.net.blobs['label'].data.shape)
self.solver.test_nets[0].blobs['label'].data[...] = \
np.random.randint(self.num_output,
size=self.solver.test_nets[0].blobs['label'].data.shape)
os.remove(f.name)
os.remove(net_f)
def test_solve(self):
self.assertEqual(self.solver.iter, 0)
self.solver.solve()
self.assertEqual(self.solver.iter, 100)
def test_net_memory(self):
"""Check that nets survive after the solver is destroyed."""
nets = [self.solver.net] + list(self.solver.test_nets)
self.assertEqual(len(nets), 2)
del self.solver
total = 0
for net in nets:
for ps in six.itervalues(net.params):
for p in ps:
total += p.data.sum() + p.diff.sum()
for bl in six.itervalues(net.blobs):
total += bl.data.sum() + bl.diff.sum()
def test_snapshot(self):
self.solver.snapshot()
# Check that these files exist and then remove them
files = ['model_iter_0.caffemodel', 'model_iter_0.solverstate']
for fn in files:
assert os.path.isfile(fn)
os.remove(fn)
| colorization-master | caffe-colorization/python/caffe/test/test_solver.py |
import unittest
import numpy as np
import random
import caffe
from caffe import layers as L
from caffe import params as P
from caffe.coord_map import coord_map_from_to, crop
def coord_net_spec(ks=3, stride=1, pad=0, pool=2, dstride=2, dpad=0):
"""
Define net spec for simple conv-pool-deconv pattern common to all
coordinate mapping tests.
"""
n = caffe.NetSpec()
n.data = L.Input(shape=dict(dim=[2, 1, 100, 100]))
n.aux = L.Input(shape=dict(dim=[2, 1, 20, 20]))
n.conv = L.Convolution(
n.data, num_output=10, kernel_size=ks, stride=stride, pad=pad)
n.pool = L.Pooling(
n.conv, pool=P.Pooling.MAX, kernel_size=pool, stride=pool, pad=0)
    # for upsampling, the kernel size is 2x the stride
try:
deconv_ks = [s*2 for s in dstride]
    except TypeError:  # dstride is a scalar, not a per-axis list
deconv_ks = dstride*2
n.deconv = L.Deconvolution(
n.pool, num_output=10, kernel_size=deconv_ks, stride=dstride, pad=dpad)
return n
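# Informal orientation for the tests below, inferred from the assertions rather
# than from any stated docs: coord_map_from_to(top, bottom) returns (ax, a, b),
# where ax is an axis index that must agree between the mapped layers, a is the
# scale, and b is the offset of the linear map from top coordinates back to
# bottom coordinates. For the default spec above (3x3 conv, 2x pool, 2x deconv)
# the map is the identity: a == 1, b == 0 (see test_conv_pool_deconv).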
class TestCoordMap(unittest.TestCase):
def setUp(self):
pass
def test_conv_pool_deconv(self):
"""
Map through conv, pool, and deconv.
"""
n = coord_net_spec()
# identity for 2x pool, 2x deconv
ax, a, b = coord_map_from_to(n.deconv, n.data)
self.assertEquals(ax, 1)
self.assertEquals(a, 1)
self.assertEquals(b, 0)
# shift-by-one for 4x pool, 4x deconv
n = coord_net_spec(pool=4, dstride=4)
ax, a, b = coord_map_from_to(n.deconv, n.data)
self.assertEquals(ax, 1)
self.assertEquals(a, 1)
self.assertEquals(b, -1)
def test_pass(self):
"""
A pass-through layer (ReLU) and conv (1x1, stride 1, pad 0)
both do identity mapping.
"""
n = coord_net_spec()
ax, a, b = coord_map_from_to(n.deconv, n.data)
n.relu = L.ReLU(n.deconv)
n.conv1x1 = L.Convolution(
n.relu, num_output=10, kernel_size=1, stride=1, pad=0)
for top in [n.relu, n.conv1x1]:
ax_pass, a_pass, b_pass = coord_map_from_to(top, n.data)
self.assertEquals(ax, ax_pass)
self.assertEquals(a, a_pass)
self.assertEquals(b, b_pass)
def test_padding(self):
"""
Padding conv adds offset while padding deconv subtracts offset.
"""
n = coord_net_spec()
ax, a, b = coord_map_from_to(n.deconv, n.data)
pad = random.randint(0, 10)
# conv padding
n = coord_net_spec(pad=pad)
_, a_pad, b_pad = coord_map_from_to(n.deconv, n.data)
self.assertEquals(a, a_pad)
self.assertEquals(b - pad, b_pad)
# deconv padding
n = coord_net_spec(dpad=pad)
_, a_pad, b_pad = coord_map_from_to(n.deconv, n.data)
self.assertEquals(a, a_pad)
self.assertEquals(b + pad, b_pad)
# pad both to cancel out
n = coord_net_spec(pad=pad, dpad=pad)
_, a_pad, b_pad = coord_map_from_to(n.deconv, n.data)
self.assertEquals(a, a_pad)
self.assertEquals(b, b_pad)
def test_multi_conv(self):
"""
Multiple bottoms/tops of a layer are identically mapped.
"""
n = coord_net_spec()
# multi bottom/top
n.conv_data, n.conv_aux = L.Convolution(
n.data, n.aux, ntop=2, num_output=10, kernel_size=5, stride=2,
pad=0)
ax1, a1, b1 = coord_map_from_to(n.conv_data, n.data)
ax2, a2, b2 = coord_map_from_to(n.conv_aux, n.aux)
self.assertEquals(ax1, ax2)
self.assertEquals(a1, a2)
self.assertEquals(b1, b2)
def test_rect(self):
"""
Anisotropic mapping is equivalent to its isotropic parts.
"""
n3x3 = coord_net_spec(ks=3, stride=1, pad=0)
n5x5 = coord_net_spec(ks=5, stride=2, pad=10)
n3x5 = coord_net_spec(ks=[3, 5], stride=[1, 2], pad=[0, 10])
ax_3x3, a_3x3, b_3x3 = coord_map_from_to(n3x3.deconv, n3x3.data)
ax_5x5, a_5x5, b_5x5 = coord_map_from_to(n5x5.deconv, n5x5.data)
ax_3x5, a_3x5, b_3x5 = coord_map_from_to(n3x5.deconv, n3x5.data)
self.assertTrue(ax_3x3 == ax_5x5 == ax_3x5)
self.assertEquals(a_3x3, a_3x5[0])
self.assertEquals(b_3x3, b_3x5[0])
self.assertEquals(a_5x5, a_3x5[1])
self.assertEquals(b_5x5, b_3x5[1])
def test_nd_conv(self):
"""
ND conv maps the same way in more dimensions.
"""
n = caffe.NetSpec()
# define data with 3 spatial dimensions, otherwise the same net
n.data = L.Input(shape=dict(dim=[2, 3, 100, 100, 100]))
n.conv = L.Convolution(
n.data, num_output=10, kernel_size=[3, 3, 3], stride=[1, 1, 1],
pad=[0, 1, 2])
n.pool = L.Pooling(
n.conv, pool=P.Pooling.MAX, kernel_size=2, stride=2, pad=0)
n.deconv = L.Deconvolution(
n.pool, num_output=10, kernel_size=4, stride=2, pad=0)
ax, a, b = coord_map_from_to(n.deconv, n.data)
self.assertEquals(ax, 1)
self.assertTrue(len(a) == len(b))
self.assertTrue(np.all(a == 1))
self.assertEquals(b[0] - 1, b[1])
self.assertEquals(b[1] - 1, b[2])
def test_crop_of_crop(self):
"""
Map coordinates through Crop layer:
crop an already-cropped output to the input and check change in offset.
"""
n = coord_net_spec()
offset = random.randint(0, 10)
ax, a, b = coord_map_from_to(n.deconv, n.data)
n.crop = L.Crop(n.deconv, n.data, axis=2, offset=offset)
ax_crop, a_crop, b_crop = coord_map_from_to(n.crop, n.data)
self.assertEquals(ax, ax_crop)
self.assertEquals(a, a_crop)
self.assertEquals(b + offset, b_crop)
def test_crop_helper(self):
"""
Define Crop layer by crop().
"""
n = coord_net_spec()
crop(n.deconv, n.data)
def test_catch_unconnected(self):
"""
Catch mapping spatially unconnected tops.
"""
n = coord_net_spec()
n.ip = L.InnerProduct(n.deconv, num_output=10)
with self.assertRaises(RuntimeError):
coord_map_from_to(n.ip, n.data)
def test_catch_scale_mismatch(self):
"""
Catch incompatible scales, such as when the top to be cropped
is mapped to a differently strided reference top.
"""
n = coord_net_spec(pool=3, dstride=2) # pool 3x but deconv 2x
with self.assertRaises(AssertionError):
crop(n.deconv, n.data)
def test_catch_negative_crop(self):
"""
Catch impossible offsets, such as when the top to be cropped
is mapped to a larger reference top.
"""
n = coord_net_spec(dpad=10) # make output smaller than input
with self.assertRaises(AssertionError):
crop(n.deconv, n.data)
| colorization-master | caffe-colorization/python/caffe/test/test_coord_map.py |
from __future__ import print_function
from caffe import layers as L, params as P, to_proto
from caffe.proto import caffe_pb2
# helper function for common structures
def conv_relu(bottom, ks, nout, stride=1, pad=0, group=1):
conv = L.Convolution(bottom, kernel_size=ks, stride=stride,
num_output=nout, pad=pad, group=group)
return conv, L.ReLU(conv, in_place=True)
def fc_relu(bottom, nout):
fc = L.InnerProduct(bottom, num_output=nout)
return fc, L.ReLU(fc, in_place=True)
def max_pool(bottom, ks, stride=1):
return L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=ks, stride=stride)
def caffenet(lmdb, batch_size=256, include_acc=False):
data, label = L.Data(source=lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
transform_param=dict(crop_size=227, mean_value=[104, 117, 123], mirror=True))
# the net itself
conv1, relu1 = conv_relu(data, 11, 96, stride=4)
pool1 = max_pool(relu1, 3, stride=2)
norm1 = L.LRN(pool1, local_size=5, alpha=1e-4, beta=0.75)
conv2, relu2 = conv_relu(norm1, 5, 256, pad=2, group=2)
pool2 = max_pool(relu2, 3, stride=2)
norm2 = L.LRN(pool2, local_size=5, alpha=1e-4, beta=0.75)
conv3, relu3 = conv_relu(norm2, 3, 384, pad=1)
conv4, relu4 = conv_relu(relu3, 3, 384, pad=1, group=2)
conv5, relu5 = conv_relu(relu4, 3, 256, pad=1, group=2)
pool5 = max_pool(relu5, 3, stride=2)
fc6, relu6 = fc_relu(pool5, 4096)
drop6 = L.Dropout(relu6, in_place=True)
fc7, relu7 = fc_relu(drop6, 4096)
drop7 = L.Dropout(relu7, in_place=True)
fc8 = L.InnerProduct(drop7, num_output=1000)
loss = L.SoftmaxWithLoss(fc8, label)
if include_acc:
acc = L.Accuracy(fc8, label)
return to_proto(loss, acc)
else:
return to_proto(loss)
def make_net():
with open('train.prototxt', 'w') as f:
print(caffenet('/path/to/caffe-train-lmdb'), file=f)
with open('test.prototxt', 'w') as f:
print(caffenet('/path/to/caffe-val-lmdb', batch_size=50, include_acc=True), file=f)
if __name__ == '__main__':
make_net()
| colorization-master | caffe-colorization/examples/pycaffe/caffenet.py |
import numpy as np
class SimpleTransformer:
"""
SimpleTransformer is a simple class for preprocessing and deprocessing
images for caffe.
"""
def __init__(self, mean=[128, 128, 128]):
self.mean = np.array(mean, dtype=np.float32)
self.scale = 1.0
def set_mean(self, mean):
"""
Set the mean to subtract for centering the data.
"""
self.mean = mean
def set_scale(self, scale):
"""
Set the data scaling.
"""
self.scale = scale
def preprocess(self, im):
"""
        preprocess() emulates the pre-processing occurring in the VGG16 Caffe
prototxt.
"""
im = np.float32(im)
im = im[:, :, ::-1] # change to BGR
im -= self.mean
im *= self.scale
im = im.transpose((2, 0, 1))
return im
def deprocess(self, im):
"""
inverse of preprocess()
"""
im = im.transpose(1, 2, 0)
im /= self.scale
im += self.mean
im = im[:, :, ::-1] # change to RGB
return np.uint8(im)
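# A minimal usage sketch for SimpleTransformer (illustrative only; the mean
# values and image array below are made-up examples):
#
#   transformer = SimpleTransformer(mean=[104, 117, 123])
#   im = np.random.randint(0, 256, (227, 227, 3)).astype(np.uint8)  # HxWxC RGB
#   caffe_in = transformer.preprocess(im)       # CxHxW BGR, mean-subtracted
#   restored = transformer.deprocess(caffe_in)  # back to HxWxC RGB uint8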
class CaffeSolver:
"""
    CaffeSolver is a helper class for creating a solver.prototxt file. It sets
    default values and can export a solver parameter file.
    Note that all parameters are stored as strings. Parameters whose values are
    themselves strings (e.g. snapshot_prefix) must therefore be quoted, i.e.
    stored as strings within strings.
"""
def __init__(self, testnet_prototxt_path="testnet.prototxt",
trainnet_prototxt_path="trainnet.prototxt", debug=False):
self.sp = {}
# critical:
self.sp['base_lr'] = '0.001'
self.sp['momentum'] = '0.9'
# speed:
self.sp['test_iter'] = '100'
self.sp['test_interval'] = '250'
# looks:
self.sp['display'] = '25'
self.sp['snapshot'] = '2500'
        self.sp['snapshot_prefix'] = '"snapshot"'  # string within a string!
# learning rate policy
self.sp['lr_policy'] = '"fixed"'
# important, but rare:
self.sp['gamma'] = '0.1'
self.sp['weight_decay'] = '0.0005'
self.sp['train_net'] = '"' + trainnet_prototxt_path + '"'
self.sp['test_net'] = '"' + testnet_prototxt_path + '"'
# pretty much never change these.
self.sp['max_iter'] = '100000'
self.sp['test_initialization'] = 'false'
        self.sp['average_loss'] = '25'  # number of iterations to average the displayed loss over
self.sp['iter_size'] = '1' # this is for accumulating gradients
if (debug):
self.sp['max_iter'] = '12'
self.sp['test_iter'] = '1'
self.sp['test_interval'] = '4'
self.sp['display'] = '1'
def add_from_file(self, filepath):
"""
Reads a caffe solver prototxt file and updates the Caffesolver
instance parameters.
"""
with open(filepath, 'r') as f:
for line in f:
if line[0] == '#':
continue
splitLine = line.split(':')
self.sp[splitLine[0].strip()] = splitLine[1].strip()
    def write(self, filepath):
        """
        Export solver parameters to the file at "filepath", sorted alphabetically.
        """
        with open(filepath, 'w') as f:
            for key, value in sorted(self.sp.items()):
                if not isinstance(value, str):
                    raise TypeError('All solver parameters must be strings')
                f.write('%s: %s\n' % (key, value))
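if __name__ == '__main__':
    # Minimal self-contained demo (not part of the original module): build a
    # CaffeSolver with default parameters and serialize it to a temporary file.
    # The prototxt paths passed here are placeholders and need not exist, since
    # write() only dumps the parameter dictionary.
    import os
    import tempfile
    solver = CaffeSolver(testnet_prototxt_path='testnet.prototxt',
                         trainnet_prototxt_path='trainnet.prototxt')
    fd, out_path = tempfile.mkstemp(suffix='.prototxt')
    os.close(fd)
    solver.write(out_path)
    with open(out_path) as f:
        print(f.read())
    os.remove(out_path)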
| colorization-master | caffe-colorization/examples/pycaffe/tools.py |
# imports
import json
import time
import pickle
import scipy.misc
import scipy.sparse
import skimage.io
import caffe
import numpy as np
import os.path as osp
from xml.dom import minidom
from random import shuffle
from threading import Thread
from PIL import Image
from tools import SimpleTransformer
class PascalMultilabelDataLayerSync(caffe.Layer):
"""
    This is a simple synchronous data layer for training a multilabel model on
PASCAL.
"""
def setup(self, bottom, top):
self.top_names = ['data', 'label']
# === Read input parameters ===
# params is a python dictionary with layer parameters.
params = eval(self.param_str)
        # Check the parameters for validity.
check_params(params)
# store input as class variables
self.batch_size = params['batch_size']
# Create a batch loader to load the images.
self.batch_loader = BatchLoader(params, None)
# === reshape tops ===
# since we use a fixed input image size, we can shape the data layer
# once. Else, we'd have to do it in the reshape call.
top[0].reshape(
self.batch_size, 3, params['im_shape'][0], params['im_shape'][1])
# Note the 20 channels (because PASCAL has 20 classes.)
top[1].reshape(self.batch_size, 20)
print_info("PascalMultilabelDataLayerSync", params)
def forward(self, bottom, top):
"""
Load data.
"""
for itt in range(self.batch_size):
# Use the batch loader to load the next image.
im, multilabel = self.batch_loader.load_next_image()
# Add directly to the caffe data layer
top[0].data[itt, ...] = im
top[1].data[itt, ...] = multilabel
def reshape(self, bottom, top):
"""
There is no need to reshape the data, since the input is of fixed size
(rows and columns)
"""
pass
def backward(self, top, propagate_down, bottom):
"""
        This layer does not back-propagate gradients to its bottoms.
"""
pass
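# For reference, a layer like the one above would typically be declared in a
# net prototxt roughly as follows (sketch only; the paths and sizes are made-up
# examples, and param_str is eval'd into the params dict that check_params
# validates below):
#
#   layer {
#     name: 'data' type: 'Python' top: 'data' top: 'label'
#     python_param {
#       module: 'pascal_multilabel_datalayers'
#       layer: 'PascalMultilabelDataLayerSync'
#       param_str: "{'pascal_root': '/path/to/VOC2012', 'split': 'train',
#                    'batch_size': 64, 'im_shape': [227, 227]}"
#     }
#   }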
class BatchLoader(object):
"""
This class abstracts away the loading of images.
Images can either be loaded singly, or in a batch. The latter is used for
    the asynchronous data layer to preload batches while other processing is
performed.
"""
def __init__(self, params, result):
self.result = result
self.batch_size = params['batch_size']
self.pascal_root = params['pascal_root']
self.im_shape = params['im_shape']
# get list of image indexes.
list_file = params['split'] + '.txt'
self.indexlist = [line.rstrip('\n') for line in open(
osp.join(self.pascal_root, 'ImageSets/Main', list_file))]
self._cur = 0 # current image
# this class does some simple data-manipulations
self.transformer = SimpleTransformer()
print "BatchLoader initialized with {} images".format(
len(self.indexlist))
def load_next_image(self):
"""
Load the next image in a batch.
"""
# Did we finish an epoch?
if self._cur == len(self.indexlist):
self._cur = 0
shuffle(self.indexlist)
# Load an image
index = self.indexlist[self._cur] # Get the image index
image_file_name = index + '.jpg'
im = np.asarray(Image.open(
osp.join(self.pascal_root, 'JPEGImages', image_file_name)))
im = scipy.misc.imresize(im, self.im_shape) # resize
# do a simple horizontal flip as data augmentation
flip = np.random.choice(2)*2-1
im = im[:, ::flip, :]
# Load and prepare ground truth
multilabel = np.zeros(20).astype(np.float32)
anns = load_pascal_annotation(index, self.pascal_root)
for label in anns['gt_classes']:
# in the multilabel problem we don't care how MANY instances
# there are of each class. Only if they are present.
# The "-1" is b/c we are not interested in the background
# class.
multilabel[label - 1] = 1
self._cur += 1
return self.transformer.preprocess(im), multilabel
def load_pascal_annotation(index, pascal_root):
"""
This code is borrowed from Ross Girshick's FAST-RCNN code
(https://github.com/rbgirshick/fast-rcnn).
It parses the PASCAL .xml metadata files.
See publication for further details: (http://arxiv.org/abs/1504.08083).
Thanks Ross!
"""
classes = ('__background__', # always index 0
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
class_to_ind = dict(zip(classes, xrange(21)))
filename = osp.join(pascal_root, 'Annotations', index + '.xml')
# print 'Loading: {}'.format(filename)
def get_data_from_tag(node, tag):
return node.getElementsByTagName(tag)[0].childNodes[0].data
with open(filename) as f:
data = minidom.parseString(f.read())
objs = data.getElementsByTagName('object')
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, 21), dtype=np.float32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
# Make pixel indexes 0-based
x1 = float(get_data_from_tag(obj, 'xmin')) - 1
y1 = float(get_data_from_tag(obj, 'ymin')) - 1
x2 = float(get_data_from_tag(obj, 'xmax')) - 1
y2 = float(get_data_from_tag(obj, 'ymax')) - 1
cls = class_to_ind[
str(get_data_from_tag(obj, "name")).lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'flipped': False,
'index': index}
def check_params(params):
"""
A utility function to check the parameters for the data layers.
"""
assert 'split' in params.keys(
), 'Params must include split (train, val, or test).'
required = ['batch_size', 'pascal_root', 'im_shape']
for r in required:
assert r in params.keys(), 'Params must include {}'.format(r)
def print_info(name, params):
"""
    Output some info regarding the class.
"""
print "{} initialized for split: {}, with bs: {}, im_shape: {}.".format(
name,
params['split'],
params['batch_size'],
params['im_shape'])
| colorization-master | caffe-colorization/examples/pycaffe/layers/pascal_multilabel_datalayers.py |
import caffe
import numpy as np
class EuclideanLossLayer(caffe.Layer):
"""
Compute the Euclidean Loss in the same manner as the C++ EuclideanLossLayer
to demonstrate the class interface for developing layers in Python.
"""
def setup(self, bottom, top):
# check input pair
if len(bottom) != 2:
raise Exception("Need two inputs to compute distance.")
def reshape(self, bottom, top):
# check input dimensions match
if bottom[0].count != bottom[1].count:
raise Exception("Inputs must have the same dimension.")
# difference is shape of inputs
self.diff = np.zeros_like(bottom[0].data, dtype=np.float32)
# loss output is scalar
top[0].reshape(1)
def forward(self, bottom, top):
self.diff[...] = bottom[0].data - bottom[1].data
top[0].data[...] = np.sum(self.diff**2) / bottom[0].num / 2.
def backward(self, top, propagate_down, bottom):
for i in range(2):
if not propagate_down[i]:
continue
if i == 0:
sign = 1
else:
sign = -1
bottom[i].diff[...] = sign * self.diff / bottom[i].num
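# A sketch of how this layer might be wired into a net prototxt (the bottom
# blob names below are placeholders; the syntax mirrors the Python-layer test
# nets elsewhere in this repo):
#
#   layer {
#     type: 'Python' name: 'loss' top: 'loss' bottom: 'pred' bottom: 'target'
#     python_param { module: 'pyloss' layer: 'EuclideanLossLayer' }
#     loss_weight: 1
#   }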
| colorization-master | caffe-colorization/examples/pycaffe/layers/pyloss.py |