import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import GeometricMeanRelativeAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = GeometricMeanRelativeAbsoluteError()
with pytest.raises(
NotComputableError,
match=r"GeometricMeanRelativeAbsoluteError must have at least one example before it can be computed",
):
m.compute()
def test_wrong_input_shapes():
m = GeometricMeanRelativeAbsoluteError()
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4)))
def test_compute():
size = 51
np_y_pred = np.random.rand(size)
np_y = np.random.rand(size)
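    # Reference GMRAE: geometric mean of |y - y_pred| / |y - mean(y)|, i.e. exp(mean(log(...)))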
np_gmrae = np.exp(np.log(np.abs(np_y - np_y_pred) / np.abs(np_y - np_y.mean())).mean())
m = GeometricMeanRelativeAbsoluteError()
y_pred = torch.from_numpy(np_y_pred)
y = torch.from_numpy(np_y)
m.reset()
m.update((y_pred, y))
assert np_gmrae == pytest.approx(m.compute())
def test_integration():
y_pred = torch.rand(size=(100,))
y = torch.rand(size=(100,))
batch_size = 10
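    # Note: update_fn reads np_y and np_y_pred from the enclosing scope; they are assigned below, before engine.run is called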
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = GeometricMeanRelativeAbsoluteError()
m.attach(engine, "gmrae")
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
data = list(range(y_pred.shape[0] // batch_size))
gmrae = engine.run(data, max_epochs=1).metrics["gmrae"]
sum_errors = np.log(np.abs(np_y - np_y_pred) / np.abs(np_y - np_y.mean())).sum()
np_len = len(y_pred)
np_ans = np.exp(sum_errors / np_len)
assert np_ans == pytest.approx(gmrae)
def _test_distrib_compute(device):
rank = idist.get_rank()
def _test(metric_device):
metric_device = torch.device(metric_device)
m = GeometricMeanRelativeAbsoluteError(device=metric_device)
y_pred = torch.rand(size=(100,), device=device)
y = torch.rand(size=(100,), device=device)
m.update((y_pred, y))
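        # gather y_pred, y from all processes so the local metric result can be checked against a global NumPy reference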
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y = y.cpu().numpy()
np_y_pred = y_pred.cpu().numpy()
np_gmrae = np.exp(np.log(np.abs(np_y - np_y_pred) / np.abs(np_y - np_y.mean())).mean())
assert m.compute() == pytest.approx(np_gmrae, rel=1e-4)
for i in range(3):
torch.manual_seed(12 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device):
rank = idist.get_rank()
torch.manual_seed(12)
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
y_true = torch.rand(size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(size=(n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
gmrae = GeometricMeanRelativeAbsoluteError(device=metric_device)
gmrae.attach(engine, "gmrae")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "gmrae" in engine.state.metrics
res = engine.state.metrics["gmrae"]
np_y = y_true.cpu().numpy()
np_y_pred = y_preds.cpu().numpy()
np_gmrae = np.exp(np.log(np.abs(np_y - np_y_pred) / np.abs(np_y - np_y.mean())).mean())
assert pytest.approx(res, rel=1e-4) == np_gmrae
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
# ---------------------------------------------------------------------------
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import FractionalAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = FractionalAbsoluteError()
with pytest.raises(
NotComputableError, match=r"FractionalAbsoluteError must have at least one example before it can be computed"
):
m.compute()
def test_wrong_input_shapes():
m = FractionalAbsoluteError()
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4)))
def test_compute():
a = np.random.randn(4)
b = np.random.randn(4)
c = np.random.randn(4)
d = np.random.randn(4)
ground_truth = np.random.randn(4)
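    # Reference: fractional absolute error = mean(2 * |y_pred - y| / (|y_pred| + |y|)), accumulated over successive updates below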
m = FractionalAbsoluteError()
m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))
np_sum = (2 * np.abs((a - ground_truth)) / (np.abs(a) + np.abs(ground_truth))).sum()
np_len = len(a)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(b), torch.from_numpy(ground_truth)))
np_sum += (2 * np.abs((b - ground_truth)) / (np.abs(b) + np.abs(ground_truth))).sum()
np_len += len(b)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(c), torch.from_numpy(ground_truth)))
np_sum += (2 * np.abs((c - ground_truth)) / (np.abs(c) + np.abs(ground_truth))).sum()
np_len += len(c)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(d), torch.from_numpy(ground_truth)))
np_sum += (2 * np.abs((d - ground_truth)) / (np.abs(d) + np.abs(ground_truth))).sum()
np_len += len(d)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
def test_integration():
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = FractionalAbsoluteError()
m.attach(engine, "fab")
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
data = list(range(y_pred.shape[0] // batch_size))
fab = engine.run(data, max_epochs=1).metrics["fab"]
np_sum = (2 * np.abs((np_y_pred - np_y)) / (np.abs(np_y_pred) + np.abs(np_y))).sum()
np_len = len(y_pred)
np_ans = np_sum / np_len
assert np_ans == pytest.approx(fab)
def get_test_cases():
test_cases = [
(torch.rand(size=(100,)), torch.rand(size=(100,)), 10),
(torch.rand(size=(100, 1)), torch.rand(size=(100, 1)), 20),
]
return test_cases
for _ in range(5):
        # check multiple random inputs as random exact occurrences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def _test_distrib_compute(device):
rank = idist.get_rank()
def _test(metric_device):
metric_device = torch.device(metric_device)
m = FractionalAbsoluteError(device=metric_device)
y_pred = torch.rand(size=(100,), device=device)
y = torch.rand(size=(100,), device=device)
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y = y.cpu().numpy()
np_y_pred = y_pred.cpu().numpy()
np_sum = (2 * np.abs((np_y_pred - np_y)) / (np.abs(np_y_pred) + np.abs(np_y))).sum()
np_len = len(np_y_pred)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
for i in range(3):
torch.manual_seed(10 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
y_true = torch.rand(size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(size=(n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
fae = FractionalAbsoluteError(device=metric_device)
fae.attach(engine, "fae")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "fae" in engine.state.metrics
res = engine.state.metrics["fae"]
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
np_y = y_true.cpu().numpy()
np_y_pred = y_preds.cpu().numpy()
np_sum = (2 * np.abs((np_y_pred - np_y)) / (np.abs(np_y_pred) + np.abs(np_y))).sum()
np_len = len(np_y_pred)
np_ans = np_sum / np_len
assert pytest.approx(res) == np_ans
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
# ---------------------------------------------------------------------------
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MedianAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = MedianAbsoluteError()
with pytest.raises(
NotComputableError, match=r"EpochMetric must have at least one example before it can be computed"
):
m.compute()
def test_wrong_input_shapes():
m = MedianAbsoluteError()
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
m.update((torch.rand(4, 1, 2), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Targets should be of shape"):
m.update((torch.rand(4, 1), torch.rand(4, 1, 2)))
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
m.update((torch.rand(4, 1, 2), torch.rand(4)))
with pytest.raises(ValueError, match=r"Targets should be of shape"):
m.update((torch.rand(4), torch.rand(4, 1, 2)))
def test_median_absolute_error():
# See https://github.com/torch/torch7/pull/182
# For even number of elements, PyTorch returns middle element
# NumPy returns average of middle elements
# Size of dataset will be odd for these tests
size = 51
np_y_pred = np.random.rand(size)
np_y = np.random.rand(size)
np_median_absolute_error = np.median(np.abs(np_y - np_y_pred))
m = MedianAbsoluteError()
y_pred = torch.from_numpy(np_y_pred)
y = torch.from_numpy(np_y)
m.reset()
m.update((y_pred, y))
assert np_median_absolute_error == pytest.approx(m.compute())
def test_median_absolute_error_2():
np.random.seed(1)
size = 105
np_y_pred = np.random.rand(size, 1)
np_y = np.random.rand(size, 1)
np.random.shuffle(np_y)
np_median_absolute_error = np.median(np.abs(np_y - np_y_pred))
m = MedianAbsoluteError()
y_pred = torch.from_numpy(np_y_pred)
y = torch.from_numpy(np_y)
m.reset()
batch_size = 16
n_iters = size // batch_size + 1
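    # size is not a multiple of batch_size; the extra iteration covers the trailing partial batch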
for i in range(n_iters):
idx = i * batch_size
m.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
assert np_median_absolute_error == pytest.approx(m.compute())
def test_integration_median_absolute_error():
np.random.seed(1)
size = 105
np_y_pred = np.random.rand(size, 1)
np_y = np.random.rand(size, 1)
np.random.shuffle(np_y)
np_median_absolute_error = np.median(np.abs(np_y - np_y_pred))
batch_size = 15
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = MedianAbsoluteError()
m.attach(engine, "median_absolute_error")
data = list(range(size // batch_size))
median_absolute_error = engine.run(data, max_epochs=1).metrics["median_absolute_error"]
assert np_median_absolute_error == pytest.approx(median_absolute_error)
def _test_distrib_compute(device):
def _test(metric_device):
metric_device = torch.device(metric_device)
m = MedianAbsoluteError(device=metric_device)
size = 105
y_pred = torch.randint(1, 10, size=(size, 1), dtype=torch.double, device=device)
y = torch.randint(1, 10, size=(size, 1), dtype=torch.double, device=device)
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = y_pred.cpu().numpy().ravel()
np_y = y.cpu().numpy().ravel()
res = m.compute()
e = np.abs(np_y - np_y_pred)
np_res = np.median(e)
assert pytest.approx(res) == np_res
rank = idist.get_rank()
for i in range(3):
torch.manual_seed(10 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device):
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 105
y_true = torch.rand(size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(size=(n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
m = MedianAbsoluteError(device=metric_device)
m.attach(engine, "mae")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "mae" in engine.state.metrics
res = engine.state.metrics["mae"]
np_y_true = y_true.cpu().numpy().ravel()
np_y_preds = y_preds.cpu().numpy().ravel()
e = np.abs(np_y_true - np_y_preds)
np_res = np.median(e)
assert pytest.approx(res) == np_res
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
rank = idist.get_rank()
for i in range(2):
torch.manual_seed(10 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
# ---------------------------------------------------------------------------
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MedianRelativeAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = MedianRelativeAbsoluteError()
with pytest.raises(
NotComputableError, match=r"EpochMetric must have at least one example before it can be computed"
):
m.compute()
def test_wrong_input_shapes():
m = MedianRelativeAbsoluteError()
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
m.update((torch.rand(4, 1, 2), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Targets should be of shape"):
m.update((torch.rand(4, 1), torch.rand(4, 1, 2)))
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
m.update((torch.rand(4, 1, 2), torch.rand(4)))
with pytest.raises(ValueError, match=r"Targets should be of shape"):
m.update((torch.rand(4), torch.rand(4, 1, 2)))
def test_median_relative_absolute_error():
# See https://github.com/torch/torch7/pull/182
# For even number of elements, PyTorch returns middle element
# NumPy returns average of middle elements
# Size of dataset will be odd for these tests
size = 51
np_y_pred = np.random.rand(size)
np_y = np.random.rand(size)
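    # Reference: median of the relative absolute error |y - y_pred| / |y - mean(y)|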
np_median_absolute_relative_error = np.median(np.abs(np_y - np_y_pred) / np.abs(np_y - np_y.mean()))
m = MedianRelativeAbsoluteError()
y_pred = torch.from_numpy(np_y_pred)
y = torch.from_numpy(np_y)
m.reset()
m.update((y_pred, y))
assert np_median_absolute_relative_error == pytest.approx(m.compute())
def test_median_relative_absolute_error_2():
np.random.seed(1)
size = 105
np_y_pred = np.random.rand(size, 1)
np_y = np.random.rand(size, 1)
np.random.shuffle(np_y)
np_median_absolute_relative_error = np.median(np.abs(np_y - np_y_pred) / np.abs(np_y - np_y.mean()))
m = MedianRelativeAbsoluteError()
y_pred = torch.from_numpy(np_y_pred)
y = torch.from_numpy(np_y)
m.reset()
batch_size = 16
n_iters = size // batch_size + 1
for i in range(n_iters + 1):
idx = i * batch_size
m.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
assert np_median_absolute_relative_error == pytest.approx(m.compute())
def test_integration_median_relative_absolute_error_with_output_transform():
np.random.seed(1)
size = 105
np_y_pred = np.random.rand(size, 1)
np_y = np.random.rand(size, 1)
np.random.shuffle(np_y)
np_median_absolute_relative_error = np.median(np.abs(np_y - np_y_pred) / np.abs(np_y - np_y.mean()))
batch_size = 15
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = MedianRelativeAbsoluteError()
m.attach(engine, "median_absolute_relative_error")
data = list(range(size // batch_size))
median_absolute_relative_error = engine.run(data, max_epochs=1).metrics["median_absolute_relative_error"]
assert np_median_absolute_relative_error == pytest.approx(median_absolute_relative_error)
def _test_distrib_compute(device):
def _test(metric_device):
metric_device = torch.device(metric_device)
m = MedianRelativeAbsoluteError(device=metric_device)
torch.manual_seed(10 + rank)
size = 151
y_pred = torch.randint(1, 10, size=(size, 1), dtype=torch.double, device=device)
y = torch.randint(1, 10, size=(size, 1), dtype=torch.double, device=device)
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = y_pred.cpu().numpy().ravel()
np_y = y.cpu().numpy().ravel()
res = m.compute()
e = np.abs(np_y - np_y_pred) / np.abs(np_y - np_y.mean())
np_res = np.median(e)
assert pytest.approx(res) == np_res
rank = idist.get_rank()
for _ in range(3):
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device):
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
size = 151
y_true = torch.rand(size=(size,)).to(device)
y_preds = torch.rand(size=(size,)).to(device)
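        # Note: the slices step by `size`, so only the first engine iteration returns a non-empty batch; later slices are empty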
def update(engine, i):
return (
y_preds[i * size : (i + 1) * size],
y_true[i * size : (i + 1) * size],
)
engine = Engine(update)
m = MedianRelativeAbsoluteError(device=metric_device)
m.attach(engine, "mare")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_true = idist.all_gather(y_true)
y_preds = idist.all_gather(y_preds)
assert "mare" in engine.state.metrics
res = engine.state.metrics["mare"]
np_y_true = y_true.cpu().numpy().ravel()
np_y_preds = y_preds.cpu().numpy().ravel()
e = np.abs(np_y_true - np_y_preds) / np.abs(np_y_true - np_y_true.mean())
np_res = np.median(e)
assert pytest.approx(res) == np_res
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
rank = idist.get_rank()
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
# ---------------------------------------------------------------------------
from typing import Optional
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression._base import _BaseRegression, _torch_median
def test_base_regression_shapes():
class L1(_BaseRegression):
def reset(self):
self._sum_of_errors = 0.0
def _update(self, output):
y_pred, y = output
errors = torch.abs(y.view_as(y_pred) - y_pred)
self._sum_of_errors += torch.sum(errors).item()
def compute(self):
return self._sum_of_errors
m = L1()
with pytest.raises(ValueError, match=r"Input y_pred should have shape \(N,\) or \(N, 1\)"):
y = torch.rand([1, 1, 1])
m.update((y, y))
with pytest.raises(ValueError, match=r"Input y should have shape \(N,\) or \(N, 1\)"):
y = torch.rand([1, 1, 1])
m.update((torch.rand(1, 1), y))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(2), torch.rand(2, 1)))
with pytest.raises(TypeError, match=r"Input y_pred dtype should be float"):
y = torch.tensor([1, 1])
m.update((y, y))
with pytest.raises(TypeError, match=r"Input y dtype should be float"):
y = torch.tensor([1, 1])
m.update((y.float(), y))
@pytest.mark.parametrize("size", [100, 101, (30, 3), (31, 3)])
def test_torch_median_numpy(size, device: Optional[str] = None):
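    # _torch_median should match NumPy's median for both odd and even element counts (NumPy averages the two middle values when the count is even)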
data = torch.rand(size).to(device)
assert _torch_median(data) == np.median(data.cpu().numpy())
@pytest.mark.tpu
@pytest.mark.parametrize("size", [100, 101, (30, 3), (31, 3)])
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_on_even_size_xla(size):
device = "xla"
test_torch_median_numpy(size, device=device)
@pytest.mark.parametrize("size", [100, 101, (30, 3), (31, 3)])
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_on_even_size_gpu(size):
test_torch_median_numpy(size, device="cuda")
@pytest.mark.parametrize("size", [100, 101, (30, 3), (31, 3)])
def test_create_even_size_cpu(size):
test_torch_median_numpy(size, device="cpu")
# ---------------------------------------------------------------------------
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import FractionalBias
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = FractionalBias()
with pytest.raises(
NotComputableError, match=r"FractionalBias must have at least one example before it can be computed"
):
m.compute()
def test_wrong_input_shapes():
m = FractionalBias()
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4)))
def test_fractional_bias():
a = np.random.randn(4)
b = np.random.randn(4)
c = np.random.randn(4)
d = np.random.randn(4)
ground_truth = np.random.randn(4)
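    # Reference: fractional bias = mean(2 * (y - y_pred) / (y_pred + y)), accumulated over successive updates below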
m = FractionalBias()
m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))
np_sum = (2 * (ground_truth - a) / (a + ground_truth)).sum()
np_len = len(a)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(b), torch.from_numpy(ground_truth)))
np_sum += (2 * (ground_truth - b) / (b + ground_truth)).sum()
np_len += len(b)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(c), torch.from_numpy(ground_truth)))
np_sum += (2 * (ground_truth - c) / (c + ground_truth)).sum()
np_len += len(c)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(d), torch.from_numpy(ground_truth)))
np_sum += (2 * (ground_truth - d) / (d + ground_truth)).sum()
np_len += len(d)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
def test_integration():
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = FractionalBias()
m.attach(engine, "fb")
np_y = y.double().numpy().ravel()
np_y_pred = y_pred.double().numpy().ravel()
data = list(range(y_pred.shape[0] // batch_size))
fb = engine.run(data, max_epochs=1).metrics["fb"]
np_sum = (2 * (np_y - np_y_pred) / (np_y_pred + np_y)).sum()
np_len = len(y_pred)
np_ans = np_sum / np_len
assert np_ans == pytest.approx(fb)
def get_test_cases():
test_cases = [
(torch.rand(size=(100,)), torch.rand(size=(100,)), 10),
(torch.rand(size=(100, 1)), torch.rand(size=(100, 1)), 20),
]
return test_cases
for _ in range(5):
        # check multiple random inputs as random exact occurrences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def test_error_is_not_nan():
m = FractionalBias()
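    # y_pred and y are all zeros, so a naive 2 * (y - y_pred) / (y_pred + y) would hit 0/0; the metric is expected to
    # guard against this (the NumPy references in the distributed tests below add 1e-30 to the denominator)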
m.update((torch.zeros(4), torch.zeros(4)))
assert not (torch.isnan(m._sum_of_errors).any() or torch.isinf(m._sum_of_errors).any()), m._sum_of_errors
def _test_distrib_compute(device, tol=1e-5):
rank = idist.get_rank()
def _test(metric_device):
metric_device = torch.device(metric_device)
m = FractionalBias(device=metric_device)
y_pred = torch.randint(0, 10, size=(10,), device=device).float()
y = torch.randint(0, 10, size=(10,), device=device).float()
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = y_pred.cpu().numpy()
np_y = y.cpu().numpy()
res = m.compute()
np_sum = (2 * (np_y - np_y_pred) / (np_y_pred + np_y + 1e-30)).sum()
np_len = len(y_pred)
np_ans = np_sum / np_len
assert np_ans == pytest.approx(res, rel=tol)
for i in range(3):
torch.manual_seed(10 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device, tol=1e-5):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
y_true = torch.rand(size=(n_iters * batch_size,), dtype=torch.double).to(device)
y_preds = torch.rand(size=(n_iters * batch_size,), dtype=torch.double).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
m = FractionalBias(device=metric_device)
m.attach(engine, "fb")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "fb" in engine.state.metrics
res = engine.state.metrics["fb"]
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
np_y_true = y_true.cpu().numpy()
np_y_preds = y_preds.cpu().numpy()
np_sum = (2 * (np_y_true - np_y_preds) / (np_y_preds + np_y_true + 1e-30)).sum()
np_len = len(y_preds)
np_ans = np_sum / np_len
assert pytest.approx(res, rel=tol) == np_ans
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device, tol=1e-4)
_test_distrib_integration(device, tol=1e-4)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device, tol=1e-4)
_test_distrib_integration(device, tol=1e-4)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
# ---------------------------------------------------------------------------
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MeanError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = MeanError()
with pytest.raises(NotComputableError, match=r"MeanError must have at least one example before it can be computed"):
m.compute()
def test_wrong_input_shapes():
m = MeanError()
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4)))
def test_mean_error():
a = np.random.randn(4)
b = np.random.randn(4)
c = np.random.randn(4)
d = np.random.randn(4)
ground_truth = np.random.randn(4)
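    # Reference: mean error = mean(y - y_pred), accumulated over successive updates below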
m = MeanError()
m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))
np_sum = (ground_truth - a).sum()
np_len = len(a)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(b), torch.from_numpy(ground_truth)))
np_sum += (ground_truth - b).sum()
np_len += len(b)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(c), torch.from_numpy(ground_truth)))
np_sum += (ground_truth - c).sum()
np_len += len(c)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(d), torch.from_numpy(ground_truth)))
np_sum += (ground_truth - d).sum()
np_len += len(d)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
def test_integration():
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = MeanError()
m.attach(engine, "me")
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
data = list(range(y_pred.shape[0] // batch_size))
me = engine.run(data, max_epochs=1).metrics["me"]
np_sum = (np_y - np_y_pred).sum()
np_len = len(np_y_pred)
np_ans = np_sum / np_len
assert np_ans == pytest.approx(me, rel=1e-4)
def get_test_cases():
test_cases = [
(torch.rand(size=(50,)), torch.rand(size=(50,)), 1),
(torch.rand(size=(50, 1)), torch.rand(size=(50, 1)), 10),
]
return test_cases
for _ in range(5):
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def _test_distrib_compute(device):
rank = idist.get_rank()
def _test(metric_device):
metric_device = torch.device(metric_device)
m = MeanError(device=metric_device)
y_pred = torch.rand(size=(100,), device=device)
y = torch.rand(size=(100,), device=device)
m.update((y_pred, y))
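        # gather y_pred, y from all processes to build the global NumPy reference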
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y = y.cpu().numpy()
np_y_pred = y_pred.cpu().numpy()
np_sum = (np_y - np_y_pred).sum()
np_len = len(np_y_pred)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
for i in range(3):
torch.manual_seed(10 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device, tol=1e-5):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
y_true = torch.rand(size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(size=(n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
me = MeanError(device=metric_device)
me.attach(engine, "me")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "me" in engine.state.metrics
res = engine.state.metrics["me"]
np_y = y_true.cpu().numpy()
np_y_pred = y_preds.cpu().numpy()
np_sum = (np_y - np_y_pred).sum()
np_len = len(np_y_pred)
np_ans = np_sum / np_len
assert pytest.approx(res, rel=tol) == np_ans
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
# ---------------------------------------------------------------------------
import os
import numpy as np
import pytest
import torch
from pytest import approx, raises
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MeanAbsoluteRelativeError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_wrong_input_shapes():
m = MeanAbsoluteRelativeError()
with raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4)))
def test_mean_absolute_relative_error():
a = torch.rand(4)
b = torch.rand(4)
c = torch.rand(4)
d = torch.rand(4)
ground_truth = torch.rand(4)
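    # Reference: MARE = sum(|y - y_pred| / |y|) / n_samples, accumulated over successive updates below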
m = MeanAbsoluteRelativeError()
m.update((a, ground_truth))
abs_error_a = torch.sum(torch.abs(ground_truth - a) / torch.abs(ground_truth))
num_samples_a = a.size()[0]
sum_error = abs_error_a
sum_samples = num_samples_a
MARE_a = sum_error / sum_samples
assert m.compute() == approx(MARE_a.item())
m.update((b, ground_truth))
abs_error_b = torch.sum(torch.abs(ground_truth - b) / torch.abs(ground_truth))
num_samples_b = b.size()[0]
sum_error += abs_error_b
sum_samples += num_samples_b
MARE_b = sum_error / sum_samples
assert m.compute() == approx(MARE_b.item())
m.update((c, ground_truth))
abs_error_c = torch.sum(torch.abs(ground_truth - c) / torch.abs(ground_truth))
num_samples_c = c.size()[0]
sum_error += abs_error_c
sum_samples += num_samples_c
MARE_c = sum_error / sum_samples
assert m.compute() == approx(MARE_c.item())
m.update((d, ground_truth))
abs_error_d = torch.sum(torch.abs(ground_truth - d) / torch.abs(ground_truth))
num_samples_d = d.size()[0]
sum_error += abs_error_d
sum_samples += num_samples_d
MARE_d = sum_error / sum_samples
assert m.compute() == approx(MARE_d.item())
def test_zero_div():
a = torch.tensor([2.0, -1.0, -1.0, 2.0])
ground_truth = torch.tensor([0.0, 0.5, 0.2, 1.0])
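    # ground_truth contains a zero, so the relative error is undefined; update is expected to raise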
m = MeanAbsoluteRelativeError()
with raises(NotComputableError, match=r"The ground truth has 0"):
m.update((a, ground_truth))
def test_zero_sample():
m = MeanAbsoluteRelativeError()
with raises(NotComputableError, match=r"MeanAbsoluteRelativeError must have at least one sample"):
m.compute()
def test_integration():
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = MeanAbsoluteRelativeError()
m.attach(engine, "mare")
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
data = list(range(y_pred.shape[0] // batch_size))
mare = engine.run(data, max_epochs=1).metrics["mare"]
abs_error = np.sum(abs(np_y - np_y_pred) / abs(np_y))
num_samples = len(y_pred)
res = abs_error / num_samples
assert res == approx(mare)
def get_test_cases():
test_cases = [
(torch.rand(size=(100,)), torch.rand(size=(100,)), 10),
(torch.rand(size=(100, 1)), torch.rand(size=(100, 1)), 20),
]
return test_cases
for _ in range(5):
        # check multiple random inputs as random exact occurrences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def _test_distrib_compute(device):
rank = idist.get_rank()
def _test(metric_device):
metric_device = torch.device(metric_device)
m = MeanAbsoluteRelativeError(device=metric_device)
y_pred = torch.randint(1, 11, size=(10,), device=device).float()
y = torch.randint(1, 11, size=(10,), device=device).float()
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = y_pred.cpu().numpy()
np_y = y.cpu().numpy()
res = m.compute()
abs_error = np.sum(abs(np_y - np_y_pred) / abs(np_y))
num_samples = len(y_pred)
np_res = abs_error / num_samples
assert np_res == approx(res)
for i in range(3):
torch.manual_seed(10 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
y_true = torch.rand(size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(size=(n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
m = MeanAbsoluteRelativeError(device=metric_device)
m.attach(engine, "mare")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "mare" in engine.state.metrics
mare = engine.state.metrics["mare"]
np_y_true = y_true.cpu().numpy()
np_y_preds = y_preds.cpu().numpy()
abs_error = np.sum(abs(np_y_true - np_y_preds) / abs(np_y_true))
num_samples = len(y_preds)
np_res = abs_error / num_samples
assert approx(mare) == np_res
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
# ---------------------------------------------------------------------------
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MaximumAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = MaximumAbsoluteError()
with pytest.raises(
NotComputableError, match=r"MaximumAbsoluteError must have at least one example before it can be computed"
):
m.compute()
def test_wrong_input_shapes():
m = MaximumAbsoluteError()
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4)))
def test_maximum_absolute_error():
a = np.random.randn(4)
b = np.random.randn(4)
c = np.random.randn(4)
d = np.random.randn(4)
ground_truth = np.random.randn(4)
m = MaximumAbsoluteError()
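    # np_ans tracks the running maximum of |y_pred - y| across updates; -1 is a safe initial value since absolute errors are non-negative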
np_ans = -1
m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))
np_max = np.max(np.abs((a - ground_truth)))
np_ans = np_max if np_max > np_ans else np_ans
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(b), torch.from_numpy(ground_truth)))
np_max = np.max(np.abs((b - ground_truth)))
np_ans = np_max if np_max > np_ans else np_ans
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(c), torch.from_numpy(ground_truth)))
np_max = np.max(np.abs((c - ground_truth)))
np_ans = np_max if np_max > np_ans else np_ans
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(d), torch.from_numpy(ground_truth)))
np_max = np.max(np.abs((d - ground_truth)))
np_ans = np_max if np_max > np_ans else np_ans
assert m.compute() == pytest.approx(np_ans)
def test_integration():
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = MaximumAbsoluteError()
m.attach(engine, "mae")
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
data = list(range(y_pred.shape[0] // batch_size))
mae = engine.run(data, max_epochs=1).metrics["mae"]
np_max = np.max(np.abs((np_y_pred - np_y)))
assert np_max == pytest.approx(mae)
def get_test_cases():
test_cases = [
(torch.rand(size=(100,)), torch.rand(size=(100,)), 10),
(torch.rand(size=(100, 1)), torch.rand(size=(100, 1)), 20),
]
return test_cases
for _ in range(5):
# check multiple random inputs, as exact coincidences of random values are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def _test_distrib_compute(device):
rank = idist.get_rank()
def _test(metric_device):
metric_device = torch.device(metric_device)
m = MaximumAbsoluteError(device=metric_device)
y_pred = torch.randint(0, 10, size=(10,), device=device).float()
y = torch.randint(0, 10, size=(10,), device=device).float()
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
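# the reference value is computed on tensors gathered from all workers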
np_y_pred = y_pred.cpu().numpy()
np_y = y.cpu().numpy()
res = m.compute()
np_max = np.max(np.abs((np_y_pred - np_y)))
assert np_max == pytest.approx(res)
for i in range(3):
torch.manual_seed(10 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
y_true = torch.rand(size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(size=(n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
m = MaximumAbsoluteError(device=metric_device)
m.attach(engine, "mae")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "mae" in engine.state.metrics
res = engine.state.metrics["mae"]
np_y_true = y_true.cpu().numpy()
np_y_preds = y_preds.cpu().numpy()
np_max = np.max(np.abs((np_y_preds - np_y_true)))
assert pytest.approx(res) == np_max
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
from sklearn.metrics import r2_score
import ignite.distributed as idist
from ignite.contrib.metrics.regression import R2Score
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = R2Score()
with pytest.raises(NotComputableError, match=r"R2Score must have at least one example before it can be computed"):
m.compute()
def test_wrong_input_shapes():
m = R2Score()
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4)))
def test_r2_score():
size = 51
np_y_pred = np.random.rand(size)
np_y = np.random.rand(size)
m = R2Score()
y_pred = torch.from_numpy(np_y_pred)
y = torch.from_numpy(np_y)
m.reset()
m.update((y_pred, y))
assert r2_score(np_y, np_y_pred) == pytest.approx(m.compute())
def test_r2_score_2():
np.random.seed(1)
size = 105
np_y_pred = np.random.rand(size, 1)
np_y = np.random.rand(size, 1)
np.random.shuffle(np_y)
m = R2Score()
y_pred = torch.from_numpy(np_y_pred)
y = torch.from_numpy(np_y)
m.reset()
batch_size = 16
n_iters = size // batch_size + 1
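# feed the data in batches; the accumulated result must match sklearn's r2_score on the full arrays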
for i in range(n_iters):
idx = i * batch_size
m.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
assert r2_score(np_y, np_y_pred) == pytest.approx(m.compute())
def test_integration_r2_score():
np.random.seed(1)
size = 105
np_y_pred = np.random.rand(size, 1)
np_y = np.random.rand(size, 1)
np.random.shuffle(np_y)
batch_size = 15
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = R2Score()
m.attach(engine, "r2_score")
data = list(range(size // batch_size))
r_squared = engine.run(data, max_epochs=1).metrics["r2_score"]
assert r2_score(np_y, np_y_pred) == pytest.approx(r_squared)
def _test_distrib_compute(device, tol=1e-6):
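# tol is relaxed by callers running on backends with lower numerical precision (e.g. XLA)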
rank = idist.get_rank()
def _test(metric_device):
metric_device = torch.device(metric_device)
m = R2Score(device=metric_device)
y_pred = torch.randint(0, 10, size=(10,), device=device).float()
y = torch.randint(0, 10, size=(10,), device=device).float()
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = y_pred.cpu().numpy()
np_y = y.cpu().numpy()
res = m.compute()
assert r2_score(np_y, np_y_pred) == pytest.approx(res, abs=tol)
for i in range(3):
torch.manual_seed(10 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
y_true = torch.randint(0, 10, size=(n_iters * batch_size,)).to(device).float()
y_preds = torch.randint(0, 10, size=(n_iters * batch_size,)).to(device).float()
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
r2 = R2Score(device=metric_device)
r2.attach(engine, "r2")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "r2" in engine.state.metrics
res = engine.state.metrics["r2"]
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
true_res = r2_score(y_true.cpu().numpy(), y_preds.cpu().numpy())
assert pytest.approx(res) == true_res
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device, tol=1e-3)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device, tol=1e-3)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import sys
from unittest.mock import call, MagicMock
import pytest
import torch
import torch.nn as nn
from torch.utils.data.distributed import DistributedSampler
import ignite.contrib.handlers as handlers
import ignite.distributed as idist
from ignite.contrib.engines.common import (
_setup_logging,
add_early_stopping_by_val_score,
gen_save_best_models_by_val_score,
save_best_model_by_val_score,
setup_any_logging,
setup_clearml_logging,
setup_common_training_handlers,
setup_mlflow_logging,
setup_neptune_logging,
setup_plx_logging,
setup_tb_logging,
setup_trains_logging,
setup_visdom_logging,
setup_wandb_logging,
)
from ignite.engine import Engine, Events
from ignite.handlers import DiskSaver, TerminateOnNan
class DummyModel(nn.Module):
def __init__(self):
super(DummyModel, self).__init__()
self.net = nn.Linear(1, 1)
def forward(self, x):
return self.net(x)
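# Runs a minimal training loop wired up with setup_common_training_handlers and then
# checks the attached handlers, the saved checkpoint and the LR schedule.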
def _test_setup_common_training_handlers(
dirname,
device,
rank=0,
local_rank=0,
distributed=False,
lr_scheduler=None,
save_handler=None,
output_transform=lambda loss: loss,
):
lr = 0.01
step_size = 100
gamma = 0.5
num_iters = 100
num_epochs = 10
model = DummyModel().to(device)
if distributed and "cuda" in torch.device(device).type:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank)
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
if lr_scheduler is None:
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
elif isinstance(lr_scheduler, str) and lr_scheduler == "ignite|LRScheduler":
from ignite.contrib.handlers import LRScheduler
lr_scheduler = LRScheduler(torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma))
elif isinstance(lr_scheduler, str) and lr_scheduler == "ignite":
from ignite.contrib.handlers import PiecewiseLinear
milestones_values = [(0, 0.0), (step_size, lr), (num_iters * (num_epochs - 1), 0.0)]
lr_scheduler = PiecewiseLinear(optimizer, param_name="lr", milestones_values=milestones_values)
else:
raise ValueError(f"Unknown lr_scheduler: {lr_scheduler}")
def update_fn(engine, batch):
optimizer.zero_grad()
x = torch.tensor([batch], requires_grad=True, device=device)
y_pred = model(x)
loss = y_pred.mean()
loss.backward()
optimizer.step()
return output_transform(loss)
train_sampler = None
if distributed and idist.get_world_size() > 1:
train_sampler = MagicMock(spec=DistributedSampler)
train_sampler.set_epoch = MagicMock()
trainer = Engine(update_fn)
setup_common_training_handlers(
trainer,
train_sampler=train_sampler,
to_save={"model": model, "optimizer": optimizer},
save_every_iters=75,
output_path=dirname,
save_handler=save_handler,
lr_scheduler=lr_scheduler,
with_gpu_stats=False,
output_names=["batch_loss"],
with_pbars=True,
with_pbar_on_iters=True,
log_every_iters=50,
)
data = [i * 0.1 for i in range(num_iters)]
trainer.run(data, max_epochs=num_epochs)
# check handlers
handlers = trainer._event_handlers[Events.ITERATION_COMPLETED]
for cls in [
TerminateOnNan,
]:
assert any([isinstance(h[0], cls) for h in handlers]), f"{handlers}"
assert "batch_loss" in trainer.state.metrics
# Check saved checkpoint
if rank == 0:
if save_handler is not None:
dirname = save_handler.dirname
checkpoints = list(os.listdir(dirname))
assert len(checkpoints) == 1
for v in [
"training_checkpoint",
]:
assert any([v in c for c in checkpoints])
# Check LR scheduling
assert optimizer.param_groups[0]["lr"] <= lr * gamma ** (
(num_iters * num_epochs - 1) // step_size
), f"{optimizer.param_groups[0]['lr']} vs {lr * gamma ** ((num_iters * num_epochs - 1) // step_size)}"
def test_asserts_setup_common_training_handlers():
trainer = Engine(lambda e, b: None)
with pytest.raises(
ValueError,
match=r"If to_save argument is provided then output_path or save_handler arguments should be also defined",
):
setup_common_training_handlers(trainer, to_save={})
with pytest.raises(ValueError, match=r"Arguments output_path and save_handler are mutually exclusive"):
setup_common_training_handlers(trainer, to_save={}, output_path="abc", save_handler=lambda c, f, m: None)
with pytest.warns(UserWarning, match=r"Argument train_sampler is a distributed sampler"):
train_sampler = MagicMock(spec=DistributedSampler)
setup_common_training_handlers(trainer, train_sampler=train_sampler)
if not torch.cuda.is_available():
with pytest.raises(RuntimeError, match=r"This contrib module requires available GPU"):
setup_common_training_handlers(trainer, with_gpu_stats=True)
with pytest.raises(TypeError, match=r"Unhandled type of update_function's output."):
trainer = Engine(lambda e, b: None)
setup_common_training_handlers(
trainer,
output_names=["loss"],
with_pbar_on_iters=False,
with_pbars=False,
with_gpu_stats=False,
stop_on_nan=False,
clear_cuda_cache=False,
)
trainer.run([1])
def test_no_warning_with_train_sampler(recwarn):
from torch.utils.data import RandomSampler
trainer = Engine(lambda e, b: None)
train_sampler = RandomSampler([0, 1, 2])
setup_common_training_handlers(trainer, train_sampler=train_sampler)
assert len(recwarn) == 0, recwarn.pop()
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("WORLD_SIZE" not in os.environ, reason="Should have more than 1 worker")
def test_assert_setup_common_training_handlers_wrong_train_sampler(distributed_context_single_node_gloo):
trainer = Engine(lambda e, b: None)
from torch.utils.data.sampler import RandomSampler
with pytest.raises(TypeError, match=r"Train sampler should be torch DistributedSampler"):
train_sampler = RandomSampler([0, 1, 2, 3])
setup_common_training_handlers(trainer, train_sampler)
def test_setup_common_training_handlers(dirname, capsys):
_test_setup_common_training_handlers(dirname, device="cpu")
# Check epoch-wise pbar
captured = capsys.readouterr()
out = captured.err.split("\r")
out = list(map(lambda x: x.strip(), out))
out = list(filter(None, out))
assert "Epoch" in out[-1] or "Epoch" in out[-2], f"{out[-2]}, {out[-1]}"
_test_setup_common_training_handlers(dirname, device="cpu", output_transform=lambda loss: [loss])
# Check epoch-wise pbar
captured = capsys.readouterr()
out = captured.err.split("\r")
out = list(map(lambda x: x.strip(), out))
out = list(filter(None, out))
assert "Epoch" in out[-1] or "Epoch" in out[-2], f"{out[-2]}, {out[-1]}"
_test_setup_common_training_handlers(dirname, device="cpu", output_transform=lambda loss: {"batch_loss": loss})
# Check epoch-wise pbar
captured = capsys.readouterr()
out = captured.err.split("\r")
out = list(map(lambda x: x.strip(), out))
out = list(filter(None, out))
assert "Epoch" in out[-1] or "Epoch" in out[-2], f"{out[-2]}, {out[-1]}"
def test_setup_common_training_handlers_using_save_handler(dirname, capsys):
save_handler = DiskSaver(dirname=dirname, require_empty=False)
_test_setup_common_training_handlers(dirname=None, device="cpu", save_handler=save_handler)
# Check epoch-wise pbar
captured = capsys.readouterr()
out = captured.err.split("\r")
out = list(map(lambda x: x.strip(), out))
out = list(filter(None, out))
assert "Epoch" in out[-1] or "Epoch" in out[-2], f"{out[-2]}, {out[-1]}"
def test_save_best_model_by_val_score(dirname):
acc_scores = [0.1, 0.2, 0.3, 0.4, 0.3, 0.5, 0.6, 0.61, 0.7, 0.5]
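# the two best scores are reached at epochs 8 (0.61) and 9 (0.7),
# so only these two checkpoints should remain on disk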
def setup_trainer():
trainer = Engine(lambda e, b: None)
evaluator = Engine(lambda e, b: None)
model = DummyModel()
@trainer.on(Events.EPOCH_COMPLETED)
def validate(engine):
evaluator.run([0, 1])
@evaluator.on(Events.EPOCH_COMPLETED)
def set_eval_metric(engine):
acc = acc_scores[trainer.state.epoch - 1]
engine.state.metrics = {"acc": acc, "loss": 1 - acc}
return trainer, evaluator, model
trainer, evaluator, model = setup_trainer()
save_best_model_by_val_score(dirname, evaluator, model, metric_name="acc", n_saved=2, trainer=trainer)
trainer.run([0, 1], max_epochs=len(acc_scores))
assert set(os.listdir(dirname)) == {"best_model_8_val_acc=0.6100.pt", "best_model_9_val_acc=0.7000.pt"}
for fname in os.listdir(dirname):
os.unlink(f"{dirname}/{fname}")
trainer, evaluator, model = setup_trainer()
save_best_model_by_val_score(
dirname, evaluator, model, metric_name="loss", n_saved=2, trainer=trainer, score_sign=-1.0
)
trainer.run([0, 1], max_epochs=len(acc_scores))
assert set(os.listdir(dirname)) == {"best_model_8_val_loss=-0.3900.pt", "best_model_9_val_loss=-0.3000.pt"}
def test_gen_save_best_models_by_val_score():
acc_scores = [0.1, 0.2, 0.3, 0.4, 0.3, 0.5, 0.6, 0.61, 0.7, 0.5]
loss_scores = [0.9, 0.8, 0.7, 0.6, 0.7, 0.5, 0.4, 0.39, 0.3, 0.5]
def setup_trainer():
trainer = Engine(lambda e, b: None)
evaluator = Engine(lambda e, b: None)
model = DummyModel()
@trainer.on(Events.EPOCH_COMPLETED)
def validate(engine):
evaluator.run([0, 1])
@evaluator.on(Events.EPOCH_COMPLETED)
def set_eval_metric(engine):
acc = acc_scores[trainer.state.epoch - 1]
loss = loss_scores[trainer.state.epoch - 1]
engine.state.metrics = {"acc": acc, "loss": loss}
return trainer, evaluator, model
trainer, evaluator, model = setup_trainer()
save_handler = MagicMock()
gen_save_best_models_by_val_score(
save_handler, evaluator, {"a": model, "b": model}, metric_name="acc", n_saved=2, trainer=trainer
)
trainer.run([0, 1], max_epochs=len(acc_scores))
assert save_handler.call_count == len(acc_scores) - 2 # 2 score values (0.3 and 0.5) are not the best
obj_to_save = {"a": model.state_dict(), "b": model.state_dict()}
save_handler.assert_has_calls(
[
call(
obj_to_save,
f"best_checkpoint_{e}_val_acc={p:.4f}.pt",
dict([("basename", "best_checkpoint"), ("score_name", "val_acc"), ("priority", p)]),
)
for e, p in zip([1, 2, 3, 4, 6, 7, 8, 9], [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.61, 0.7])
],
any_order=True,
)
trainer, evaluator, model = setup_trainer()
save_handler = MagicMock()
gen_save_best_models_by_val_score(
save_handler,
evaluator,
{"a": model, "b": model},
metric_name="loss",
n_saved=2,
trainer=trainer,
score_sign=-1.0,
)
trainer.run([0, 1], max_epochs=len(acc_scores))
assert save_handler.call_count == len(acc_scores) - 2 # 2 score values (-0.7 and -0.5) are not the best
obj_to_save = {"a": model.state_dict(), "b": model.state_dict()}
save_handler.assert_has_calls(
[
call(
obj_to_save,
f"best_checkpoint_{e}_val_loss={p:.4f}.pt",
dict([("basename", "best_checkpoint"), ("score_name", "val_loss"), ("priority", p)]),
)
for e, p in zip([1, 2, 3, 4, 6, 7, 8, 9], [-0.9, -0.8, -0.7, -0.6, -0.5, -0.4, -0.39, -0.3])
],
any_order=True,
)
def test_add_early_stopping_by_val_score():
acc_scores = [0.1, 0.2, 0.3, 0.4, 0.3, 0.3, 0.2, 0.1, 0.1, 0.0]
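# accuracy peaks at epoch 4 (0.4); with patience=3 training is expected to stop at epoch 7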
def setup_trainer():
trainer = Engine(lambda e, b: None)
evaluator = Engine(lambda e, b: None)
@trainer.on(Events.EPOCH_COMPLETED)
def validate(engine):
evaluator.run([0, 1])
@evaluator.on(Events.EPOCH_COMPLETED)
def set_eval_metric(engine):
acc = acc_scores[trainer.state.epoch - 1]
engine.state.metrics = {"acc": acc, "loss": 1 - acc}
return trainer, evaluator
trainer, evaluator = setup_trainer()
add_early_stopping_by_val_score(patience=3, evaluator=evaluator, trainer=trainer, metric_name="acc")
state = trainer.run([0, 1], max_epochs=len(acc_scores))
assert state.epoch == 7
trainer, evaluator = setup_trainer()
add_early_stopping_by_val_score(
patience=3, evaluator=evaluator, trainer=trainer, metric_name="loss", score_sign=-1.0
)
state = trainer.run([0, 1], max_epochs=len(acc_scores))
assert state.epoch == 7
def test_deprecated_setup_any_logging():
with pytest.raises(DeprecationWarning, match=r"deprecated since version 0.4.0"):
setup_any_logging(None, None, None, None, None, None)
def test__setup_logging_wrong_args():
with pytest.raises(TypeError, match=r"Argument optimizers should be either a single optimizer or"):
_setup_logging(MagicMock(), MagicMock(), "abc", MagicMock(), 1)
with pytest.raises(TypeError, match=r"Argument evaluators should be either a single engine or"):
_setup_logging(MagicMock(), MagicMock(), MagicMock(spec=torch.optim.SGD), "abc", 1)
def _test_setup_logging(
setup_logging_fn,
kwargs_dict,
output_handler_cls,
opt_params_handler_cls,
with_eval=True,
with_optim=True,
as_class=False,
log_every_iters=1,
):
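# Builds a trainer (optionally with an evaluator and an optimizer), attaches the logger created by
# setup_logging_fn, and asserts that output and optimizer-params handlers are registered on the expected events.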
trainer = Engine(lambda e, b: b)
evaluators = None
optimizers = None
if with_eval:
evaluator = Engine(lambda e, b: None)
acc_scores = [0.1, 0.2, 0.3, 0.4, 0.3, 0.3, 0.2, 0.1, 0.1, 0.0]
@trainer.on(Events.EPOCH_COMPLETED)
def validate(engine):
evaluator.run([0, 1])
@evaluator.on(Events.EPOCH_COMPLETED)
def set_eval_metric(engine):
engine.state.metrics = {"acc": acc_scores[trainer.state.epoch - 1]}
evaluators = {"validation": evaluator}
if as_class:
evaluators = evaluators["validation"]
if with_optim:
t = torch.tensor([0])
optimizers = {"optimizer": torch.optim.SGD([t], lr=0.01)}
if as_class:
optimizers = optimizers["optimizer"]
kwargs_dict["trainer"] = trainer
kwargs_dict["optimizers"] = optimizers
kwargs_dict["evaluators"] = evaluators
kwargs_dict["log_every_iters"] = log_every_iters
x_logger = setup_logging_fn(**kwargs_dict)
handlers = trainer._event_handlers[Events.ITERATION_COMPLETED]
for cls in [
output_handler_cls,
]:
assert any([isinstance(h[0], cls) for h in handlers]), f"{handlers}"
if with_optim:
handlers = trainer._event_handlers[Events.ITERATION_STARTED]
for cls in [
opt_params_handler_cls,
]:
assert any([isinstance(h[0], cls) for h in handlers]), f"{handlers}"
if with_eval:
handlers = evaluator._event_handlers[Events.COMPLETED]
for cls in [
output_handler_cls,
]:
assert any([isinstance(h[0], cls) for h in handlers]), f"{handlers}"
data = [0, 1, 2]
trainer.run(data, max_epochs=10)
if "output_path" in kwargs_dict:
tb_files = list(os.listdir(kwargs_dict["output_path"]))
assert len(tb_files) == 1
for v in [
"events",
]:
assert any([v in c for c in tb_files]), f"{tb_files}"
return x_logger
def test_setup_tb_logging(dirname):
tb_logger = _test_setup_logging(
setup_logging_fn=setup_tb_logging,
kwargs_dict={"output_path": dirname / "t1"},
output_handler_cls=handlers.tensorboard_logger.OutputHandler,
opt_params_handler_cls=handlers.tensorboard_logger.OptimizerParamsHandler,
with_eval=False,
with_optim=False,
)
tb_logger.close()
tb_logger = _test_setup_logging(
setup_logging_fn=setup_tb_logging,
kwargs_dict={"output_path": dirname / "t2"},
output_handler_cls=handlers.tensorboard_logger.OutputHandler,
opt_params_handler_cls=handlers.tensorboard_logger.OptimizerParamsHandler,
with_eval=True,
with_optim=True,
)
tb_logger.close()
tb_logger = _test_setup_logging(
setup_logging_fn=setup_tb_logging,
kwargs_dict={"output_path": dirname / "t3"},
output_handler_cls=handlers.tensorboard_logger.OutputHandler,
opt_params_handler_cls=handlers.tensorboard_logger.OptimizerParamsHandler,
with_eval=True,
with_optim=True,
as_class=True,
log_every_iters=None,
)
tb_logger.close()
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_setup_visdom_logging(visdom_offline_logfile):
vis_logger = _test_setup_logging(
setup_logging_fn=setup_visdom_logging,
kwargs_dict={"offline": True, "log_to_filename": visdom_offline_logfile},
output_handler_cls=handlers.visdom_logger.OutputHandler,
opt_params_handler_cls=handlers.visdom_logger.OptimizerParamsHandler,
with_eval=False,
with_optim=False,
)
vis_logger.close()
vis_logger = _test_setup_logging(
setup_logging_fn=setup_visdom_logging,
kwargs_dict={"offline": True, "log_to_filename": visdom_offline_logfile},
output_handler_cls=handlers.visdom_logger.OutputHandler,
opt_params_handler_cls=handlers.visdom_logger.OptimizerParamsHandler,
with_eval=True,
with_optim=True,
)
vis_logger.close()
def test_setup_plx_logging():
os.environ["POLYAXON_NO_OP"] = "1"
_test_setup_logging(
setup_logging_fn=setup_plx_logging,
kwargs_dict={},
output_handler_cls=handlers.polyaxon_logger.OutputHandler,
opt_params_handler_cls=handlers.polyaxon_logger.OptimizerParamsHandler,
with_eval=False,
with_optim=False,
)
_test_setup_logging(
setup_logging_fn=setup_plx_logging,
kwargs_dict={},
output_handler_cls=handlers.polyaxon_logger.OutputHandler,
opt_params_handler_cls=handlers.polyaxon_logger.OptimizerParamsHandler,
with_eval=True,
with_optim=True,
)
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_setup_mlflow_logging(dirname):
mlf_logger = _test_setup_logging(
setup_logging_fn=setup_mlflow_logging,
kwargs_dict={"tracking_uri": str(dirname / "p1")},
output_handler_cls=handlers.mlflow_logger.OutputHandler,
opt_params_handler_cls=handlers.mlflow_logger.OptimizerParamsHandler,
with_eval=False,
with_optim=False,
)
mlf_logger.close()
mlf_logger = _test_setup_logging(
setup_logging_fn=setup_mlflow_logging,
kwargs_dict={"tracking_uri": str(dirname / "p2")},
output_handler_cls=handlers.mlflow_logger.OutputHandler,
opt_params_handler_cls=handlers.mlflow_logger.OptimizerParamsHandler,
with_eval=True,
with_optim=True,
)
mlf_logger.close()
def test_setup_wandb_logging(dirname):
from unittest.mock import patch
with patch("ignite.contrib.engines.common.WandBLogger") as _:
setup_wandb_logging(MagicMock())
def test_setup_clearml_logging():
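# Bypass mode prevents the logger from contacting a real ClearML server;
# creating the logger is then expected to warn about it.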
handlers.clearml_logger.ClearMLLogger.set_bypass_mode(True)
with pytest.warns(UserWarning, match=r"running in bypass mode"):
clearml_logger = _test_setup_logging(
setup_logging_fn=setup_clearml_logging,
kwargs_dict={},
output_handler_cls=handlers.clearml_logger.OutputHandler,
opt_params_handler_cls=handlers.clearml_logger.OptimizerParamsHandler,
with_eval=False,
with_optim=False,
)
clearml_logger.close()
clearml_logger = _test_setup_logging(
setup_logging_fn=setup_clearml_logging,
kwargs_dict={},
output_handler_cls=handlers.clearml_logger.OutputHandler,
opt_params_handler_cls=handlers.clearml_logger.OptimizerParamsHandler,
with_eval=True,
with_optim=True,
)
clearml_logger.close()
clearml_logger = _test_setup_logging(
setup_logging_fn=setup_trains_logging,
kwargs_dict={},
output_handler_cls=handlers.clearml_logger.OutputHandler,
opt_params_handler_cls=handlers.clearml_logger.OptimizerParamsHandler,
with_eval=True,
with_optim=True,
)
clearml_logger.close()
with pytest.warns(UserWarning, match="setup_trains_logging was renamed to setup_clearml_logging"):
clearml_logger = _test_setup_logging(
setup_logging_fn=setup_trains_logging,
kwargs_dict={},
output_handler_cls=handlers.clearml_logger.OutputHandler,
opt_params_handler_cls=handlers.clearml_logger.OptimizerParamsHandler,
with_eval=True,
with_optim=True,
)
clearml_logger.close()
def test_setup_neptune_logging(dirname):
npt_logger = _test_setup_logging(
setup_logging_fn=setup_neptune_logging,
kwargs_dict={"mode": "offline"},
output_handler_cls=handlers.neptune_logger.OutputHandler,
opt_params_handler_cls=handlers.neptune_logger.OptimizerParamsHandler,
with_eval=False,
with_optim=False,
)
npt_logger.close()
npt_logger = _test_setup_logging(
setup_logging_fn=setup_neptune_logging,
kwargs_dict={"mode": "offline"},
output_handler_cls=handlers.neptune_logger.OutputHandler,
opt_params_handler_cls=handlers.neptune_logger.OptimizerParamsHandler,
with_eval=True,
with_optim=True,
)
npt_logger.close()
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(dirname, distributed_context_single_node_nccl):
local_rank = distributed_context_single_node_nccl["local_rank"]
device = idist.device()
_test_setup_common_training_handlers(dirname, device, rank=local_rank, local_rank=local_rank, distributed=True)
test_add_early_stopping_by_val_score()
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(dirname, distributed_context_single_node_gloo):
device = idist.device()
local_rank = distributed_context_single_node_gloo["local_rank"]
_test_setup_common_training_handlers(dirname, device, rank=local_rank, local_rank=local_rank, distributed=True)
_test_setup_common_training_handlers(
dirname, device, rank=local_rank, local_rank=local_rank, distributed=True, lr_scheduler="ignite|LRScheduler"
)
_test_setup_common_training_handlers(
dirname, device, rank=local_rank, local_rank=local_rank, distributed=True, lr_scheduler="ignite"
)
test_add_early_stopping_by_val_score()
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(dirname, distributed_context_multi_node_gloo):
device = idist.device()
rank = distributed_context_multi_node_gloo["rank"]
_test_setup_common_training_handlers(dirname, device, rank=rank)
test_add_early_stopping_by_val_score()
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(dirname, distributed_context_multi_node_nccl):
local_rank = distributed_context_multi_node_nccl["local_rank"]
rank = distributed_context_multi_node_nccl["rank"]
device = idist.device()
_test_setup_common_training_handlers(dirname, device, rank=rank, local_rank=local_rank, distributed=True)
test_add_early_stopping_by_val_score()
|
# coding: utf-8
import unittest.mock as mock
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from ignite.contrib.engines import create_supervised_tbptt_trainer, Tbptt_Events
from ignite.contrib.engines.tbptt import _detach_hidden
def test_detach_hidden_RNN():
# Create hidden state (a single tensor for an RNN)
X = torch.ones(2, 3, 4)
model = nn.RNN(4, 1)
_, hidden = model(X)
# Function to test
hidden_ = _detach_hidden(hidden)
assert hidden_.grad_fn is None # properly detached
assert (hidden == hidden_).all().item() == 1 # Equal values
def test_detach_hidden_LSTM():
# Create hidden state (a tuple of tensors for an LSTM)
X = torch.ones(2, 3, 4)
model = nn.LSTM(4, 1)
_, hidden = model(X)
# Function to test
hidden_ = _detach_hidden(hidden)
for h, h_ in zip(hidden, hidden_):
assert h_.grad_fn is None # properly detached
assert (h == h_).all().item() == 1 # Equal values
def test_detach_hidden_raise():
with pytest.raises(TypeError):
_detach_hidden(0)
@mock.patch("ignite.contrib.engines.tbptt._detach_hidden")
def test_create_supervised_tbptt_trainer_callcounts(mock_detach_hidden):
# Mocking objects
model = mock.MagicMock()
# Necessary to unpack output
model.return_value = (1, 1)
optimizer = mock.MagicMock()
loss = mock.MagicMock()
trainer = create_supervised_tbptt_trainer(model, optimizer, loss, tbtt_step=2)
# Add two mock handlers to the trainer to monitor that TBPTT events are
# emitted correctly
handle_started = mock.MagicMock()
trainer.add_event_handler(Tbptt_Events.TIME_ITERATION_STARTED, handle_started)
handle_completed = mock.MagicMock()
trainer.add_event_handler(Tbptt_Events.TIME_ITERATION_COMPLETED, handle_completed)
# Fake data
X = torch.ones(6, 2, 1)
y = torch.ones(6, 2, 1)
data = [(X, y)]
# Running trainer
trainer.run(data)
# Verifications
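# With a sequence of length 6 and tbtt_step=2 the batch is split into 3 time slices,
# hence 3 calls each to the TBPTT event handlers, the model, the loss and the optimizer
# (the hidden state is detached only between slices, hence 2 detach calls)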
assert handle_started.call_count == 3
assert handle_completed.call_count == 3
assert mock_detach_hidden.call_count == 2
assert model.call_count == 3
assert loss.call_count == 3
assert optimizer.zero_grad.call_count == 3
assert optimizer.step.call_count == 3
n_args_tuple = tuple(len(args) for args, kwargs in model.call_args_list)
assert n_args_tuple == (1, 2, 2)
def _test_create_supervised_tbptt_trainer(device):
# Defining dummy recurrent model with zero weights
model = nn.RNN(1, 1, bias=False)
model.to(device) # Move model before creating optimizer
for p in model.parameters():
p.data.zero_()
# Register a mock forward hook to monitor forward calls
forward_mock = mock.MagicMock()
forward_mock.return_value = None
model.register_forward_hook(forward_mock)
# Defining optimizer and trainer
optimizer = optim.SGD(model.parameters(), 1)
trainer = create_supervised_tbptt_trainer(model, optimizer, F.mse_loss, tbtt_step=2, device=device)
# Fake data
X = torch.ones(6, 2, 1)
y = torch.ones(6, 2, 1)
data = [(X, y)]
# Running trainer
trainer.run(data)
# If TBPTT were not used (a single gradient update), the hidden-to-hidden weight
# would stay zero; with TBPTT it must have been updated
assert not model.weight_hh_l0.item() == pytest.approx(0)
# Checking forward calls
assert forward_mock.call_count == 3
for i in range(3):
inputs = forward_mock.call_args_list[i][0][1]
if i == 0:
assert len(inputs) == 1
else:
assert len(inputs) == 2
x, h = inputs
assert h.is_leaf
def test_create_supervised_tbptt_trainer_with_cpu():
_test_create_supervised_tbptt_trainer("cpu")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_tbptt_trainer_on_cuda():
_test_create_supervised_tbptt_trainer("cuda")
|
from unittest.mock import Mock
import pytest
import torch
@pytest.fixture()
def norm_mock():
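# Mock wrapping a tensor-norm function so that calls can be inspected in tests.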
def norm(x: torch.Tensor):
return x.norm()
norm_mock = Mock(side_effect=norm, spec=norm)
norm_mock.configure_mock(**{"__name__": "norm"})
norm_mock.reset_mock()
return norm_mock
@pytest.fixture()
def dummy_model_factory():
class DummyModel(torch.nn.Module):
def __init__(self):
super(DummyModel, self).__init__()
self.fc1 = torch.nn.Linear(10, 10)
self.fc2 = torch.nn.Linear(12, 12)
self.fc1.weight.data.zero_()
self.fc1.bias.data.zero_()
self.fc2.weight.data.fill_(1.0)
self.fc2.bias.data.fill_(1.0)
def get_dummy_model(with_grads=True, with_frozen_layer=False, with_buffer=False):
model = DummyModel()
if with_grads:
model.fc2.weight.grad = torch.zeros_like(model.fc2.weight)
model.fc2.bias.grad = torch.zeros_like(model.fc2.bias)
if not with_frozen_layer:
model.fc1.weight.grad = torch.zeros_like(model.fc1.weight)
model.fc1.bias.grad = torch.zeros_like(model.fc1.bias)
if with_frozen_layer:
for param in model.fc1.parameters():
param.requires_grad = False
if with_buffer:
model.register_buffer("buffer1", torch.ones(1))
return model
return get_dummy_model
|
import sys
from unittest.mock import call, MagicMock
import pytest
import torch
from ignite.contrib.handlers.mlflow_logger import (
global_step_from_engine,
MLflowLogger,
OptimizerParamsHandler,
OutputHandler,
)
from ignite.engine import Engine, Events, State
def test_output_handler_with_wrong_logger_type():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(TypeError, match="Handler 'OutputHandler' works only with MLflowLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_output_handler_output_transform():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock(spec=MLflowLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.output = 12345
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with({"tag output": 12345}, step=123)
wrapper = OutputHandler("another_tag", output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=MLflowLogger)
mock_logger.log_metrics = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with({"another_tag loss": 12345}, step=123)
def test_output_handler_metric_names():
wrapper = OutputHandler("tag", metric_names=["a", "b", "c"])
mock_logger = MagicMock(spec=MLflowLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45, "c": torch.tensor(10.0)})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_called_once_with({"tag a": 12.23, "tag b": 23.45, "tag c": 10.0}, step=5)
wrapper = OutputHandler("tag", metric_names=["a"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": torch.tensor([0.0, 1.0, 2.0, 3.0])})
mock_engine.state.iteration = 5
mock_logger = MagicMock(spec=MLflowLogger)
mock_logger.log_metrics = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
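# a tensor-valued metric is expected to be flattened and logged element-wise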
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_has_calls(
[call({"tag a 0": 0.0, "tag a 1": 1.0, "tag a 2": 2.0, "tag a 3": 3.0}, step=5)], any_order=True
)
wrapper = OutputHandler("tag", metric_names=["a", "c"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 55.56, "c": "Some text"})
mock_engine.state.iteration = 7
mock_logger = MagicMock(spec=MLflowLogger)
mock_logger.log_metrics = MagicMock()
with pytest.warns(UserWarning):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_has_calls([call({"tag a": 55.56}, step=7)], any_order=True)
def test_output_handler_both():
wrapper = OutputHandler("tag", metric_names=["a", "b"], output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=MLflowLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_called_once_with({"tag a": 12.23, "tag b": 23.45, "tag loss": 12345}, step=5)
def test_output_handler_with_wrong_global_step_transform_output():
def global_step_transform(*args, **kwargs):
return "a"
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=MLflowLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
with pytest.raises(TypeError, match="global_step must be int"):
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
def test_output_handler_with_global_step_transform():
def global_step_transform(*args, **kwargs):
return 10
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=MLflowLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.log_metrics.assert_called_once_with({"tag loss": 12345}, step=10)
def test_output_handler_with_global_step_from_engine():
mock_another_engine = MagicMock()
mock_another_engine.state = State()
mock_another_engine.state.epoch = 10
mock_another_engine.state.output = 12.345
wrapper = OutputHandler(
"tag",
output_transform=lambda x: {"loss": x},
global_step_transform=global_step_from_engine(mock_another_engine),
)
mock_logger = MagicMock(spec=MLflowLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 1
mock_engine.state.output = 0.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_has_calls(
[call({"tag loss": mock_engine.state.output}, step=mock_another_engine.state.epoch)]
)
mock_another_engine.state.epoch = 11
mock_engine.state.output = 1.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.log_metrics.call_count == 2
mock_logger.log_metrics.assert_has_calls(
[call({"tag loss": mock_engine.state.output}, step=mock_another_engine.state.epoch)]
)
def test_output_handler_state_attrs():
wrapper = OutputHandler("tag", state_attributes=["alpha", "beta", "gamma"])
mock_logger = MagicMock(spec=MLflowLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 5
mock_engine.state.alpha = 3.899
mock_engine.state.beta = torch.tensor(12.21)
mock_engine.state.gamma = torch.tensor([21.0, 6.0])
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with(
{"tag alpha": 3.899, "tag beta": torch.tensor(12.21).item(), "tag gamma 0": 21.0, "tag gamma 1": 6.0}, step=5
)
def test_optimizer_params_handler_wrong_setup():
with pytest.raises(TypeError):
OptimizerParamsHandler(optimizer=None)
optimizer = MagicMock(spec=torch.optim.Optimizer)
handler = OptimizerParamsHandler(optimizer=optimizer)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(TypeError, match="Handler OptimizerParamsHandler works only with MLflowLogger"):
handler(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_optimizer_params():
optimizer = torch.optim.SGD([torch.tensor(0.0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
mock_logger = MagicMock(spec=MLflowLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with({"lr group_0": 0.01}, step=123)
wrapper = OptimizerParamsHandler(optimizer, param_name="lr", tag="generator")
mock_logger = MagicMock(spec=MLflowLogger)
mock_logger.log_metrics = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with({"generator lr group_0": 0.01}, step=123)
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_integration(dirname):
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
trainer = Engine(update_fn)
mlflow_logger = MLflowLogger(tracking_uri=str(dirname / "mlruns"))
true_values = []
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
v = global_step * 0.1
true_values.append(v)
logger.log_metrics({"test_value": v}, step=global_step)
mlflow_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
import mlflow
active_run = mlflow.active_run()
trainer.run(data, max_epochs=n_epochs)
mlflow_logger.close()
from mlflow.tracking import MlflowClient
client = MlflowClient(tracking_uri=str(dirname / "mlruns"))
stored_values = client.get_metric_history(active_run.info.run_id, "test_value")
for t, s in zip(true_values, stored_values):
assert pytest.approx(t) == s.value
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_integration_as_context_manager(dirname):
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
true_values = []
with MLflowLogger(str(dirname / "mlruns")) as mlflow_logger:
trainer = Engine(update_fn)
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
v = global_step * 0.1
true_values.append(v)
logger.log_metrics({"test_value": v}, step=global_step)
mlflow_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
import mlflow
active_run = mlflow.active_run()
trainer.run(data, max_epochs=n_epochs)
from mlflow.tracking import MlflowClient
client = MlflowClient(tracking_uri=str(dirname / "mlruns"))
stored_values = client.get_metric_history(active_run.info.run_id, "test_value")
for t, s in zip(true_values, stored_values):
assert pytest.approx(t) == s.value
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_mlflow_bad_metric_name_handling(dirname):
import mlflow
true_values = [123.0, 23.4, 333.4]
with MLflowLogger(str(dirname / "mlruns")) as mlflow_logger:
active_run = mlflow.active_run()
handler = OutputHandler(tag="training", metric_names="all")
engine = Engine(lambda e, b: None)
engine.state = State(metrics={"metric:0 in %": 123.0, "metric 0": 1000.0})
with pytest.warns(UserWarning, match=r"MLflowLogger output_handler encountered an invalid metric name"):
engine.state.epoch = 1
handler(engine, mlflow_logger, event_name=Events.EPOCH_COMPLETED)
for _, v in enumerate(true_values):
engine.state.epoch += 1
engine.state.metrics["metric 0"] = v
handler(engine, mlflow_logger, event_name=Events.EPOCH_COMPLETED)
from mlflow.tracking import MlflowClient
client = MlflowClient(tracking_uri=str(dirname / "mlruns"))
stored_values = client.get_metric_history(active_run.info.run_id, "training metric 0")
for t, s in zip([1000.0] + true_values, stored_values):
assert t == s.value
@pytest.mark.parametrize("no_site_packages", ["mlflow"], indirect=True)
def test_no_mlflow_client(no_site_packages):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires mlflow to be installed."):
MLflowLogger()
|
from typing import Any, Union
from unittest.mock import call, MagicMock
import pytest
import torch
from ignite.contrib.handlers.base_logger import (
BaseLogger,
BaseOptimizerParamsHandler,
BaseOutputHandler,
BaseWeightsHandler,
BaseWeightsScalarHandler,
)
from ignite.engine import Engine, Events, EventsList, State
from tests.ignite.contrib.handlers import MockFP16DeepSpeedZeroOptimizer
class DummyOutputHandler(BaseOutputHandler):
def __call__(self, *args, **kwargs):
pass
class DummyOptParamsHandler(BaseOptimizerParamsHandler):
def __call__(self, engine, logger, event_name, **kwargs):
tag_prefix = f"{self.tag}/" if self.tag else ""
params = {
f"{tag_prefix}{self.param_name}/group_{i}": float(param_group[self.param_name])
for i, param_group in enumerate(self.optimizer.param_groups)
}
return params
class DummyLogger(BaseLogger):
def _create_output_handler(self, *args, **kwargs):
return DummyOutputHandler(*args, **kwargs)
def _create_opt_params_handler(self, *args, **kwargs):
return DummyOptParamsHandler(*args, **kwargs)
class DummyWeightsHandler(BaseWeightsHandler):
def __call__(self, engine: Engine, logger: Any, event_name: Union[str, Events]) -> None:
pass
class DummyWeightsScalarHandler(BaseWeightsScalarHandler):
def __call__(self, engine: Engine, logger: Any, event_name: Union[str, Events]) -> None:
pass
def test_base_output_handler_wrong_setup():
with pytest.raises(TypeError, match="metric_names should be either a list or equal 'all'"):
DummyOutputHandler("tag", metric_names="abc", output_transform=None)
with pytest.raises(TypeError, match="output_transform should be a function"):
DummyOutputHandler("tag", metric_names=None, output_transform="abc")
with pytest.raises(ValueError, match="Either metric_names, output_transform or state_attributes should be defined"):
DummyOutputHandler("tag", None, None)
with pytest.raises(TypeError, match="global_step_transform should be a function"):
DummyOutputHandler("tag", metric_names=["loss"], global_step_transform="abc")
with pytest.raises(TypeError, match=r"Argument optimizer should be torch.optim.Optimizer"):
DummyOptParamsHandler({}, "lr")
def test_base_output_handler_setup_output_metrics():
engine = Engine(lambda engine, batch: None)
true_metrics = {"a": 0, "b": 1}
engine.state = State(metrics=true_metrics)
engine.state.output = 12345
# Only metric_names
handler = DummyOutputHandler("tag", metric_names=["a", "b"], output_transform=None)
metrics = handler._setup_output_metrics_state_attrs(engine=engine, key_tuple=False)
assert metrics == {"tag/a": 0, "tag/b": 1}
# Only metric_names with a warning
handler = DummyOutputHandler("tag", metric_names=["a", "c"], output_transform=None)
with pytest.warns(UserWarning):
metrics = handler._setup_output_metrics_state_attrs(engine=engine, key_tuple=False)
assert metrics == {"tag/a": 0}
# Only output as "output"
handler = DummyOutputHandler("tag", metric_names=None, output_transform=lambda x: x)
metrics = handler._setup_output_metrics_state_attrs(engine=engine, key_tuple=False)
assert metrics == {"tag/output": engine.state.output}
# Only output as "loss"
handler = DummyOutputHandler("tag", metric_names=None, output_transform=lambda x: {"loss": x})
metrics = handler._setup_output_metrics_state_attrs(engine=engine, key_tuple=False)
assert metrics == {"tag/loss": engine.state.output}
# Metrics and output
handler = DummyOutputHandler("tag", metric_names=["a", "b"], output_transform=lambda x: {"loss": x})
metrics = handler._setup_output_metrics_state_attrs(engine=engine, key_tuple=False)
assert metrics == {"tag/a": 0, "tag/b": 1, "tag/loss": engine.state.output}
# All metrics
handler = DummyOutputHandler("tag", metric_names="all", output_transform=None)
metrics = handler._setup_output_metrics_state_attrs(engine=engine, key_tuple=False)
assert metrics == {"tag/a": 0, "tag/b": 1}
def test_base_output_handler_setup_output_state_attrs():
engine = Engine(lambda engine, batch: None)
true_metrics = {"a": 0, "b": 1}
engine.state = State(metrics=true_metrics)
engine.state.alpha = 3.899
engine.state.beta = torch.tensor(5.499)
engine.state.gamma = torch.tensor([2106.0, 6.0])
engine.state.output = 12345
# Only State Attributes
handler = DummyOutputHandler(
tag="tag", metric_names=None, output_transform=None, state_attributes=["alpha", "beta", "gamma"]
)
state_attrs = handler._setup_output_metrics_state_attrs(engine=engine, key_tuple=False)
assert state_attrs == {
"tag/alpha": 3.899,
"tag/beta": torch.tensor(5.499),
"tag/gamma/0": 2106.0,
"tag/gamma/1": 6.0,
}
# Metrics and Attributes
handler = DummyOutputHandler(
tag="tag", metric_names=["a", "b"], output_transform=None, state_attributes=["alpha", "beta", "gamma"]
)
state_attrs = handler._setup_output_metrics_state_attrs(engine=engine, key_tuple=False)
assert state_attrs == {
"tag/a": 0,
"tag/b": 1,
"tag/alpha": 3.899,
"tag/beta": torch.tensor(5.499),
"tag/gamma/0": 2106.0,
"tag/gamma/1": 6.0,
}
# Metrics, Attributes and output
handler = DummyOutputHandler(
tag="tag",
metric_names="all",
output_transform=lambda x: {"loss": x},
state_attributes=["alpha", "beta", "gamma"],
)
state_attrs = handler._setup_output_metrics_state_attrs(engine=engine, key_tuple=False)
assert state_attrs == {
"tag/a": 0,
"tag/b": 1,
"tag/alpha": 3.899,
"tag/beta": torch.tensor(5.499),
"tag/gamma/0": 2106.0,
"tag/gamma/1": 6.0,
"tag/loss": engine.state.output,
}
def test_opt_params_handler_on_non_torch_optimizers():
tensor = torch.zeros([1], requires_grad=True)
base_optimizer = torch.optim.SGD([tensor], lr=0.1234)
optimizer = MockFP16DeepSpeedZeroOptimizer(base_optimizer)
handler = DummyOptParamsHandler(optimizer=optimizer, param_name="lr")
res = handler(engine=None, logger=None, event_name=None)
assert isinstance(res, dict)
assert "lr/group_0" in res and res["lr/group_0"] == 0.1234
@pytest.mark.parametrize(
"event, n_calls, kwargs",
[
(Events.ITERATION_STARTED, 50 * 5, {"a": 0}),
(Events.ITERATION_COMPLETED, 50 * 5, {}),
(Events.EPOCH_STARTED, 5, {}),
(Events.EPOCH_COMPLETED, 5, {}),
(Events.STARTED, 1, {}),
(Events.COMPLETED, 1, {}),
(Events.ITERATION_STARTED(every=10), 50 // 10 * 5, {}),
(Events.STARTED | Events.COMPLETED, 2, {}),
],
)
def test_attach(event, n_calls, kwargs):
n_epochs = 5
data = list(range(50))
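# 50 iterations per epoch over 5 epochs, matching the expected call counts in the parametrization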
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
trainer = Engine(update_fn)
logger = DummyLogger()
mock_log_handler = MagicMock()
logger.attach(trainer, log_handler=mock_log_handler, event_name=event, **kwargs)
trainer.run(data, max_epochs=n_epochs)
if isinstance(event, EventsList):
events = [e for e in event]
else:
events = [event]
if len(kwargs) > 0:
calls = [call(trainer, logger, e, **kwargs) for e in events]
else:
calls = [call(trainer, logger, e) for e in events]
mock_log_handler.assert_has_calls(calls)
assert mock_log_handler.call_count == n_calls
def test_attach_wrong_event_name():
trainer = Engine(lambda e, b: None)
logger = DummyLogger()
mock_log_handler = MagicMock()
with pytest.raises(RuntimeError, match="Unknown event name"):
logger.attach(trainer, log_handler=mock_log_handler, event_name="unknown")
events_list = EventsList()
events_list._events = ["unknown"]
with pytest.raises(RuntimeError, match="Unknown event name"):
logger.attach(trainer, log_handler=mock_log_handler, event_name=events_list)
def test_attach_on_custom_event():
n_epochs = 10
data = list(range(150))
def _test(event, n_calls, cpe):
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
trainer = Engine(update_fn)
cpe.attach(trainer)
logger = DummyLogger()
mock_log_handler = MagicMock()
logger.attach(trainer, log_handler=mock_log_handler, event_name=event)
trainer.run(data, max_epochs=n_epochs)
mock_log_handler.assert_called_with(trainer, logger, event)
assert mock_log_handler.call_count == n_calls
@pytest.mark.parametrize(
"event, n_calls",
[
(Events.ITERATION_STARTED, 50 * 5),
(Events.ITERATION_COMPLETED, 50 * 5),
(Events.EPOCH_STARTED, 5),
(Events.EPOCH_COMPLETED, 5),
(Events.STARTED, 1),
(Events.COMPLETED, 1),
(Events.ITERATION_STARTED(every=10), 50 // 10 * 5),
],
)
def test_as_context_manager(event, n_calls):
n_epochs = 5
data = list(range(50))
class _DummyLogger(DummyLogger):
def __init__(self, writer):
self.writer = writer
def close(self):
self.writer.close()
global close_counter
close_counter = 0
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
writer = MagicMock()
writer.close = MagicMock()
with _DummyLogger(writer) as logger:
assert isinstance(logger, _DummyLogger)
trainer = Engine(update_fn)
mock_log_handler = MagicMock()
logger.attach(trainer, log_handler=mock_log_handler, event_name=event)
trainer.run(data, max_epochs=n_epochs)
mock_log_handler.assert_called_with(trainer, logger, event)
assert mock_log_handler.call_count == n_calls
writer.close.assert_called_once_with()
def test_base_weights_handler_wrong_setup():
with pytest.raises(TypeError, match="Argument model should be of type torch.nn.Module"):
DummyWeightsHandler(None)
def test_base_weights_scalar_handler_wrong_setup():
model = MagicMock(spec=torch.nn.Module)
with pytest.raises(TypeError, match="Argument reduction should be callable"):
DummyWeightsScalarHandler(model, reduction=123)
with pytest.raises(TypeError, match="Output of the reduction function should be a scalar"):
DummyWeightsScalarHandler(model, reduction=lambda x: x)
|
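# Tests for ignite.contrib.handlers.clearml_logger. The handlers and savers are
# exercised against MagicMock objects (and, further below, against ClearMLLogger in
# bypass mode), so no ClearML server or credentials are required.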
import math
import os
from collections import defaultdict
from unittest.mock import ANY, call, MagicMock, patch
import clearml
import pytest
import torch
from clearml.binding.frameworks import WeightsFileHandler
from clearml.model import Framework
import ignite.distributed as idist
from ignite.contrib.handlers.clearml_logger import (
ClearMLLogger,
ClearMLSaver,
global_step_from_engine,
GradsHistHandler,
GradsScalarHandler,
OptimizerParamsHandler,
OutputHandler,
WeightsHistHandler,
WeightsScalarHandler,
)
from ignite.engine import Engine, Events, State
from ignite.handlers import Checkpoint
def test_no_clearml():
with patch.dict("sys.modules", {"clearml": None, "trains": None}):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires clearml to be installed."):
ClearMLSaver()
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires clearml to be installed."):
ClearMLLogger()
with patch.dict("sys.modules", {"clearml.binding.frameworks.tensorflow_bind": None}):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires clearml to be installed."):
ClearMLLogger()
with patch.dict("sys.modules", {"clearml.binding.frameworks": None, "trains.binding.frameworks": None}):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires clearml to be installed."):
ClearMLSaver.__call__(None, {}, "")
def test_optimizer_params_handler_wrong_setup():
with pytest.raises(TypeError):
OptimizerParamsHandler(optimizer=None)
optimizer = MagicMock(spec=torch.optim.Optimizer)
handler = OptimizerParamsHandler(optimizer=optimizer)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler OptimizerParamsHandler works only with ClearMLLogger"):
handler(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_optimizer_params():
optimizer = torch.optim.SGD([torch.tensor(0.0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.clearml_logger.report_scalar.assert_called_once_with(iteration=123, series="0", title="lr", value=0.01)
wrapper = OptimizerParamsHandler(optimizer, param_name="lr", tag="generator")
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.clearml_logger.report_scalar.assert_called_once_with(
iteration=123, series="0", title="generator/lr", value=0.01
)
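# A minimal usage sketch of the handler exercised above (hedged -- not executed by
# these tests, and it assumes a configured ClearML task):
#
#   clearml_logger = ClearMLLogger(project_name="examples", task_name="training")
#   clearml_logger.attach(
#       trainer,
#       log_handler=OptimizerParamsHandler(optimizer, param_name="lr"),
#       event_name=Events.ITERATION_STARTED,
#   )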
def test_output_handler_with_wrong_logger_type():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler OutputHandler works only with ClearMLLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_output_handler_output_transform(dirname):
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.output = 12345
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.clearml_logger.report_scalar.assert_called_once_with(
iteration=123, series="output", title="tag", value=12345
)
wrapper = OutputHandler("another_tag", output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.clearml_logger.report_scalar.assert_called_once_with(
iteration=123, series="loss", title="another_tag", value=12345
)
def test_output_handler_metric_names(dirname):
wrapper = OutputHandler("tag", metric_names=["a", "b"])
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.clearml_logger.report_scalar.call_count == 2
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[
call(title="tag", series="a", iteration=5, value=12.23),
call(title="tag", series="b", iteration=5, value=23.45),
],
any_order=True,
)
wrapper = OutputHandler("tag", metric_names=["a", "c"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 55.56, "c": "Some text"})
mock_engine.state.iteration = 7
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
with pytest.warns(UserWarning, match=r"Logger output_handler can not log metrics value type"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.clearml_logger.report_scalar.call_count == 1
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[call(title="tag", series="a", iteration=7, value=55.56)], any_order=True
)
# all metrics
wrapper = OutputHandler("tag", metric_names="all")
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.clearml_logger.report_scalar.call_count == 2
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[
call(title="tag", series="a", iteration=5, value=12.23),
call(title="tag", series="b", iteration=5, value=23.45),
],
any_order=True,
)
# log a torch vector
wrapper = OutputHandler("tag", metric_names="all")
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
vector = torch.tensor([0.1, 0.2, 0.1, 0.2, 0.33])
mock_engine.state = State(metrics={"vector": vector})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.clearml_logger.report_scalar.call_count == 5
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[call(title="tag/vector", series=str(i), iteration=5, value=vector[i].item()) for i in range(5)],
any_order=True,
)
# log a torch tensor (ndimension = 0)
wrapper = OutputHandler("tag", metric_names="all")
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": torch.tensor(12.23), "b": torch.tensor(23.45), "c": torch.tensor(5.01)})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.clearml_logger.report_scalar.call_count == 3
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[
call(title="tag", series="a", iteration=5, value=torch.tensor(12.23).item()),
call(title="tag", series="b", iteration=5, value=torch.tensor(23.45).item()),
call(title="tag", series="c", iteration=5, value=torch.tensor(5.01).item()),
],
any_order=True,
)
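# Summary of the cases above: plain floats are reported as scalars, non-numeric
# metric values raise a UserWarning and are skipped, metric_names="all" logs every
# metric, a 1-D tensor is expanded element-wise under "tag/<name>" with the index as
# the series, and 0-d tensors are reported via .item().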
def test_output_handler_both(dirname):
wrapper = OutputHandler("tag", metric_names=["a", "b"], output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.clearml_logger.report_scalar.call_count == 3
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[
call(title="tag", series="a", iteration=5, value=12.23),
call(title="tag", series="b", iteration=5, value=23.45),
call(title="tag", series="loss", iteration=5, value=12345),
],
any_order=True,
)
def test_output_handler_with_wrong_global_step_transform_output():
def global_step_transform(*args, **kwargs):
return "a"
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
with pytest.raises(TypeError, match="global_step must be int"):
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
def test_output_handler_with_global_step_from_engine():
mock_another_engine = MagicMock()
mock_another_engine.state = State()
mock_another_engine.state.epoch = 10
mock_another_engine.state.output = 12.345
wrapper = OutputHandler(
"tag",
output_transform=lambda x: {"loss": x},
global_step_transform=global_step_from_engine(mock_another_engine),
)
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 1
mock_engine.state.output = 0.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.clearml_logger.report_scalar.call_count == 1
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[call(title="tag", series="loss", iteration=mock_another_engine.state.epoch, value=mock_engine.state.output)]
)
mock_another_engine.state.epoch = 11
mock_engine.state.output = 1.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.clearml_logger.report_scalar.call_count == 2
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[call(title="tag", series="loss", iteration=mock_another_engine.state.epoch, value=mock_engine.state.output)]
)
def test_output_handler_state_attrs():
wrapper = OutputHandler("tag", state_attributes=["alpha", "beta", "gamma"])
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 5
mock_engine.state.alpha = 3.899
mock_engine.state.beta = torch.tensor(12.0)
mock_engine.state.gamma = torch.tensor([21.0, 6.0])
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.clearml_logger.report_scalar.call_count == 4
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[
call(title="tag", series="alpha", iteration=5, value=3.899),
call(title="tag", series="beta", iteration=5, value=12.0),
call(title="tag/gamma", series="0", iteration=5, value=21.0),
call(title="tag/gamma", series="1", iteration=5, value=6.0),
],
any_order=True,
)
def test_output_handler_with_global_step_transform():
def global_step_transform(*args, **kwargs):
return 10
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.clearml_logger.report_scalar.call_count == 1
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[call(title="tag", series="loss", iteration=10, value=12345)]
)
def test_weights_scalar_handler_wrong_setup():
model = MagicMock(spec=torch.nn.Module)
wrapper = WeightsScalarHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler WeightsScalarHandler works only with ClearMLLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_weights_scalar_handler(dummy_model_factory):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = WeightsScalarHandler(model, tag=tag)
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = f"{tag}/" if tag else ""
assert mock_logger.clearml_logger.report_scalar.call_count == 4
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[
call(title=tag_prefix + "weights_norm/fc1", series="weight", iteration=5, value=0.0),
call(title=tag_prefix + "weights_norm/fc1", series="bias", iteration=5, value=0.0),
call(title=tag_prefix + "weights_norm/fc2", series="weight", iteration=5, value=12.0),
call(title=tag_prefix + "weights_norm/fc2", series="bias", iteration=5, value=math.sqrt(12.0)),
],
any_order=True,
)
_test()
_test(tag="tag")
def test_weights_scalar_handler_whitelist(dummy_model_factory):
model = dummy_model_factory()
wrapper = WeightsScalarHandler(model, whitelist=["fc2.weight"])
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.clearml_logger.report_scalar.assert_called_once_with(
title="weights_norm/fc2", value=ANY, series="weight", iteration=mock_engine.state.epoch
)
mock_logger.clearml_logger.report_scalar.reset_mock()
wrapper = WeightsScalarHandler(model, tag="model", whitelist=["fc1"])
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[
call(title="model/weights_norm/fc1", value=ANY, series="weight", iteration=mock_engine.state.epoch),
call(title="model/weights_norm/fc1", value=ANY, series="bias", iteration=mock_engine.state.epoch),
],
any_order=True,
)
assert mock_logger.clearml_logger.report_scalar.call_count == 2
mock_logger.clearml_logger.report_scalar.reset_mock()
def weight_selector(n, _):
return "bias" in n
wrapper = WeightsScalarHandler(model, tag="model", whitelist=weight_selector)
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[
call(title="model/weights_norm/fc1", value=ANY, series="bias", iteration=mock_engine.state.epoch),
call(title="model/weights_norm/fc2", value=ANY, series="bias", iteration=mock_engine.state.epoch),
],
any_order=True,
)
assert mock_logger.clearml_logger.report_scalar.call_count == 2
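# Whitelist semantics demonstrated above: `whitelist` is either a list of parameter
# names/prefixes or a callable `(name, param) -> bool`. An exact name such as
# "fc2.weight" logs only that parameter, while a prefix such as "fc1" logs every
# parameter of that submodule.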
def test_weights_hist_handler_wrong_setup():
model = MagicMock(spec=torch.nn.Module)
wrapper = WeightsHistHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'WeightsHistHandler' works only with ClearMLLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_weights_hist_handler(dummy_model_factory):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = WeightsHistHandler(model, tag=tag)
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.grad_helper = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = f"{tag}/" if tag else ""
assert mock_logger.grad_helper.add_histogram.call_count == 4
mock_logger.grad_helper.add_histogram.assert_has_calls(
[
call(title=tag_prefix + "weights_fc1", hist_data=ANY, series="weight", step=5),
call(title=tag_prefix + "weights_fc1", hist_data=ANY, series="bias", step=5),
call(title=tag_prefix + "weights_fc2", hist_data=ANY, series="weight", step=5),
call(title=tag_prefix + "weights_fc2", hist_data=ANY, series="bias", step=5),
],
any_order=True,
)
_test()
_test(tag="tag")
def test_weights_hist_handler_whitelist(dummy_model_factory):
model = dummy_model_factory()
wrapper = WeightsHistHandler(model, whitelist=["fc2.weight"])
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.grad_helper = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.grad_helper.add_histogram.assert_called_once_with(
title="weights_fc2", hist_data=ANY, series="weight", step=5
)
mock_logger.grad_helper.add_histogram.reset_mock()
wrapper = WeightsHistHandler(model, tag="model", whitelist=["fc1"])
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.grad_helper.add_histogram.assert_has_calls(
[
call(title="model/weights_fc1", hist_data=ANY, series="weight", step=5),
call(title="model/weights_fc1", hist_data=ANY, series="bias", step=5),
],
any_order=True,
)
assert mock_logger.grad_helper.add_histogram.call_count == 2
mock_logger.grad_helper.add_histogram.reset_mock()
def weight_selector(n, _):
return "bias" in n
wrapper = WeightsHistHandler(model, tag="model", whitelist=weight_selector)
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.grad_helper.add_histogram.assert_has_calls(
[
call(title="model/weights_fc1", hist_data=ANY, series="bias", step=5),
call(title="model/weights_fc2", hist_data=ANY, series="bias", step=5),
],
any_order=True,
)
assert mock_logger.grad_helper.add_histogram.call_count == 2
def test_grads_scalar_handler_wrong_setup():
model = MagicMock(spec=torch.nn.Module)
wrapper = GradsScalarHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler GradsScalarHandler works only with ClearMLLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_grads_scalar_handler(dummy_model_factory, norm_mock):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = GradsScalarHandler(model, reduction=norm_mock, tag=tag)
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
norm_mock.reset_mock()
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = f"{tag}/" if tag else ""
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[
call(
title=tag_prefix + "grads_norm/fc1", value=ANY, series="weight", iteration=mock_engine.state.epoch
),
call(title=tag_prefix + "grads_norm/fc1", value=ANY, series="bias", iteration=mock_engine.state.epoch),
call(
title=tag_prefix + "grads_norm/fc2", value=ANY, series="weight", iteration=mock_engine.state.epoch
),
call(title=tag_prefix + "grads_norm/fc2", value=ANY, series="bias", iteration=mock_engine.state.epoch),
],
any_order=True,
)
assert mock_logger.clearml_logger.report_scalar.call_count == 4
assert norm_mock.call_count == 4
_test()
_test(tag="tag")
def test_grads_scalar_handler_whitelist(dummy_model_factory):
model = dummy_model_factory()
wrapper = GradsScalarHandler(model, whitelist=["fc2.weight"])
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.clearml_logger.report_scalar.assert_called_once_with(
title="grads_norm/fc2", value=ANY, series="weight", iteration=mock_engine.state.epoch
)
mock_logger.clearml_logger.report_scalar.reset_mock()
wrapper = GradsScalarHandler(model, tag="model", whitelist=["fc1"])
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[
call(title="model/grads_norm/fc1", value=ANY, series="weight", iteration=mock_engine.state.epoch),
call(title="model/grads_norm/fc1", value=ANY, series="bias", iteration=mock_engine.state.epoch),
],
any_order=True,
)
assert mock_logger.clearml_logger.report_scalar.call_count == 2
mock_logger.clearml_logger.report_scalar.reset_mock()
def weight_selector(n, _):
return "bias" in n
wrapper = GradsScalarHandler(model, tag="model", whitelist=weight_selector)
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[
call(title="model/grads_norm/fc1", value=ANY, series="bias", iteration=mock_engine.state.epoch),
call(title="model/grads_norm/fc2", value=ANY, series="bias", iteration=mock_engine.state.epoch),
],
any_order=True,
)
assert mock_logger.clearml_logger.report_scalar.call_count == 2
def test_grads_hist_handler_wrong_setup():
model = MagicMock(spec=torch.nn.Module)
wrapper = GradsHistHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'GradsHistHandler' works only with ClearMLLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_grads_hist_handler(dummy_model_factory):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = GradsHistHandler(model, tag=tag)
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.grad_helper = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = f"{tag}/" if tag else ""
assert mock_logger.grad_helper.add_histogram.call_count == 4
mock_logger.grad_helper.add_histogram.assert_has_calls(
[
call(title=tag_prefix + "grads_fc1", hist_data=ANY, series="weight", step=5),
call(title=tag_prefix + "grads_fc1", hist_data=ANY, series="bias", step=5),
call(title=tag_prefix + "grads_fc2", hist_data=ANY, series="weight", step=5),
call(title=tag_prefix + "grads_fc2", hist_data=ANY, series="bias", step=5),
],
any_order=True,
)
_test()
_test(tag="tag")
def test_grads_hist_handler_whitelist(dummy_model_factory):
model = dummy_model_factory()
wrapper = GradsHistHandler(model, whitelist=["fc2.weight"])
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.grad_helper = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.grad_helper.add_histogram.assert_called_once_with(
title="grads_fc2", hist_data=ANY, series="weight", step=5
)
mock_logger.grad_helper.reset_mock()
wrapper = GradsHistHandler(model, tag="model", whitelist=["fc1"])
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.grad_helper.add_histogram.assert_has_calls(
[
call(title="model/grads_fc1", hist_data=ANY, series="weight", step=5),
call(title="model/grads_fc1", hist_data=ANY, series="bias", step=5),
],
any_order=True,
)
assert mock_logger.grad_helper.add_histogram.call_count == 2
mock_logger.grad_helper.reset_mock()
def weight_selector(n, _):
return "bias" in n
wrapper = GradsHistHandler(model, tag="model", whitelist=weight_selector)
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.grad_helper.add_histogram.assert_has_calls(
[
call(title="model/grads_fc1", hist_data=ANY, series="bias", step=5),
call(title="model/grads_fc2", hist_data=ANY, series="bias", step=5),
],
any_order=True,
)
assert mock_logger.grad_helper.add_histogram.call_count == 2
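# ClearMLLogger.set_bypass_mode(True) lets the integration tests below run without a
# configured ClearML server; it is what triggers the "running in bypass mode"
# warning these tests expect.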
def test_integration(dirname):
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
trainer = Engine(update_fn)
with pytest.warns(UserWarning, match="ClearMLSaver: running in bypass mode"):
ClearMLLogger.set_bypass_mode(True)
logger = ClearMLLogger(output_uri=dirname)
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
test_value = 0.3 # example
logger.clearml_logger.report_scalar(title="", series="", value=test_value, iteration=global_step)
logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
logger.close()
def test_integration_as_context_manager(dirname):
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
with pytest.warns(UserWarning, match="ClearMLSaver: running in bypass mode"):
ClearMLLogger.set_bypass_mode(True)
with ClearMLLogger(output_uri=dirname) as clearml_logger:
trainer = Engine(update_fn)
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
test_value = 0.3 # example
logger.clearml_logger.report_scalar(title="", series="", value=test_value, iteration=global_step)
clearml_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
def test_clearml_logger_getattr_method(dirname):
with pytest.warns(UserWarning, match="ClearMLSaver: running in bypass mode"):
ClearMLLogger.set_bypass_mode(True)
logger = ClearMLLogger(output_uri=dirname)
# Create a mock clearml.Logger() object
mock_logger = MagicMock()
logger.clearml_logger = mock_logger
    # Check that a method resolved via __getattr__ is forwarded to the corresponding method of the mock logger.
logger.report_single_value("accuracy", 0.72)
mock_logger.report_single_value.assert_called_once_with("accuracy", 0.72)
    # Check that a classmethod resolved via __getattr__ is forwarded to the corresponding classmethod of the mock logger's class.
logger.current_logger()
mock_logger.current_logger.assert_called_once()
logger.close()
def test_clearml_logger_get_task_bypass(dirname):
with pytest.warns(UserWarning, match="ClearMLSaver: running in bypass mode"):
ClearMLLogger.set_bypass_mode(True)
with ClearMLLogger(output_uri=dirname) as clearml_logger:
task = clearml_logger.get_task()
assert isinstance(task, clearml.Task)
assert task == clearml.Task.current_task()
task.close()
def test_clearml_disk_saver_integration():
model = torch.nn.Module()
to_save_serializable = {"model": model}
with pytest.warns(UserWarning, match="ClearMLSaver created a temporary checkpoints directory"):
mock_logger = MagicMock(spec=ClearMLLogger)
clearml.Task.current_task = MagicMock(spec=clearml.Task)
clearml_saver = ClearMLSaver(mock_logger)
clearml.binding.frameworks.WeightsFileHandler.create_output_model = MagicMock()
checkpoint = Checkpoint(to_save=to_save_serializable, save_handler=clearml_saver, n_saved=1)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
checkpoint(trainer)
trainer.state.iteration = 1
checkpoint(trainer)
if clearml_saver._atomic:
assert clearml.binding.frameworks.WeightsFileHandler.create_output_model.call_count == 2
else:
saved_files = list(os.listdir(clearml_saver.dirname))
assert len(saved_files) == 1
assert saved_files[0] == "model_1.pt"
def test_clearml_disk_saver_integration_no_logger():
model = torch.nn.Module()
to_save_serializable = {"model": model}
with pytest.warns(UserWarning, match="ClearMLSaver created a temporary checkpoints directory"):
clearml.Task.current_task = MagicMock(spec=clearml.Task)
clearml.binding.frameworks.WeightsFileHandler.create_output_model = MagicMock()
clearml_saver = ClearMLSaver()
checkpoint = Checkpoint(to_save=to_save_serializable, save_handler=clearml_saver, n_saved=1)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
checkpoint(trainer)
trainer.state.iteration = 1
checkpoint(trainer)
if clearml_saver._atomic:
assert clearml.binding.frameworks.WeightsFileHandler.create_output_model.call_count == 2
else:
saved_files = list(os.listdir(clearml_saver.dirname))
assert len(saved_files) == 1
assert saved_files[0] == "model_1.pt"
def test_clearml_saver_callbacks():
mock_task = MagicMock(spec=clearml.Task)
mock_task.name = "check-task"
mock_model = MagicMock(spec=clearml.OutputModel)
model_info = WeightsFileHandler.ModelInfo(
model=mock_model,
upload_filename="test.pt",
local_model_path="",
local_model_id="",
framework=Framework.pytorch,
task=mock_task,
)
mock_model_info = MagicMock(spec_set=model_info)
# Simulate 4 calls to save model and 2 to remove (n_saved=2)
filenames = [
"best_model_5_val_acc=0.123.pt",
"best_model_6_val_acc=0.234.pt",
"best_model_7_val_acc=0.356.pt",
"best_model_8_val_acc=0.456.pt",
]
metadata_list = [
{"basename": "best_model", "score_name": "val_acc", "priority": 0.123},
{"basename": "best_model", "score_name": "val_acc", "priority": 0.234},
{"basename": "best_model", "score_name": "val_acc", "priority": 0.345},
{"basename": "best_model", "score_name": "val_acc", "priority": 0.456},
]
dirname = "/tmp/test"
_checkpoint_slots = defaultdict(list)
n_saved = 2
for i, (filename, metadata) in enumerate(zip(filenames, metadata_list)):
mock_model_info.upload_filename = filename
if i >= n_saved:
# Remove
filename_to_remove = filenames[i % n_saved]
for slots in _checkpoint_slots.values():
try:
slots[slots.index(filename_to_remove)] = None
except ValueError:
pass
else:
i = i % n_saved
break
basename = metadata["basename"]
checkpoint_key = (dirname, basename)
context = ClearMLSaver._CallbacksContext(
callback_type=WeightsFileHandler.CallbackType,
slots=_checkpoint_slots[checkpoint_key],
checkpoint_key=str(checkpoint_key),
filename=filename,
basename=basename,
metadata=metadata,
)
output_model_info = context.pre_callback(str(WeightsFileHandler.CallbackType.save), mock_model_info)
assert (
hasattr(output_model_info, "upload_filename") and f"{basename}_{i}.pt" in output_model_info.upload_filename
)
assert hasattr(output_model_info, "local_model_id") and str(checkpoint_key) in output_model_info.local_model_id
output_model_info = context.post_callback(str(WeightsFileHandler.CallbackType.save), mock_model_info)
assert hasattr(output_model_info, "model") and hasattr(output_model_info.model, "name")
assert hasattr(output_model_info, "model") and hasattr(output_model_info.model, "comment")
assert isinstance(output_model_info.model.name, str) and filename in output_model_info.model.name
assert (
isinstance(output_model_info.model.comment, str)
and metadata["basename"] in output_model_info.model.comment
and metadata["score_name"] in output_model_info.model.comment
)
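# The loop above checks the saver callbacks: the pre-callback is expected to rewrite
# the upload filename to "<basename>_<slot>.pt" and to embed the checkpoint key into
# local_model_id, while the post-callback fills in a human-readable model name and a
# comment containing the original filename, basename and score name.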
class DummyModel(torch.nn.Module):
def __init__(self):
super(DummyModel, self).__init__()
self.net = torch.nn.Linear(2, 2)
def forward(self, x):
return self.net(x)
def _test_save_model_optimizer_lr_scheduler_with_state_dict(device, on_zero_rank=False):
if idist.get_rank() == 0:
clearml.Task.current_task = MagicMock(spec=clearml.Task)
clearml.binding.frameworks.WeightsFileHandler.create_output_model = MagicMock()
torch.manual_seed(23)
model = DummyModel().to(device)
optim = torch.optim.SGD(model.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=0.5)
def update_fn(engine, batch):
x = torch.rand((4, 2)).to(device)
optim.zero_grad()
y = model(x)
# Below code raises: RuntimeError: torch_xla/csrc/tensor_impl.cpp:144 : XLA tensors do not have storage
# Probably related to https://github.com/pytorch/xla/issues/2576
# loss = y.pow(2.0).sum()
loss = y.sum()
loss.backward()
if idist.has_xla_support:
import torch_xla.core.xla_model as xm
xm.optimizer_step(optim, barrier=True)
else:
optim.step()
lr_scheduler.step()
engine = Engine(update_fn)
to_save = {"model": model, "optimizer": optim, "lr_scheduler": lr_scheduler}
with pytest.warns(UserWarning, match=r"ClearMLSaver created a temporary checkpoints directory"):
clearml_saver = ClearMLSaver()
if (not on_zero_rank) or (on_zero_rank and idist.get_rank() == 0):
checkpoint = Checkpoint(to_save=to_save, save_handler=clearml_saver, n_saved=1)
engine.add_event_handler(Events.EPOCH_COMPLETED, checkpoint)
engine.run([0], max_epochs=4)
idist.barrier()
saved_objects = sorted(os.listdir(clearml_saver.dirname))
# saved object is ['PREFIX_checkpoint_3.pt', ]
saved_checkpoint = clearml_saver.dirname / saved_objects[0]
if idist.has_xla_support:
device = "cpu"
loaded_obj = torch.load(saved_checkpoint, map_location=device)
for f in ["model", "optimizer", "lr_scheduler"]:
assert f in loaded_obj
loaded_model_state_dict = loaded_obj["model"]
loaded_optimizer_state_dict = loaded_obj["optimizer"]
loaded_lr_scheduler_state_dict = loaded_obj["lr_scheduler"]
assert isinstance(loaded_model_state_dict, dict)
assert isinstance(loaded_optimizer_state_dict, dict)
assert isinstance(loaded_lr_scheduler_state_dict, dict)
    # Explicitly move the model to CPU before comparing state dicts
model_state_dict = model.cpu().state_dict()
for key in model_state_dict.keys():
assert key in loaded_model_state_dict
model_value = model_state_dict[key]
loaded_model_value = loaded_model_state_dict[key]
assert (model_value.cpu().numpy() == loaded_model_value.cpu().numpy()).all()
optim_state_dict = optim.state_dict()
for key in optim_state_dict.keys():
assert key in loaded_optimizer_state_dict
optim_value = optim_state_dict[key]
loaded_optim_value = loaded_optimizer_state_dict[key]
if idist.get_rank() == 0:
assert optim_value == loaded_optim_value
lr_scheduler_state_dict = lr_scheduler.state_dict()
for key in lr_scheduler_state_dict.keys():
assert key in loaded_lr_scheduler_state_dict
lr_scheduler_value = lr_scheduler_state_dict[key]
loaded_lr_scheduler_value = loaded_lr_scheduler_state_dict[key]
assert lr_scheduler_value == loaded_lr_scheduler_value
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_save_model_optimizer_lr_scheduler_with_state_dict(device)
_test_save_model_optimizer_lr_scheduler_with_state_dict(device, on_zero_rank=True)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_save_model_optimizer_lr_scheduler_with_state_dict(device)
_test_save_model_optimizer_lr_scheduler_with_state_dict(device, on_zero_rank=True)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Not on TPU device")
def test_distrib_single_device_xla():
device = idist.device()
assert "xla" in device.type
_test_save_model_optimizer_lr_scheduler_with_state_dict(device)
def _test_save_model_optimizer_lr_scheduler_with_state_dict_xla_nprocs(index):
device = idist.device()
_test_save_model_optimizer_lr_scheduler_with_state_dict(device)
import time
    # hack to let all processes sync properly:
time.sleep(1)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Not on TPU device")
def test_distrib_single_device_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_save_model_optimizer_lr_scheduler_with_state_dict_xla_nprocs, args=(), nprocs=n)
|
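# Duck-typed stand-in for DeepSpeed's FP16 ZeRO optimizer wrapper: it is not a
# torch.optim.Optimizer subclass, but it exposes `param_groups` as a property that
# delegates to the wrapped optimizer, which is all the logger param handlers need.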
class MockFP16DeepSpeedZeroOptimizer:
def __init__(self, optimizer):
self.optimizer = optimizer
def step(self, closure=None):
self.optimizer.step()
def _get_param_groups(self):
return self.optimizer.param_groups
def _set_param_groups(self, value):
self.optimizer.param_groups = value
param_groups = property(_get_param_groups, _set_param_groups)
|
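# Tests for ignite.contrib.handlers.neptune_logger. Runs are created with
# mode="debug" or mode="offline" (assumed here to keep all values local), so the
# tests can read values back via fetch_values() without talking to a Neptune server.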
import math
import warnings
from unittest.mock import MagicMock
import pytest
import torch
from ignite.contrib.handlers.neptune_logger import (
global_step_from_engine,
GradsScalarHandler,
NeptuneLogger,
NeptuneSaver,
OptimizerParamsHandler,
OutputHandler,
WeightsScalarHandler,
)
from ignite.engine import Engine, Events, State
def assert_logger_called_once_with(logger, key, value):
result = logger[key].fetch_values()
assert len(result.value) == 1
if isinstance(result.value[0], float):
assert math.isclose(result.value[0], value, abs_tol=0.01)
else:
assert result.value[0] == value
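# Helper above: reads the logged series back from the run via fetch_values() and
# asserts that exactly one value was recorded, comparing floats with a small
# absolute tolerance.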
def test_optimizer_params_handler_wrong_setup():
with pytest.raises(TypeError):
OptimizerParamsHandler(optimizer=None)
optimizer = MagicMock(spec=torch.optim.Optimizer)
handler = OptimizerParamsHandler(optimizer=optimizer)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(TypeError, match="Handler OptimizerParamsHandler works only with NeptuneLogger"):
handler(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_optimizer_params():
optimizer = torch.optim.SGD([torch.tensor(0.0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 123
wrapper(mock_engine, logger, Events.ITERATION_STARTED)
assert_logger_called_once_with(logger, "lr/group_0", 0.01)
logger.stop()
wrapper = OptimizerParamsHandler(optimizer, param_name="lr", tag="generator")
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
wrapper(mock_engine, logger, Events.ITERATION_STARTED)
assert_logger_called_once_with(logger, "generator/lr/group_0", 0.01)
logger.stop()
def test_output_handler_with_wrong_logger_type():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(TypeError, match="Handler OutputHandler works only with NeptuneLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_output_handler_output_transform():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.output = 12345
mock_engine.state.iteration = 123
wrapper(mock_engine, logger, Events.ITERATION_STARTED)
assert_logger_called_once_with(logger, "tag/output", 12345)
logger.stop()
wrapper = OutputHandler("another_tag", output_transform=lambda x: {"loss": x})
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
wrapper(mock_engine, logger, Events.ITERATION_STARTED)
assert_logger_called_once_with(logger, "another_tag/loss", 12345)
logger.stop()
def test_output_handler_metric_names():
wrapper = OutputHandler("tag", metric_names=["a", "b"])
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.iteration = 5
wrapper(mock_engine, logger, Events.ITERATION_STARTED)
assert_logger_called_once_with(logger, "tag/a", 12.23)
assert_logger_called_once_with(logger, "tag/b", 23.45)
logger.stop()
wrapper = OutputHandler("tag", metric_names=["a"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": torch.tensor([0.0, 1.0, 2.0, 3.0])})
mock_engine.state.iteration = 5
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
wrapper(mock_engine, logger, Events.ITERATION_STARTED)
for key, val in [("tag/a/0", 0.0), ("tag/a/1", 1.0), ("tag/a/2", 2.0), ("tag/a/3", 3.0)]:
assert_logger_called_once_with(logger, key, val)
logger.stop()
wrapper = OutputHandler("tag", metric_names=["a", "c"])
mock_engine = MagicMock()
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine.state = State(metrics={"a": 55.56, "c": "Some text"})
mock_engine.state.iteration = 7
with pytest.warns(UserWarning):
wrapper(mock_engine, logger, Events.ITERATION_STARTED)
assert_logger_called_once_with(logger, "tag/a", 55.56)
logger.stop()
# all metrics
wrapper = OutputHandler("tag", metric_names="all")
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.iteration = 5
wrapper(mock_engine, logger, Events.ITERATION_STARTED)
assert_logger_called_once_with(logger, "tag/a", 12.23)
assert_logger_called_once_with(logger, "tag/b", 23.45)
logger.stop()
# log a torch tensor (ndimension = 0)
wrapper = OutputHandler("tag", metric_names="all")
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": torch.tensor(12.23), "b": torch.tensor(23.45)})
mock_engine.state.iteration = 5
wrapper(mock_engine, logger, Events.ITERATION_STARTED)
assert_logger_called_once_with(logger, "tag/a", 12.23)
assert_logger_called_once_with(logger, "tag/b", 23.45)
logger.stop()
def test_output_handler_both():
wrapper = OutputHandler("tag", metric_names=["a", "b"], output_transform=lambda x: {"loss": x})
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, logger, Events.EPOCH_STARTED)
assert_logger_called_once_with(logger, "tag/a", 12.23)
assert_logger_called_once_with(logger, "tag/b", 23.45)
assert_logger_called_once_with(logger, "tag/loss", 12345)
logger.stop()
def test_output_handler_with_wrong_global_step_transform_output():
def global_step_transform(*args, **kwargs):
return "a"
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
with pytest.raises(TypeError, match="global_step must be int"):
wrapper(mock_engine, logger, Events.EPOCH_STARTED)
logger.stop()
def test_output_handler_with_global_step_from_engine():
mock_another_engine = MagicMock()
mock_another_engine.state = State()
mock_another_engine.state.epoch = 10
mock_another_engine.state.output = 12.345
wrapper = OutputHandler(
"tag",
output_transform=lambda x: {"loss": x},
global_step_transform=global_step_from_engine(mock_another_engine),
)
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 1
mock_engine.state.output = 0.123
wrapper(mock_engine, logger, Events.EPOCH_STARTED)
assert_logger_called_once_with(logger, "tag/loss", mock_engine.state.output)
mock_another_engine.state.epoch = 11
mock_engine.state.output = 1.123
wrapper(mock_engine, logger, Events.EPOCH_STARTED)
result = logger["tag/loss"].fetch_values()
assert len(result.value) == 2
assert result.value[1] == mock_engine.state.output
logger.stop()
def test_output_handler_with_global_step_transform():
def global_step_transform(*args, **kwargs):
return 10
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, logger, Events.EPOCH_STARTED)
assert_logger_called_once_with(logger, "tag/loss", 12345)
logger.stop()
def test_output_handler_state_attrs():
wrapper = OutputHandler("tag", state_attributes=["alpha", "beta", "gamma"])
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 5
mock_engine.state.alpha = 3.899
mock_engine.state.beta = torch.tensor(12.23)
mock_engine.state.gamma = torch.tensor([21.0, 6.0])
wrapper(mock_engine, logger, Events.ITERATION_STARTED)
assert_logger_called_once_with(logger, "tag/alpha", 3.899)
assert_logger_called_once_with(logger, "tag/beta", 12.23)
assert_logger_called_once_with(logger, "tag/gamma/0", 21.0)
assert_logger_called_once_with(logger, "tag/gamma/1", 6.0)
logger.stop()
def test_weights_scalar_handler_wrong_setup():
with pytest.raises(TypeError, match="Argument model should be of type torch.nn.Module"):
WeightsScalarHandler(None)
model = MagicMock(spec=torch.nn.Module)
with pytest.raises(TypeError, match="Argument reduction should be callable"):
WeightsScalarHandler(model, reduction=123)
with pytest.raises(TypeError, match="Output of the reduction function should be a scalar"):
WeightsScalarHandler(model, reduction=lambda x: x)
wrapper = WeightsScalarHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(TypeError, match="Handler WeightsScalarHandler works only with NeptuneLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_weights_scalar_handler(dummy_model_factory):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = WeightsScalarHandler(model, tag=tag)
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, logger, Events.EPOCH_STARTED)
tag_prefix = f"{tag}/" if tag else ""
assert_logger_called_once_with(logger, tag_prefix + "weights_norm/fc1/weight", 0.0)
assert_logger_called_once_with(logger, tag_prefix + "weights_norm/fc1/bias", 0.0)
assert_logger_called_once_with(logger, tag_prefix + "weights_norm/fc2/weight", 12.0)
assert_logger_called_once_with(logger, tag_prefix + "weights_norm/fc2/bias", math.sqrt(12.0))
logger.stop()
_test()
_test(tag="tag")
def test_weights_scalar_handler_frozen_layers(dummy_model_factory):
model = dummy_model_factory(with_grads=True, with_frozen_layer=True)
wrapper = WeightsScalarHandler(model)
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, logger, Events.EPOCH_STARTED)
assert_logger_called_once_with(logger, "weights_norm/fc2/weight", 12.0)
assert_logger_called_once_with(logger, "weights_norm/fc2/bias", math.sqrt(12.0))
assert not logger.exists("weights_norm/fc1/weight")
assert not logger.exists("weights_norm/fc1/bias")
logger.stop()
def test_grads_scalar_handler_wrong_setup():
with pytest.raises(TypeError, match="Argument model should be of type torch.nn.Module"):
GradsScalarHandler(None)
model = MagicMock(spec=torch.nn.Module)
with pytest.raises(TypeError, match="Argument reduction should be callable"):
GradsScalarHandler(model, reduction=123)
wrapper = GradsScalarHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(TypeError, match="Handler GradsScalarHandler works only with NeptuneLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_grads_scalar_handler(dummy_model_factory, norm_mock):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = GradsScalarHandler(model, reduction=norm_mock, tag=tag)
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
norm_mock.reset_mock()
wrapper(mock_engine, logger, Events.EPOCH_STARTED)
tag_prefix = f"{tag}/" if tag else ""
assert logger.exists(tag_prefix + "grads_norm/fc1/weight")
assert logger.exists(tag_prefix + "grads_norm/fc1/bias")
assert logger.exists(tag_prefix + "grads_norm/fc2/weight")
assert logger.exists(tag_prefix + "grads_norm/fc2/bias")
logger.stop()
_test()
_test(tag="tag")
def test_grads_scalar_handler_frozen_layers(dummy_model_factory, norm_mock):
model = dummy_model_factory(with_grads=True, with_frozen_layer=True)
wrapper = GradsScalarHandler(model, reduction=norm_mock)
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
norm_mock.reset_mock()
wrapper(mock_engine, logger, Events.EPOCH_STARTED)
assert logger.exists("grads_norm/fc2/weight")
assert logger.exists("grads_norm/fc2/bias")
assert not logger.exists("grads_norm/fc1/weight")
assert not logger.exists("grads_norm/fc1/bias")
logger.stop()
def test_integration():
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
trainer = Engine(update_fn)
npt_logger = NeptuneLogger(mode="offline")
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
logger["test_value"].append(global_step, step=global_step)
npt_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
npt_logger.close()
def test_integration_as_context_manager():
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
with NeptuneLogger(mode="offline") as npt_logger:
trainer = Engine(update_fn)
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
logger["test_value"].append(global_step, step=global_step)
npt_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
def test_neptune_saver_serializable(dirname):
mock_logger = MagicMock(spec=NeptuneLogger)
mock_logger.upload = MagicMock()
model = torch.nn.Module()
to_save_serializable = {"model": model}
saver = NeptuneSaver(mock_logger)
fname = dirname / "test.pt"
saver(to_save_serializable, fname)
assert mock_logger[dirname / "test.pt"].upload.call_count == 1
@pytest.mark.parametrize("model, serializable", [(lambda x: x, False), (torch.nn.Module().to("cpu"), True)])
def test_neptune_saver(model, serializable):
mock_logger = MagicMock(spec=NeptuneLogger)
mock_logger.upload = MagicMock()
to_save_non_serializable = {"model": model}
saver = NeptuneSaver(mock_logger)
fname = "test.pt"
try:
with warnings.catch_warnings():
# Ignore torch/serialization.py:292: UserWarning: Couldn't retrieve source code for container of type
# DummyModel. It won't be checked for correctness upon loading.
warnings.simplefilter("ignore", category=UserWarning)
saver(to_save_non_serializable, fname)
except Exception:
pass
assert mock_logger["model"].upload.call_count == int(serializable)
def test_logs_version():
from ignite import __version__
from ignite.contrib.handlers.neptune_logger import _INTEGRATION_VERSION_KEY
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
assert logger[_INTEGRATION_VERSION_KEY].fetch() == __version__
|
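# Tests for ignite.contrib.handlers.visdom_logger. The handlers are exercised
# against MagicMock(spec=VisdomLogger) objects with a _DummyExecutor, so no running
# visdom server is needed; the assertions inspect the arguments passed to vis.line.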
import sys
from unittest.mock import ANY, call, MagicMock, patch
import pytest
import torch
from ignite.contrib.handlers.visdom_logger import (
_DummyExecutor,
global_step_from_engine,
GradsScalarHandler,
OptimizerParamsHandler,
OutputHandler,
VisdomLogger,
WeightsScalarHandler,
)
from ignite.engine import Engine, Events, State
def test_optimizer_params_handler_wrong_setup():
with pytest.raises(TypeError):
OptimizerParamsHandler(optimizer=None)
optimizer = MagicMock(spec=torch.optim.Optimizer)
handler = OptimizerParamsHandler(optimizer=optimizer)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler OptimizerParamsHandler works only with VisdomLogger"):
handler(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_optimizer_params():
optimizer = torch.optim.SGD([torch.tensor(0.0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
# mock_logger.vis.line.assert_called_once_with("lr/group_0", 0.01, 123)
assert len(wrapper.windows) == 1 and "lr/group_0" in wrapper.windows
assert wrapper.windows["lr/group_0"]["win"] is not None
mock_logger.vis.line.assert_called_once_with(
X=[123],
Y=[0.01],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["lr/group_0"]["opts"],
name="lr/group_0",
)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr", tag="generator")
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert len(wrapper.windows) == 1 and "generator/lr/group_0" in wrapper.windows
assert wrapper.windows["generator/lr/group_0"]["win"] is not None
mock_logger.vis.line.assert_called_once_with(
X=[123],
Y=[0.01],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["generator/lr/group_0"]["opts"],
name="generator/lr/group_0",
)
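# OptimizerParamsHandler (and the output handlers below) keep a `windows` dict that
# maps each plotted key to its visdom window handle and plot opts. The first call
# passes win=None / update=None so visdom creates the window; the stored handle is
# presumably reused to append points on later calls (only the first call is
# asserted here).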
def test_output_handler_with_wrong_logger_type():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'OutputHandler' works only with VisdomLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_output_handler_output_transform(dirname):
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.output = 12345
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert len(wrapper.windows) == 1 and "tag/output" in wrapper.windows
assert wrapper.windows["tag/output"]["win"] is not None
mock_logger.vis.line.assert_called_once_with(
X=[123],
Y=[12345],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/output"]["opts"],
name="tag/output",
)
wrapper = OutputHandler("another_tag", output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert len(wrapper.windows) == 1 and "another_tag/loss" in wrapper.windows
assert wrapper.windows["another_tag/loss"]["win"] is not None
mock_logger.vis.line.assert_called_once_with(
X=[123],
Y=[12345],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["another_tag/loss"]["opts"],
name="another_tag/loss",
)
def test_output_handler_metric_names(dirname):
wrapper = OutputHandler("tag", metric_names=["a", "b"])
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert len(wrapper.windows) == 2 and "tag/a" in wrapper.windows and "tag/b" in wrapper.windows
assert wrapper.windows["tag/a"]["win"] is not None
assert wrapper.windows["tag/b"]["win"] is not None
assert mock_logger.vis.line.call_count == 2
mock_logger.vis.line.assert_has_calls(
[
call(
X=[5],
Y=[12.23],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/a"]["opts"],
name="tag/a",
),
call(
X=[5],
Y=[23.45],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/b"]["opts"],
name="tag/b",
),
],
any_order=True,
)
wrapper = OutputHandler("tag", metric_names=["a"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": torch.tensor([0.0, 1.0, 2.0, 3.0])})
mock_engine.state.iteration = 5
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert len(wrapper.windows) == 4 and all([f"tag/a/{i}" in wrapper.windows for i in range(4)])
assert wrapper.windows["tag/a/0"]["win"] is not None
assert wrapper.windows["tag/a/1"]["win"] is not None
assert wrapper.windows["tag/a/2"]["win"] is not None
assert wrapper.windows["tag/a/3"]["win"] is not None
assert mock_logger.vis.line.call_count == 4
mock_logger.vis.line.assert_has_calls(
[
call(
X=[5],
Y=[0.0],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/a/0"]["opts"],
name="tag/a/0",
),
call(
X=[5],
Y=[1.0],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/a/1"]["opts"],
name="tag/a/1",
),
call(
X=[5],
Y=[2.0],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/a/2"]["opts"],
name="tag/a/2",
),
call(
X=[5],
Y=[3.0],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/a/3"]["opts"],
name="tag/a/3",
),
],
any_order=True,
)
wrapper = OutputHandler("tag", metric_names=["a", "c"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 55.56, "c": "Some text"})
mock_engine.state.iteration = 7
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
with pytest.warns(UserWarning):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert len(wrapper.windows) == 1 and "tag/a" in wrapper.windows
assert wrapper.windows["tag/a"]["win"] is not None
assert mock_logger.vis.line.call_count == 1
mock_logger.vis.line.assert_has_calls(
[
call(
X=[7],
Y=[55.56],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/a"]["opts"],
name="tag/a",
),
],
any_order=True,
)
# all metrics
wrapper = OutputHandler("tag", metric_names="all")
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert len(wrapper.windows) == 2 and "tag/a" in wrapper.windows and "tag/b" in wrapper.windows
assert wrapper.windows["tag/a"]["win"] is not None
assert wrapper.windows["tag/b"]["win"] is not None
assert mock_logger.vis.line.call_count == 2
mock_logger.vis.line.assert_has_calls(
[
call(
X=[5],
Y=[12.23],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/a"]["opts"],
name="tag/a",
),
call(
X=[5],
Y=[23.45],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/b"]["opts"],
name="tag/b",
),
],
any_order=True,
)
def test_output_handler_both(dirname):
wrapper = OutputHandler("tag", metric_names=["a", "b"], output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.vis.line.call_count == 3
assert (
len(wrapper.windows) == 3
and "tag/a" in wrapper.windows
and "tag/b" in wrapper.windows
and "tag/loss" in wrapper.windows
)
assert wrapper.windows["tag/a"]["win"] is not None
assert wrapper.windows["tag/b"]["win"] is not None
assert wrapper.windows["tag/loss"]["win"] is not None
mock_logger.vis.line.assert_has_calls(
[
call(
X=[5],
Y=[12.23],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/a"]["opts"],
name="tag/a",
),
call(
X=[5],
Y=[23.45],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/b"]["opts"],
name="tag/b",
),
call(
X=[5],
Y=[12345],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/loss"]["opts"],
name="tag/loss",
),
],
any_order=True,
)
mock_engine.state.epoch = 6
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.vis.line.call_count == 6
assert (
len(wrapper.windows) == 3
and "tag/a" in wrapper.windows
and "tag/b" in wrapper.windows
and "tag/loss" in wrapper.windows
)
assert wrapper.windows["tag/a"]["win"] is not None
assert wrapper.windows["tag/b"]["win"] is not None
assert wrapper.windows["tag/loss"]["win"] is not None
mock_logger.vis.line.assert_has_calls(
[
call(
X=[6],
Y=[12.23],
env=mock_logger.vis.env,
win=wrapper.windows["tag/a"]["win"],
update="append",
opts=wrapper.windows["tag/a"]["opts"],
name="tag/a",
),
call(
X=[6],
Y=[23.45],
env=mock_logger.vis.env,
win=wrapper.windows["tag/b"]["win"],
update="append",
opts=wrapper.windows["tag/b"]["opts"],
name="tag/b",
),
call(
X=[6],
Y=[12345],
env=mock_logger.vis.env,
win=wrapper.windows["tag/loss"]["win"],
update="append",
opts=wrapper.windows["tag/loss"]["opts"],
name="tag/loss",
),
],
any_order=True,
)
def test_output_handler_state_attrs():
wrapper = OutputHandler("tag", state_attributes=["alpha", "beta", "gamma"])
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 5
mock_engine.state.alpha = 3.899
mock_engine.state.beta = torch.tensor(12.0)
mock_engine.state.gamma = torch.tensor([21.0, 6.0])
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.vis.line.call_count == 4
assert (
len(wrapper.windows) == 4
and "tag/alpha" in wrapper.windows
and "tag/beta" in wrapper.windows
and "tag/gamma/0" in wrapper.windows
and "tag/gamma/1" in wrapper.windows
)
assert wrapper.windows["tag/alpha"]["win"] is not None
assert wrapper.windows["tag/beta"]["win"] is not None
assert wrapper.windows["tag/gamma/0"]["win"] is not None
assert wrapper.windows["tag/gamma/1"]["win"] is not None
mock_logger.vis.line.assert_has_calls(
[
call(
X=[5],
Y=[3.899],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/alpha"]["opts"],
name="tag/alpha",
),
call(
X=[5],
Y=[12.0],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/beta"]["opts"],
name="tag/beta",
),
call(
X=[5],
Y=[21.0],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/gamma/0"]["opts"],
name="tag/gamma/0",
),
call(
X=[5],
Y=[6.0],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/gamma/1"]["opts"],
name="tag/gamma/1",
),
],
any_order=True,
)
def test_output_handler_with_wrong_global_step_transform_output():
def global_step_transform(*args, **kwargs):
return "a"
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
with pytest.raises(TypeError, match="global_step must be int"):
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
def test_output_handler_with_global_step_transform():
def global_step_transform(*args, **kwargs):
return 10
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.vis.line.call_count == 1
assert len(wrapper.windows) == 1 and "tag/loss" in wrapper.windows
assert wrapper.windows["tag/loss"]["win"] is not None
mock_logger.vis.line.assert_has_calls(
[
call(
X=[10],
Y=[12345],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/loss"]["opts"],
name="tag/loss",
)
]
)
def test_output_handler_with_global_step_from_engine():
mock_another_engine = MagicMock()
mock_another_engine.state = State()
mock_another_engine.state.epoch = 10
mock_another_engine.state.output = 12.345
wrapper = OutputHandler(
"tag",
output_transform=lambda x: {"loss": x},
global_step_transform=global_step_from_engine(mock_another_engine),
)
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 1
mock_engine.state.output = 0.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.vis.line.call_count == 1
assert len(wrapper.windows) == 1 and "tag/loss" in wrapper.windows
assert wrapper.windows["tag/loss"]["win"] is not None
mock_logger.vis.line.assert_has_calls(
[
call(
X=[mock_another_engine.state.epoch],
Y=[mock_engine.state.output],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/loss"]["opts"],
name="tag/loss",
)
]
)
mock_another_engine.state.epoch = 11
mock_engine.state.output = 1.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.vis.line.call_count == 2
assert len(wrapper.windows) == 1 and "tag/loss" in wrapper.windows
assert wrapper.windows["tag/loss"]["win"] is not None
mock_logger.vis.line.assert_has_calls(
[
call(
X=[mock_another_engine.state.epoch],
Y=[mock_engine.state.output],
env=mock_logger.vis.env,
win=wrapper.windows["tag/loss"]["win"],
update="append",
opts=wrapper.windows["tag/loss"]["opts"],
name="tag/loss",
)
]
)
def test_weights_scalar_handler_wrong_setup():
with pytest.raises(TypeError, match="Argument model should be of type torch.nn.Module"):
WeightsScalarHandler(None)
model = MagicMock(spec=torch.nn.Module)
with pytest.raises(TypeError, match="Argument reduction should be callable"):
WeightsScalarHandler(model, reduction=123)
with pytest.raises(TypeError, match="Output of the reduction function should be a scalar"):
WeightsScalarHandler(model, reduction=lambda x: x)
wrapper = WeightsScalarHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'WeightsScalarHandler' works only with VisdomLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_weights_scalar_handler():
class DummyModel(torch.nn.Module):
def __init__(self):
super(DummyModel, self).__init__()
self.fc1 = torch.nn.Linear(10, 10)
self.fc2 = torch.nn.Linear(12, 12)
self.fc1.weight.data.zero_()
self.fc1.bias.data.zero_()
self.fc2.weight.data.fill_(1.0)
self.fc2.bias.data.fill_(1.0)
model = DummyModel()
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = WeightsScalarHandler(model, tag=tag)
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = f"{tag}/" if tag else ""
assert mock_logger.vis.line.call_count == 4
mock_logger.vis.line.assert_has_calls(
[
call(
X=[5],
Y=[0.0],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows[tag_prefix + "weights_norm/fc1/weight"]["opts"],
name=tag_prefix + "weights_norm/fc1/weight",
),
call(
X=[5],
Y=[0.0],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows[tag_prefix + "weights_norm/fc1/bias"]["opts"],
name=tag_prefix + "weights_norm/fc1/bias",
),
call(
X=[5],
Y=[12.0],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows[tag_prefix + "weights_norm/fc2/weight"]["opts"],
name=tag_prefix + "weights_norm/fc2/weight",
),
call(
X=[5],
Y=ANY,
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows[tag_prefix + "weights_norm/fc2/bias"]["opts"],
name=tag_prefix + "weights_norm/fc2/bias",
),
],
any_order=True,
)
_test()
_test(tag="tag")
def test_weights_scalar_handler_custom_reduction():
class DummyModel(torch.nn.Module):
def __init__(self):
super(DummyModel, self).__init__()
self.fc1 = torch.nn.Linear(10, 10)
self.fc2 = torch.nn.Linear(12, 12)
self.fc1.weight.data.zero_()
self.fc1.bias.data.zero_()
self.fc2.weight.data.fill_(1.0)
self.fc2.bias.data.fill_(1.0)
model = DummyModel()
def norm(x):
return 12.34
wrapper = WeightsScalarHandler(model, reduction=norm, show_legend=True)
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.vis.line.call_count == 4
mock_logger.vis.line.assert_has_calls(
[
call(
X=[5],
Y=[12.34],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["weights_norm/fc1/weight"]["opts"],
name="weights_norm/fc1/weight",
),
call(
X=[5],
Y=[12.34],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["weights_norm/fc1/bias"]["opts"],
name="weights_norm/fc1/bias",
),
call(
X=[5],
Y=[12.34],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["weights_norm/fc2/weight"]["opts"],
name="weights_norm/fc2/weight",
),
call(
X=[5],
Y=[12.34],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["weights_norm/fc2/bias"]["opts"],
name="weights_norm/fc2/bias",
),
],
any_order=True,
)
def test_grads_scalar_handler_wrong_setup():
with pytest.raises(TypeError, match="Argument model should be of type torch.nn.Module"):
GradsScalarHandler(None)
model = MagicMock(spec=torch.nn.Module)
with pytest.raises(TypeError, match="Argument reduction should be callable"):
GradsScalarHandler(model, reduction=123)
wrapper = GradsScalarHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'GradsScalarHandler' works only with VisdomLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_grads_scalar_handler(dummy_model_factory, norm_mock):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = GradsScalarHandler(model, reduction=norm_mock, tag=tag)
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = f"{tag}/" if tag else ""
assert mock_logger.vis.line.call_count == 4
mock_logger.vis.line.assert_has_calls(
[
call(
X=[5],
Y=ANY,
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows[tag_prefix + "grads_norm/fc1/weight"]["opts"],
name=tag_prefix + "grads_norm/fc1/weight",
),
call(
X=[5],
Y=ANY,
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows[tag_prefix + "grads_norm/fc1/bias"]["opts"],
name=tag_prefix + "grads_norm/fc1/bias",
),
call(
X=[5],
Y=ANY,
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows[tag_prefix + "grads_norm/fc2/weight"]["opts"],
name=tag_prefix + "grads_norm/fc2/weight",
),
call(
X=[5],
Y=ANY,
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows[tag_prefix + "grads_norm/fc2/bias"]["opts"],
name=tag_prefix + "grads_norm/fc2/bias",
),
],
any_order=True,
)
_test()
_test(tag="tag")
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_integration_no_server():
with pytest.raises(ConnectionError, match="Error connecting to Visdom server"):
VisdomLogger()
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_logger_init_hostname_port(visdom_server):
# Explicit hostname, port
vd_logger = VisdomLogger(server=visdom_server[0], port=visdom_server[1], num_workers=0)
assert "main" in vd_logger.vis.get_env_list()
vd_logger.close()
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_logger_init_env_vars(visdom_server):
# As env vars
import os
os.environ["VISDOM_SERVER_URL"] = visdom_server[0]
os.environ["VISDOM_PORT"] = str(visdom_server[1])
    vd_logger = VisdomLogger(num_workers=0)
assert "main" in vd_logger.vis.get_env_list()
vd_logger.close()
def _parse_content(content):
import json
return json.loads(content)
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_integration_no_executor(visdom_server):
vd_logger = VisdomLogger(server=visdom_server[0], port=visdom_server[1], num_workers=0)
# close all windows in 'main' environment
vd_logger.vis.close()
n_epochs = 3
data = list(range(10))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
trainer = Engine(update_fn)
output_handler = OutputHandler(tag="training", output_transform=lambda x: {"loss": x})
vd_logger.attach(trainer, log_handler=output_handler, event_name=Events.ITERATION_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
assert len(output_handler.windows) == 1
assert "training/loss" in output_handler.windows
win_name = output_handler.windows["training/loss"]["win"]
    win_data = vd_logger.vis.get_window_data(win=win_name)
    win_data = _parse_content(win_data)
    assert "content" in win_data and "data" in win_data["content"]
    win_data = win_data["content"]["data"][0]
    assert "x" in win_data and "y" in win_data
    x_vals, y_vals = win_data["x"], win_data["y"]
    assert all([int(x) == x_true for x, x_true in zip(x_vals, list(range(1, n_epochs * len(data) + 1)))])
    assert all([y == y_true for y, y_true in zip(y_vals, losses)])
vd_logger.close()
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_integration_with_executor(visdom_server):
vd_logger = VisdomLogger(server=visdom_server[0], port=visdom_server[1], num_workers=1)
# close all windows in 'main' environment
vd_logger.vis.close()
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
trainer = Engine(update_fn)
output_handler = OutputHandler(tag="training", output_transform=lambda x: {"loss": x})
vd_logger.attach(trainer, log_handler=output_handler, event_name=Events.ITERATION_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
assert len(output_handler.windows) == 1
assert "training/loss" in output_handler.windows
win_name = output_handler.windows["training/loss"]["win"]
    win_data = vd_logger.vis.get_window_data(win=win_name)
    win_data = _parse_content(win_data)
    assert "content" in win_data and "data" in win_data["content"]
    win_data = win_data["content"]["data"][0]
    assert "x" in win_data and "y" in win_data
    x_vals, y_vals = win_data["x"], win_data["y"]
    assert all([int(x) == x_true for x, x_true in zip(x_vals, list(range(1, n_epochs * len(data) + 1)))])
    assert all([y == y_true for y, y_true in zip(y_vals, losses)])
vd_logger.close()
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_integration_with_executor_as_context_manager(visdom_server, visdom_server_stop):
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
with VisdomLogger(server=visdom_server[0], port=visdom_server[1], num_workers=1) as vd_logger:
# close all windows in 'main' environment
vd_logger.vis.close()
trainer = Engine(update_fn)
output_handler = OutputHandler(tag="training", output_transform=lambda x: {"loss": x})
vd_logger.attach(trainer, log_handler=output_handler, event_name=Events.ITERATION_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
assert len(output_handler.windows) == 1
assert "training/loss" in output_handler.windows
win_name = output_handler.windows["training/loss"]["win"]
        win_data = vd_logger.vis.get_window_data(win=win_name)
        win_data = _parse_content(win_data)
        assert "content" in win_data and "data" in win_data["content"]
        win_data = win_data["content"]["data"][0]
        assert "x" in win_data and "y" in win_data
        x_vals, y_vals = win_data["x"], win_data["y"]
        assert all([int(x) == x_true for x, x_true in zip(x_vals, list(range(1, n_epochs * len(data) + 1)))])
        assert all([y == y_true for y, y_true in zip(y_vals, losses)])
@pytest.mark.parametrize("no_site_packages", ["visdom"], indirect=True)
def test_no_visdom(no_site_packages):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires visdom package"):
VisdomLogger()
def test_no_concurrent():
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires concurrent.futures"):
with patch.dict("sys.modules", {"concurrent.futures": None}):
VisdomLogger(num_workers=1)
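# Minimal usage sketch: roughly how the Visdom handlers exercised by these tests
# are attached to a real trainer. `trainer` and `model` are hypothetical
# placeholders supplied by the caller, and the server/port defaults are
# assumptions for a locally running Visdom instance.
def _example_visdom_logger_setup(trainer, model, server="localhost", port=8097):
    # Create the logger and plot the loss returned by the trainer at every
    # completed iteration under "training/loss".
    vd_logger = VisdomLogger(server=server, port=port, num_workers=0)
    vd_logger.attach(
        trainer,
        log_handler=OutputHandler(tag="training", output_transform=lambda loss: {"loss": loss}),
        event_name=Events.ITERATION_COMPLETED,
    )
    # Plot the norm of every weight tensor once per epoch.
    vd_logger.attach(trainer, log_handler=WeightsScalarHandler(model), event_name=Events.EPOCH_COMPLETED)
    return vd_logger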
|
from unittest.mock import call, MagicMock
import pytest
import torch
from ignite.contrib.handlers.wandb_logger import (
global_step_from_engine,
OptimizerParamsHandler,
OutputHandler,
WandBLogger,
)
from ignite.engine import Events, State
def test_optimizer_params_handler_wrong_setup():
with pytest.raises(TypeError):
OptimizerParamsHandler(optimizer=None)
optimizer = MagicMock(spec=torch.optim.Optimizer)
handler = OptimizerParamsHandler(optimizer=optimizer)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler OptimizerParamsHandler works only with WandBLogger"):
handler(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_optimizer_params():
optimizer = torch.optim.SGD([torch.tensor(0.0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log.assert_called_once_with({"lr/group_0": 0.01}, step=123, sync=None)
wrapper = OptimizerParamsHandler(optimizer, param_name="lr", tag="generator")
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log.assert_called_once_with({"generator/lr/group_0": 0.01}, step=123, sync=None)
def test_output_handler_with_wrong_logger_type():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'OutputHandler' works only with WandBLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_output_handler_output_transform():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.output = 12345
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log.assert_called_once_with({"tag/output": 12345}, step=123, sync=None)
wrapper = OutputHandler("another_tag", output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log.assert_called_once_with({"another_tag/loss": 12345}, step=123, sync=None)
def test_output_handler_output_transform_sync():
wrapper = OutputHandler("tag", output_transform=lambda x: x, sync=False)
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.output = 12345
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log.assert_called_once_with({"tag/output": 12345}, step=123, sync=False)
wrapper = OutputHandler("another_tag", output_transform=lambda x: {"loss": x}, sync=True)
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log.assert_called_once_with({"another_tag/loss": 12345}, step=123, sync=True)
def test_output_handler_metric_names():
wrapper = OutputHandler("tag", metric_names=["a", "b"])
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 1, "b": 5})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log.assert_called_once_with({"tag/a": 1, "tag/b": 5}, step=5, sync=None)
wrapper = OutputHandler("tag", metric_names=["a", "c"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 55.56, "c": "Some text"})
mock_engine.state.iteration = 7
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log.assert_called_once_with({"tag/a": 55.56, "tag/c": "Some text"}, step=7, sync=None)
# all metrics
wrapper = OutputHandler("tag", metric_names="all")
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log.assert_called_once_with({"tag/a": 12.23, "tag/b": 23.45}, step=5, sync=None)
# log a torch vector
wrapper = OutputHandler("tag", metric_names="all")
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
vector = torch.tensor([0.1, 0.2, 0.1, 0.2, 0.33])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": vector})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log.assert_called_once_with({f"tag/a/{i}": vector[i].item() for i in range(5)}, step=5, sync=None)
# log warning
wrapper = OutputHandler("tag", metric_names=["a"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": [1, 2, 3, 4]})
mock_engine.state.iteration = 7
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
with pytest.warns(UserWarning, match=r"Logger output_handler can not log metrics value type"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_output_handler_both():
wrapper = OutputHandler("tag", metric_names=["a", "b"], output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.log.assert_called_once_with({"tag/a": 12.23, "tag/b": 23.45, "tag/loss": 12345}, step=5, sync=None)
def test_output_handler_with_wrong_global_step_transform_output():
def global_step_transform(*args, **kwargs):
return "a"
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
with pytest.raises(TypeError, match="global_step must be int"):
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
def test_output_handler_with_global_step_transform():
def global_step_transform(*args, **kwargs):
return 10
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.log.assert_called_once_with({"tag/loss": 12345}, step=10, sync=None)
def test_output_handler_with_global_step_from_engine():
mock_another_engine = MagicMock()
mock_another_engine.state = State()
mock_another_engine.state.epoch = 10
mock_another_engine.state.output = 12.345
wrapper = OutputHandler(
"tag",
output_transform=lambda x: {"loss": x},
global_step_transform=global_step_from_engine(mock_another_engine),
)
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 1
mock_engine.state.output = 0.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.log.assert_called_once_with(
{"tag/loss": mock_engine.state.output}, step=mock_another_engine.state.epoch, sync=None
)
mock_another_engine.state.epoch = 11
mock_engine.state.output = 1.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.log.call_count == 2
mock_logger.log.assert_has_calls(
[call({"tag/loss": mock_engine.state.output}, step=mock_another_engine.state.epoch, sync=None)]
)
def test_output_handler_state_attrs():
wrapper = OutputHandler("tag", state_attributes=["alpha", "beta", "gamma", "delta"])
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 5
mock_engine.state.alpha = 3.899
mock_engine.state.beta = torch.tensor(12.21)
mock_engine.state.gamma = torch.tensor([21.0, 6.0])
mock_engine.state.delta = "Some Text"
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log.assert_called_once_with(
{
"tag/alpha": 3.899,
"tag/beta": torch.tensor(12.21).item(),
"tag/gamma/0": 21.0,
"tag/gamma/1": 6.0,
"tag/delta": "Some Text",
},
step=5,
sync=None,
)
def test_wandb_close():
optimizer = torch.optim.SGD([torch.tensor(0.0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
mock_engine = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.close()
@pytest.mark.parametrize("no_site_packages", ["wandb"], indirect=True)
def test_no_wandb_client(no_site_packages):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires wandb to be installed."):
WandBLogger()
def test_wandb_getattr():
import wandb
logger = WandBLogger(init=False)
assert wandb.log == logger.log
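# Minimal usage sketch: typical wiring of the handlers exercised by these tests
# to a real trainer. `trainer` and `optimizer` are hypothetical placeholders,
# and the `project` keyword (forwarded to wandb.init) is an assumption made for
# illustration only.
def _example_wandb_logger_setup(trainer, optimizer, project="my-project"):
    wandb_logger = WandBLogger(project=project)
    # Log the trainer loss at every completed iteration under "training/loss".
    wandb_logger.attach(
        trainer,
        log_handler=OutputHandler(tag="training", output_transform=lambda loss: {"loss": loss}),
        event_name=Events.ITERATION_COMPLETED,
    )
    # Log the optimizer learning rate at the same cadence.
    wandb_logger.attach(
        trainer,
        log_handler=OptimizerParamsHandler(optimizer, param_name="lr"),
        event_name=Events.ITERATION_COMPLETED,
    )
    return wandb_logger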
|
import os
from unittest.mock import call, MagicMock
import pytest
import torch
from ignite.contrib.handlers.polyaxon_logger import (
global_step_from_engine,
OptimizerParamsHandler,
OutputHandler,
PolyaxonLogger,
)
from ignite.engine import Engine, Events, State
os.environ["POLYAXON_NO_OP"] = "1"
def test_output_handler_with_wrong_logger_type():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'OutputHandler' works only with PolyaxonLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_output_handler_output_transform():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.output = 12345
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with(step=123, **{"tag/output": 12345})
wrapper = OutputHandler("another_tag", output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with(step=123, **{"another_tag/loss": 12345})
def test_output_handler_metric_names():
wrapper = OutputHandler("tag", metric_names=["a", "b", "c"])
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45, "c": torch.tensor(10.0)})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/c": 10.0})
wrapper = OutputHandler("tag", metric_names=["a"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": torch.tensor([0.0, 1.0, 2.0, 3.0])})
mock_engine.state.iteration = 5
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_has_calls(
[call(step=5, **{"tag/a/0": 0.0, "tag/a/1": 1.0, "tag/a/2": 2.0, "tag/a/3": 3.0})], any_order=True
)
wrapper = OutputHandler("tag", metric_names=["a", "c"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 55.56, "c": "Some text"})
mock_engine.state.iteration = 7
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
with pytest.warns(UserWarning):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_has_calls([call(step=7, **{"tag/a": 55.56})], any_order=True)
# all metrics
wrapper = OutputHandler("tag", metric_names="all")
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45, "c": torch.tensor(10.0)})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/c": 10.0})
def test_output_handler_both():
wrapper = OutputHandler("tag", metric_names=["a", "b"], output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/loss": 12345})
def test_output_handler_with_wrong_global_step_transform_output():
def global_step_transform(*args, **kwargs):
return "a"
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
with pytest.raises(TypeError, match="global_step must be int"):
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
def test_output_handler_with_global_step_transform():
def global_step_transform(*args, **kwargs):
return 10
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.log_metrics.assert_called_once_with(step=10, **{"tag/loss": 12345})
def test_output_handler_with_global_step_from_engine():
mock_another_engine = MagicMock()
mock_another_engine.state = State()
mock_another_engine.state.epoch = 10
mock_another_engine.state.output = 12.345
wrapper = OutputHandler(
"tag",
output_transform=lambda x: {"loss": x},
global_step_transform=global_step_from_engine(mock_another_engine),
)
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 1
mock_engine.state.output = 0.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_has_calls(
[call(step=mock_another_engine.state.epoch, **{"tag/loss": mock_engine.state.output})]
)
mock_another_engine.state.epoch = 11
mock_engine.state.output = 1.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.log_metrics.call_count == 2
mock_logger.log_metrics.assert_has_calls(
[call(step=mock_another_engine.state.epoch, **{"tag/loss": mock_engine.state.output})]
)
def test_output_handler_state_attrs():
wrapper = OutputHandler("tag", state_attributes=["alpha", "beta", "gamma"])
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 5
mock_engine.state.alpha = 3.899
mock_engine.state.beta = torch.tensor(12.21)
mock_engine.state.gamma = torch.tensor([21.0, 6.0])
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with(
**{"tag/alpha": 3.899, "tag/beta": torch.tensor(12.21).item(), "tag/gamma/0": 21.0, "tag/gamma/1": 6.0}, step=5
)
def test_optimizer_params_handler_wrong_setup():
with pytest.raises(TypeError):
OptimizerParamsHandler(optimizer=None)
optimizer = MagicMock(spec=torch.optim.Optimizer)
handler = OptimizerParamsHandler(optimizer=optimizer)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler OptimizerParamsHandler works only with PolyaxonLogger"):
handler(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_optimizer_params():
optimizer = torch.optim.SGD([torch.tensor(0.0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with(**{"lr/group_0": 0.01, "step": 123})
wrapper = OptimizerParamsHandler(optimizer, param_name="lr", tag="generator")
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with(**{"generator/lr/group_0": 0.01, "step": 123})
def test_integration():
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
trainer = Engine(update_fn)
plx_logger = PolyaxonLogger()
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
logger.log_metrics(step=global_step, **{"test_value": global_step})
plx_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
plx_logger.close()
def test_integration_as_context_manager():
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
with PolyaxonLogger() as plx_logger:
trainer = Engine(update_fn)
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
logger.log_metrics(step=global_step, **{"test_value": global_step})
plx_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
@pytest.mark.parametrize("no_site_packages", ["polyaxon"], indirect=True)
def test_no_polyaxon_client(no_site_packages):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires polyaxon"):
PolyaxonLogger()
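# Minimal usage sketch: how the handlers exercised by these tests are typically
# attached in a training script. `trainer` and `optimizer` are hypothetical
# placeholders supplied by the caller.
def _example_polyaxon_logger_setup(trainer, optimizer):
    plx_logger = PolyaxonLogger()
    # Report the trainer loss once per completed epoch under "training/loss".
    plx_logger.attach(
        trainer,
        log_handler=OutputHandler(tag="training", output_transform=lambda loss: {"loss": loss}),
        event_name=Events.EPOCH_COMPLETED,
    )
    # Report the optimizer learning rate at the same cadence.
    plx_logger.attach(
        trainer,
        log_handler=OptimizerParamsHandler(optimizer, param_name="lr"),
        event_name=Events.EPOCH_COMPLETED,
    )
    return plx_logger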
|
# -*- coding: utf-8 -*-
import sys
import time
from argparse import Namespace
from unittest.mock import patch
import numpy as np
import pytest
import torch
from packaging.version import Version
from ignite.contrib.handlers import ProgressBar
from ignite.engine import Engine, Events
from ignite.handlers import TerminateOnNan
from ignite.metrics import RunningAverage
if sys.platform.startswith("win"):
pytest.skip("Skip on Windows", allow_module_level=True)
def get_tqdm_version():
import tqdm
return Version(tqdm.__version__)
def update_fn(engine, batch):
a = 1
engine.state.metrics["a"] = a
return a
def test_pbar_errors():
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires tqdm to be installed"):
with patch.dict("sys.modules", {"tqdm.autonotebook": None}):
ProgressBar()
pbar = ProgressBar()
with pytest.raises(ValueError, match=r"Logging event abc is not in allowed"):
pbar.attach(Engine(lambda e, b: None), event_name=Namespace(name="abc"))
def test_pbar(capsys):
n_epochs = 2
loader = [1, 2]
engine = Engine(update_fn)
pbar = ProgressBar()
pbar.attach(engine, ["a"])
engine.run(loader, max_epochs=n_epochs)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
if get_tqdm_version() < Version("4.49.0"):
expected = "Epoch [2/2]: [1/2] 50%|█████ , a=1 [00:00<00:00]"
else:
expected = "Epoch [2/2]: [1/2] 50%|█████ , a=1 [00:00<?]"
assert err[-1] == expected
def test_pbar_file(tmp_path):
n_epochs = 2
loader = [1, 2]
engine = Engine(update_fn)
file_path = tmp_path / "temp.txt"
file = open(str(file_path), "w+")
pbar = ProgressBar(file=file)
pbar.attach(engine, ["a"])
engine.run(loader, max_epochs=n_epochs)
file.close() # Force a flush of the buffer. file.flush() does not work.
file = open(str(file_path), "r")
lines = file.readlines()
if get_tqdm_version() < Version("4.49.0"):
expected = "Epoch [2/2]: [1/2] 50%|█████ , a=1 [00:00<00:00]\n"
else:
expected = "Epoch [2/2]: [1/2] 50%|█████ , a=1 [00:00<?]\n"
assert lines[-2] == expected
def test_pbar_log_message(capsys):
pbar = ProgressBar()
pbar.log_message("test")
captured = capsys.readouterr()
out = captured.out.split("\r")
out = list(map(lambda x: x.strip(), out))
out = list(filter(None, out))
expected = "test"
assert out[-1] == expected
def test_pbar_log_message_file(tmp_path):
file_path = tmp_path / "temp.txt"
file = open(str(file_path), "w+")
pbar = ProgressBar(file=file)
pbar.log_message("test")
file.close() # Force a flush of the buffer. file.flush() does not work.
file = open(str(file_path), "r")
lines = file.readlines()
expected = "test\n"
assert lines[0] == expected
def test_attach_fail_with_string():
engine = Engine(update_fn)
pbar = ProgressBar()
with pytest.raises(TypeError):
pbar.attach(engine, "a")
def test_pbar_batch_indeces(capsys):
engine = Engine(lambda e, b: time.sleep(0.1))
@engine.on(Events.ITERATION_STARTED)
def print_iter(_):
print("iteration: ", engine.state.iteration)
ProgressBar(persist=True).attach(engine)
engine.run(list(range(4)), max_epochs=1)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
printed_batch_indeces = set(map(lambda x: int(x.split("/")[0][-1]), err))
expected_batch_indeces = list(range(1, 5))
assert sorted(list(printed_batch_indeces)) == expected_batch_indeces
def test_pbar_with_metric(capsys):
n_iters = 2
data = list(range(n_iters))
loss_values = iter(range(n_iters))
def step(engine, batch):
loss_value = next(loss_values)
return loss_value
trainer = Engine(step)
RunningAverage(alpha=0.5, output_transform=lambda x: x).attach(trainer, "batchloss")
pbar = ProgressBar()
pbar.attach(trainer, metric_names=["batchloss"])
trainer.run(data=data, max_epochs=1)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
actual = err[-1]
if get_tqdm_version() < Version("4.49.0"):
expected = "Iteration: [1/2] 50%|█████ , batchloss=0.5 [00:00<00:00]"
else:
expected = "Iteration: [1/2] 50%|█████ , batchloss=0.5 [00:00<?]"
assert actual == expected
def test_pbar_with_all_metric(capsys):
n_iters = 2
data = list(range(n_iters))
loss_values = iter(range(n_iters))
another_loss_values = iter(range(1, n_iters + 1))
def step(engine, batch):
loss_value = next(loss_values)
another_loss_value = next(another_loss_values)
return loss_value, another_loss_value
trainer = Engine(step)
RunningAverage(alpha=0.5, output_transform=lambda x: x[0]).attach(trainer, "batchloss")
RunningAverage(alpha=0.5, output_transform=lambda x: x[1]).attach(trainer, "another batchloss")
pbar = ProgressBar()
pbar.attach(trainer, metric_names="all")
trainer.run(data=data, max_epochs=1)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
actual = err[-1]
if get_tqdm_version() < Version("4.49.0"):
expected = "Iteration: [1/2] 50%|█████ , batchloss=0.5, another batchloss=1.5 [00:00<00:00]"
else:
expected = "Iteration: [1/2] 50%|█████ , batchloss=0.5, another batchloss=1.5 [00:00<?]"
assert actual == expected
def test_pbar_with_state_attrs(capsys):
n_iters = 2
data = list(range(n_iters))
loss_values = iter(range(n_iters))
def step(engine, batch):
loss_value = next(loss_values)
return loss_value
trainer = Engine(step)
trainer.state.alpha = 3.899
trainer.state.beta = torch.tensor(12.21)
trainer.state.gamma = torch.tensor([21.0, 6.0])
RunningAverage(alpha=0.5, output_transform=lambda x: x).attach(trainer, "batchloss")
pbar = ProgressBar()
pbar.attach(trainer, metric_names=["batchloss"], state_attributes=["alpha", "beta", "gamma"])
trainer.run(data=data, max_epochs=1)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
actual = err[-1]
if get_tqdm_version() < Version("4.49.0"):
expected = (
"Iteration: [1/2] 50%|█████ , batchloss=0.5, alpha=3.9, beta=12.2, gamma_0=21, gamma_1=6 [00:00<00:00]"
)
else:
expected = (
"Iteration: [1/2] 50%|█████ , batchloss=0.5, alpha=3.9, beta=12.2, gamma_0=21, gamma_1=6 [00:00<?]"
)
assert actual == expected
def test_pbar_no_metric_names(capsys):
n_epochs = 2
loader = [1, 2]
engine = Engine(update_fn)
pbar = ProgressBar()
pbar.attach(engine)
engine.run(loader, max_epochs=n_epochs)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
actual = err[-1]
if get_tqdm_version() < Version("4.49.0"):
expected = "Epoch [2/2]: [1/2] 50%|█████ [00:00<00:00]"
else:
expected = "Epoch [2/2]: [1/2] 50%|█████ [00:00<?]"
assert actual == expected
def test_pbar_with_output(capsys):
n_epochs = 2
loader = [1, 2]
engine = Engine(update_fn)
pbar = ProgressBar()
pbar.attach(engine, output_transform=lambda x: {"a": x})
engine.run(loader, max_epochs=n_epochs)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
if get_tqdm_version() < Version("4.49.0"):
expected = "Epoch [2/2]: [1/2] 50%|█████ , a=1 [00:00<00:00]"
else:
expected = "Epoch [2/2]: [1/2] 50%|█████ , a=1 [00:00<?]"
assert err[-1] == expected
def test_pbar_fail_with_non_callable_transform():
engine = Engine(update_fn)
pbar = ProgressBar()
with pytest.raises(TypeError):
pbar.attach(engine, output_transform=1)
def test_pbar_with_scalar_output(capsys):
n_epochs = 2
loader = [1, 2]
engine = Engine(update_fn)
pbar = ProgressBar()
pbar.attach(engine, output_transform=lambda x: x)
engine.run(loader, max_epochs=n_epochs)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
if get_tqdm_version() < Version("4.49.0"):
expected = "Epoch [2/2]: [1/2] 50%|█████ , output=1 [00:00<00:00]"
else:
expected = "Epoch [2/2]: [1/2] 50%|█████ , output=1 [00:00<?]"
assert err[-1] == expected
def test_pbar_with_str_output(capsys):
n_epochs = 2
loader = [1, 2]
engine = Engine(update_fn)
pbar = ProgressBar()
pbar.attach(engine, output_transform=lambda x: "red")
engine.run(loader, max_epochs=n_epochs)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
if get_tqdm_version() < Version("4.49.0"):
expected = "Epoch [2/2]: [1/2] 50%|█████ , output=red [00:00<00:00]"
else:
expected = "Epoch [2/2]: [1/2] 50%|█████ , output=red [00:00<?]"
assert err[-1] == expected
def test_pbar_with_tqdm_kwargs(capsys):
n_epochs = 10
loader = [1, 2, 3, 4, 5]
engine = Engine(update_fn)
pbar = ProgressBar(desc="My description: ")
pbar.attach(engine, output_transform=lambda x: x)
engine.run(loader, max_epochs=n_epochs)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
expected = "My description: [10/10]: [4/5] 80%|████████ , output=1 [00:00<00:00]"
assert err[-1] == expected
def test_pbar_for_validation(capsys):
loader = [1, 2, 3, 4, 5]
engine = Engine(update_fn)
pbar = ProgressBar(desc="Validation")
pbar.attach(engine)
engine.run(loader, max_epochs=1)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
expected = "Validation: [4/5] 80%|████████ [00:00<00:00]"
assert err[-1] == expected
def test_pbar_output_tensor(capsys):
def _test(out_tensor, out_msg):
loader = [1, 2, 3, 4, 5]
def update_fn(engine, batch):
return out_tensor
engine = Engine(update_fn)
pbar = ProgressBar(desc="Output tensor")
pbar.attach(engine, output_transform=lambda x: x)
engine.run(loader, max_epochs=1)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
expected = f"Output tensor: [4/5] 80%|████████ , {out_msg} [00:00<00:00]"
assert err[-1] == expected
_test(out_tensor=torch.tensor([5, 0]), out_msg="output_0=5, output_1=0")
_test(out_tensor=torch.tensor(123), out_msg="output=123")
_test(out_tensor=torch.tensor(1.234), out_msg="output=1.23")
def test_pbar_output_warning(capsys):
loader = [1, 2, 3, 4, 5]
def update_fn(engine, batch):
return torch.zeros(1, 2, 3, 4)
engine = Engine(update_fn)
pbar = ProgressBar(desc="Output tensor")
pbar.attach(engine, output_transform=lambda x: x)
with pytest.warns(UserWarning):
engine.run(loader, max_epochs=1)
def test_pbar_on_epochs(capsys):
n_epochs = 10
loader = [1, 2, 3, 4, 5]
engine = Engine(update_fn)
pbar = ProgressBar()
pbar.attach(engine, event_name=Events.EPOCH_STARTED, closing_event_name=Events.COMPLETED)
engine.run(loader, max_epochs=n_epochs)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
actual = err[-1]
expected = "Epoch: [9/10] 90%|█████████ [00:00<00:00]"
assert actual == expected
def test_pbar_with_max_epochs_set_to_one(capsys):
n_epochs = 1
loader = [1, 2]
engine = Engine(update_fn)
pbar = ProgressBar()
pbar.attach(engine, ["a"])
engine.run(loader, max_epochs=n_epochs)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
if get_tqdm_version() < Version("4.49.0"):
expected = "Iteration: [1/2] 50%|█████ , a=1 [00:00<00:00]"
else:
expected = "Iteration: [1/2] 50%|█████ , a=1 [00:00<?]"
assert err[-1] == expected
def test_pbar_wrong_events_order():
engine = Engine(update_fn)
pbar = ProgressBar()
with pytest.raises(ValueError, match="should be called before closing event"):
pbar.attach(engine, event_name=Events.COMPLETED, closing_event_name=Events.COMPLETED)
with pytest.raises(ValueError, match="should be called before closing event"):
pbar.attach(engine, event_name=Events.COMPLETED, closing_event_name=Events.EPOCH_COMPLETED)
with pytest.raises(ValueError, match="should be called before closing event"):
pbar.attach(engine, event_name=Events.COMPLETED, closing_event_name=Events.ITERATION_COMPLETED)
with pytest.raises(ValueError, match="should be called before closing event"):
pbar.attach(engine, event_name=Events.EPOCH_COMPLETED, closing_event_name=Events.EPOCH_COMPLETED)
with pytest.raises(ValueError, match="should be called before closing event"):
pbar.attach(engine, event_name=Events.ITERATION_COMPLETED, closing_event_name=Events.ITERATION_STARTED)
with pytest.raises(ValueError, match="should not be a filtered event"):
pbar.attach(engine, event_name=Events.ITERATION_STARTED, closing_event_name=Events.EPOCH_COMPLETED(every=10))
def test_pbar_with_nan_input():
def update(engine, batch):
x = batch
return x.item()
def create_engine():
engine = Engine(update)
pbar = ProgressBar()
engine.add_event_handler(Events.ITERATION_COMPLETED, TerminateOnNan())
pbar.attach(engine, event_name=Events.EPOCH_COMPLETED, closing_event_name=Events.COMPLETED)
return engine
data = torch.from_numpy(np.array([np.nan] * 25))
engine = create_engine()
engine.run(data)
assert engine.should_terminate
assert engine.state.iteration == 1
assert engine.state.epoch == 1
data = torch.from_numpy(np.array([1] * 1000 + [np.nan] * 25))
engine = create_engine()
engine.run(data)
assert engine.should_terminate
assert engine.state.iteration == 1001
assert engine.state.epoch == 1
def test_pbar_on_callable_events(capsys):
n_epochs = 1
loader = list(range(100))
engine = Engine(update_fn)
pbar = ProgressBar()
pbar.attach(engine, event_name=Events.ITERATION_STARTED(every=10), closing_event_name=Events.EPOCH_COMPLETED)
engine.run(loader, max_epochs=n_epochs)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
actual = err[-1]
expected = "Iteration: [90/100] 90%|█████████ [00:00<00:00]"
assert actual == expected
def test_tqdm_logger_epoch_length(capsys):
loader = list(range(100))
engine = Engine(update_fn)
pbar = ProgressBar(persist=True)
pbar.attach(engine)
engine.run(loader, epoch_length=50)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
actual = err[-1]
expected = "Iteration: [50/50] 100%|██████████ [00:00<00:00]"
assert actual == expected
def test_tqdm_logger_iter_without_epoch_length(capsys):
size = 11
def finite_size_data_iter(size):
for i in range(size):
yield i
def train_step(trainer, batch):
pass
trainer = Engine(train_step)
@trainer.on(Events.ITERATION_COMPLETED(every=size))
def restart_iter():
trainer.state.dataloader = finite_size_data_iter(size)
pbar = ProgressBar(persist=True)
pbar.attach(trainer)
data_iter = finite_size_data_iter(size)
trainer.run(data_iter, max_epochs=5)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
actual = err[-1]
expected = "Epoch [5/5]: [11/11] 100%|██████████ [00:00<00:00]"
assert actual == expected
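# Minimal usage sketch: the common way the ProgressBar exercised by these tests
# is used in a training script. `trainer` is a hypothetical placeholder assumed
# to have a "batchloss" metric attached (e.g. via RunningAverage, as above).
def _example_progress_bar_setup(trainer):
    # persist=True keeps the finished bar of each epoch on screen instead of clearing it.
    pbar = ProgressBar(persist=True)
    # Display the running "batchloss" metric next to the bar.
    pbar.attach(trainer, metric_names=["batchloss"])
    # Messages routed through the bar do not break the progress display.
    pbar.log_message("Training started")
    return pbar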
|
import math
import os
from unittest.mock import ANY, call, MagicMock, patch
import pytest
import torch
from ignite.contrib.handlers.tensorboard_logger import (
global_step_from_engine,
GradsHistHandler,
GradsScalarHandler,
OptimizerParamsHandler,
OutputHandler,
TensorboardLogger,
WeightsHistHandler,
WeightsScalarHandler,
)
from ignite.engine import Engine, Events, State
def test_optimizer_params_handler_wrong_setup():
with pytest.raises(TypeError):
OptimizerParamsHandler(optimizer=None)
optimizer = MagicMock(spec=torch.optim.Optimizer)
handler = OptimizerParamsHandler(optimizer=optimizer)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler OptimizerParamsHandler works only with TensorboardLogger"):
handler(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_getattr_method():
# Create a mock SummaryWriter object
mock_writer = MagicMock()
    # Assign the mock object to the writer attribute of a TensorboardLogger instance
logger = TensorboardLogger()
logger.writer = mock_writer
    # Test that a method passed through the __getattr__ method calls the corresponding method on the mock object
logger.add_scalar("loss", 0.5)
mock_writer.add_scalar.assert_called_once_with("loss", 0.5)
def test_optimizer_params():
optimizer = torch.optim.SGD([torch.tensor(0.0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.writer.add_scalar.assert_called_once_with("lr/group_0", 0.01, 123)
wrapper = OptimizerParamsHandler(optimizer, param_name="lr", tag="generator")
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.writer.add_scalar.assert_called_once_with("generator/lr/group_0", 0.01, 123)
def test_output_handler_with_wrong_logger_type():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'OutputHandler' works only with TensorboardLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_output_handler_output_transform():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.output = 12345
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.writer.add_scalar.assert_called_once_with("tag/output", 12345, 123)
wrapper = OutputHandler("another_tag", output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.writer.add_scalar.assert_called_once_with("another_tag/loss", 12345, 123)
def test_output_handler_metric_names():
wrapper = OutputHandler("tag", metric_names=["a", "b"])
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.writer.add_scalar.call_count == 2
mock_logger.writer.add_scalar.assert_has_calls([call("tag/a", 12.23, 5), call("tag/b", 23.45, 5)], any_order=True)
wrapper = OutputHandler("tag", metric_names=["a"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": torch.tensor([0.0, 1.0, 2.0, 3.0])})
mock_engine.state.iteration = 5
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.writer.add_scalar.call_count == 4
mock_logger.writer.add_scalar.assert_has_calls(
[call("tag/a/0", 0.0, 5), call("tag/a/1", 1.0, 5), call("tag/a/2", 2.0, 5), call("tag/a/3", 3.0, 5)],
any_order=True,
)
wrapper = OutputHandler("tag", metric_names=["a", "c"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 55.56, "c": "Some text"})
mock_engine.state.iteration = 7
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
with pytest.warns(UserWarning):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.writer.add_scalar.call_count == 1
mock_logger.writer.add_scalar.assert_has_calls([call("tag/a", 55.56, 7)], any_order=True)
# all metrics
wrapper = OutputHandler("tag", metric_names="all")
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.writer.add_scalar.call_count == 2
mock_logger.writer.add_scalar.assert_has_calls([call("tag/a", 12.23, 5), call("tag/b", 23.45, 5)], any_order=True)
# log a torch tensor (ndimension = 0)
wrapper = OutputHandler("tag", metric_names="all")
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": torch.tensor(12.23), "b": torch.tensor(23.45)})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.writer.add_scalar.call_count == 2
mock_logger.writer.add_scalar.assert_has_calls(
[call("tag/a", torch.tensor(12.23).item(), 5), call("tag/b", torch.tensor(23.45).item(), 5)], any_order=True
)
def test_output_handler_both():
wrapper = OutputHandler("tag", metric_names=["a", "b"], output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.writer.add_scalar.call_count == 3
mock_logger.writer.add_scalar.assert_has_calls(
[call("tag/a", 12.23, 5), call("tag/b", 23.45, 5), call("tag/loss", 12345, 5)], any_order=True
)
def test_output_handler_with_wrong_global_step_transform_output():
def global_step_transform(*args, **kwargs):
return "a"
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
with pytest.raises(TypeError, match="global_step must be int"):
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
def test_output_handler_with_global_step_from_engine():
mock_another_engine = MagicMock()
mock_another_engine.state = State()
mock_another_engine.state.epoch = 10
mock_another_engine.state.output = 12.345
wrapper = OutputHandler(
"tag",
output_transform=lambda x: {"loss": x},
global_step_transform=global_step_from_engine(mock_another_engine),
)
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 1
mock_engine.state.output = 0.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.writer.add_scalar.call_count == 1
mock_logger.writer.add_scalar.assert_has_calls(
[call("tag/loss", mock_engine.state.output, mock_another_engine.state.epoch)]
)
mock_another_engine.state.epoch = 11
mock_engine.state.output = 1.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.writer.add_scalar.call_count == 2
mock_logger.writer.add_scalar.assert_has_calls(
[call("tag/loss", mock_engine.state.output, mock_another_engine.state.epoch)]
)
def test_output_handler_with_global_step_transform():
def global_step_transform(*args, **kwargs):
return 10
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.writer.add_scalar.call_count == 1
mock_logger.writer.add_scalar.assert_has_calls([call("tag/loss", 12345, 10)])
def test_weights_scalar_handler_wrong_setup():
model = MagicMock(spec=torch.nn.Module)
wrapper = WeightsScalarHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'WeightsScalarHandler' works only with TensorboardLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_weights_scalar_handler(dummy_model_factory):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = WeightsScalarHandler(model, tag=tag)
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = f"{tag}/" if tag else ""
assert mock_logger.writer.add_scalar.call_count == 4
mock_logger.writer.add_scalar.assert_has_calls(
[
call(tag_prefix + "weights_norm/fc1/weight", 0.0, 5),
call(tag_prefix + "weights_norm/fc1/bias", 0.0, 5),
call(tag_prefix + "weights_norm/fc2/weight", 12.0, 5),
call(tag_prefix + "weights_norm/fc2/bias", pytest.approx(math.sqrt(12.0)), 5),
],
any_order=True,
)
_test()
_test(tag="tag")
def test_weights_scalar_handler_whitelist(dummy_model_factory):
model = dummy_model_factory()
wrapper = WeightsScalarHandler(model, whitelist=["fc2.weight"])
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_scalar.assert_called_once_with("weights_norm/fc2/weight", 12.0, 5)
mock_logger.writer.reset_mock()
wrapper = WeightsScalarHandler(model, tag="model", whitelist=["fc1"])
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_scalar.assert_has_calls(
[
call("model/weights_norm/fc1/weight", 0.0, 5),
call("model/weights_norm/fc1/bias", 0.0, 5),
],
any_order=True,
)
assert mock_logger.writer.add_scalar.call_count == 2
mock_logger.writer.reset_mock()
def weight_selector(n, _):
return "bias" in n
wrapper = WeightsScalarHandler(model, tag="model", whitelist=weight_selector)
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_scalar.assert_has_calls(
[
call("model/weights_norm/fc1/bias", 0.0, 5),
call("model/weights_norm/fc2/bias", pytest.approx(math.sqrt(12.0)), 5),
],
any_order=True,
)
assert mock_logger.writer.add_scalar.call_count == 2
def test_weights_hist_handler_wrong_setup():
model = MagicMock(spec=torch.nn.Module)
wrapper = WeightsHistHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'WeightsHistHandler' works only with TensorboardLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_weights_hist_handler(dummy_model_factory):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = WeightsHistHandler(model, tag=tag)
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = f"{tag}/" if tag else ""
assert mock_logger.writer.add_histogram.call_count == 4
mock_logger.writer.add_histogram.assert_has_calls(
[
call(tag=tag_prefix + "weights/fc1/weight", values=ANY, global_step=5),
call(tag=tag_prefix + "weights/fc1/bias", values=ANY, global_step=5),
call(tag=tag_prefix + "weights/fc2/weight", values=ANY, global_step=5),
call(tag=tag_prefix + "weights/fc2/bias", values=ANY, global_step=5),
],
any_order=True,
)
_test()
_test(tag="tag")
def test_weights_hist_handler_whitelist(dummy_model_factory):
model = dummy_model_factory()
wrapper = WeightsHistHandler(model, whitelist=["fc2.weight"])
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_histogram.assert_called_once_with(tag="weights/fc2/weight", values=ANY, global_step=5)
mock_logger.writer.reset_mock()
wrapper = WeightsHistHandler(model, tag="model", whitelist=["fc1"])
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_histogram.assert_has_calls(
[
call(tag="model/weights/fc1/weight", values=ANY, global_step=5),
call(tag="model/weights/fc1/bias", values=ANY, global_step=5),
],
any_order=True,
)
assert mock_logger.writer.add_histogram.call_count == 2
mock_logger.writer.reset_mock()
def weight_selector(n, _):
return "bias" in n
wrapper = WeightsHistHandler(model, tag="model", whitelist=weight_selector)
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_histogram.assert_has_calls(
[
call(tag="model/weights/fc1/bias", values=ANY, global_step=5),
call(tag="model/weights/fc2/bias", values=ANY, global_step=5),
],
any_order=True,
)
assert mock_logger.writer.add_histogram.call_count == 2
def test_grads_scalar_handler_wrong_setup():
model = MagicMock(spec=torch.nn.Module)
wrapper = GradsScalarHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'GradsScalarHandler' works only with TensorboardLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_grads_scalar_handler(dummy_model_factory, norm_mock):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = GradsScalarHandler(model, reduction=norm_mock, tag=tag)
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
norm_mock.reset_mock()
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = f"{tag}/" if tag else ""
mock_logger.writer.add_scalar.assert_has_calls(
[
call(tag_prefix + "grads_norm/fc1/weight", ANY, 5),
call(tag_prefix + "grads_norm/fc1/bias", ANY, 5),
call(tag_prefix + "grads_norm/fc2/weight", ANY, 5),
call(tag_prefix + "grads_norm/fc2/bias", ANY, 5),
],
any_order=True,
)
assert mock_logger.writer.add_scalar.call_count == 4
assert norm_mock.call_count == 4
_test()
_test(tag="tag")
def test_grads_scalar_handler_whitelist(dummy_model_factory, norm_mock):
model = dummy_model_factory()
wrapper = GradsScalarHandler(model, reduction=norm_mock, whitelist=["fc2.weight"])
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_scalar.assert_called_once_with("grads_norm/fc2/weight", ANY, 5)
mock_logger.writer.reset_mock()
wrapper = GradsScalarHandler(model, tag="model", whitelist=["fc1"])
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_scalar.assert_has_calls(
[
call("model/grads_norm/fc1/weight", ANY, 5),
call("model/grads_norm/fc1/bias", ANY, 5),
],
any_order=True,
)
assert mock_logger.writer.add_scalar.call_count == 2
mock_logger.writer.reset_mock()
def weight_selector(n, _):
return "bias" in n
wrapper = GradsScalarHandler(model, tag="model", whitelist=weight_selector)
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_scalar.assert_has_calls(
[
call("model/grads_norm/fc1/bias", ANY, 5),
call("model/grads_norm/fc2/bias", ANY, 5),
],
any_order=True,
)
assert mock_logger.writer.add_scalar.call_count == 2
def test_grads_hist_handler_wrong_setup():
model = MagicMock(spec=torch.nn.Module)
wrapper = GradsHistHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'GradsHistHandler' works only with TensorboardLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_grads_hist_handler(dummy_model_factory):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = GradsHistHandler(model, tag=tag)
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = f"{tag}/" if tag else ""
assert mock_logger.writer.add_histogram.call_count == 4
mock_logger.writer.add_histogram.assert_has_calls(
[
call(tag=tag_prefix + "grads/fc1/weight", values=ANY, global_step=5),
call(tag=tag_prefix + "grads/fc1/bias", values=ANY, global_step=5),
call(tag=tag_prefix + "grads/fc2/weight", values=ANY, global_step=5),
call(tag=tag_prefix + "grads/fc2/bias", values=ANY, global_step=5),
],
any_order=True,
)
_test()
_test(tag="tag")
def test_grads_hist_handler_whitelist(dummy_model_factory):
model = dummy_model_factory()
wrapper = GradsHistHandler(model, whitelist=["fc2.weight"])
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_histogram.assert_called_once_with(tag="grads/fc2/weight", values=ANY, global_step=5)
mock_logger.writer.reset_mock()
wrapper = GradsHistHandler(model, tag="model", whitelist=["fc1"])
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_histogram.assert_has_calls(
[
call(tag="model/grads/fc1/weight", values=ANY, global_step=5),
call(tag="model/grads/fc1/bias", values=ANY, global_step=5),
],
any_order=True,
)
assert mock_logger.writer.add_histogram.call_count == 2
mock_logger.writer.reset_mock()
def weight_selector(n, _):
return "bias" in n
wrapper = GradsHistHandler(model, tag="model", whitelist=weight_selector)
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_histogram.assert_has_calls(
[
call(tag="model/grads/fc1/bias", values=ANY, global_step=5),
call(tag="model/grads/fc2/bias", values=ANY, global_step=5),
],
any_order=True,
)
assert mock_logger.writer.add_histogram.call_count == 2
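# End-to-end check: attach a TensorboardLogger to a running trainer and verify that tfevents files are written to disk.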
def test_integration(dirname):
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
trainer = Engine(update_fn)
tb_logger = TensorboardLogger(log_dir=dirname)
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
logger.writer.add_scalar("test_value", global_step, global_step)
tb_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
tb_logger.close()
# Check if event files are present
written_files = os.listdir(dirname)
written_files = [f for f in written_files if "tfevents" in f]
assert len(written_files) > 0
def test_integration_as_context_manager(dirname):
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
with TensorboardLogger(log_dir=dirname) as tb_logger:
trainer = Engine(update_fn)
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
logger.writer.add_scalar("test_value", global_step, global_step)
tb_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
# Check if event files are present
written_files = os.listdir(dirname)
written_files = [f for f in written_files if "tfevents" in f]
assert len(written_files) > 0
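# TensorboardLogger should fall back to whichever SummaryWriter backend is installed
# (torch.utils.tensorboard or tensorboardX) and raise if neither is available.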
def test_no_tensorboardX_package(dirname):
from torch.utils.tensorboard import SummaryWriter
with patch.dict("sys.modules", {"tensorboardX": None}):
tb_logger = TensorboardLogger(log_dir=dirname)
assert isinstance(tb_logger.writer, SummaryWriter), type(tb_logger.writer)
tb_logger.close()
def test_no_torch_utils_tensorboard_package(dirname):
from tensorboardX import SummaryWriter
with patch.dict("sys.modules", {"torch.utils.tensorboard": None}):
tb_logger = TensorboardLogger(log_dir=dirname)
assert isinstance(tb_logger.writer, SummaryWriter), type(tb_logger.writer)
tb_logger.close()
def test_no_tensorboardX_nor_torch_utils_tensorboard():
with patch.dict("sys.modules", {"tensorboardX": None, "torch.utils.tensorboard": None}):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires either tensorboardX or torch"):
TensorboardLogger(log_dir=None)
|
import os
import random
import sys
from collections.abc import Mapping
from unittest.mock import patch
import numpy as np
import pytest
import torch
import torch.nn as nn
from torch.optim import SGD
from torch.utils.data import BatchSampler, DataLoader, RandomSampler
import ignite.distributed as idist
from ignite.engine import Events
from ignite.engine.deterministic import (
_set_rng_states,
DeterministicEngine,
keep_random_state,
ReproducibleBatchSampler,
update_dataloader,
)
from ignite.utils import manual_seed
from tests.ignite.engine import BatchChecker, setup_sampler
def test_dengine_setup_seed_div_by_zero():
with pytest.raises(ValueError, match=r"iter_counter should be positive value"):
DeterministicEngine(lambda e, b: None)._setup_seed(iter_counter=0)
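# update_dataloader should wrap the batch sampler with ReproducibleBatchSampler without changing the data:
# the wrapped loader must yield the same batches as the original one over several epochs.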
def test_update_dataloader():
def _test(sampler_type=None):
num_epochs = 3
total_batch_size = 4
num_iters = 17
data = torch.randint(0, 1000, size=(num_iters * total_batch_size,))
num_workers = 2
sampler, batch_size = setup_sampler(sampler_type, num_iters, total_batch_size)
dataloader = DataLoader(
data,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=False,
sampler=sampler,
drop_last=True,
shuffle=sampler is None,
)
torch.manual_seed(12)
seen_batches = []
for i in range(num_epochs):
t = []
if sampler_type == "distributed":
sampler.set_epoch(i)
for b in dataloader:
t.append(b)
seen_batches.append(t)
sampler, batch_size = setup_sampler(sampler_type, num_iters, total_batch_size)
dataloader = DataLoader(
data,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=False,
sampler=sampler,
drop_last=True,
shuffle=sampler is None,
)
batch_sampler = dataloader.batch_sampler
new_dataloader = update_dataloader(dataloader, ReproducibleBatchSampler(batch_sampler))
torch.manual_seed(12)
new_batches = []
for i in range(num_epochs):
t = []
if sampler_type == "distributed":
sampler.set_epoch(i)
for b in new_dataloader:
t.append(b)
new_batches.append(t)
for i in range(num_epochs):
assert all([(b1 == b2).all() for b1, b2 in zip(seen_batches[i], new_batches[i])])
_test()
_test("weighted")
_test("distributed")
def test_reproducible_batch_sampler_wrong_input():
with pytest.raises(TypeError, match=r"Argument batch_sampler should be torch.utils.data.sampler.BatchSampler"):
ReproducibleBatchSampler("abc")
def test_reproducible_batch_sampler():
data = list(range(100))
dataloader = DataLoader(data, batch_size=12, num_workers=0, shuffle=True, drop_last=True)
torch.manual_seed(12 + 0)
dataloader_ = update_dataloader(dataloader, ReproducibleBatchSampler(dataloader.batch_sampler))
seen_batches = []
num_epochs = 3
for i in range(num_epochs):
t = []
for b in dataloader_:
t.append(b)
seen_batches.append(t)
torch.manual_seed(12 + i + 1)
for i in range(num_epochs - 1):
for j in range(i + 1, num_epochs):
assert not all([(b1 == b2).all() for b1, b2 in zip(seen_batches[i], seen_batches[j])])
for resume_epoch in range(num_epochs):
torch.manual_seed(12 + resume_epoch)
dataloader_ = update_dataloader(dataloader, ReproducibleBatchSampler(dataloader.batch_sampler))
resumed_seen_batches = []
for b in dataloader_:
resumed_seen_batches.append(b)
assert all([(b1 == b2).all() for b1, b2 in zip(seen_batches[resume_epoch], resumed_seen_batches)])
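# keep_random_state should restore the global RNG states after the decorated handler runs,
# so random draws made after the handler match those of an uninterrupted run.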
def _test_keep_random_state(with_numpy):
manual_seed(54)
true_values = []
for _ in range(5):
t = [
torch.tensor([random.random()]),
torch.rand(2),
]
if with_numpy:
t.append(torch.from_numpy(np.random.rand(2)))
true_values.append(t)
@keep_random_state
def user_handler():
manual_seed(22)
_ = [
random.random(),
torch.rand(2),
]
if with_numpy:
_ = np.random.rand(2)
manual_seed(54)
res_values = []
for _ in range(5):
r = [
torch.tensor([random.random()]),
torch.rand(2),
]
if with_numpy:
r.append(torch.from_numpy(np.random.rand(2)))
res_values.append(r)
user_handler()
for a, b in zip(true_values, res_values):
for i, j in zip(a, b):
assert (i == j).all()
def test_keep_random_state():
_test_keep_random_state(with_numpy=True)
def test_keep_random_state_without_numpy():
with patch.dict("sys.modules", {"numpy": None}):
_test_keep_random_state(with_numpy=False)
def test_strict_resume_from_iter():
def _test(epoch_length=None):
max_epochs = 5
num_iters = 21
torch.manual_seed(0)
data = torch.randint(0, 1000, size=(num_iters,))
if epoch_length is None:
epoch_length = num_iters
for resume_iteration in range(2, min(num_iters * max_epochs, epoch_length * max_epochs), 4):
batch_checker = BatchChecker(data, init_counter=resume_iteration)
def update_fn(_, batch):
assert batch_checker.check(
batch
), f"{resume_iteration} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
engine = DeterministicEngine(update_fn)
@engine.on(Events.EPOCH_COMPLETED)
def check_iteration(_):
assert engine.state.iteration == batch_checker.counter
resume_state_dict = dict(
iteration=resume_iteration, max_epochs=max_epochs, epoch_length=epoch_length, rng_states=None
)
engine.load_state_dict(resume_state_dict)
engine.run(data)
assert engine.state.epoch == max_epochs
assert engine.state.iteration == epoch_length * max_epochs
_test()
_test(60)
_test(15)
def test_strict_resume_from_epoch():
def _test(epoch_length=None):
max_epochs = 10
num_iters = 21
torch.manual_seed(0)
data = torch.randint(0, 1000, size=(num_iters,))
if epoch_length is None:
epoch_length = num_iters
for resume_epoch in range(1, max_epochs):
batch_checker = BatchChecker(data, init_counter=resume_epoch * epoch_length)
def update_fn(_, batch):
assert batch_checker.check(
batch
), f"{resume_epoch} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
engine = DeterministicEngine(update_fn)
resume_state_dict = dict(
epoch=resume_epoch, max_epochs=max_epochs, epoch_length=epoch_length, rng_states=None
)
engine.load_state_dict(resume_state_dict)
engine.run(data)
assert engine.state.epoch == max_epochs
assert engine.state.iteration == epoch_length * max_epochs
_test()
_test(60)
_test(15)
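# Run a full training over a random dataloader, then resume a fresh engine from a given epoch
# and check that it replays exactly the same batches as the original run.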
def _test_resume_random_dataloader_from_epoch(device, _setup_sampler, sampler_type=None):
def _test(epoch_length=None):
max_epochs = 5
total_batch_size = 4
num_iters = 21
torch.manual_seed(0)
data = torch.randint(0, 1000, size=(num_iters * total_batch_size,))
if epoch_length is None:
epoch_length = num_iters
for resume_epoch in range(1, max_epochs, 2):
for num_workers in [0, 2]:
sampler, batch_size = _setup_sampler(sampler_type, num_iters, total_batch_size)
orig_dataloader = DataLoader(
data,
batch_size=batch_size,
num_workers=num_workers,
pin_memory="cuda" in torch.device(device).type,
sampler=sampler,
drop_last=True,
shuffle=sampler is None,
)
seen_batchs = []
def update_fn(_, batch):
batch_to_device = batch.to(device)
seen_batchs.append(batch)
engine = DeterministicEngine(update_fn)
if sampler_type == "distributed":
@engine.on(Events.EPOCH_STARTED)
def _(engine):
sampler.set_epoch(engine.state.epoch - 1)
torch.manual_seed(87)
engine.run(orig_dataloader, max_epochs=max_epochs, epoch_length=epoch_length)
batch_checker = BatchChecker(seen_batchs, init_counter=resume_epoch * epoch_length)
sampler, batch_size = _setup_sampler(sampler_type, num_iters, total_batch_size)
resume_dataloader = DataLoader(
data,
batch_size=batch_size,
num_workers=num_workers,
pin_memory="cuda" in torch.device(device).type,
sampler=sampler,
drop_last=True,
shuffle=sampler is None,
)
def update_fn(_, batch):
batch_to_device = batch.to(device)
assert batch_checker.check(
batch
), f"{num_workers} {resume_epoch} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
engine = DeterministicEngine(update_fn)
if sampler_type == "distributed":
@engine.on(Events.EPOCH_STARTED)
def _(engine):
sampler.set_epoch(engine.state.epoch - 1)
resume_state_dict = dict(
epoch=resume_epoch, max_epochs=max_epochs, epoch_length=epoch_length, rng_states=None
)
engine.load_state_dict(resume_state_dict)
torch.manual_seed(87)
engine.run(resume_dataloader)
assert engine.state.epoch == max_epochs
assert engine.state.iteration == epoch_length * max_epochs
_test()
if sampler_type != "distributed":
_test(60)
_test(15)
@pytest.mark.skipif("win" in sys.platform, reason="Skip extremely slow test on Windows/MacOSX")
def test_resume_random_dataloader_from_epoch():
_test_resume_random_dataloader_from_epoch("cpu", setup_sampler)
_test_resume_random_dataloader_from_epoch("cpu", setup_sampler, sampler_type="weighted")
_test_resume_random_dataloader_from_epoch("cpu", setup_sampler, sampler_type="distributed")
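# Dataset wrapper that optionally adds a small random perturbation to each item; used by the
# gradient-on-resume tests below to toggle data augmentation on and off.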
class AugmentedData:
def __init__(self, data, enabled=True):
self.data = data
self.enabled = enabled
def __getitem__(self, i):
dp = self.data[i]
r = torch.randint_like(dp, -100, 100) if self.enabled else 0.0
return dp + r * 0.01
def __len__(self):
return len(self.data)
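# Same as the epoch-based helper above, but resumes a fresh engine from a given iteration.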
def _test_resume_random_dataloader_from_iter(device, _setup_sampler, sampler_type=None):
def _test(epoch_length=None):
max_epochs = 3
total_batch_size = 4
num_iters = 17
torch.manual_seed(0)
data = torch.randint(0, 1000, size=(num_iters * total_batch_size,))
if epoch_length is None:
epoch_length = num_iters
for resume_iteration in range(2, min(num_iters * max_epochs, epoch_length * max_epochs), 13):
for num_workers in [0, 2]:
sampler, batch_size = _setup_sampler(sampler_type, num_iters, total_batch_size)
orig_dataloader = DataLoader(
data,
batch_size=batch_size,
num_workers=num_workers,
pin_memory="cuda" in torch.device(device).type,
sampler=sampler,
drop_last=True,
shuffle=sampler is None,
)
seen_batchs = []
def update_fn(_, batch):
batch_to_device = batch.to(device)
seen_batchs.append(batch)
engine = DeterministicEngine(update_fn)
if sampler_type == "distributed":
@engine.on(Events.EPOCH_STARTED)
def _(engine):
sampler.set_epoch(engine.state.epoch)
torch.manual_seed(12)
engine.run(orig_dataloader, max_epochs=max_epochs, epoch_length=epoch_length)
batch_checker = BatchChecker(seen_batchs, init_counter=resume_iteration)
sampler, batch_size = _setup_sampler(sampler_type, num_iters, total_batch_size)
resume_dataloader = DataLoader(
data,
batch_size=batch_size,
num_workers=num_workers,
pin_memory="cuda" in torch.device(device).type,
sampler=sampler,
drop_last=True,
shuffle=sampler is None,
)
def update_fn(_, batch):
batch_to_device = batch.to(device)
cfg_msg = f"{num_workers} {resume_iteration}"
assert batch_checker.check(
batch
), f"{cfg_msg} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
engine = DeterministicEngine(update_fn)
if sampler_type == "distributed":
@engine.on(Events.EPOCH_STARTED)
def _(engine):
sampler.set_epoch(engine.state.epoch)
resume_state_dict = dict(
iteration=resume_iteration, max_epochs=max_epochs, epoch_length=epoch_length, rng_states=None
)
engine.load_state_dict(resume_state_dict)
torch.manual_seed(12)
engine.run(resume_dataloader)
assert engine.state.epoch == max_epochs
assert (
engine.state.iteration == epoch_length * max_epochs
), f"{num_workers}, {resume_iteration} | {engine.state.iteration} vs {epoch_length * max_epochs}"
_test()
if sampler_type != "distributed":
_test(40)
_test(11)
@pytest.mark.skipif("win" in sys.platform, reason="Skip extremely slow test on Windows/MacOSX")
def test_resume_random_dataloader_from_iter():
_test_resume_random_dataloader_from_iter("cpu", setup_sampler)
_test_resume_random_dataloader_from_iter("cpu", setup_sampler, sampler_type="weighted")
_test_resume_random_dataloader_from_iter("cpu", setup_sampler, sampler_type="distributed")
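# Same resume checks as above, but with a plain infinite data iterator instead of a DataLoader.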
def _test_resume_random_data_iterator_from_epoch(device):
def _test(epoch_length=None):
max_epochs = 5
batch_size = 4
num_iters = 21
def infinite_data_iterator():
while True:
for _ in range(num_iters):
data = torch.randint(0, 1000, size=(batch_size,), device=device)
yield data
if epoch_length is None:
epoch_length = num_iters
for resume_epoch in range(1, max_epochs):
seen_batchs = []
def update_fn(_, batch):
                # if there is a random op when using the data batch etc., we cannot resume correctly
# torch.rand(1)
seen_batchs.append(batch)
engine = DeterministicEngine(update_fn)
torch.manual_seed(121)
engine.run(infinite_data_iterator(), max_epochs=max_epochs, epoch_length=epoch_length)
batch_checker = BatchChecker(seen_batchs, init_counter=resume_epoch * epoch_length)
def update_fn(_, batch):
assert batch_checker.check(
batch
), f"{resume_epoch} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
engine = DeterministicEngine(update_fn)
resume_state_dict = dict(
epoch=resume_epoch, max_epochs=max_epochs, epoch_length=epoch_length, rng_states=None
)
engine.load_state_dict(resume_state_dict)
torch.manual_seed(121)
engine.run(infinite_data_iterator())
assert engine.state.epoch == max_epochs
assert engine.state.iteration == epoch_length * max_epochs
_test()
_test(60)
_test(15)
def test_resume_random_data_iterator_from_epoch():
_test_resume_random_data_iterator_from_epoch("cpu")
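# Iteration-level variant of the data-iterator resume check above.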
def _test_resume_random_data_iterator_from_iter(device):
def _test(epoch_length=None):
max_epochs = 3
batch_size = 4
num_iters = 17
def infinite_data_iterator():
while True:
for _ in range(num_iters):
data = torch.randint(0, 1000, size=(batch_size,), device=device)
yield data
if epoch_length is None:
epoch_length = num_iters
for resume_iteration in range(1, min(num_iters * max_epochs, epoch_length * max_epochs), 7):
seen_batchs = []
def update_fn(_, batch):
seen_batchs.append(batch)
engine = DeterministicEngine(update_fn)
torch.manual_seed(24)
engine.run(infinite_data_iterator(), max_epochs=max_epochs, epoch_length=epoch_length)
batch_checker = BatchChecker(seen_batchs, init_counter=resume_iteration)
def update_fn(_, batch):
assert batch_checker.check(
batch
), f"{resume_iteration} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
engine = DeterministicEngine(update_fn)
resume_state_dict = dict(
iteration=resume_iteration, max_epochs=max_epochs, epoch_length=epoch_length, rng_states=None
)
engine.load_state_dict(resume_state_dict)
torch.manual_seed(24)
engine.run(infinite_data_iterator())
assert engine.state.epoch == max_epochs
assert (
engine.state.iteration == epoch_length * max_epochs
), f"{resume_iteration} | {engine.state.iteration} vs {epoch_length * max_epochs}"
_test()
_test(50)
_test(11)
def test_resume_random_data_iterator_from_iter():
_test_resume_random_data_iterator_from_iter("cpu")
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_resume_random_dataloader_from_iter(device, setup_sampler, sampler_type="distributed")
_test_resume_random_dataloader_from_epoch(device, setup_sampler, sampler_type="distributed")
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_resume_random_dataloader_from_iter(device, setup_sampler, sampler_type="distributed")
_test_resume_random_dataloader_from_epoch(device, setup_sampler, sampler_type="distributed")
@pytest.mark.xfail
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_resume_random_dataloader_from_iter(device, setup_sampler, sampler_type="distributed")
_test_resume_random_dataloader_from_epoch(device, setup_sampler, sampler_type="distributed")
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_resume_random_dataloader_from_iter(device, setup_sampler, sampler_type="distributed")
_test_resume_random_dataloader_from_epoch(device, setup_sampler, sampler_type="distributed")
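# Mirrors the resume snippet from the documentation: run two epochs, then resume from the end of
# epoch 1 and check that the resumed epoch sees exactly the same batches as the original second epoch.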
def test_concepts_snippet_resume():
# Commented imports required in the snippet
# import torch
# from torch.utils.data import DataLoader
# from ignite.engine import DeterministicEngine
# from ignite.utils import manual_seed
seen_batches = []
manual_seed(seed=15)
def random_train_data_loader(size):
data = torch.arange(0, size)
return DataLoader(data, batch_size=4, shuffle=True)
def print_train_data(engine, batch):
i = engine.state.iteration
e = engine.state.epoch
print("train", e, i, batch.tolist())
seen_batches.append(batch)
trainer = DeterministicEngine(print_train_data)
print("Original Run")
manual_seed(56)
trainer.run(random_train_data_loader(40), max_epochs=2, epoch_length=5)
original_batches = list(seen_batches)
seen_batches = []
print("Resumed Run")
trainer.load_state_dict({"epoch": 1, "epoch_length": 5, "max_epochs": 2, "rng_states": None})
manual_seed(56)
trainer.run(random_train_data_loader(40))
resumed_batches = list(seen_batches)
seen_batches = []
for b1, b2 in zip(original_batches[5:], resumed_batches):
assert (b1 == b2).all()
def test_concepts_snippet_warning():
def random_train_data_generator():
while True:
yield torch.randint(0, 100, size=(1,))
def print_train_data(engine, batch):
i = engine.state.iteration
e = engine.state.epoch
print("train", e, i, batch.tolist())
trainer = DeterministicEngine(print_train_data)
@trainer.on(Events.ITERATION_COMPLETED(every=3))
def user_handler(_):
# handler synchronizes the random state
torch.manual_seed(12)
a = torch.rand(1)
trainer.run(random_train_data_generator(), max_epochs=3, epoch_length=5)
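# Train a small conv net, checkpoint it at a given iteration or epoch, then restart training from that
# checkpoint and compare logged batch statistics, gradient norms and weight norms between both runs.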
def _test_gradients_on_resume(
dirname, device, with_dropout=True, with_dataaugs=True, data_size=24, batch_size=4, save_iter=None, save_epoch=None
):
debug = False
def random_train_data_loader(size):
d = AugmentedData(torch.rand(size, 3, 32, 32), enabled=with_dataaugs)
return DataLoader(d, batch_size=batch_size, shuffle=True, num_workers=2)
def _train(save_iter=None, save_epoch=None, sd=None):
w_norms = []
grad_norms = []
data = []
chkpt = []
manual_seed(12)
arch = [
nn.Conv2d(3, 10, 3),
nn.ReLU(),
nn.Conv2d(10, 10, 3),
nn.ReLU(),
nn.AdaptiveAvgPool2d(1),
nn.Flatten(),
nn.Linear(10, 5),
nn.ReLU(),
nn.Linear(5, 2),
]
if with_dropout:
arch.insert(2, nn.Dropout2d())
arch.insert(-2, nn.Dropout())
model = nn.Sequential(*arch).to(device)
opt = SGD(model.parameters(), lr=0.001)
def proc_fn(e, b):
from ignite.engine.deterministic import _get_rng_states, _repr_rng_state
s = _repr_rng_state(_get_rng_states())
model.train()
opt.zero_grad()
y = model(b.to(device))
y.sum().backward()
opt.step()
if debug:
print(
trainer.state.iteration, trainer.state.epoch, "proc_fn - b.shape", b.shape, torch.norm(y).item(), s
)
trainer = DeterministicEngine(proc_fn)
if save_iter is not None:
ev = Events.ITERATION_COMPLETED(once=save_iter)
elif save_epoch is not None:
ev = Events.EPOCH_COMPLETED(once=save_epoch)
save_iter = save_epoch * (data_size // batch_size)
@trainer.on(ev)
def save_chkpt(_):
if debug:
print(trainer.state.iteration, "save_chkpt")
fp = dirname / "test.pt"
from ignite.engine.deterministic import _repr_rng_state
tsd = trainer.state_dict()
if debug:
print("->", _repr_rng_state(tsd["rng_states"]))
torch.save([model.state_dict(), opt.state_dict(), tsd], fp)
chkpt.append(fp)
def log_event_filter(_, event):
if (event // save_iter == 1) and 1 <= (event % save_iter) <= 5:
return True
return False
@trainer.on(Events.ITERATION_COMPLETED(event_filter=log_event_filter))
def write_data_grads_weights(e):
x = e.state.batch
i = e.state.iteration
data.append([i, x.mean().item(), x.std().item()])
total = [0.0, 0.0]
out1 = []
out2 = []
for p in model.parameters():
n1 = torch.norm(p).item()
n2 = torch.norm(p.grad).item()
out1.append(n1)
out2.append(n2)
total[0] += n1
total[1] += n2
w_norms.append([i, total[0]] + out1)
grad_norms.append([i, total[1]] + out2)
if sd is not None:
sd = torch.load(sd)
model.load_state_dict(sd[0])
opt.load_state_dict(sd[1])
from ignite.engine.deterministic import _repr_rng_state
if debug:
print("-->", _repr_rng_state(sd[2]["rng_states"]))
trainer.load_state_dict(sd[2])
manual_seed(32)
trainer.run(random_train_data_loader(size=data_size), max_epochs=5)
return {"sd": chkpt, "data": data, "grads": grad_norms, "weights": w_norms}
out_original = _train(save_iter=save_iter, save_epoch=save_epoch)
assert len(out_original["sd"]) > 0
out_resumed = _train(save_iter=save_iter, save_epoch=save_epoch, sd=out_original["sd"][0])
if debug:
print("Original:")
print(" data:", out_original["data"])
print("grads:", out_original["grads"])
print(" W:", out_original["weights"])
print("Resume:")
print(" data:", out_resumed["data"])
print("grads:", out_resumed["grads"])
print(" W:", out_resumed["weights"])
# check data:
for d1, d2 in zip(out_original["data"], out_resumed["data"]):
assert d1 == d2
# check grads:
for d1, d2 in zip(out_original["grads"], out_resumed["grads"]):
assert d1 == d2
# check weights:
for d1, d2 in zip(out_original["weights"], out_resumed["weights"]):
assert d1 == d2
def test_gradients_on_resume_cpu(dirname):
with pytest.raises(AssertionError):
_test_gradients_on_resume(dirname, "cpu", with_dataaugs=True, save_iter=25)
_test_gradients_on_resume(dirname, "cpu", with_dataaugs=False, save_iter=25)
# resume from epoch
_test_gradients_on_resume(dirname, "cpu", with_dataaugs=True, save_epoch=3)
_test_gradients_on_resume(dirname, "cpu", with_dataaugs=False, save_epoch=3)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_gradients_on_resume_on_cuda(dirname):
with pytest.raises(AssertionError):
_test_gradients_on_resume(dirname, "cuda", with_dataaugs=True, save_iter=25)
with pytest.raises(AssertionError):
_test_gradients_on_resume(dirname, "cuda", with_dataaugs=False, save_iter=25)
# resume from epoch
_test_gradients_on_resume(dirname, "cuda", with_dataaugs=True, save_epoch=3)
_test_gradients_on_resume(dirname, "cuda", with_dataaugs=False, save_epoch=3)
def test_engine_with_dataloader_no_auto_batching():
# tests https://github.com/pytorch/ignite/issues/941
data = torch.rand(64, 4, 10)
data_loader = DataLoader(
data, batch_size=None, sampler=BatchSampler(RandomSampler(data), batch_size=8, drop_last=True)
)
counter = [0]
def foo(e, b):
print(f"{e.state.epoch}-{e.state.iteration}: {b}")
counter[0] += 1
engine = DeterministicEngine(foo)
engine.run(data_loader, epoch_length=10, max_epochs=5)
assert counter[0] == 50
def test_run_finite_iterator_no_epoch_length():
# FR: https://github.com/pytorch/ignite/issues/871
unknown_size = 11
def finite_unk_size_data_iter():
for i in range(unknown_size):
yield i
bc = BatchChecker(data=list(range(unknown_size)))
engine = DeterministicEngine(lambda e, b: bc.check(b))
@engine.on(Events.DATALOADER_STOP_ITERATION)
def restart_iter():
engine.state.dataloader = finite_unk_size_data_iter()
data_iter = finite_unk_size_data_iter()
engine.run(data_iter, max_epochs=5)
assert engine.state.epoch == 5
assert engine.state.iteration == unknown_size * 5
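# DataLoader subclass that skips DataLoader.__init__, mimicking old-style loaders that lack the
# _dataset_kind attribute (see the issue referenced in the test below).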
class OldDataLoader(DataLoader):
def __init__(self, dl, *args, **kwargs):
self.dl = dl
self.sampler = self.dl.sampler
self.batch_sampler = self.dl.batch_sampler
def __len__(self):
return len(self.dl)
def __iter__(self):
return iter(self.dl)
def test_dataloader_no_dataset_kind():
    # tests issue: https://github.com/pytorch/ignite/issues/1022
engine = DeterministicEngine(lambda e, b: None)
data = torch.randint(0, 1000, size=(100 * 4,))
dataloader = DataLoader(data, batch_size=4)
dataloader = OldDataLoader(dataloader)
engine.run(dataloader)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test__set_rng_states_cuda():
# Checks https://github.com/pytorch/ignite/issues/2076
rng_states = [random.getstate(), torch.get_rng_state().cuda(), np.random.get_state()]
_set_rng_states(rng_states)
assert rng_states[1].device.type == "cpu"
def test_engine_no_data_asserts():
trainer = DeterministicEngine(lambda e, b: None)
with pytest.raises(ValueError, match=r"Deterministic engine does not support the option of data=None"):
trainer.run(max_epochs=10, epoch_length=10)
def test_state_dict():
engine = DeterministicEngine(lambda e, b: 1)
sd = engine.state_dict()
assert isinstance(sd, Mapping) and len(sd) == 4
assert "iteration" in sd and sd["iteration"] == 0
assert "max_epochs" in sd and sd["max_epochs"] is None
assert "epoch_length" in sd and sd["epoch_length"] is None
assert "rng_states" in sd and sd["rng_states"] is not None
|
import os
import time
from unittest.mock import call, MagicMock, Mock
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.engine import Engine, Events, State
from ignite.engine.deterministic import keep_random_state
from ignite.metrics import Average
from tests.ignite.engine import BatchChecker, EpochCounter, IterationCounter
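# Engine subclass that records every fired event as (epoch, iteration, event_name) for later inspection.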
class RecordedEngine(Engine):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.called_events = []
def _fire_event(self, event_name, *event_args, **event_kwargs):
self.called_events.append((self.state.epoch, self.state.iteration, event_name.name))
return super()._fire_event(event_name, *event_args, **event_kwargs)
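# Build a MagicMock that iterates like a DataLoader: `epochs` passes over `batches_per_epoch` mock batches.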
def _create_mock_data_loader(epochs, batches_per_epoch):
batches = [MagicMock()] * batches_per_epoch
data_loader_manager = MagicMock()
batch_iterators = [iter(batches) for _ in range(epochs)]
data_loader_manager.__iter__.side_effect = batch_iterators
data_loader_manager.__len__.return_value = batches_per_epoch
return data_loader_manager
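# All Engine tests below run twice: with Engine.interrupt_resume_enabled disabled and enabled
# (see the autouse fixture in the class).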
@pytest.mark.parametrize("interrupt_resume_enabled", [False, True])
class TestEngine:
@pytest.fixture(autouse=True)
def set_interrupt_resume_enabled(self, interrupt_resume_enabled):
Engine.interrupt_resume_enabled = interrupt_resume_enabled
def test_terminate(self):
engine = Engine(lambda e, b: 1)
assert not engine.should_terminate
engine.terminate()
assert engine.should_terminate
def test_invalid_process_raises_with_invalid_signature(self):
with pytest.raises(ValueError, match=r"Engine must be given a processing function in order to run"):
Engine(None)
with pytest.raises(ValueError, match=r"Error adding .+ takes parameters .+ but will be called with"):
Engine(lambda: None)
with pytest.raises(ValueError, match=r"Error adding .+ takes parameters .+ but will be called with"):
Engine(lambda batch: None)
with pytest.raises(ValueError, match=r"Error adding .+ takes parameters .+ but will be called with"):
Engine(lambda engine, batch, extra_arg: None)
def test_invalid_input_data(self):
engine = Engine(lambda e, b: None)
def data():
pass
with pytest.raises(TypeError, match=r"Argument data should be iterable"):
engine.run(data)
@pytest.mark.parametrize("data", [None, [1, 2]])
def test_current_epoch_counter_increases_every_epoch(self, data):
engine = Engine(MagicMock(return_value=1))
max_epochs = 5
counter = EpochCounter()
engine.add_event_handler(Events.EPOCH_STARTED, counter)
state = engine.run(data, max_epochs=max_epochs, epoch_length=2)
assert state.epoch == max_epochs
counter.current_epoch_count = 1
state = engine.run(data, max_epochs=max_epochs, epoch_length=2)
assert state.epoch == max_epochs
@pytest.mark.parametrize("data", [None, [1, 2, 3]])
def test_current_iteration_counter_increases_every_iteration(self, data):
engine = Engine(MagicMock(return_value=1))
max_epochs = 5
counter = IterationCounter()
engine.add_event_handler(Events.ITERATION_STARTED, counter)
epoch_length = 3
state = engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
assert state.iteration == max_epochs * epoch_length
counter.current_iteration_count = 1
state = engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
assert state.iteration == max_epochs * epoch_length
def test_stopping_criterion_is_max_epochs(self):
engine = Engine(MagicMock(return_value=1))
max_epochs = 5
state = engine.run([1], max_epochs=max_epochs)
assert state.epoch == max_epochs
@pytest.mark.parametrize("data", [None, [1, 2]])
def test_terminate_at_end_of_epoch_stops_run(self, data):
max_epochs = 5
last_epoch_to_run = 3
engine = Engine(MagicMock(return_value=1))
def end_of_epoch_handler(engine):
if engine.state.epoch == last_epoch_to_run:
engine.terminate()
engine.add_event_handler(Events.EPOCH_COMPLETED, end_of_epoch_handler)
assert not engine.should_terminate
state = engine.run(data, max_epochs=max_epochs, epoch_length=2)
assert state.epoch == last_epoch_to_run
assert engine.should_terminate
assert engine._dataloader_iter is None
@pytest.mark.parametrize("data, epoch_length", [(None, 10), (range(10), None)])
def test_terminate_at_start_of_epoch(self, data, epoch_length):
max_epochs = 5
epoch_to_terminate_on = 3
real_epoch_length = epoch_length if data is None else len(data)
engine = Engine(MagicMock(return_value=1))
def start_of_epoch_handler(engine):
if engine.state.epoch == epoch_to_terminate_on:
engine.terminate()
engine.add_event_handler(Events.EPOCH_STARTED, start_of_epoch_handler)
assert not engine.should_terminate
state = engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
# epoch is not completed so counter is not incremented
assert state.epoch == epoch_to_terminate_on
assert engine.should_terminate
assert engine._dataloader_iter is None
assert state.iteration == ((epoch_to_terminate_on - 1) * real_epoch_length)
        # Engine continues from epoch_to_terminate_on until max_epochs
first_epoch_iter = [None, None]
@engine.on(Events.STARTED)
def check_iter_epoch():
assert engine.state.epoch == first_epoch_iter[0]
assert engine.state.iteration == first_epoch_iter[1]
if data is not None:
expected_data_iter = iter(data)
expected_iter = state.iteration
@engine.on(Events.ITERATION_STARTED)
def check_iter_and_data():
nonlocal expected_data_iter, expected_iter
expected_iter += 1
assert engine.state.iteration == expected_iter
try:
assert engine.state.batch == next(expected_data_iter)
except StopIteration:
expected_data_iter = iter(data)
assert engine.state.batch == next(expected_data_iter)
first_epoch_iter[0], first_epoch_iter[1] = state.epoch, state.iteration
state = engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
assert state.epoch == max_epochs
assert not engine.should_terminate
assert engine._dataloader_iter is None
# As terminated epoch is skipped -> iterations are not incremented
assert state.iteration == real_epoch_length * (max_epochs - 1)
@pytest.mark.parametrize("data, epoch_length", [(None, 10), (range(10), None)])
def test_terminate_stops_run_mid_epoch(self, data, epoch_length):
max_epochs = 5
iteration_to_stop = 13
real_epoch_length = epoch_length if data is None else len(data)
engine = Engine(MagicMock(return_value=1))
def start_of_iteration_handler(engine):
if engine.state.iteration == iteration_to_stop:
engine.terminate()
@engine.on(Events.EXCEPTION_RAISED)
def assert_no_exceptions(ee):
assert False, f"Engine should terminate without raising an exception, got '{type(ee)}'"
engine.add_event_handler(Events.ITERATION_STARTED, start_of_iteration_handler)
state = engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
# completes the iteration but doesn't increment counter (this happens just before a new iteration starts)
assert state.iteration == iteration_to_stop
assert state.epoch == np.ceil(iteration_to_stop / real_epoch_length) # it starts from 0
assert engine._dataloader_iter is None
        # Engine continues from the terminated run until max_epochs
first_epoch_iter = [None, None]
num_calls_check_iter_epoch = 0
@engine.on(Events.STARTED, first_epoch_iter)
def check_iter_epoch(first_epoch_iter):
nonlocal num_calls_check_iter_epoch
assert engine.state.epoch == first_epoch_iter[0]
assert engine.state.iteration == first_epoch_iter[1]
num_calls_check_iter_epoch += 1
if data is not None:
expected_iter = state.iteration
@engine.on(Events.ITERATION_STARTED)
def check_iter_and_data():
nonlocal expected_iter
expected_iter += 1
assert engine.state.iteration == expected_iter
assert engine.state.batch == data[(expected_iter - first_epoch_iter[1] - 1) % len(data)]
first_epoch_iter[0], first_epoch_iter[1] = state.epoch, state.iteration
state = engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
assert state.epoch == max_epochs
assert not engine.should_terminate
assert state.iteration == real_epoch_length * (max_epochs - 1) + (iteration_to_stop % real_epoch_length)
assert num_calls_check_iter_epoch == 1
@pytest.mark.parametrize(
"terminate_event, e, i",
[
(Events.STARTED, 0, 0),
(Events.EPOCH_STARTED(once=2), 2, None),
(Events.EPOCH_COMPLETED(once=2), 2, None),
(Events.GET_BATCH_STARTED(once=12), None, 12),
(Events.GET_BATCH_COMPLETED(once=12), None, 12),
(Events.ITERATION_STARTED(once=14), None, 14),
(Events.ITERATION_COMPLETED(once=14), None, 14),
],
)
def test_terminate_events_sequence(self, terminate_event, e, i):
engine = RecordedEngine(MagicMock(return_value=1))
data = range(10)
max_epochs = 5
@engine.on(terminate_event)
def call_terminate():
engine.terminate()
@engine.on(Events.EXCEPTION_RAISED)
def assert_no_exceptions(ee):
assert False, f"Engine should terminate without raising an exception, got '{type(ee)}'"
engine.run(data, max_epochs=max_epochs)
if i is None:
if terminate_event == Events.EPOCH_STARTED:
i = len(data) * (e - 1)
else:
i = len(data) * e
if e is None:
e = i // len(data) + 1
assert engine.called_events[0] == (0, 0, Events.STARTED)
assert engine.called_events[-1] == (e, i, Events.COMPLETED)
assert engine.called_events[-2] == (e, i, Events.TERMINATE)
assert engine.called_events[-3] == (e, i, terminate_event)
assert engine._dataloader_iter is None
@pytest.mark.parametrize("data, epoch_length", [(None, 10), (range(10), None)])
def test_terminate_epoch_stops_mid_epoch(self, data, epoch_length):
real_epoch_length = epoch_length if data is None else len(data)
iteration_to_stop = real_epoch_length + 4
engine = Engine(MagicMock(return_value=1))
def start_of_iteration_handler(engine):
if engine.state.iteration == iteration_to_stop:
engine.terminate_epoch()
max_epochs = 3
engine.add_event_handler(Events.ITERATION_STARTED, start_of_iteration_handler)
state = engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
# completes the iteration but doesn't increment counter (this happens just before a new iteration starts)
true_value = real_epoch_length * (max_epochs - 1) + iteration_to_stop % real_epoch_length
assert state.iteration == true_value
assert state.epoch == max_epochs
@pytest.mark.parametrize(
"terminate_epoch_event, i",
[
(Events.GET_BATCH_STARTED(once=12), 12),
(Events.GET_BATCH_COMPLETED(once=12), 12),
(Events.ITERATION_STARTED(once=14), 14),
(Events.ITERATION_COMPLETED(once=14), 14),
],
)
def test_terminate_epoch_events_sequence(self, terminate_epoch_event, i):
engine = RecordedEngine(MagicMock(return_value=1))
data = range(10)
max_epochs = 3
        # TODO: Bug: Events.GET_BATCH_STARTED(once=12) is called twice!
        # prevent call_terminate_epoch from being called twice
call_count = 0
@engine.on(terminate_epoch_event)
def call_terminate_epoch():
nonlocal call_count
if call_count < 1:
engine.terminate_epoch()
call_count += 1
@engine.on(Events.TERMINATE_SINGLE_EPOCH)
def check_previous_events(iter_counter):
e = i // len(data) + 1
assert engine.called_events[0] == (0, 0, Events.STARTED)
assert engine.called_events[-2] == (e, i, terminate_epoch_event)
assert engine.called_events[-1] == (e, i, Events.TERMINATE_SINGLE_EPOCH)
@engine.on(Events.EPOCH_COMPLETED)
def check_previous_events2():
e = i // len(data) + 1
if e == engine.state.epoch and i == engine.state.iteration:
assert engine.called_events[-3] == (e, i, terminate_epoch_event)
assert engine.called_events[-2] == (e, i, Events.TERMINATE_SINGLE_EPOCH)
assert engine.called_events[-1] == (e, i, Events.EPOCH_COMPLETED)
engine.run(data, max_epochs=max_epochs)
assert engine.state.epoch == max_epochs
assert (max_epochs - 1) * len(data) < engine.state.iteration < max_epochs * len(data)
@pytest.mark.parametrize("data", [None, "mock_data_loader"])
def test_iteration_events_are_fired(self, data):
max_epochs = 5
num_batches = epoch_length = 3
if isinstance(data, str) and data == "mock_data_loader":
data = _create_mock_data_loader(max_epochs, num_batches)
epoch_length = None
engine = Engine(MagicMock(return_value=1))
mock_manager = Mock()
iteration_started = Mock()
engine.add_event_handler(Events.ITERATION_STARTED, iteration_started)
iteration_complete = Mock()
engine.add_event_handler(Events.ITERATION_COMPLETED, iteration_complete)
mock_manager.attach_mock(iteration_started, "iteration_started")
mock_manager.attach_mock(iteration_complete, "iteration_complete")
engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
assert iteration_started.call_count == num_batches * max_epochs
assert iteration_complete.call_count == num_batches * max_epochs
expected_calls = []
for _ in range(max_epochs * num_batches):
expected_calls.append(call.iteration_started(engine))
expected_calls.append(call.iteration_complete(engine))
assert mock_manager.mock_calls == expected_calls
@pytest.mark.parametrize("data", [None, [1, 2]])
def test_last_event_name(self, data):
engine = Engine(MagicMock(return_value=1))
assert engine.last_event_name is None
@engine.on(Events.STARTED)
def _(_engine):
assert _engine.last_event_name == Events.STARTED
@engine.on(Events.EPOCH_STARTED)
def _(_engine):
assert _engine.last_event_name == Events.EPOCH_STARTED
@engine.on(Events.ITERATION_STARTED)
def _(_engine):
assert _engine.last_event_name == Events.ITERATION_STARTED
@engine.on(Events.ITERATION_COMPLETED)
def _(_engine):
assert _engine.last_event_name == Events.ITERATION_COMPLETED
@engine.on(Events.EPOCH_COMPLETED)
def _(_engine):
assert _engine.last_event_name == Events.EPOCH_COMPLETED
epoch_length = 2 if data is None else None
engine.run(data, epoch_length=epoch_length)
assert engine.last_event_name == Events.COMPLETED
def test_reset_should_terminate(self):
def update_fn(engine, batch):
pass
engine = Engine(update_fn)
@engine.on(Events.ITERATION_COMPLETED)
def terminate_on_iteration_10(engine):
if engine.state.iteration == 10:
engine.terminate()
engine.run([0] * 20)
assert engine.state.iteration == 10
engine.run([0] * 20)
assert engine.state.iteration == 10
def test_batch_values(self):
def _test(data):
            # This test checks the content passed to the update function
counter = [0]
num_iters = len(data)
def update_fn(_, batch):
assert batch == data[counter[0] % num_iters]
counter[0] += 1
engine = Engine(update_fn)
engine.run(data, max_epochs=10)
data = torch.randint(0, 1000, size=(256,))
_test(data)
def test_state_repr(self):
data = [0, 1, 2, 3, 4, 5]
max_epochs = 1
metrics = {"accuracy": Mock()}
state = State(dataloader=data, max_epochs=max_epochs, metrics=metrics)
s = repr(state)
assert "iteration" in s
assert "epoch" in s
assert "max_epochs: 1" in s
assert "dataloader" in s
assert "metrics" in s
assert "output" in s
assert "batch" in s
def test_alter_batch(self):
small_shape = (1, 2, 2)
large_shape = (1, 3, 3)
small_loader = torch.randint(0, 256, size=(30,) + small_shape)
large_loader = torch.randint(0, 256, size=(20,) + large_shape)
switch_iteration = 50
def should_take_large_img(i):
return i >= switch_iteration
def update_fn(engine, batch):
i = engine.state.iteration
if i < switch_iteration:
assert batch.shape == small_shape
assert (small_loader[(i - 1) % len(small_loader), ...] == batch).all()
else:
assert batch.shape == large_shape
assert (large_loader[(i - switch_iteration) % len(large_loader), ...] == batch).all()
trainer = Engine(update_fn)
def cycle(seq):
while True:
for i in seq:
yield i
small_loader_iter = cycle(small_loader)
large_loader_iter = cycle(large_loader)
@trainer.on(Events.ITERATION_STARTED)
def choose_batch(engine):
i = engine.state.iteration
if should_take_large_img(i):
batch = next(large_loader_iter)
else:
batch = next(small_loader_iter)
engine.state.batch = batch
num_epochs = 5
num_iters = 25
data = range(num_iters)
trainer.run(data, num_epochs)
def test__is_done(self):
state = State(iteration=10, epoch=1, max_epochs=100, epoch_length=100)
assert not Engine._is_done(state)
state = State(iteration=1000, max_epochs=10, epoch_length=100)
assert Engine._is_done(state)
def test__setup_engine(self):
engine = Engine(lambda e, b: 1)
engine.state = State(iteration=10, epoch=1, max_epochs=100, epoch_length=100)
data = list(range(100))
engine.state.dataloader = data
engine._setup_engine()
assert engine._init_iter == 10
def test_run_asserts(self):
engine = Engine(lambda e, b: 1)
with pytest.raises(ValueError, match=r"Input data has zero size. Please provide non-empty data"):
engine.run([])
def test_state_get_event_attrib_value(self):
state = State()
state.iteration = 10
state.epoch = 9
e = Events.ITERATION_STARTED
assert state.get_event_attrib_value(e) == state.iteration
e = Events.ITERATION_COMPLETED
assert state.get_event_attrib_value(e) == state.iteration
e = Events.EPOCH_STARTED
assert state.get_event_attrib_value(e) == state.epoch
e = Events.EPOCH_COMPLETED
assert state.get_event_attrib_value(e) == state.epoch
e = Events.STARTED
assert state.get_event_attrib_value(e) == state.epoch
e = Events.COMPLETED
assert state.get_event_attrib_value(e) == state.epoch
e = Events.ITERATION_STARTED(every=10)
assert state.get_event_attrib_value(e) == state.iteration
e = Events.ITERATION_COMPLETED(every=10)
assert state.get_event_attrib_value(e) == state.iteration
e = Events.EPOCH_STARTED(once=5)
assert state.get_event_attrib_value(e) == state.epoch
e = Events.EPOCH_COMPLETED(once=5)
assert state.get_event_attrib_value(e) == state.epoch
@pytest.mark.parametrize(
"data, max_epochs, epoch_length", [(range(100), 2, 100), (range(200), 2, 100), (range(200), 5, 100)]
)
def test_time_stored_in_state(self, data, max_epochs, epoch_length):
sleep_time = 0.01
extra_sleep_time = 0.1
engine = Engine(lambda e, b: time.sleep(sleep_time))
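# engine.state.times stores elapsed times in seconds, keyed by Events.EPOCH_COMPLETED.name (duration of
# the last epoch) and Events.COMPLETED.name (duration of the whole run); the sleeps below give lower bounds.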
@engine.on(Events.EPOCH_COMPLETED)
def check_epoch_time():
assert engine.state.times[Events.EPOCH_COMPLETED.name] >= sleep_time * epoch_length
time.sleep(extra_sleep_time)
@engine.on(Events.COMPLETED)
def check_completed_time():
assert (
engine.state.times[Events.COMPLETED.name] >= (sleep_time * epoch_length + extra_sleep_time) * max_epochs
)
time.sleep(extra_sleep_time)
engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
assert engine.state.times[Events.EPOCH_COMPLETED.name] >= sleep_time * epoch_length + extra_sleep_time
assert (
engine.state.times[Events.COMPLETED.name]
>= (sleep_time * epoch_length + extra_sleep_time) * max_epochs + extra_sleep_time
)
def _test_check_triggered_events(self, data, max_epochs, epoch_length, exp_iter_stops=None):
engine = Engine(lambda e, b: 1)
events = [
Events.STARTED,
Events.EPOCH_STARTED,
Events.ITERATION_STARTED,
Events.ITERATION_COMPLETED,
Events.EPOCH_COMPLETED,
Events.COMPLETED,
Events.GET_BATCH_STARTED,
Events.GET_BATCH_COMPLETED,
Events.DATALOADER_STOP_ITERATION,
]
handlers = {e: MagicMock() for e in events}
for e, handler in handlers.items():
engine.add_event_handler(e, handler)
engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
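# Expected call counts: GET_BATCH_STARTED/COMPLETED fire once per iteration, while DATALOADER_STOP_ITERATION
# fires each time the data iterator is exhausted and re-created; for a list of exactly epoch_length items
# that happens at every epoch boundary except the last one, i.e. max_epochs - 1 times.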
expected_num_calls = {
Events.STARTED: 1,
Events.COMPLETED: 1,
Events.EPOCH_STARTED: max_epochs,
Events.EPOCH_COMPLETED: max_epochs,
Events.ITERATION_STARTED: max_epochs * epoch_length,
Events.ITERATION_COMPLETED: max_epochs * epoch_length,
Events.GET_BATCH_STARTED: max_epochs * epoch_length,
Events.GET_BATCH_COMPLETED: max_epochs * epoch_length,
Events.DATALOADER_STOP_ITERATION: (max_epochs - 1) if exp_iter_stops is None else exp_iter_stops,
}
for n, handler in handlers.items():
assert handler.call_count == expected_num_calls[n], f"{n}: {handler.call_count} vs {expected_num_calls[n]}"
def _test_run_check_triggered_events(self):
# tests issue https://github.com/pytorch/ignite/issues/818
self._test_check_triggered_events(list(range(10)), max_epochs=4, epoch_length=10)
self._test_check_triggered_events(list(range(100)), max_epochs=5, epoch_length=100)
self._test_check_triggered_events(list(range(100)), max_epochs=5, epoch_length=50, exp_iter_stops=50 * 5 // 100)
self._test_check_triggered_events(
list(range(100)), max_epochs=5, epoch_length=150, exp_iter_stops=150 * 5 // 100
)
self._test_check_triggered_events(None, max_epochs=5, epoch_length=150)
def test_run_check_triggered_events_list(self):
self._test_run_check_triggered_events()
def _test_run_check_triggered_events_on_iterator(self):
def infinite_data_iterator():
while True:
for i in range(100):
yield i
self._test_check_triggered_events(infinite_data_iterator(), max_epochs=5, epoch_length=100, exp_iter_stops=0)
self._test_check_triggered_events(infinite_data_iterator(), max_epochs=5, epoch_length=50, exp_iter_stops=0)
self._test_check_triggered_events(infinite_data_iterator(), max_epochs=5, epoch_length=150, exp_iter_stops=0)
def limited_data_iterator():
for i in range(100):
yield i
self._test_check_triggered_events(limited_data_iterator(), max_epochs=1, epoch_length=100, exp_iter_stops=0)
self._test_check_triggered_events(limited_data_iterator(), max_epochs=10, epoch_length=10, exp_iter_stops=0)
# These runs must fail: the one-shot iterator is exhausted before the requested number of iterations,
# so the expected event counts cannot be reached
with pytest.raises(AssertionError):
with pytest.warns(UserWarning, match=r"Data iterator can not provide data anymore"):
self._test_check_triggered_events(limited_data_iterator(), max_epochs=3, epoch_length=100)
with pytest.raises(AssertionError):
with pytest.warns(UserWarning, match=r"Data iterator can not provide data anymore"):
self._test_check_triggered_events(limited_data_iterator(), max_epochs=3, epoch_length=75)
with pytest.raises(AssertionError):
with pytest.warns(UserWarning, match=r"Data iterator can not provide data anymore"):
self._test_check_triggered_events(limited_data_iterator(), max_epochs=1, epoch_length=101)
def test_run_check_triggered_events_on_iterator(self):
self._test_run_check_triggered_events_on_iterator()
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(self, distributed_context_single_node_nccl):
self._test_run_check_triggered_events_on_iterator()
self._test_run_check_triggered_events()
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(self, distributed_context_single_node_gloo):
self._test_run_check_triggered_events_on_iterator()
self._test_run_check_triggered_events()
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(self, distributed_context_multi_node_gloo):
self._test_run_check_triggered_events_on_iterator()
self._test_run_check_triggered_events()
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(self, distributed_context_multi_node_nccl):
self._test_run_check_triggered_events_on_iterator()
self._test_run_check_triggered_events()
def test_engine_random_state(self):
def random_data_generator():
while True:
yield torch.randint(0, 100, size=(5,))
def sum_data(_, batch):
result = torch.sum(batch)
return result
def get_engine():
engine = Engine(sum_data)
average = Average()
average.attach(engine, "average")
return engine
torch.manual_seed(34)
engine = get_engine()
state1 = engine.run(random_data_generator(), max_epochs=2, epoch_length=2)
torch.manual_seed(34)
engine = get_engine()
state2 = engine.run(random_data_generator(), max_epochs=2, epoch_length=2)
torch.manual_seed(42)
engine = get_engine()
state3 = engine.run(random_data_generator(), max_epochs=2, epoch_length=2)
assert state1.metrics["average"] == pytest.approx(state2.metrics["average"])
assert state1.metrics["average"] != pytest.approx(state3.metrics["average"])
assert state2.metrics["average"] != pytest.approx(state3.metrics["average"])
def test_altered_random_state(self):
# tests issue https://github.com/pytorch/ignite/issues/795
size = 1
def random_train_data_generator(size):
while True:
yield torch.randint(0, 100, size=(size,))
def random_val_data_generator(size):
while True:
yield torch.randint(0, 100, size=(size,)) + 100
train_only_batches = []
def train_fn(_, batch):
train_only_batches.append(batch[0].item())
torch.manual_seed(1)
epoch_length = 6
trainer = Engine(train_fn)
trainer.run(random_train_data_generator(size), max_epochs=4, epoch_length=epoch_length)
def val_fn(_1, _2):
pass
evaluator = Engine(val_fn)
train_batches = []
def train_fn2(_, batch):
train_batches.append(batch[0].item())
trainer = Engine(train_fn2)
@trainer.on(Events.EPOCH_COMPLETED)
@keep_random_state
def run_evaluation(_):
evaluator.run(random_val_data_generator(size), epoch_length=4)
torch.manual_seed(1)
trainer.run(random_train_data_generator(size), max_epochs=4, epoch_length=epoch_length)
for i in range(epoch_length):
assert train_batches[epoch_length + i] != train_batches[2 * epoch_length + i]
assert train_batches[i] == train_only_batches[i]
def test_engine_with_dataloader_no_auto_batching(self):
# tests https://github.com/pytorch/ignite/issues/941
from torch.utils.data import BatchSampler, DataLoader, RandomSampler
data = torch.rand(64, 4, 10)
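# With batch_size=None the DataLoader disables automatic batching, so each list of indices produced by the
# BatchSampler indexes the tensor directly and already yields an 8-sample batch.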
data_loader = DataLoader(
data, batch_size=None, sampler=BatchSampler(RandomSampler(data), batch_size=8, drop_last=True)
)
counter = [0]
def foo(e, b):
counter[0] += 1
engine = Engine(foo)
engine.run(data_loader, epoch_length=10, max_epochs=5)
assert counter[0] == 50
def test_run_once_finite_iterator_no_epoch_length(self):
# FR: https://github.com/pytorch/ignite/issues/871
unknown_size = 11
def finite_unk_size_data_iter():
for i in range(unknown_size):
yield i
bc = BatchChecker(data=list(range(unknown_size)))
engine = Engine(lambda e, b: bc.check(b))
completed_handler = MagicMock()
engine.add_event_handler(Events.COMPLETED, completed_handler)
data_iter = finite_unk_size_data_iter()
engine.run(data_iter)
assert engine.state.epoch == 1
assert engine.state.iteration == unknown_size
assert completed_handler.call_count == 1
def test_run_finite_iterator_no_epoch_length(self):
# FR: https://github.com/pytorch/ignite/issues/871
unknown_size = 11
def finite_unk_size_data_iter():
for i in range(unknown_size):
yield i
bc = BatchChecker(data=list(range(unknown_size)))
engine = Engine(lambda e, b: bc.check(b))
@engine.on(Events.DATALOADER_STOP_ITERATION)
def restart_iter():
engine.state.dataloader = finite_unk_size_data_iter()
data_iter = finite_unk_size_data_iter()
engine.run(data_iter, max_epochs=5)
assert engine.state.epoch == 5
assert engine.state.iteration == unknown_size * 5
def test_run_finite_iterator_no_epoch_length_2(self):
# FR: https://github.com/pytorch/ignite/issues/871
known_size = 11
def finite_size_data_iter(size):
for i in range(size):
yield i
bc = BatchChecker(data=list(range(known_size)))
engine = Engine(lambda e, b: bc.check(b))
@engine.on(Events.ITERATION_COMPLETED(every=known_size))
def restart_iter():
engine.state.dataloader = finite_size_data_iter(known_size)
data_iter = finite_size_data_iter(known_size)
engine.run(data_iter, max_epochs=5)
assert engine.state.epoch == 5
assert engine.state.iteration == known_size * 5
def test_faq_inf_iterator_with_epoch_length(self):
# Code snippet from FAQ
# import torch
torch.manual_seed(12)
def infinite_iterator(batch_size):
while True:
batch = torch.rand(batch_size, 3, 32, 32)
yield batch
def train_step(trainer, batch):
# ...
s = trainer.state
print(f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch.norm():.3f}")
trainer = Engine(train_step)
# We need to specify epoch_length to define the epoch
trainer.run(infinite_iterator(4), epoch_length=5, max_epochs=3)
assert trainer.state.epoch == 3
assert trainer.state.iteration == 3 * 5
def test_faq_inf_iterator_no_epoch_length(self):
# Code snippet from FAQ
# import torch
torch.manual_seed(12)
def infinite_iterator(batch_size):
while True:
batch = torch.rand(batch_size, 3, 32, 32)
yield batch
def train_step(trainer, batch):
# ...
s = trainer.state
print(f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch.norm():.3f}")
trainer = Engine(train_step)
@trainer.on(Events.ITERATION_COMPLETED(once=15))
def stop_training():
trainer.terminate()
trainer.run(infinite_iterator(4))
assert trainer.state.epoch == 1
assert trainer.state.iteration == 15
def test_faq_fin_iterator_unknw_size(self):
# Code snippet from FAQ
# import torch
torch.manual_seed(12)
def finite_unk_size_data_iter():
for i in range(11):
yield i
def train_step(trainer, batch):
# ...
s = trainer.state
print(f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch:.3f}")
trainer = Engine(train_step)
@trainer.on(Events.DATALOADER_STOP_ITERATION)
def restart_iter():
trainer.state.dataloader = finite_unk_size_data_iter()
data_iter = finite_unk_size_data_iter()
trainer.run(data_iter, max_epochs=5)
assert trainer.state.epoch == 5
assert trainer.state.iteration == 5 * 11
# Code snippet from FAQ
# import torch
torch.manual_seed(12)
def finite_unk_size_data_iter():
for i in range(11):
yield i
def val_step(evaluator, batch):
# ...
s = evaluator.state
print(f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch:.3f}")
evaluator = Engine(val_step)
data_iter = finite_unk_size_data_iter()
evaluator.run(data_iter)
assert evaluator.state.epoch == 1
assert evaluator.state.iteration == 1 * 11
def test_faq_fin_iterator(self):
# Code snippet from FAQ
# import torch
torch.manual_seed(12)
size = 11
def finite_size_data_iter(size):
for i in range(size):
yield i
def train_step(trainer, batch):
# ...
s = trainer.state
print(f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch:.3f}")
trainer = Engine(train_step)
@trainer.on(Events.ITERATION_COMPLETED(every=size))
def restart_iter():
trainer.state.dataloader = finite_size_data_iter(size)
data_iter = finite_size_data_iter(size)
trainer.run(data_iter, max_epochs=5)
assert trainer.state.epoch == 5
assert trainer.state.iteration == 5 * size
# Code snippet from FAQ
# import torch
torch.manual_seed(12)
size = 11
def finite_size_data_iter(size):
for i in range(size):
yield i
def val_step(evaluator, batch):
# ...
s = evaluator.state
print(f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch:.3f}")
evaluator = Engine(val_step)
data_iter = finite_size_data_iter(size)
evaluator.run(data_iter)
assert evaluator.state.epoch == 1
assert evaluator.state.iteration == size
def test_set_data(self):
# tests FR https://github.com/pytorch/ignite/issues/833
from torch.utils.data import DataLoader
num_iters1 = 10
num_iters2 = 20
batch_size = 4
torch.manual_seed(1)
data1 = DataLoader(torch.rand(num_iters1 * batch_size, 11), batch_size=batch_size)
data2 = DataLoader(torch.rand(num_iters2 * batch_size, 22), batch_size=batch_size)
switch_iteration = 35
def train_fn(e, batch):
if e.state.iteration <= switch_iteration:
assert batch.shape[1] == 11, f"{e.state.iteration}: {batch.shape}"
else:
assert batch.shape[1] == 22, f"{e.state.iteration}: {batch.shape}"
trainer = Engine(train_fn)
@trainer.on(Events.ITERATION_COMPLETED(once=switch_iteration))
def switch_dataloader():
trainer.set_data(data2)
trainer.run(data1, max_epochs=10)
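# max_iters is an alternative stopping condition to max_epochs: the run stops once state.iteration reaches
# max_iters, and passing both arguments is rejected (see test_run_with_invalid_max_iters_and_max_epoch below).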
def test_run_with_max_iters(self):
max_iters = 8
engine = Engine(lambda e, b: 1)
engine.run([0] * 20, max_iters=max_iters)
assert engine.state.iteration == max_iters
assert engine.state.max_iters == max_iters
def test_run_with_max_iters_greater_than_epoch_length(self):
max_iters = 73
engine = Engine(lambda e, b: 1)
engine.run([0] * 20, max_iters=max_iters)
assert engine.state.iteration == max_iters
def test_run_with_invalid_max_iters_and_max_epoch(self):
max_iters = 12
max_epochs = 2
engine = Engine(lambda e, b: 1)
with pytest.raises(
ValueError,
match=r"Arguments max_iters and max_epochs are mutually exclusive."
"Please provide only max_epochs or max_iters.",
):
engine.run([0] * 20, max_iters=max_iters, max_epochs=max_epochs)
def test_epoch_events_fired_max_iters(self):
max_iters = 32
engine = Engine(lambda e, b: 1)
@engine.on(Events.EPOCH_COMPLETED)
def fired_event(engine):
assert engine.state.iteration % engine.state.epoch_length == 0
engine.run([0] * 10, max_iters=max_iters)
def test_is_done_with_max_iters(self):
state = State(iteration=100, epoch=1, max_epochs=3, epoch_length=100, max_iters=250)
assert not Engine._is_done(state)
state = State(iteration=250, epoch=1, max_epochs=3, epoch_length=100, max_iters=250)
assert Engine._is_done(state)
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_batch_is_released_before_new_one_is_loaded_on_cuda(self):
torch.cuda.empty_cache()
engine = Engine(lambda e, b: None)
def _test():
mem_consumption = []
def dataloader():
for _ in range(4):
mem_consumption.append(torch.cuda.memory_allocated())
batch = torch.randn(10).cuda()
mem_consumption.append(torch.cuda.memory_allocated())
yield batch
engine.run(dataloader(), max_epochs=2, epoch_length=2)
return mem_consumption
mem_consumption1 = _test()
# mem_consumption should look like [0, 512, 512, 512, 512, 512, 512, 512]
assert len(set(mem_consumption1[1:])) == 1
mem_consumption2 = _test()
assert len(set(mem_consumption2[1:])) == 1
assert mem_consumption1 == mem_consumption2
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_output_is_released_before_new_one_is_assigned_on_cuda(self):
torch.cuda.empty_cache()
def _test():
mem_consumption = []
def update_fn(engine, batch):
mem_consumption.append(torch.cuda.memory_allocated())
output = torch.rand(10).cuda()
mem_consumption.append(torch.cuda.memory_allocated())
return output
engine = Engine(update_fn)
engine.run([0, 1], max_epochs=2)
return mem_consumption
mem_consumption1 = _test()[2:]
# mem_consumption ~ [0, 512, 0, 512, 0, 512, 0, 512]
assert len(set(mem_consumption1)) == 2
mem_consumption2 = _test()[2:]
assert len(set(mem_consumption2)) == 2
assert mem_consumption1 == mem_consumption2
def test_engine_no_data_asserts(self):
trainer = Engine(lambda e, b: None)
with pytest.raises(ValueError, match=r"epoch_length should be provided if data is None"):
trainer.run(max_epochs=10)
def test_engine_no_data(self):
def train_step(engine, batch):
assert batch is None
trainer = Engine(train_step)
trainer.run(max_epochs=10, epoch_length=10)
assert trainer.state.iteration == 10 * 10
assert trainer.state.epoch == 10
assert trainer.state.dataloader is None
# continue the same run for 10 more epochs, still without data
trainer.run(max_epochs=20)
assert trainer.state.iteration == 20 * 10
assert trainer.state.epoch == 20
assert trainer.state.dataloader is None
@pytest.mark.parametrize("data, epoch_length", [(None, 10), (range(10), None)])
def test_engine_run_resume(self, data, epoch_length):
# https://github.com/pytorch/ignite/wiki/Roadmap#runresume-logic-improvements
engine = Engine(lambda e, b: None)
real_epoch_length = len(data) if data is not None else epoch_length
first_epoch_iter = [None, None]
@engine.on(Events.STARTED, first_epoch_iter)
def check_iter_epoch(first_epoch_iter):
assert engine.state.epoch == first_epoch_iter[0]
assert engine.state.iteration == first_epoch_iter[1]
# (re)start from 0 to 5
first_epoch_iter[0], first_epoch_iter[1] = 0, 0
# Engine run starting with max_epochs=5 => state.epoch=5
engine.run(data, max_epochs=5, epoch_length=epoch_length)
assert engine.state.epoch == 5
assert engine.state.iteration == 5 * real_epoch_length
# continue from 5 to 7
first_epoch_iter[0], first_epoch_iter[1] = 5, 5 * real_epoch_length
# Engine run resuming from iteration 50, epoch 5 until 7 epochs => state.epoch=7
engine.run(data, max_epochs=7, epoch_length=epoch_length)
assert engine.state.epoch == 7
assert engine.state.iteration == 7 * real_epoch_length
# error
with pytest.raises(ValueError, match="Argument max_epochs should be greater than or equal to the start epoch"):
engine.run(data, max_epochs=4, epoch_length=epoch_length)
# restart from 0 to 7 (since state.epoch == max_epochs (=7), the run restarts from scratch,
# exactly as in the usual pattern evaluator.run(data) with no extra arguments)
first_epoch_iter[0], first_epoch_iter[1] = 0, 0
# Engine run starting with max_epochs=7 => state.epoch=7
engine.run(data, max_epochs=7, epoch_length=epoch_length)
assert engine.state.epoch == 7
assert engine.state.iteration == 7 * real_epoch_length
# forced restart from 0 to 5
engine.state.max_epochs = None
first_epoch_iter[0], first_epoch_iter[1] = 0, 0
# Engine run starting with max_epochs=5 => state.epoch=5
engine.run(data, max_epochs=5, epoch_length=epoch_length)
assert engine.state.epoch == 5
assert engine.state.iteration == 5 * real_epoch_length
# forced restart from 0 to 9, instead of continue from state.epoch=5
engine.state.max_epochs = None
first_epoch_iter[0], first_epoch_iter[1] = 0, 0
# Engine run starting with max_epochs=9 => state.epoch=9
engine.run(data, max_epochs=9, epoch_length=epoch_length)
assert engine.state.epoch == 9
assert engine.state.iteration == 9 * real_epoch_length
# continue from 9 until 10
first_epoch_iter[0], first_epoch_iter[1] = 9, 9 * real_epoch_length
# Engine run resuming from iteration 90, epoch 9 until 10 epochs => state.epoch=10
engine.run(data, max_epochs=10, epoch_length=epoch_length)
assert engine.state.epoch == 10
assert engine.state.iteration == 10 * real_epoch_length
@pytest.mark.parametrize(
"interrupt_event, e, i",
[
(Events.EPOCH_STARTED(once=2), 2, None),
(Events.EPOCH_COMPLETED(once=2), 2, None),
(Events.GET_BATCH_STARTED(once=12), None, 12),
(Events.GET_BATCH_COMPLETED(once=12), None, 12),
(Events.ITERATION_STARTED(once=14), None, 14),
(Events.ITERATION_COMPLETED(once=14), None, 14),
],
)
def test_engine_run_interrupt_resume(interrupt_event, e, i):
assert Engine.interrupt_resume_enabled
data = range(10)
max_epochs = 5
def check_input_data(e, b):
i = (e.state.iteration - 1) % len(data)
assert b == data[i]
engine = RecordedEngine(check_input_data)
engine.run(data, max_epochs=max_epochs)
expected_called_events = list(engine.called_events)
engine.called_events = []
@engine.on(interrupt_event)
def call_interrupt():
engine.interrupt()
state = engine.run(data, max_epochs=max_epochs)
if i is None:
if interrupt_event == Events.EPOCH_STARTED:
i = len(data) * (e - 1)
else:
i = len(data) * e
if e is None:
e = i // len(data) + 1
# Check the last events
assert engine.called_events[-1] == (e, i, Events.INTERRUPT)
assert engine.called_events[-2] == (e, i, interrupt_event)
assert state.epoch == e
assert state.iteration == i
assert not engine.should_interrupt
# implementation detail check:
assert engine._dataloader_iter is not None
assert engine._internal_run_generator is not None
le = len(engine.called_events)
# We need to skip the last INTERRUPT event to compare
assert expected_called_events[: le - 1] == engine.called_events[:-1]
engine.called_events = []
@engine.on(Events.STARTED)
def raise_error():
raise RuntimeError("Shouldn't be here")
engine.run(data, max_epochs=max_epochs)
assert expected_called_events[le - 1 :] == engine.called_events
# implementation detail check:
assert engine._dataloader_iter is None
assert engine._internal_run_generator is None
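# Unlike terminate(), interrupt() keeps the internal run generator and dataloader iterator alive, so a
# subsequent run() resumes from the interruption point instead of restarting from scratch.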
def test_engine_run_multiple_interrupt_resume():
assert Engine.interrupt_resume_enabled
data = range(10)
max_epochs = 3
def check_input_data(e, b):
i = (e.state.iteration - 1) % len(data)
assert b == data[i]
engine = Engine(check_input_data)
can_interrupt = True
@engine.on(Events.ITERATION_COMPLETED(every=6))
def call_interrupt():
if can_interrupt:
engine.interrupt()
state = engine.run(data, max_epochs=max_epochs)
assert state.iteration == 6 * 1 and state.epoch == 1
state = engine.run(data, max_epochs=max_epochs)
assert state.iteration == 6 * 2 and state.epoch == 2
state = engine.run(data, max_epochs=max_epochs)
assert state.iteration == 6 * 3 and state.epoch == 2
state = engine.run(data, max_epochs=max_epochs)
assert state.iteration == 6 * 4 and state.epoch == 3
# We did an interruption on the last epoch
assert state.epoch == max_epochs
# Run remaining iterations without interruptions
can_interrupt = False
state = engine.run(data, max_epochs=max_epochs)
assert state.iteration == max_epochs * len(data) and state.epoch == max_epochs
# Check implementation details
assert engine._dataloader_iter is None
assert engine._internal_run_generator is None
# Rerun the engine from start to end without interruptions
num_calls_check_iter_epoch = 0
@engine.on(Events.STARTED)
def check_iter_epoch():
nonlocal num_calls_check_iter_epoch
assert engine.state.epoch == 0
assert engine.state.iteration == 0
num_calls_check_iter_epoch += 1
state = engine.run(data, max_epochs=max_epochs)
assert state.iteration == max_epochs * len(data) and state.epoch == max_epochs
assert num_calls_check_iter_epoch == 1
def test_engine_should_interrupt_error():
Engine.interrupt_resume_enabled = False
engine = Engine(lambda e, b: None)
with pytest.raises(RuntimeError, match="Engine 'interrupt/resume' feature is disabled"):
engine.interrupt()
Engine.interrupt_resume_enabled = True
def test_engine_interrupt_restart():
assert Engine.interrupt_resume_enabled
data = range(10)
max_epochs = 3
def check_input_data(e, b):
i = (e.state.iteration - 1) % len(data)
assert b == data[i]
engine = Engine(check_input_data)
can_interrupt = True
@engine.on(Events.ITERATION_COMPLETED(every=11))
def call_interrupt():
if can_interrupt:
engine.interrupt()
# Run and interrupt
state = engine.run(data, max_epochs=max_epochs)
assert state.iteration == 11 and state.epoch == 2
num_calls_check_iter_epoch = 0
@engine.on(Events.STARTED)
def check_iter_epoch():
nonlocal num_calls_check_iter_epoch
assert engine.state.epoch == 0
assert engine.state.iteration == 0
num_calls_check_iter_epoch += 1
# Reset and run with interruption
state.max_epochs = None
state = engine.run(data, max_epochs=max_epochs)
assert state.iteration == 11 and state.epoch == 2
assert num_calls_check_iter_epoch == 1
can_interrupt = False
num_calls_check_iter_epoch = 0
# Reset and run without interruption
state.max_epochs = None
state = engine.run(data, max_epochs=max_epochs)
assert state.iteration == max_epochs * len(data) and state.epoch == max_epochs
assert num_calls_check_iter_epoch == 1
|
from collections.abc import Mapping
import pytest
import torch
from ignite.engine import Engine, Events, State
from tests.ignite.engine import BatchChecker, EpochCounter, IterationCounter
def test_state_dict():
engine = Engine(lambda e, b: 1)
sd = engine.state_dict()
assert isinstance(sd, Mapping) and len(sd) == 3
assert "iteration" in sd and sd["iteration"] == 0
assert "max_epochs" in sd and sd["max_epochs"] is None
assert "epoch_length" in sd and sd["epoch_length"] is None
def _test(state):
engine.state = state
sd = engine.state_dict()
assert isinstance(sd, Mapping) and len(sd) == len(engine._state_dict_all_req_keys) + 1
assert sd["iteration"] == engine.state.iteration
assert sd["epoch_length"] == engine.state.epoch_length
assert sd["max_epochs"] == engine.state.max_epochs
_test(State(iteration=500, epoch_length=1000, max_epochs=100))
_test(State(epoch=5, epoch_length=1000, max_epochs=100))
def test_state_dict_with_user_keys():
engine = Engine(lambda e, b: 1)
engine.state_dict_user_keys.append("alpha")
engine.state_dict_user_keys.append("beta")
def _test(state):
engine.state = state
sd = engine.state_dict()
assert isinstance(sd, Mapping) and len(sd) == len(engine._state_dict_all_req_keys) + 1 + len(
engine.state_dict_user_keys
)
assert sd["iteration"] == engine.state.iteration
assert sd["epoch_length"] == engine.state.epoch_length
assert sd["max_epochs"] == engine.state.max_epochs
assert sd["alpha"] == engine.state.alpha
assert sd["beta"] == engine.state.beta
_test(State(iteration=500, epoch_length=1000, max_epochs=100, alpha=0.01, beta="Good"))
def test_state_dict_integration():
engine = Engine(lambda e, b: 1)
data = range(100)
engine.run(data, max_epochs=10)
sd = engine.state_dict()
assert isinstance(sd, Mapping) and len(sd) == len(engine._state_dict_all_req_keys) + 1
assert sd["iteration"] == engine.state.iteration == 10 * 100
assert sd["epoch_length"] == engine.state.epoch_length == 100
assert sd["max_epochs"] == engine.state.max_epochs == 10
def test_load_state_dict_asserts():
engine = Engine(lambda e, b: 1)
with pytest.raises(TypeError, match=r"Argument state_dict should be a dictionary"):
engine.load_state_dict("123")
with pytest.raises(ValueError, match=r"is absent in provided state_dict"):
engine.load_state_dict({})
with pytest.raises(ValueError, match=r"state_dict should contain only one of"):
engine.load_state_dict({"max_epochs": 100, "epoch_length": 120})
with pytest.raises(ValueError, match=r"state_dict should contain only one of"):
engine.load_state_dict({"max_epochs": 100, "epoch_length": 120, "iteration": 12, "epoch": 123})
engine = Engine(lambda e, b: 1)
engine.state_dict_user_keys.append("alpha")
with pytest.raises(ValueError, match=r"Required user state attribute"):
engine.load_state_dict({"max_epochs": 100, "epoch_length": 120, "iteration": 12})
engine = Engine(lambda e, b: 1)
with pytest.raises(ValueError, match=r"If epoch is provided in the state dict, epoch_length should not be None"):
engine.load_state_dict({"max_epochs": 100, "epoch": 2, "epoch_length": None})
def test_load_state_dict():
engine = Engine(lambda e, b: 1)
def _test(sd):
engine.load_state_dict(sd)
if "iteration" in sd:
assert sd["iteration"] == engine.state.iteration
elif "epoch" in sd:
assert sd["epoch"] == engine.state.epoch
assert sd["epoch_length"] == engine.state.epoch_length
assert sd["max_epochs"] == engine.state.max_epochs
_test({"max_epochs": 100, "epoch_length": 120, "iteration": 123})
_test({"max_epochs": 100, "epoch_length": 120, "epoch": 5})
def test_load_state_dict_with_user_keys():
engine = Engine(lambda e, b: 1)
engine.state_dict_user_keys.append("alpha")
engine.state_dict_user_keys.append("beta")
def _test(sd):
engine.load_state_dict(sd)
if "iteration" in sd:
assert sd["iteration"] == engine.state.iteration
elif "epoch" in sd:
assert sd["epoch"] == engine.state.epoch
assert sd["epoch_length"] == engine.state.epoch_length
assert sd["max_epochs"] == engine.state.max_epochs
assert sd["alpha"] == engine.state.alpha
assert sd["beta"] == engine.state.beta
_test({"max_epochs": 100, "epoch_length": 120, "iteration": 123, "alpha": 0.1, "beta": "abc"})
def test_load_state_dict_integration():
engine = Engine(lambda e, b: 1)
state_dict = {"max_epochs": 100, "epoch_length": 120, "epoch": 5}
engine.load_state_dict(state_dict)
engine.add_event_handler(Events.ITERATION_COMPLETED, IterationCounter(5 * 120 + 1))
engine.add_event_handler(Events.EPOCH_COMPLETED, EpochCounter(6))
data = range(120)
engine.run(data)
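# A minimal, illustrative sketch (not a collected test) of the save/resume pattern that the state-dict
# tests above exercise; the checkpoint file name is arbitrary and tmp_path is assumed to be a
# pathlib.Path (e.g. pytest's tmp_path fixture).
def _example_save_and_resume(tmp_path):
    data = range(120)

    trainer = Engine(lambda e, b: 1)
    trainer.run(data, max_epochs=5)

    # Persist the minimal run state: iteration, epoch_length, max_epochs (+ any state_dict_user_keys)
    fp = tmp_path / "checkpoint.pt"
    torch.save(trainer.state_dict(), fp)

    # A fresh engine resumes where the previous run stopped and continues up to 10 epochs
    resumed = Engine(lambda e, b: 1)
    resumed.load_state_dict(torch.load(fp))
    resumed.run(data, max_epochs=10)
    assert resumed.state.epoch == 10
    assert resumed.state.iteration == 10 * 120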
def test_load_state_dict_with_params_overriding_integration():
state_dict = {"max_epochs": 100, "epoch_length": 120, "epoch": 5}
data = range(120)
# Override max_epochs
new_max_epochs = 10
engine = Engine(lambda e, b: 1)
engine.load_state_dict(state_dict)
state = engine.run(data, max_epochs=new_max_epochs)
assert state.max_epochs == new_max_epochs
assert state.iteration == state_dict["epoch_length"] * new_max_epochs
assert state.epoch == new_max_epochs
with pytest.raises(ValueError, match=r"Argument max_epochs should be greater than or equal to the start epoch"):
engine.load_state_dict(state_dict)
engine.run(data, max_epochs=3)
# Override epoch_length
with pytest.raises(ValueError, match=r"Argument epoch_length should be same as in the state"):
engine.load_state_dict(state_dict)
engine.run(data, epoch_length=90)
def test_empty_state_dict_load_state_dict():
engine = Engine(lambda e, b: 1)
sd = engine.state_dict()
engine.load_state_dict(sd)
def test_continue_training():
# Tests issue : https://github.com/pytorch/ignite/issues/993
max_epochs = 2
data = range(10)
engine = Engine(lambda e, b: 1)
state = engine.run(data, max_epochs=max_epochs)
assert state.max_epochs == max_epochs
assert state.iteration == len(data) * max_epochs
assert state.epoch == max_epochs
@engine.on(Events.STARTED)
def assert_continue_training():
assert engine.state.epoch == max_epochs
state = engine.run(data, max_epochs=max_epochs * 2)
assert state.max_epochs == max_epochs * 2
assert state.iteration == len(data) * max_epochs * 2
assert state.epoch == max_epochs * 2
def test_state_dict_with_user_keys_integration(dirname):
engine = Engine(lambda e, b: 1)
engine.state_dict_user_keys.append("alpha")
@engine.on(Events.STARTED)
def init_user_values(_):
engine.state.alpha = 0.1
fp = dirname / "engine.pt"
@engine.on(Events.COMPLETED)
def save_engine(_):
state_dict = engine.state_dict()
assert "alpha" in state_dict
torch.save(state_dict, fp)
engine.run([0, 1])
assert fp.exists()
state_dict = torch.load(fp)
assert "alpha" in state_dict and state_dict["alpha"] == 0.1
def test_epoch_length():
def _test(data, max_epochs, num_iters):
batch_checker = BatchChecker(data)
def update_fn(_, batch):
assert batch_checker.check(batch), f"{batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
engine = Engine(update_fn)
engine.run(data, max_epochs=max_epochs, epoch_length=num_iters)
if num_iters is None:
num_iters = len(data)
assert engine.state.iteration == num_iters * max_epochs
assert engine.state.epoch == max_epochs
def _test_as_iter(data, max_epochs, num_iters):
batch_checker = BatchChecker(data)
def update_fn(_, batch):
assert batch_checker.check(batch), f"{batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
engine = Engine(update_fn)
engine.run(iter(data), max_epochs=max_epochs, epoch_length=num_iters)
if num_iters is None:
num_iters = len(data)
assert engine.state.iteration == num_iters * max_epochs
assert engine.state.epoch == max_epochs
max_epochs = 10
num_iters = 21
data = torch.randint(0, 1000, size=(num_iters,))
_test(data, max_epochs, num_iters=None)
_test(data, max_epochs, num_iters)
_test(data, max_epochs, num_iters // 2)
_test(data, max_epochs, num_iters * 2)
_test_as_iter(data, 1, num_iters)
_test_as_iter(data, 2, num_iters // 2)
def test_state_custom_attrs_init():
def _test(with_load_state_dict=False):
engine = Engine(lambda e, b: None)
engine.state.alpha = 0.0
engine.state.beta = 1.0
if with_load_state_dict:
engine.load_state_dict({"iteration": 3, "max_epochs": 5, "epoch_length": 5})
@engine.on(Events.STARTED | Events.EPOCH_STARTED | Events.EPOCH_COMPLETED | Events.COMPLETED)
def check_custom_attr():
assert hasattr(engine.state, "alpha") and engine.state.alpha == 0.0
assert hasattr(engine.state, "beta") and engine.state.beta == 1.0
engine.run([0, 1, 2, 3, 4], max_epochs=5)
_test()
_test(with_load_state_dict=True)
def test_restart_training():
data = range(10)
engine = Engine(lambda e, b: 1)
state = engine.run(data, max_epochs=5)
with pytest.raises(
ValueError,
match=r"Argument max_epochs should be greater than or equal to the start epoch defined in the state: 2 vs 5. "
r"Please, .+ "
r"before calling engine.run\(\) in order to restart the training from the beginning.",
):
state = engine.run(data, max_epochs=2)
state.max_epochs = None
engine.run(data, max_epochs=2)
|
import torch
try:
from torch.utils.data import IterableDataset
except ImportError:
class IterableDataset:
pass
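# Test helper: checks that batches arrive in dataset order, cycling over the data when the engine runs
# for multiple epochs.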
class BatchChecker:
def __init__(self, data, init_counter=0):
self.counter = init_counter
self.data = data
self.true_batch = None
def check(self, batch):
self.true_batch = self.data[self.counter % len(self.data)]
self.counter += 1
res = self.true_batch == batch
return res.all() if not isinstance(res, bool) else res
class IterationCounter:
def __init__(self, start_value=1):
self.current_iteration_count = start_value
def __call__(self, engine):
assert engine.state.iteration == self.current_iteration_count
self.current_iteration_count += 1
class EpochCounter:
def __init__(self, start_value=1):
self.current_epoch_count = start_value
def __call__(self, engine):
assert engine.state.epoch == self.current_epoch_count
self.current_epoch_count += 1
def setup_sampler(sampler_type, num_iters, batch_size):
if sampler_type is None:
return None, batch_size
if sampler_type == "weighted":
from torch.utils.data.sampler import WeightedRandomSampler
w = torch.ones(num_iters * batch_size, dtype=torch.float)
for i in range(num_iters):
w[batch_size * i : batch_size * (i + 1)] += i * 1.0
return WeightedRandomSampler(w, num_samples=num_iters * batch_size, replacement=True), batch_size
if sampler_type == "distributed":
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
num_replicas = 1
rank = 0
if dist.is_available() and dist.is_initialized():
num_replicas = dist.get_world_size()
rank = dist.get_rank()
dataset = torch.zeros(num_iters * batch_size)
return DistributedSampler(dataset, num_replicas=num_replicas, rank=rank), batch_size // num_replicas
class MyIterableDataset(IterableDataset):
def __init__(self, start, end):
super().__init__()
assert end > start, "this example code only works with end > start"
self.start = start
self.end = end
def __iter__(self):
return iter(range(self.start, self.end))
def get_iterable_dataset(*args, **kwargs):
return MyIterableDataset(*args, **kwargs)
|
from enum import Enum
from unittest.mock import MagicMock
import pytest
import torch
import ignite.distributed as idist
from ignite.engine import Engine, Events
from ignite.engine.events import CallableEventWithFilter, EventEnum, EventsList
def test_custom_events():
class CustomEvents(EventEnum):
TEST_EVENT = "test_event"
# Dummy engine
engine = Engine(lambda engine, batch: 0)
engine.register_events(*CustomEvents)
engine.register_events("a", "b", "c")
evs = [CustomEvents.TEST_EVENT, "a", "b", "c"]
# Handle is never called
handlers = [(e, MagicMock()) for e in evs]
for e, h in handlers:
engine.add_event_handler(e, h)
engine.run(range(1))
for _, h in handlers:
assert not h.called
# Advanced engine
def process_func(engine, batch):
for e, _ in handlers:
engine.fire_event(e)
engine = Engine(process_func)
engine.register_events(*CustomEvents)
engine.register_events("a", "b", "c")
# Handle should be called
handlers = [(e, MagicMock()) for e in evs]
for e, h in handlers:
engine.add_event_handler(e, h)
engine.run(range(1))
for _, h in handlers:
assert h.called
def test_custom_events_asserts():
# Dummy engine
engine = Engine(lambda engine, batch: 0)
class A:
pass
with pytest.raises(TypeError, match=r"Value at \d of event_names should be a str or EventEnum"):
engine.register_events(None)
with pytest.raises(TypeError, match=r"Value at \d of event_names should be a str or EventEnum"):
engine.register_events("str", None)
with pytest.raises(TypeError, match=r"Value at \d of event_names should be a str or EventEnum"):
engine.register_events(1)
with pytest.raises(TypeError, match=r"Value at \d of event_names should be a str or EventEnum"):
engine.register_events(A())
assert Events.EPOCH_COMPLETED != 1
assert Events.EPOCH_COMPLETED != "abc"
assert Events.ITERATION_COMPLETED != Events.EPOCH_COMPLETED
assert Events.ITERATION_COMPLETED != Events.EPOCH_COMPLETED(every=2)
# In the current implementation, EPOCH_COMPLETED and a filtered EPOCH_COMPLETED compare as equal
assert Events.EPOCH_COMPLETED == Events.EPOCH_COMPLETED(every=2)
assert Events.ITERATION_COMPLETED == Events.ITERATION_COMPLETED(every=2)
def test_custom_events_with_event_to_attr():
class CustomEvents(EventEnum):
TEST_EVENT = "test_event"
custom_event_to_attr = {CustomEvents.TEST_EVENT: "test_event"}
# Dummy engine
engine = Engine(lambda engine, batch: 0)
engine.register_events(*CustomEvents, event_to_attr=custom_event_to_attr)
# Handle is never called
handle = MagicMock()
engine.add_event_handler(CustomEvents.TEST_EVENT, handle)
engine.run(range(1))
assert hasattr(engine.state, "test_event")
assert engine.state.test_event == 0
# Advanced engine
def process_func(engine, batch):
engine.fire_event(CustomEvents.TEST_EVENT)
engine = Engine(process_func)
engine.register_events(*CustomEvents, event_to_attr=custom_event_to_attr)
def handle(engine):
engine.state.test_event += 1
engine.add_event_handler(CustomEvents.TEST_EVENT, handle)
engine.run(range(25))
assert engine.state.test_event == 25
custom_event_to_attr = "a"
engine = Engine(lambda engine, batch: 0)
with pytest.raises(ValueError):
engine.register_events(*CustomEvents, event_to_attr=custom_event_to_attr)
def test_custom_events_with_events_list():
class CustomEvents(EventEnum):
TEST_EVENT = "test_event"
def process_func(engine, batch):
engine.fire_event(CustomEvents.TEST_EVENT)
engine = Engine(process_func)
engine.register_events(*CustomEvents)
# Handle should be called
handle = MagicMock()
engine.add_event_handler(CustomEvents.TEST_EVENT | Events.STARTED, handle)
engine.run(range(1))
assert handle.called
def test_callable_events_with_wrong_inputs():
def ef(e, i):
return 1
expected_raise = {
# event_filter, every, once, before, after
(None, None, None, None, None): True, # raises ValueError
(ef, None, None, None, None): False,
(None, 2, None, None, None): False,
(ef, 2, None, None, None): True,
(None, None, 2, None, None): False,
(ef, None, 2, None, None): True,
(None, 2, 2, None, None): True,
(ef, 2, 2, None, None): True,
(None, None, None, 30, None): False,
(ef, None, None, 30, None): True,
(None, 2, None, 30, None): False,
(ef, 2, None, 30, None): True,
(None, None, 2, 30, None): True,
(ef, None, 2, 30, None): True,
(None, 2, 2, 30, None): True,
(ef, 2, 2, 30, None): True,
# event_filter, every, once, before, after
(None, None, None, None, 10): False,
(ef, None, None, None, 10): True,
(None, 2, None, None, 10): False,
(ef, 2, None, None, 10): True,
(None, None, 2, None, 10): True,
(ef, None, 2, None, 10): True,
(None, 2, 2, None, 10): True,
(ef, 2, 2, None, 10): True,
(None, None, None, 25, 8): False,
(ef, None, None, 25, 8): True,
(None, 2, None, 25, 8): False,
(ef, 2, None, 25, 8): True,
(None, None, 2, 25, 8): True,
(ef, None, 2, 25, 8): True,
(None, 2, 2, 25, 8): True,
(ef, 2, 2, 25, 8): True,
}
for event_filter in [None, ef]:
for every in [None, 2]:
for once in [None, 2]:
for before, after in [(None, None), (None, 10), (30, None), (25, 8)]:
if expected_raise[(event_filter, every, once, before, after)]:
with pytest.raises(
ValueError,
match=r"Only one of the input arguments should be specified, "
"except before, after and every",
):
Events.ITERATION_STARTED(
event_filter=event_filter, once=once, every=every, before=before, after=after
)
else:
Events.ITERATION_STARTED(
event_filter=event_filter, once=once, every=every, before=before, after=after
)
with pytest.raises(TypeError, match=r"Argument event_filter should be a callable"):
Events.ITERATION_STARTED(event_filter="123")
with pytest.raises(ValueError, match=r"Argument every should be integer and greater than zero"):
Events.ITERATION_STARTED(every=-1)
with pytest.raises(
ValueError, match=r"Argument once should either be a positive integer or a list of positive integers, got .+"
):
Events.ITERATION_STARTED(once=-1)
with pytest.raises(
ValueError, match=r"Argument once should either be a positive integer or a list of positive integers, got .+"
):
Events.ITERATION_STARTED(once=[1, 10.0, "pytorch"])
with pytest.raises(
ValueError, match=r"Argument once should either be a positive integer or a list of positive integers, got .+"
):
Events.ITERATION_STARTED(once=[])
with pytest.raises(ValueError, match=r"Argument before should be integer and greater or equal to zero"):
Events.ITERATION_STARTED(before=-1)
with pytest.raises(ValueError, match=r"Argument after should be integer and greater or equal to zero"):
Events.ITERATION_STARTED(after=-1)
with pytest.raises(ValueError, match=r"but will be called with"):
Events.ITERATION_STARTED(event_filter=lambda x: x)
with pytest.warns(UserWarning, match=r"default_event_filter is deprecated and will be removed"):
Events.default_event_filter(None, None)
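# Summary of the table above: every, before and after can be combined with one another, while
# event_filter and once must each be used on their own.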
@pytest.mark.parametrize(
"event",
[
Events.ITERATION_STARTED,
Events.ITERATION_COMPLETED,
Events.EPOCH_STARTED,
Events.EPOCH_COMPLETED,
Events.GET_BATCH_STARTED,
Events.GET_BATCH_COMPLETED,
Events.STARTED,
Events.COMPLETED,
],
)
def test_callable_events(event):
assert isinstance(event.value, str)
def foo(engine, _):
return True
ret = event(event_filter=foo)
assert isinstance(ret, CallableEventWithFilter)
assert ret == event
assert ret.filter == foo
assert event.name in f"{ret}"
ret = event(every=10)
assert isinstance(ret, CallableEventWithFilter)
assert ret == event
assert ret.filter is not None
assert event.name in f"{ret}"
ret = event(once=10)
assert isinstance(ret, CallableEventWithFilter)
assert ret == event
assert ret.filter is not None
assert event.name in f"{ret}"
ret = event(once=[1, 10])
assert isinstance(ret, CallableEventWithFilter)
assert ret == event
assert ret.filter is not None
assert event.name in f"{ret}"
ret = event
assert isinstance(ret, CallableEventWithFilter)
assert ret.filter is None
assert event.name in f"{ret}"
def test_callable_events_every_eq_one():
e = Events.ITERATION_STARTED(every=1)
assert isinstance(e, CallableEventWithFilter)
def test_has_handler_on_callable_events():
engine = Engine(lambda e, b: 1)
def foo(e):
pass
assert not engine.has_event_handler(foo)
engine.add_event_handler(Events.EPOCH_STARTED, foo)
assert engine.has_event_handler(foo)
def bar(e):
pass
engine.add_event_handler(Events.EPOCH_COMPLETED(every=3), bar)
assert engine.has_event_handler(bar)
assert engine.has_event_handler(bar, Events.EPOCH_COMPLETED)
assert engine.has_event_handler(bar, Events.EPOCH_COMPLETED(every=3))
def test_remove_event_handler_on_callable_events():
engine = Engine(lambda e, b: 1)
def foo(e):
pass
assert not engine.has_event_handler(foo)
engine.add_event_handler(Events.EPOCH_STARTED, foo)
assert engine.has_event_handler(foo)
engine.remove_event_handler(foo, Events.EPOCH_STARTED)
assert not engine.has_event_handler(foo)
def bar(e):
pass
engine.add_event_handler(Events.EPOCH_COMPLETED(every=3), bar)
assert engine.has_event_handler(bar)
engine.remove_event_handler(bar, Events.EPOCH_COMPLETED)
assert not engine.has_event_handler(bar)
engine.add_event_handler(Events.EPOCH_COMPLETED(every=3), bar)
assert engine.has_event_handler(bar)
engine.remove_event_handler(bar, Events.EPOCH_COMPLETED(every=3))
assert not engine.has_event_handler(bar)
def _test_every_event_filter_with_engine(device="cpu"):
data = torch.rand(100, 4, device=device)
def _test(event_name, event_attr, every, true_num_calls):
engine = Engine(lambda e, b: b)
counter = [0]
counter_every = [0]
num_calls = [0]
@engine.on(event_name(every=every))
def assert_every(engine):
counter_every[0] += every
assert getattr(engine.state, event_attr) % every == 0
assert counter_every[0] == getattr(engine.state, event_attr)
num_calls[0] += 1
@engine.on(event_name(every=every))
def assert_every_no_engine():
assert getattr(engine.state, event_attr) % every == 0
assert counter_every[0] == getattr(engine.state, event_attr)
@engine.on(event_name)
def assert_(engine):
counter[0] += 1
assert getattr(engine.state, event_attr) == counter[0]
@engine.on(event_name)
def assert_no_engine():
assert getattr(engine.state, event_attr) == counter[0]
engine.run(data, max_epochs=5)
assert num_calls[0] == true_num_calls
_test(Events.ITERATION_STARTED, "iteration", 10, 100 * 5 // 10)
_test(Events.ITERATION_COMPLETED, "iteration", 10, 100 * 5 // 10)
_test(Events.EPOCH_STARTED, "epoch", 2, 5 // 2)
_test(Events.EPOCH_COMPLETED, "epoch", 2, 5 // 2)
def test_every_event_filter_with_engine():
_test_every_event_filter_with_engine()
@pytest.mark.parametrize(
"event_name, event_attr, before, expect_calls",
[
(Events.ITERATION_COMPLETED, "iteration", 0, 0),
(Events.ITERATION_COMPLETED, "iteration", 300, 299),
(Events.ITERATION_COMPLETED, "iteration", 501, 500),
(Events.EPOCH_COMPLETED, "epoch", 0, 0),
(Events.EPOCH_COMPLETED, "epoch", 3, 2),
(Events.EPOCH_COMPLETED, "epoch", 6, 5),
],
)
def test_before_event_filter_with_engine(event_name, event_attr, before, expect_calls):
data = range(100)
engine = Engine(lambda e, b: 1)
num_calls = 0
@engine.on(event_name(before=before))
def _before_event():
nonlocal num_calls
num_calls += 1
assert getattr(engine.state, event_attr) < before
engine.run(data, max_epochs=5)
assert num_calls == expect_calls
@pytest.mark.parametrize(
"event_name, event_attr, after, expect_calls",
[
(Events.ITERATION_STARTED, "iteration", 0, 500),
(Events.ITERATION_COMPLETED, "iteration", 300, 200),
(Events.ITERATION_COMPLETED, "iteration", 500, 0),
(Events.EPOCH_STARTED, "epoch", 0, 5),
(Events.EPOCH_COMPLETED, "epoch", 3, 2),
(Events.EPOCH_COMPLETED, "epoch", 5, 0),
],
)
def test_after_event_filter_with_engine(event_name, event_attr, after, expect_calls):
data = range(100)
engine = Engine(lambda e, b: 1)
num_calls = 0
@engine.on(event_name(after=after))
def _after_event():
nonlocal num_calls
num_calls += 1
assert getattr(engine.state, event_attr) > after
engine.run(data, max_epochs=5)
assert num_calls == expect_calls
@pytest.mark.parametrize(
"event_name, event_attr, before, after, expect_calls",
[(Events.ITERATION_STARTED, "iteration", 300, 100, 199), (Events.EPOCH_COMPLETED, "epoch", 4, 1, 2)],
)
def test_before_and_after_event_filter_with_engine(event_name, event_attr, before, after, expect_calls):
data = range(100)
engine = Engine(lambda e, b: 1)
num_calls = 0
@engine.on(event_name(before=before, after=after))
def _before_and_after_event():
nonlocal num_calls
num_calls += 1
assert getattr(engine.state, event_attr) > after
engine.run(data, max_epochs=5)
assert num_calls == expect_calls
@pytest.mark.parametrize(
"event_name, event_attr, every, before, after, expect_calls",
[(Events.ITERATION_STARTED, "iteration", 5, 25, 8, 4), (Events.EPOCH_COMPLETED, "epoch", 2, 5, 1, 2)],
)
def test_every_before_and_after_event_filter_with_engine(event_name, event_attr, every, before, after, expect_calls):
data = range(100)
engine = Engine(lambda e, b: 1)
num_calls = 0
@engine.on(event_name(every=every, before=before, after=after))
def _every_before_and_after_event():
assert getattr(engine.state, event_attr) > after
assert getattr(engine.state, event_attr) < before
assert ((getattr(engine.state, event_attr) - after - 1) % every) == 0
nonlocal num_calls
num_calls += 1
engine.run(data, max_epochs=5)
assert num_calls == expect_calls
@pytest.mark.parametrize(
"event_name, event_attr, once, expect_calls",
[
(Events.ITERATION_STARTED, "iteration", 2, 1),
(Events.ITERATION_COMPLETED, "iteration", 2, 1),
(Events.EPOCH_STARTED, "epoch", 2, 1),
(Events.EPOCH_COMPLETED, "epoch", 2, 1),
(Events.ITERATION_STARTED, "iteration", [1, 5], 2),
(Events.ITERATION_COMPLETED, "iteration", [1, 5], 2),
(Events.EPOCH_STARTED, "epoch", [1, 5], 2),
(Events.EPOCH_COMPLETED, "epoch", [1, 5], 2),
],
)
def test_once_event_filter(event_name, event_attr, once, expect_calls):
data = list(range(100))
engine = Engine(lambda e, b: b)
num_calls = [0]
counter = [0]
test_once = [once] if isinstance(once, int) else once
@engine.on(event_name(once=once))
def assert_once(engine):
assert getattr(engine.state, event_attr) in test_once
num_calls[0] += 1
@engine.on(event_name)
def assert_(engine):
counter[0] += 1
assert getattr(engine.state, event_attr) == counter[0]
engine.run(data, max_epochs=10)
assert num_calls[0] == expect_calls
def test_custom_event_filter_with_engine():
special_events = [1, 2, 5, 7, 17, 20]
def custom_event_filter(engine, event):
if event in special_events:
return True
return False
def _test(event_name, event_attr, true_num_calls):
engine = Engine(lambda e, b: b)
num_calls = [0]
@engine.on(event_name(event_filter=custom_event_filter))
def assert_on_special_event(engine):
assert getattr(engine.state, event_attr) == special_events.pop(0)
num_calls[0] += 1
d = list(range(50))
engine.run(d, max_epochs=25)
assert num_calls[0] == true_num_calls
_test(Events.ITERATION_STARTED, "iteration", len(special_events))
_test(Events.ITERATION_COMPLETED, "iteration", len(special_events))
_test(Events.EPOCH_STARTED, "epoch", len(special_events))
_test(Events.EPOCH_COMPLETED, "epoch", len(special_events))
def test_callable_event_bad_behaviour():
special_events = [1, 2, 5, 7, 17, 20]
def custom_event_filter(engine, event):
if event in special_events:
return True
return False
# Check bad behaviour
engine = Engine(lambda e, b: b)
counter = [0]
# Calling the event with a filter must return a new filtered event and must not mutate Events.ITERATION_STARTED itself
Events.ITERATION_STARTED(event_filter=custom_event_filter)
@engine.on(Events.ITERATION_STARTED)
def assert_all_iters(engine):
counter[0] += 1
assert engine.state.iteration == counter[0]
d = list(range(50))
engine.run(d, max_epochs=25)
assert counter[0] == engine.state.iteration
def test_custom_callable_events():
class CustomEvents(Enum):
TEST_EVENT = "test_event"
with pytest.raises(TypeError, match=r"object is not callable"):
CustomEvents.TEST_EVENT(every=10)
class CustomEvents2(EventEnum):
TEST_EVENT = "test_event"
CustomEvents2.TEST_EVENT(every=10)
def test_custom_callable_events_with_engine():
class CustomEvents(EventEnum):
TEST_EVENT = "test_event"
event_to_attr = {CustomEvents.TEST_EVENT: "test_event"}
special_events = [1, 2, 5, 7, 17, 20]
def custom_event_filter(engine, event):
if event in special_events:
return True
return False
def _test(event_name, event_attr, true_num_calls):
def update_fn(engine, batch):
engine.state.test_event = engine.state.iteration
engine.fire_event(CustomEvents.TEST_EVENT)
engine = Engine(update_fn)
engine.register_events(*CustomEvents, event_to_attr=event_to_attr)
num_calls = [0]
@engine.on(event_name(event_filter=custom_event_filter))
def assert_on_special_event(engine):
assert getattr(engine.state, event_attr) == special_events.pop(0)
num_calls[0] += 1
d = list(range(50))
engine.run(d, max_epochs=25)
assert num_calls[0] == true_num_calls
_test(CustomEvents.TEST_EVENT, "test_event", len(special_events))
def _test_every_event_filter_with_engine_with_dataloader(device):
def _test(num_workers):
max_epochs = 3
batch_size = 4
num_iters = 21
data = torch.randint(0, 1000, size=(num_iters * batch_size,))
dataloader = torch.utils.data.DataLoader(
data,
batch_size=batch_size,
num_workers=num_workers,
pin_memory="cuda" in torch.device(device).type,
drop_last=True,
shuffle=True,
)
seen_batchs = []
def update_fn(_, batch):
batch_to_device = batch.to(device)
seen_batchs.append(batch)
engine = Engine(update_fn)
def foo(_):
pass
engine.add_event_handler(Events.EPOCH_STARTED(every=2), foo)
engine.run(dataloader, max_epochs=max_epochs)
engine = None
import gc
gc.collect()
assert len(gc.garbage) == 0
_test(num_workers=0)
_test(num_workers=1)
def test_every_event_filter_with_engine_with_dataloader():
_test_every_event_filter_with_engine_with_dataloader("cpu")
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_every_event_filter_with_engine(device)
_test_every_event_filter_with_engine_with_dataloader(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_every_event_filter_with_engine(device)
_test_every_event_filter_with_engine_with_dataloader(device)
def test_event_list():
e1 = Events.ITERATION_STARTED(once=1)
e2 = Events.ITERATION_STARTED(every=3)
e3 = Events.COMPLETED
event_list = e1 | e2 | e3
assert isinstance(event_list, EventsList)
assert len(event_list) == 3
assert event_list[0] == e1
assert event_list[1] == e2
assert event_list[2] == e3
def test_list_of_events():
def _test(event_list, true_iterations):
engine = Engine(lambda e, b: b)
iterations = []
num_calls = [0]
@engine.on(event_list)
def execute_some_handler(e):
iterations.append(e.state.iteration)
num_calls[0] += 1
engine.run(range(3), max_epochs=5)
assert iterations == true_iterations
assert num_calls[0] == len(true_iterations)
_test(Events.ITERATION_STARTED(once=1) | Events.ITERATION_STARTED(once=1), [1, 1])
_test(Events.ITERATION_STARTED(once=1) | Events.ITERATION_STARTED(once=10), [1, 10])
_test(Events.ITERATION_STARTED(once=1) | Events.ITERATION_STARTED(every=3), [1, 3, 6, 9, 12, 15])
_test(Events.ITERATION_STARTED(once=8) | Events.ITERATION_STARTED(before=3), [1, 2, 8])
_test(Events.ITERATION_STARTED(once=1) | Events.ITERATION_STARTED(after=12), [1, 13, 14, 15])
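# Hedged sketch (not part of the original suite): one handler attached to an EventsList built with
# the `|` operator, as exercised by test_list_of_events above; the helper name is illustrative only.
def _example_events_list_usage():
    from ignite.engine import Engine, Events

    calls = []
    engine = Engine(lambda e, b: b)

    @engine.on(Events.ITERATION_COMPLETED(every=2) | Events.COMPLETED)
    def on_event(engine):
        calls.append(engine.state.iteration)

    engine.run(range(4), max_epochs=1)
    # fires on iterations 2 and 4, then once more on COMPLETED (iteration is still 4)
    assert calls == [2, 4, 4]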
|
import os
from importlib.util import find_spec
from typing import Optional, Union
from unittest import mock
from unittest.mock import MagicMock, patch
import pytest
import torch
from packaging.version import Version
from pytest import approx
from torch.nn.functional import mse_loss
from torch.optim import SGD
import ignite.distributed as idist
from ignite.engine import (
_check_arg,
create_supervised_evaluator,
create_supervised_trainer,
Engine,
Events,
supervised_evaluation_step,
supervised_evaluation_step_amp,
supervised_training_step_tpu,
)
from ignite.metrics import MeanSquaredError
class DummyModel(torch.nn.Module):
def __init__(self, output_as_list=False):
super(DummyModel, self).__init__()
self.output_as_list = output_as_list
self.fc = torch.nn.Linear(1, 1, bias=False)
def forward(self, x):
if self.output_as_list:
return self.fc(x), self.fc(x)
return self.fc(x)
def _default_create_supervised_trainer(
gradient_accumulation_steps: int = 1,
model_device: Optional[str] = None,
trainer_device: Optional[str] = None,
trace: bool = False,
amp_mode: str = None,
scaler: Union[bool, "torch.cuda.amp.GradScaler"] = False,
with_model_transform: bool = False,
):
if with_model_transform:
def get_first_element(output):
return output[0]
model = DummyModel(output_as_list=True)
model_transform = get_first_element
else:
model = DummyModel()
model_transform = None
if model_device:
model.to(model_device)
model.fc.weight.data.zero_()
optimizer = SGD(model.parameters(), 0.1)
if trace:
example_input = torch.randn(1)
model = torch.jit.trace(model, example_input)
if amp_mode == "apex" and model_device == trainer_device == "cuda":
from apex import amp
model, optimizer = amp.initialize(model, optimizer, opt_level="O2")
trainer = create_supervised_trainer(
model,
optimizer,
mse_loss,
device=trainer_device,
output_transform=lambda x, y, y_pred, loss: (y_pred, loss.item()),
amp_mode=amp_mode,
scaler=scaler,
gradient_accumulation_steps=gradient_accumulation_steps,
model_transform=model_transform if model_transform is not None else lambda x: x,
)
assert model.fc.weight.data[0, 0].item() == approx(0.0)
return trainer, model
def _test_create_supervised_trainer(
gradient_accumulation_steps: int = 1,
model_device: Optional[str] = None,
trainer_device: Optional[str] = None,
trace: bool = False,
amp_mode: str = None,
scaler: Union[bool, "torch.cuda.amp.GradScaler"] = False,
with_model_transform: bool = False,
):
trainer, model = _default_create_supervised_trainer(
gradient_accumulation_steps=gradient_accumulation_steps,
model_device=model_device,
trainer_device=trainer_device,
trace=trace,
amp_mode=amp_mode,
scaler=scaler,
with_model_transform=with_model_transform,
)
x = torch.tensor([[0.01], [0.02], [0.03], [0.04], [0.05]])
y = torch.tensor([[0.015], [0.025], [0.035], [0.045], [0.055]])
data = [(_x, _y) for _x, _y in zip(x, y)]
theta = [0.0]
accumulation = [0.0]
loss = [0.0]
@trainer.on(Events.ITERATION_COMPLETED)
def _():
assert model.fc.weight.grad != 0
_x, _y = trainer.state.batch
_x, _y = _x.to(model_device), _y.to(model_device)
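        # Reference SGD update computed by hand: for mse_loss on one sample, d(loss)/dw = 2 * x * (w * x - y),
        # so with lr=0.1 each sample contributes 0.2 * x * (theta * x - y); under gradient accumulation the
        # applied step is this accumulated sum divided by gradient_accumulation_steps (see the handler below).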
accumulation[0] += 0.2 * _x.item() * (theta[0] * _x.item() - _y.item())
# value of loss should not be accumulated
if with_model_transform:
loss[0] = mse_loss(model(_x)[0], _y).item()
else:
loss[0] = mse_loss(model(_x), _y).item()
@trainer.on(Events.ITERATION_COMPLETED(every=gradient_accumulation_steps))
def _():
theta[0] -= accumulation[0] / gradient_accumulation_steps
assert pytest.approx(model.fc.weight.data[0, 0].item(), abs=1.0e-5) == theta[0]
assert pytest.approx(trainer.state.output[-1], abs=1e-5) == loss[0]
accumulation[0] = loss[0] = 0.0
if model_device == trainer_device or ((model_device == "cpu") ^ (trainer_device == "cpu")):
state = trainer.run(data)
if amp_mode == "amp":
assert state.output[0].dtype is torch.half
if scaler and isinstance(scaler, bool):
assert hasattr(state, "scaler")
else:
assert not hasattr(state, "scaler")
else:
if Version(torch.__version__) >= Version("1.7.0"):
            # This is broken in 1.6.0 and should be fixed in 1.7.0
with pytest.raises(RuntimeError, match=r"Expected all tensors to be on the same device"):
trainer.run(data)
@pytest.mark.skipif(Version(torch.__version__) < Version("1.6.0"), reason="Skip if < 1.6.0")
def test_create_supervised_training_scalar_assignment():
with mock.patch("ignite.engine._check_arg") as check_arg_mock:
check_arg_mock.return_value = None, torch.cuda.amp.GradScaler(enabled=False)
trainer, _ = _default_create_supervised_trainer(model_device="cpu", trainer_device="cpu", scaler=True)
assert hasattr(trainer.state, "scaler")
assert isinstance(trainer.state.scaler, torch.cuda.amp.GradScaler)
def _test_create_mocked_supervised_trainer(
model_device: Optional[str] = None,
trainer_device: Optional[str] = None,
trace: bool = False,
amp_mode: str = None,
scaler: Union[bool, "torch.cuda.amp.GradScaler"] = False,
):
with mock.patch("ignite.engine.supervised_training_step_amp") as training_step_amp_mock:
with mock.patch("ignite.engine.supervised_training_step_apex") as training_step_apex_mock:
with mock.patch("ignite.engine.supervised_training_step_tpu") as training_step_tpu_mock:
with mock.patch("ignite.engine.supervised_training_step") as training_step_mock:
trainer, _ = _default_create_supervised_trainer(
model_device=model_device,
trainer_device=trainer_device,
trace=trace,
amp_mode=amp_mode,
scaler=scaler,
)
x = torch.tensor([[0.1], [0.2]])
y = torch.tensor([[0.3], [0.5]])
data = [(x, y)]
on_tpu = "xla" in trainer_device if trainer_device is not None else False
mode, _ = _check_arg(on_tpu, amp_mode, scaler)
if model_device == trainer_device or ((model_device == "cpu") ^ (trainer_device == "cpu")):
trainer.run(data)
if mode == "amp":
assert training_step_amp_mock.called
elif mode == "apex":
assert training_step_apex_mock.called
elif mode == "tpu":
assert training_step_tpu_mock.called
else:
assert training_step_mock.called
def _test_create_supervised_trainer_wrong_accumulation(
model_device=None, trainer_device=None, amp_mode=None, trace=False
):
with pytest.raises(ValueError, match="Gradient_accumulation_steps must be strictly positive."):
_default_create_supervised_trainer(
gradient_accumulation_steps=0,
model_device=model_device,
trainer_device=trainer_device,
amp_mode=amp_mode,
trace=trace,
)
def _default_create_supervised_evaluator(
model_device: Optional[str] = None,
evaluator_device: Optional[str] = None,
trace: bool = False,
amp_mode: str = None,
with_model_transform: bool = False,
):
if with_model_transform:
def get_first_element(output):
return output[0]
model = DummyModel(output_as_list=True)
model_transform = get_first_element
else:
model = DummyModel()
model_transform = None
if model_device:
model.to(model_device)
model.fc.weight.data.zero_()
if trace:
example_input = torch.randn(1, 1)
model = torch.jit.trace(model, example_input)
evaluator = create_supervised_evaluator(
model,
device=evaluator_device,
amp_mode=amp_mode,
model_transform=model_transform if model_transform is not None else lambda x: x,
)
assert model.fc.weight.data[0, 0].item() == approx(0.0)
return model, evaluator
def _test_create_supervised_evaluator(
model_device: Optional[str] = None,
evaluator_device: Optional[str] = None,
trace: bool = False,
amp_mode: str = None,
with_model_transform: bool = False,
):
model, evaluator = _default_create_supervised_evaluator(
model_device=model_device,
evaluator_device=evaluator_device,
trace=trace,
amp_mode=amp_mode,
with_model_transform=with_model_transform,
)
x = torch.tensor([[1.0], [2.0]])
y = torch.tensor([[3.0], [5.0]])
data = [(x, y)]
if model_device == evaluator_device or ((model_device == "cpu") ^ (evaluator_device == "cpu")):
state = evaluator.run(data)
y_pred, y = state.output
assert y_pred[0, 0].item() == approx(0.0)
assert y_pred[1, 0].item() == approx(0.0)
assert y[0, 0].item() == approx(3.0)
assert y[1, 0].item() == approx(5.0)
assert model.fc.weight.data[0, 0].item() == approx(0.0)
else:
if Version(torch.__version__) >= Version("1.7.0"):
            # This is broken in 1.6.0 and should be fixed in 1.7.0
with pytest.raises(RuntimeError, match=r"Expected all tensors to be on the same device"):
evaluator.run(data)
def _test_mocked_supervised_evaluator(
model_device: Optional[str] = None,
evaluator_device: Optional[str] = None,
trace: bool = False,
amp_mode: str = None,
):
with mock.patch("ignite.engine.supervised_evaluation_step") as evaluation_step:
with mock.patch("ignite.engine.supervised_evaluation_step_amp") as evaluation_step_amp:
_, evaluator = _default_create_supervised_evaluator(
model_device=model_device, evaluator_device=evaluator_device, trace=trace, amp_mode=amp_mode
)
x = torch.tensor([[1.0], [2.0]])
y = torch.tensor([[3.0], [5.0]])
data = [(x, y)]
if model_device == evaluator_device or ((model_device == "cpu") ^ (evaluator_device == "cpu")):
evaluator.run(data)
if amp_mode == "amp":
assert evaluation_step_amp.called
assert not evaluation_step.called
else:
assert evaluation_step.called
assert not evaluation_step_amp.called
def _test_create_evaluation_step_amp(
autocast_mock,
model_device: Optional[str] = None,
evaluator_device: Optional[str] = None,
trace: bool = False,
amp_mode: str = None,
):
output_transform_mock = MagicMock()
model = DummyModel()
if model_device:
model.to(model_device)
model.fc.weight.data.zero_()
if trace:
example_input = torch.randn(1, 1)
model = torch.jit.trace(model, example_input)
device_type = evaluator_device.type if isinstance(evaluator_device, torch.device) else evaluator_device
on_tpu = "xla" in device_type if device_type is not None else False
mode, _ = _check_arg(on_tpu, amp_mode, None)
evaluate_step = supervised_evaluation_step_amp(model, evaluator_device, output_transform=output_transform_mock)
x = torch.tensor([[1.0], [2.0]])
y = torch.tensor([[3.0], [5.0]])
data = [(x, y)]
evaluator = Engine(evaluate_step)
evaluator.run(data)
assert autocast_mock.called
assert output_transform_mock.called
def _test_create_evaluation_step(
mock_torch_cuda_amp_module,
model_device: Optional[str] = None,
evaluator_device: Optional[str] = None,
trace: bool = False,
amp_mode: str = None,
):
output_transform_mock = MagicMock()
model = DummyModel()
if model_device:
model.to(model_device)
model.fc.weight.data.zero_()
if trace:
example_input = torch.randn(1, 1)
model = torch.jit.trace(model, example_input)
device_type = evaluator_device.type if isinstance(evaluator_device, torch.device) else evaluator_device
on_tpu = "xla" in device_type if device_type is not None else False
mode, _ = _check_arg(on_tpu, amp_mode, None)
evaluate_step = supervised_evaluation_step(model, evaluator_device, output_transform=output_transform_mock)
x = torch.tensor([[1.0], [2.0]])
y = torch.tensor([[3.0], [5.0]])
data = [(x, y)]
evaluator = Engine(evaluate_step)
evaluator.run(data)
assert not mock_torch_cuda_amp_module.called
assert output_transform_mock.called
@pytest.mark.parametrize("trainer_device", [None, "cpu"])
@pytest.mark.parametrize("trace", [False, True])
def test_create_supervised_trainer(trainer_device, trace):
_test_create_supervised_trainer_wrong_accumulation(trainer_device=trainer_device, trace=trace)
_test_create_supervised_trainer(gradient_accumulation_steps=1, trainer_device=trainer_device, trace=trace)
_test_create_supervised_trainer(gradient_accumulation_steps=3, trainer_device=trainer_device, trace=trace)
_test_create_supervised_trainer(with_model_transform=True, trainer_device=trainer_device, trace=trace)
_test_create_mocked_supervised_trainer(trainer_device=trainer_device, trace=trace)
@pytest.mark.skipif(find_spec("apex"), reason="Skip if APEX")
def test_create_supervised_trainer_apex_error():
with pytest.raises(
ModuleNotFoundError, match="Please install apex from https://github.com/nvidia/apex to use amp_mode='apex'."
):
_test_create_supervised_trainer_wrong_accumulation(trainer_device="cpu", amp_mode="apex")
with pytest.raises(
ModuleNotFoundError, match="Please install apex from https://github.com/nvidia/apex to use amp_mode='apex'."
):
_test_create_supervised_trainer(amp_mode="apex")
@pytest.fixture
def mock_torch_cuda_amp_module():
with patch.dict(
"sys.modules",
{"torch.cuda.amp": None, "torch.cuda.amp.grad_scaler": None, "torch.cuda.amp.autocast_mode": None},
):
yield torch
def test_create_supervised_trainer_amp_error(mock_torch_cuda_amp_module):
with pytest.raises(ImportError, match="Please install torch>=1.6.0 to use amp_mode='amp'."):
_test_create_supervised_trainer_wrong_accumulation(trainer_device="cpu", amp_mode="amp")
with pytest.raises(ImportError, match="Please install torch>=1.6.0 to use amp_mode='amp'."):
_test_create_supervised_trainer(amp_mode="amp")
with pytest.raises(ImportError, match="Please install torch>=1.6.0 to use scaler argument."):
_test_create_supervised_trainer(amp_mode="amp", scaler=True)
@pytest.mark.skipif(Version(torch.__version__) < Version("1.5.0"), reason="Skip if < 1.5.0")
def test_create_supervised_trainer_scaler_not_amp():
scaler = torch.cuda.amp.GradScaler(enabled=torch.cuda.is_available())
with pytest.raises(ValueError, match=f"scaler argument is {scaler}, but amp_mode is None."):
_test_create_supervised_trainer(amp_mode=None, scaler=scaler)
with pytest.raises(ValueError, match="scaler argument is True, but amp_mode is None."):
_test_create_supervised_trainer(amp_mode=None, scaler=True)
with pytest.raises(ValueError, match="scaler argument is True, but amp_mode is apex."):
_test_create_supervised_trainer(amp_mode="apex", scaler=True)
with pytest.raises(ValueError, match=f"scaler argument is {scaler}, but amp_mode is apex."):
_test_create_supervised_trainer(amp_mode="apex", scaler=scaler)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_trainer_on_cuda():
model_device = trainer_device = "cuda"
_test_create_supervised_trainer_wrong_accumulation(model_device=model_device, trainer_device=trainer_device)
_test_create_supervised_trainer(
gradient_accumulation_steps=1, model_device=model_device, trainer_device=trainer_device
)
_test_create_supervised_trainer(
gradient_accumulation_steps=3, model_device=model_device, trainer_device=trainer_device
)
_test_create_mocked_supervised_trainer(model_device=model_device, trainer_device=trainer_device)
@pytest.mark.skipif(Version(torch.__version__) < Version("1.6.0"), reason="Skip if < 1.6.0")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_trainer_on_cuda_amp():
model_device = trainer_device = "cuda"
_test_create_supervised_trainer_wrong_accumulation(
model_device=model_device, trainer_device=trainer_device, amp_mode="amp"
)
_test_create_supervised_trainer(
gradient_accumulation_steps=1, model_device=model_device, trainer_device=trainer_device, amp_mode="amp"
)
_test_create_supervised_trainer(
gradient_accumulation_steps=3, model_device=model_device, trainer_device=trainer_device, amp_mode="amp"
)
_test_create_mocked_supervised_trainer(model_device=model_device, trainer_device=trainer_device, amp_mode="amp")
@pytest.mark.skipif(Version(torch.__version__) < Version("1.6.0"), reason="Skip if < 1.6.0")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_trainer_on_cuda_amp_scaler():
model_device = trainer_device = "cuda"
_test_create_supervised_trainer_wrong_accumulation(
model_device=model_device, trainer_device=trainer_device, amp_mode="amp"
)
_test_create_supervised_trainer(
gradient_accumulation_steps=1,
model_device=model_device,
trainer_device=trainer_device,
amp_mode="amp",
scaler=True,
)
_test_create_supervised_trainer(
gradient_accumulation_steps=3,
model_device=model_device,
trainer_device=trainer_device,
amp_mode="amp",
scaler=True,
)
_test_create_mocked_supervised_trainer(
model_device=model_device, trainer_device=trainer_device, amp_mode="amp", scaler=True
)
scaler = torch.cuda.amp.GradScaler(enabled=torch.cuda.is_available())
_test_create_supervised_trainer(
gradient_accumulation_steps=1,
model_device=model_device,
trainer_device=trainer_device,
amp_mode="amp",
scaler=scaler,
)
_test_create_supervised_trainer(
gradient_accumulation_steps=3,
model_device=model_device,
trainer_device=trainer_device,
amp_mode="amp",
scaler=scaler,
)
_test_create_mocked_supervised_trainer(
model_device=model_device, trainer_device=trainer_device, amp_mode="amp", scaler=scaler
)
# @pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
# @pytest.mark.skipif(not find_spec("apex"), reason="Skip if no APEX")
@pytest.mark.skip(reason="Temporarily disabled, as it fails because of an issue from apex side")
def test_create_supervised_trainer_on_cuda_apex():
model_device = trainer_device = "cuda"
_test_create_supervised_trainer_wrong_accumulation(
model_device=model_device, trainer_device=trainer_device, amp_mode="apex"
)
_test_create_supervised_trainer(
gradient_accumulation_steps=1, model_device=model_device, trainer_device=trainer_device, amp_mode="apex"
)
_test_create_supervised_trainer(
gradient_accumulation_steps=3, model_device=model_device, trainer_device=trainer_device, amp_mode="apex"
)
_test_create_mocked_supervised_trainer(model_device=model_device, trainer_device=trainer_device, amp_mode="apex")
@pytest.mark.skipif(idist.has_xla_support, reason="Skip if has PyTorch XLA package")
def test_supervised_training_step_tpu_no_xla():
with pytest.raises(ModuleNotFoundError, match="torch_xla cannot be imported, please install PyTorch XLA."):
supervised_training_step_tpu(model=None, optimizer=None, loss_fn=None)
@pytest.mark.skipif(idist.has_xla_support, reason="Skip if has PyTorch XLA package")
def test_create_supervised_trainer_on_tpu_no_xla():
model_device = "cpu"
trainer_device = "xla"
with pytest.raises(RuntimeError, match=r"In order to run on TPU, please install PyTorch XLA"):
_test_create_supervised_trainer(model_device=model_device, trainer_device=trainer_device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_create_supervised_trainer_on_tpu():
model_device = trainer_device = "xla"
_test_create_supervised_trainer_wrong_accumulation(model_device=model_device, trainer_device=trainer_device)
_test_create_supervised_trainer(
gradient_accumulation_steps=1, model_device=model_device, trainer_device=trainer_device
)
_test_create_supervised_trainer(
gradient_accumulation_steps=3, model_device=model_device, trainer_device=trainer_device
)
_test_create_mocked_supervised_trainer(model_device=model_device, trainer_device=trainer_device)
@pytest.mark.tpu
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_create_supervised_trainer_on_tpu_amp():
model_device = trainer_device = "xla"
with pytest.raises(ValueError, match="amp_mode cannot be used with xla device."):
_test_create_supervised_trainer(model_device=model_device, trainer_device=trainer_device, amp_mode="amp")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_trainer_on_cuda_with_model_on_cpu():
_test_create_supervised_trainer_wrong_accumulation(trainer_device="cuda")
_test_create_supervised_trainer(gradient_accumulation_steps=1, trainer_device="cuda")
_test_create_supervised_trainer(gradient_accumulation_steps=3, trainer_device="cuda")
_test_create_mocked_supervised_trainer(trainer_device="cuda")
def test_create_supervised_evaluator():
_test_create_supervised_evaluator()
_test_mocked_supervised_evaluator()
# older versions didn't have the autocast method so we skip the test for older builds
if Version(torch.__version__) >= Version("1.6.0"):
with mock.patch("torch.cuda.amp.autocast") as mock_torch_cuda_amp_module:
_test_create_evaluation_step_amp(mock_torch_cuda_amp_module)
def test_create_supervised_evaluator_on_cpu():
_test_create_supervised_evaluator(evaluator_device="cpu")
_test_mocked_supervised_evaluator(evaluator_device="cpu")
# older versions didn't have the autocast method so we skip the test for older builds
if Version(torch.__version__) >= Version("1.6.0"):
with mock.patch("torch.cuda.amp.autocast") as mock_torch_cuda_amp_module:
_test_create_evaluation_step(mock_torch_cuda_amp_module, evaluator_device="cpu")
_test_create_evaluation_step_amp(mock_torch_cuda_amp_module, evaluator_device="cpu")
def test_create_supervised_evaluator_traced_on_cpu():
_test_create_supervised_evaluator(evaluator_device="cpu", trace=True)
_test_mocked_supervised_evaluator(evaluator_device="cpu", trace=True)
# older versions didn't have the autocast method so we skip the test for older builds
if Version(torch.__version__) >= Version("1.6.0"):
with mock.patch("torch.cuda.amp.autocast") as mock_torch_cuda_amp_module:
_test_create_evaluation_step(mock_torch_cuda_amp_module, evaluator_device="cpu", trace=True)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_evaluator_on_cuda():
model_device = evaluator_device = "cuda"
_test_create_supervised_evaluator(model_device=model_device, evaluator_device=evaluator_device)
_test_mocked_supervised_evaluator(model_device=model_device, evaluator_device=evaluator_device)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_evaluator_on_cuda_with_model_on_cpu():
_test_create_supervised_evaluator(evaluator_device="cuda")
_test_mocked_supervised_evaluator(evaluator_device="cuda")
@pytest.mark.skipif(Version(torch.__version__) < Version("1.6.0"), reason="Skip if < 1.6.0")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_evaluator_on_cuda_amp():
model_device = evaluator_device = "cuda"
_test_create_supervised_evaluator(model_device=model_device, evaluator_device=evaluator_device, amp_mode="amp")
_test_mocked_supervised_evaluator(model_device=model_device, evaluator_device=evaluator_device, amp_mode="amp")
def test_create_supervised_evaluator_amp_error(mock_torch_cuda_amp_module):
with pytest.raises(ImportError, match="Please install torch>=1.6.0 to use amp_mode='amp'."):
_test_create_supervised_evaluator(amp_mode="amp")
def test_create_supervised_evaluator_with_metrics():
model = DummyModel()
model.fc.weight.data.zero_()
evaluator = create_supervised_evaluator(model, metrics={"mse": MeanSquaredError()})
x = torch.tensor([[1.0], [2.0]])
y = torch.tensor([[3.0], [4.0]])
data = [(x, y)]
state = evaluator.run(data)
assert state.metrics["mse"] == 12.5
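# Hedged sketch (not part of the original suite): the usual pairing of create_supervised_trainer and
# create_supervised_evaluator on CPU, reusing DummyModel, SGD, mse_loss and MeanSquaredError already
# imported in this module; the helper name and the toy data are illustrative only.
def _example_supervised_trainer_and_evaluator():
    model = DummyModel()
    optimizer = SGD(model.parameters(), lr=0.1)
    trainer = create_supervised_trainer(model, optimizer, mse_loss)
    evaluator = create_supervised_evaluator(model, metrics={"mse": MeanSquaredError()})
    data = [(torch.tensor([[0.1]]), torch.tensor([[0.2]])) for _ in range(4)]
    trainer.run(data, max_epochs=2)
    state = evaluator.run(data)
    assert "mse" in state.metrics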
|
import functools
import gc
from unittest.mock import call, create_autospec, MagicMock
import pytest
from pytest import raises
from ignite.engine import Engine, Events, State
from ignite.engine.events import EventsList
class DummyEngine(Engine):
def __init__(self):
super(DummyEngine, self).__init__(lambda e, b: 1)
def run(self, num_times):
self.state = State()
for _ in range(num_times):
self.fire_event(Events.STARTED)
self.fire_event(Events.COMPLETED)
return self.state
def test_add_event_handler_raises_with_invalid_event():
engine = Engine(lambda e, b: 1)
with pytest.raises(ValueError, match=r"is not a valid event for this Engine"):
engine.add_event_handler("incorrect", lambda engine: None)
def test_add_event_handler_raises_with_invalid_signature():
engine = Engine(MagicMock())
def handler(engine):
pass
engine.add_event_handler(Events.STARTED, handler)
engine.add_event_handler(Events.STARTED, handler, 1)
def handler_with_args(engine, a):
pass
engine.add_event_handler(Events.STARTED, handler_with_args, 1)
with pytest.raises(ValueError):
engine.add_event_handler(Events.STARTED, handler_with_args)
def handler_with_kwargs(engine, b=42):
pass
engine.add_event_handler(Events.STARTED, handler_with_kwargs, b=2)
with pytest.raises(ValueError):
engine.add_event_handler(Events.STARTED, handler_with_kwargs, c=3)
engine.add_event_handler(Events.STARTED, handler_with_kwargs, 1, b=2)
def handler_with_args_and_kwargs(engine, a, b=42):
pass
engine.add_event_handler(Events.STARTED, handler_with_args_and_kwargs, 1, b=2)
engine.add_event_handler(Events.STARTED, handler_with_args_and_kwargs, 1, 2, b=2)
with pytest.raises(ValueError):
engine.add_event_handler(Events.STARTED, handler_with_args_and_kwargs, 1, b=2, c=3)
def test_add_event_handler():
engine = DummyEngine()
class Counter(object):
def __init__(self, count=0):
self.count = count
started_counter = Counter()
def handle_iteration_started(engine, counter):
counter.count += 1
engine.add_event_handler(Events.STARTED, handle_iteration_started, started_counter)
completed_counter = Counter()
def handle_iteration_completed(engine, counter):
counter.count += 1
engine.add_event_handler(Events.COMPLETED, handle_iteration_completed, completed_counter)
engine.run(15)
assert started_counter.count == 15
assert completed_counter.count == 15
def test_add_event_handler_without_engine():
engine = DummyEngine()
class Counter(object):
def __init__(self, count=0):
self.count = count
started_counter = Counter()
def handle_iteration_started():
started_counter.count += 1
engine.add_event_handler(Events.STARTED, handle_iteration_started)
completed_counter = Counter()
def handle_iteration_completed(counter):
counter.count += 1
engine.add_event_handler(Events.COMPLETED, handle_iteration_completed, completed_counter)
engine.run(15)
assert started_counter.count == 15
assert completed_counter.count == 15
def test_adding_multiple_event_handlers():
mock_fn_1 = create_autospec(spec=lambda x: None)
mock_fn_2 = create_autospec(spec=lambda x: None)
engine = DummyEngine()
handlers = [mock_fn_1, mock_fn_2]
for handler in handlers:
engine.add_event_handler(Events.STARTED, handler)
engine.run(1)
for handler in handlers:
handler.assert_called_once_with(engine)
@pytest.mark.parametrize(
"event1, event2",
[
(Events.STARTED, Events.COMPLETED),
(Events.EPOCH_STARTED, Events.EPOCH_COMPLETED),
(Events.ITERATION_STARTED, Events.ITERATION_COMPLETED),
(Events.ITERATION_STARTED(every=2), Events.ITERATION_COMPLETED(every=2)),
],
)
def test_event_removable_handle(event1, event2):
    # Removable handle removes the handler from the engine.
engine = Engine(lambda e, b: None)
handler = create_autospec(spec=lambda x: None)
assert not hasattr(handler, "_parent")
removable_handle = engine.add_event_handler(event1, handler)
assert engine.has_event_handler(handler, event1)
engine.run([1, 2])
handler.assert_any_call(engine)
num_calls = handler.call_count
removable_handle.remove()
assert not engine.has_event_handler(handler, event1)
    # A second engine run does not fire the handler again.
engine.run([1, 2])
    # Assert that the handler wasn't called again
assert handler.call_count == num_calls
# Removable handle can be used as a context manager
handler = create_autospec(spec=lambda x: None)
with engine.add_event_handler(event1, handler):
assert engine.has_event_handler(handler, event1)
engine.run([1, 2])
assert not engine.has_event_handler(handler, event1)
handler.assert_any_call(engine)
num_calls = handler.call_count
engine.run([1, 2])
    # Assert that the handler wasn't called again
assert handler.call_count == num_calls
    # Removable handle only affects a single event registration
handler = MagicMock(spec_set=True)
with engine.add_event_handler(event1, handler):
with engine.add_event_handler(event2, handler):
assert engine.has_event_handler(handler, event1)
assert engine.has_event_handler(handler, event2)
assert engine.has_event_handler(handler, event1)
assert not engine.has_event_handler(handler, event2)
assert not engine.has_event_handler(handler, event1)
assert not engine.has_event_handler(handler, event2)
    # Removable handle can be re-entered and re-exited
handler = MagicMock(spec_set=True)
remove = engine.add_event_handler(event1, handler)
with remove:
with remove:
assert engine.has_event_handler(handler, event1)
assert not engine.has_event_handler(handler, event1)
assert not engine.has_event_handler(handler, event1)
    # Removable handle holds weakrefs and does not keep the engine or handler alive
def _add_in_closure():
_engine = Engine(lambda e, b: None)
def _handler(_):
pass
_handle = _engine.add_event_handler(event1, _handler)
assert _handle.engine() is _engine
if event1.filter is None:
assert _handle.handler() is _handler
else:
assert _handle.handler()._parent() is _handler
return _handle
removable_handle = _add_in_closure()
    # gc.collect resolves reference cycles in engine/state;
    # this is required to ensure object deletion in Python 2
gc.collect()
assert removable_handle.engine() is None
assert removable_handle.handler() is None
def test_events_list_removable_handle():
    # Removable handle removes the handler from the engine.
engine = DummyEngine()
handler = create_autospec(spec=lambda x: None)
assert not hasattr(handler, "_parent")
events_list = Events.STARTED | Events.COMPLETED
removable_handle = engine.add_event_handler(events_list, handler)
for e in events_list:
assert engine.has_event_handler(handler, e)
engine.run(1)
calls = [call(engine), call(engine)]
handler.assert_has_calls(calls)
assert handler.call_count == 2
removable_handle.remove()
for e in events_list:
assert not engine.has_event_handler(handler, e)
    # A second engine run does not fire the handler again.
engine.run(1)
handler.assert_has_calls(calls)
assert handler.call_count == 2
# Removable handle can be used as a context manager
handler = create_autospec(spec=lambda x: None)
with engine.add_event_handler(events_list, handler):
for e in events_list:
assert engine.has_event_handler(handler, e)
engine.run(1)
for e in events_list:
assert not engine.has_event_handler(handler, e)
handler.assert_has_calls(calls)
assert handler.call_count == 2
engine.run(1)
handler.assert_has_calls(calls)
assert handler.call_count == 2
    # Removable handle only affects a single event registration
handler = create_autospec(spec=lambda x: None)
other_events_list = Events.EPOCH_STARTED | Events.EPOCH_COMPLETED
with engine.add_event_handler(events_list, handler):
with engine.add_event_handler(other_events_list, handler):
for e in events_list:
assert engine.has_event_handler(handler, e)
for e in other_events_list:
assert engine.has_event_handler(handler, e)
for e in events_list:
assert engine.has_event_handler(handler, e)
for e in other_events_list:
assert not engine.has_event_handler(handler, e)
for e in events_list:
assert not engine.has_event_handler(handler, e)
for e in other_events_list:
assert not engine.has_event_handler(handler, e)
    # Removable handle can be re-entered and re-exited
handler = create_autospec(spec=lambda x: None)
remove = engine.add_event_handler(events_list, handler)
with remove:
with remove:
for e in events_list:
assert engine.has_event_handler(handler, e)
for e in events_list:
assert not engine.has_event_handler(handler, e)
for e in events_list:
assert not engine.has_event_handler(handler, e)
    # Removable handle holds weakrefs and does not keep the engine or handler alive
def _add_in_closure():
_engine = DummyEngine()
def _handler(_):
pass
_handle = _engine.add_event_handler(events_list, _handler)
assert _handle.engine() is _engine
assert _handle.handler() is _handler
return _handle
removable_handle = _add_in_closure()
    # gc.collect resolves reference cycles in engine/state;
    # this is required to ensure object deletion in Python 2
gc.collect()
assert removable_handle.engine() is None
assert removable_handle.handler() is None
def test_eventslist__append_raises():
ev_list = EventsList()
with pytest.raises(TypeError, match=r"Argument event should be Events or CallableEventWithFilter"):
ev_list._append("abc")
def test_has_event_handler():
engine = DummyEngine()
handlers = [MagicMock(spec_set=True), MagicMock(spec_set=True)]
m = MagicMock(spec_set=True)
for handler in handlers:
engine.add_event_handler(Events.STARTED, handler)
engine.add_event_handler(Events.COMPLETED, m)
for handler in handlers:
assert engine.has_event_handler(handler, Events.STARTED)
assert engine.has_event_handler(handler)
assert not engine.has_event_handler(handler, Events.COMPLETED)
assert not engine.has_event_handler(handler, Events.EPOCH_STARTED)
assert not engine.has_event_handler(m, Events.STARTED)
assert engine.has_event_handler(m, Events.COMPLETED)
assert engine.has_event_handler(m)
assert not engine.has_event_handler(m, Events.EPOCH_STARTED)
def test_remove_event_handler():
engine = DummyEngine()
with pytest.raises(ValueError, match=r"Input event name"):
engine.remove_event_handler(lambda x: x, "an event")
def on_started(engine):
return 0
engine.add_event_handler(Events.STARTED, on_started)
with pytest.raises(ValueError, match=r"Input handler"):
engine.remove_event_handler(lambda x: x, Events.STARTED)
h1 = MagicMock(spec_set=True)
h2 = MagicMock(spec_set=True)
handlers = [h1, h2]
m = MagicMock(spec_set=True)
for handler in handlers:
engine.add_event_handler(Events.EPOCH_STARTED, handler)
engine.add_event_handler(Events.EPOCH_COMPLETED, m)
assert len(engine._event_handlers[Events.EPOCH_STARTED]) == 2
engine.remove_event_handler(h1, Events.EPOCH_STARTED)
assert len(engine._event_handlers[Events.EPOCH_STARTED]) == 1
assert engine._event_handlers[Events.EPOCH_STARTED][0][0] == h2
assert len(engine._event_handlers[Events.EPOCH_COMPLETED]) == 1
engine.remove_event_handler(m, Events.EPOCH_COMPLETED)
assert len(engine._event_handlers[Events.EPOCH_COMPLETED]) == 0
def test_args_and_kwargs_are_passed_to_event():
engine = DummyEngine()
kwargs = {"a": "a", "b": "b"}
args = (1, 2, 3)
handlers = []
for event in [Events.STARTED, Events.COMPLETED]:
handler = create_autospec(spec=lambda e, x1, x2, x3, a, b: None)
engine.add_event_handler(event, handler, *args, **kwargs)
handlers.append(handler)
engine.run(1)
called_handlers = [handle for handle in handlers if handle.called]
assert len(called_handlers) == 2
for handler in called_handlers:
handler_args, handler_kwargs = handler.call_args
assert handler_args[0] == engine
assert handler_args[1::] == args
assert handler_kwargs == kwargs
def test_on_decorator_raises_with_invalid_event():
engine = DummyEngine()
with pytest.raises(ValueError):
@engine.on("incorrect")
def f(engine):
pass
def test_on_decorator():
engine = DummyEngine()
class Counter(object):
def __init__(self, count=0):
self.count = count
started_counter = Counter()
@engine.on(Events.STARTED, started_counter)
def handle_iteration_started(engine, started_counter):
started_counter.count += 1
completed_counter = Counter()
@engine.on(Events.COMPLETED, completed_counter)
def handle_iteration_completed(engine, completed_counter):
completed_counter.count += 1
engine.run(15)
assert started_counter.count == 15
assert completed_counter.count == 15
def test_returns_state():
engine = Engine(MagicMock(return_value=1))
state = engine.run([0])
assert isinstance(state, State)
def test_state_attributes():
dataloader = [1, 2, 3]
engine = Engine(MagicMock(return_value=1))
state = engine.run(dataloader, max_epochs=3)
assert state.iteration == 9
assert state.output == 1
assert state.batch == 3
assert state.dataloader == dataloader
assert state.epoch == 3
assert state.max_epochs == 3
assert state.metrics == {}
with pytest.raises(RuntimeError, match=r"Unknown event name"):
state.get_event_attrib_value("abc")
def test_default_exception_handler():
update_function = MagicMock(side_effect=ValueError())
engine = Engine(update_function)
with raises(ValueError):
engine.run([1])
def test_custom_exception_handler():
value_error = ValueError()
update_function = MagicMock(side_effect=value_error)
engine = Engine(update_function)
class ExceptionCounter(object):
def __init__(self):
self.exceptions = []
def __call__(self, engine, e):
self.exceptions.append(e)
counter = ExceptionCounter()
engine.add_event_handler(Events.EXCEPTION_RAISED, counter)
engine.run([1])
# only one call from _run_once_over_data, since the exception is swallowed
assert len(counter.exceptions) == 1 and counter.exceptions[0] == value_error
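# Hedged sketch (not part of the original suite): an EXCEPTION_RAISED handler that swallows one
# specific exception type and re-raises anything else; the helper name is illustrative only.
def _example_selective_exception_handler():
    engine = Engine(MagicMock(side_effect=ValueError("boom")))

    def handle_exception(engine, e):
        if not isinstance(e, ValueError):
            raise e

    engine.add_event_handler(Events.EXCEPTION_RAISED, handle_exception)
    # the ValueError is passed to the handler and swallowed, so run() terminates without raising
    engine.run([1])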
def test_event_handlers_with_decoration():
engine = Engine(lambda e, b: b)
def decorated(fun):
@functools.wraps(fun)
def wrapper(*args, **kwargs):
return fun(*args, **kwargs)
return wrapper
values = []
def foo():
values.append("foo")
@decorated
def decorated_foo():
values.append("decorated_foo")
engine.add_event_handler(Events.EPOCH_STARTED, foo)
engine.add_event_handler(Events.EPOCH_STARTED(every=2), foo)
engine.add_event_handler(Events.EPOCH_STARTED, decorated_foo)
engine.add_event_handler(Events.EPOCH_STARTED(every=2), decorated_foo)
def foo_args(e):
values.append("foo_args")
values.append(e.state.iteration)
@decorated
def decorated_foo_args(e):
values.append("decorated_foo_args")
values.append(e.state.iteration)
engine.add_event_handler(Events.EPOCH_STARTED, foo_args)
engine.add_event_handler(Events.EPOCH_STARTED(every=2), foo_args)
engine.add_event_handler(Events.EPOCH_STARTED, decorated_foo_args)
engine.add_event_handler(Events.EPOCH_STARTED(every=2), decorated_foo_args)
class Foo:
def __init__(self):
self.values = []
def foo(self):
self.values.append("foo")
@decorated
def decorated_foo(self):
self.values.append("decorated_foo")
def foo_args(self, e):
self.values.append("foo_args")
self.values.append(e.state.iteration)
@decorated
def decorated_foo_args(self, e):
self.values.append("decorated_foo_args")
self.values.append(e.state.iteration)
foo = Foo()
engine.add_event_handler(Events.EPOCH_STARTED, foo.foo)
engine.add_event_handler(Events.EPOCH_STARTED(every=2), foo.foo)
engine.add_event_handler(Events.EPOCH_STARTED, foo.decorated_foo)
engine.add_event_handler(Events.EPOCH_STARTED(every=2), foo.decorated_foo)
engine.add_event_handler(Events.EPOCH_STARTED, foo.foo_args)
engine.add_event_handler(Events.EPOCH_STARTED(every=2), foo.foo_args)
engine.add_event_handler(Events.EPOCH_STARTED, foo.decorated_foo_args)
engine.add_event_handler(Events.EPOCH_STARTED(every=2), foo.decorated_foo_args)
engine.run([0], max_epochs=2)
assert values == foo.values
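# Hedged sketch (not part of the original suite): the RemovableEventHandle returned by
# add_event_handler used as a context manager, so a handler stays attached only inside the block.
def _example_temporary_handler():
    seen = []
    engine = Engine(lambda e, b: b)

    def log_epoch(engine):
        seen.append(engine.state.epoch)

    with engine.add_event_handler(Events.EPOCH_COMPLETED, log_epoch):
        assert engine.has_event_handler(log_epoch, Events.EPOCH_COMPLETED)
        engine.run(range(2), max_epochs=2)
    assert not engine.has_event_handler(log_epoch, Events.EPOCH_COMPLETED)
    assert seen == [1, 2]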
|
import sys
import time
import pytest
from ignite.engine import Engine, Events
from ignite.handlers import Timer
if sys.platform.startswith("darwin"):
pytest.skip("Skip if on MacOS", allow_module_level=True)
def test_timer():
sleep_t = 0.2
n_iter = 3
def _train_func(engine, batch):
time.sleep(sleep_t)
def _test_func(engine, batch):
time.sleep(sleep_t)
trainer = Engine(_train_func)
tester = Engine(_test_func)
t_total = Timer()
t_batch = Timer(average=True)
t_train = Timer()
t_total.attach(trainer)
t_batch.attach(
trainer, pause=Events.ITERATION_COMPLETED, resume=Events.ITERATION_STARTED, step=Events.ITERATION_COMPLETED
)
t_train.attach(trainer, pause=Events.EPOCH_COMPLETED, resume=Events.EPOCH_STARTED)
@trainer.on(Events.EPOCH_COMPLETED)
def run_validation(trainer):
tester.run(range(n_iter))
# Run "training"
trainer.run(range(n_iter))
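    # Expected timings: each of the n_iter training iterations sleeps sleep_t, and the validation run
    # triggered at EPOCH_COMPLETED adds another n_iter * sleep_t, hence t_total ~ 2 * n_iter * sleep_t,
    # t_batch ~ sleep_t on average, and t_train (paused before validation starts) ~ n_iter * sleep_t.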
assert pytest.approx(t_total.value(), abs=1e-1) == 2 * n_iter * sleep_t
assert pytest.approx(t_batch.value(), abs=1e-1) == sleep_t
assert pytest.approx(t_train.value(), abs=1e-1) == n_iter * sleep_t
t_total.reset()
assert pytest.approx(t_total.value(), abs=1e-1) == 0.0
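# Hedged sketch (not part of the original suite): Timer used directly, without an engine,
# to time an arbitrary code block via resume()/pause()/value(); the helper name is illustrative.
def _example_timer_standalone():
    t = Timer(average=False)
    t.reset()
    t.resume()
    time.sleep(0.01)
    t.pause()
    assert t.value() > 0.0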
|
import pytest
import torch
@pytest.fixture()
def dummy_model_factory():
class DummyModel(torch.nn.Module):
def __init__(self):
super(DummyModel, self).__init__()
self.fc1 = torch.nn.Linear(10, 10)
self.fc2 = torch.nn.Linear(12, 12)
self.fc1.weight.data.zero_()
self.fc1.bias.data.zero_()
self.fc2.weight.data.fill_(1.0)
self.fc2.bias.data.fill_(1.0)
def get_dummy_model(with_grads=True, with_frozen_layer=False):
model = DummyModel()
if with_grads:
model.fc2.weight.grad = torch.zeros_like(model.fc2.weight)
model.fc2.bias.grad = torch.zeros_like(model.fc2.bias)
if not with_frozen_layer:
model.fc1.weight.grad = torch.zeros_like(model.fc1.weight)
model.fc1.bias.grad = torch.zeros_like(model.fc1.bias)
if with_frozen_layer:
for param in model.fc1.parameters():
param.requires_grad = False
return model
return get_dummy_model
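# Hedged sketch (not part of the original suite): how a test would consume the factory fixture
# defined above; the function name is illustrative and intentionally not collected by pytest.
def _example_use_dummy_model_factory(dummy_model_factory):
    model = dummy_model_factory(with_grads=True, with_frozen_layer=True)
    assert model.fc2.weight.grad is not None
    assert all(not p.requires_grad for p in model.fc1.parameters())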
|
import os
import pytest
import torch
import ignite.distributed as idist
from ignite.engine import Engine, Events
from ignite.handlers import EarlyStopping
def do_nothing_update_fn(engine, batch):
pass
def test_args_validation():
trainer = Engine(do_nothing_update_fn)
with pytest.raises(ValueError, match=r"Argument patience should be positive integer."):
EarlyStopping(patience=-1, score_function=lambda engine: 0, trainer=trainer)
with pytest.raises(ValueError, match=r"Argument min_delta should not be a negative number."):
EarlyStopping(patience=2, min_delta=-0.1, score_function=lambda engine: 0, trainer=trainer)
with pytest.raises(TypeError, match=r"Argument score_function should be a function."):
EarlyStopping(patience=2, score_function=12345, trainer=trainer)
with pytest.raises(TypeError, match=r"Argument trainer should be an instance of Engine."):
EarlyStopping(patience=2, score_function=lambda engine: 0, trainer=None)
def test_simple_early_stopping():
scores = iter([1.0, 0.8, 0.88])
def score_function(engine):
return next(scores)
trainer = Engine(do_nothing_update_fn)
h = EarlyStopping(patience=2, score_function=score_function, trainer=trainer)
# Call 3 times and check if stopped
assert not trainer.should_terminate
h(None)
assert not trainer.should_terminate
h(None)
assert not trainer.should_terminate
h(None)
assert trainer.should_terminate
def test_state_dict():
scores = iter([1.0, 0.8, 0.88])
def score_function(engine):
return next(scores)
trainer = Engine(do_nothing_update_fn)
h = EarlyStopping(patience=2, score_function=score_function, trainer=trainer)
# Call 3 times and check if stopped
assert not trainer.should_terminate
h(None)
assert not trainer.should_terminate
# Swap to new object, but maintain state
h2 = EarlyStopping(patience=2, score_function=score_function, trainer=trainer)
h2.load_state_dict(h.state_dict())
h2(None)
assert not trainer.should_terminate
h2(None)
assert trainer.should_terminate
def test_early_stopping_on_delta():
scores = iter([1.0, 2.0, 2.01, 3.0, 3.01, 3.02])
trainer = Engine(do_nothing_update_fn)
h = EarlyStopping(patience=2, min_delta=0.1, score_function=lambda _: next(scores), trainer=trainer)
assert not trainer.should_terminate
h(None) # counter == 0
assert not trainer.should_terminate
h(None) # delta == 1.0; counter == 0
assert not trainer.should_terminate
h(None) # delta == 0.01; counter == 1
assert not trainer.should_terminate
h(None) # delta == 0.99; counter == 0
assert not trainer.should_terminate
h(None) # delta == 0.01; counter == 1
assert not trainer.should_terminate
h(None) # delta == 0.01; counter == 2
assert trainer.should_terminate
def test_early_stopping_on_last_event_delta():
scores = iter([0.0, 0.3, 0.6])
trainer = Engine(do_nothing_update_fn)
h = EarlyStopping(
patience=2, min_delta=0.4, cumulative_delta=False, score_function=lambda _: next(scores), trainer=trainer
)
assert not trainer.should_terminate
h(None) # counter == 0
assert not trainer.should_terminate
h(None) # delta == 0.3; counter == 1
assert not trainer.should_terminate
h(None) # delta == 0.3; counter == 2
assert trainer.should_terminate
def test_early_stopping_on_cumulative_delta():
scores = iter([0.0, 0.3, 0.6])
trainer = Engine(do_nothing_update_fn)
h = EarlyStopping(
patience=2, min_delta=0.4, cumulative_delta=True, score_function=lambda _: next(scores), trainer=trainer
)
assert not trainer.should_terminate
h(None) # counter == 0
assert not trainer.should_terminate
h(None) # delta == 0.3; counter == 1
assert not trainer.should_terminate
h(None) # delta == 0.6; counter == 0
assert not trainer.should_terminate
def test_simple_early_stopping_on_plateau():
def score_function(engine):
return 42
trainer = Engine(do_nothing_update_fn)
h = EarlyStopping(patience=1, score_function=score_function, trainer=trainer)
# Call 2 times and check if stopped
assert not trainer.should_terminate
h(None)
assert not trainer.should_terminate
h(None)
assert trainer.should_terminate
def test_simple_no_early_stopping():
scores = iter([1.0, 0.8, 1.2])
def score_function(engine):
return next(scores)
trainer = Engine(do_nothing_update_fn)
h = EarlyStopping(patience=2, score_function=score_function, trainer=trainer)
# Call 3 times and check if not stopped
assert not trainer.should_terminate
h(None)
h(None)
h(None)
assert not trainer.should_terminate
def test_with_engine_early_stopping():
class Counter(object):
def __init__(self, count=0):
self.count = count
n_epochs_counter = Counter()
scores = iter([1.0, 0.8, 1.2, 1.5, 0.9, 1.0, 0.99, 1.1, 0.9])
def score_function(engine):
return next(scores)
trainer = Engine(do_nothing_update_fn)
evaluator = Engine(do_nothing_update_fn)
early_stopping = EarlyStopping(patience=3, score_function=score_function, trainer=trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def evaluation(engine):
evaluator.run([0])
n_epochs_counter.count += 1
evaluator.add_event_handler(Events.COMPLETED, early_stopping)
trainer.run([0], max_epochs=10)
assert n_epochs_counter.count == 7
assert trainer.state.epoch == 7
def test_with_engine_early_stopping_on_plateau():
class Counter(object):
def __init__(self, count=0):
self.count = count
n_epochs_counter = Counter()
def score_function(engine):
return 0.047
trainer = Engine(do_nothing_update_fn)
evaluator = Engine(do_nothing_update_fn)
early_stopping = EarlyStopping(patience=4, score_function=score_function, trainer=trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def evaluation(engine):
evaluator.run([0])
n_epochs_counter.count += 1
evaluator.add_event_handler(Events.COMPLETED, early_stopping)
trainer.run([0], max_epochs=10)
assert n_epochs_counter.count == 5
assert trainer.state.epoch == 5
def test_with_engine_no_early_stopping():
class Counter(object):
def __init__(self, count=0):
self.count = count
n_epochs_counter = Counter()
scores = iter([1.0, 0.8, 1.2, 1.23, 0.9, 1.0, 1.1, 1.253, 1.26, 1.2])
def score_function(engine):
return next(scores)
trainer = Engine(do_nothing_update_fn)
evaluator = Engine(do_nothing_update_fn)
early_stopping = EarlyStopping(patience=5, score_function=score_function, trainer=trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def evaluation(engine):
evaluator.run([0])
n_epochs_counter.count += 1
evaluator.add_event_handler(Events.COMPLETED, early_stopping)
trainer.run([0], max_epochs=10)
assert n_epochs_counter.count == 10
assert trainer.state.epoch == 10
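# Hedged sketch (not part of the original suite): the typical EarlyStopping wiring where the score
# would come from a validation metric; patience counts non-improving evaluator runs. The helper
# name and the hard-coded score sequence are illustrative only.
def _example_early_stopping_wiring():
    scores = iter([0.1, 0.2, 0.2, 0.2, 0.2])
    trainer = Engine(do_nothing_update_fn)
    evaluator = Engine(do_nothing_update_fn)
    handler = EarlyStopping(patience=2, score_function=lambda e: next(scores), trainer=trainer)
    evaluator.add_event_handler(Events.COMPLETED, handler)

    @trainer.on(Events.EPOCH_COMPLETED)
    def run_validation(engine):
        evaluator.run([0])

    trainer.run([0], max_epochs=5)
    # best score 0.2 at epoch 2; two non-improving epochs exhaust patience and stop at epoch 4
    assert trainer.state.epoch == 4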
def _test_distrib_with_engine_early_stopping(device):
if device is None:
device = idist.device()
if isinstance(device, str):
device = torch.device(device)
torch.manual_seed(12)
class Counter(object):
def __init__(self, count=0):
self.count = count
n_epochs_counter = Counter()
scores = torch.tensor([1.0, 0.8, 1.2, 1.5, 0.9, 1.0, 0.99, 1.1, 0.9], requires_grad=False).to(device)
def score_function(engine):
i = trainer.state.epoch - 1
v = scores[i]
idist.all_reduce(v)
v /= idist.get_world_size()
return v.item()
trainer = Engine(do_nothing_update_fn)
evaluator = Engine(do_nothing_update_fn)
early_stopping = EarlyStopping(patience=3, score_function=score_function, trainer=trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def evaluation(engine):
evaluator.run([0])
n_epochs_counter.count += 1
evaluator.add_event_handler(Events.COMPLETED, early_stopping)
trainer.run([0], max_epochs=10)
assert trainer.state.epoch == 7
assert n_epochs_counter.count == 7
def _test_distrib_integration_engine_early_stopping(device):
from ignite.metrics import Accuracy
if device is None:
device = idist.device()
if isinstance(device, str):
device = torch.device(device)
metric_device = device
if device.type == "xla":
metric_device = "cpu"
rank = idist.get_rank()
ws = idist.get_world_size()
torch.manual_seed(12)
n_epochs = 10
n_iters = 20
y_preds = (
[torch.randint(0, 2, size=(n_iters, ws)).to(device)]
+ [torch.ones(n_iters, ws).to(device)]
+ [torch.randint(0, 2, size=(n_iters, ws)).to(device) for _ in range(n_epochs - 2)]
)
y_true = (
[torch.randint(0, 2, size=(n_iters, ws)).to(device)]
+ [torch.ones(n_iters, ws).to(device)]
+ [torch.randint(0, 2, size=(n_iters, ws)).to(device) for _ in range(n_epochs - 2)]
)
def update(engine, _):
e = trainer.state.epoch - 1
i = engine.state.iteration - 1
return y_preds[e][i, rank], y_true[e][i, rank]
evaluator = Engine(update)
acc = Accuracy(device=metric_device)
acc.attach(evaluator, "acc")
def score_function(engine):
return engine.state.metrics["acc"]
trainer = Engine(lambda e, b: None)
early_stopping = EarlyStopping(patience=3, score_function=score_function, trainer=trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def evaluation(engine):
data = list(range(n_iters))
evaluator.run(data=data)
evaluator.add_event_handler(Events.COMPLETED, early_stopping)
trainer.run([0], max_epochs=10)
assert trainer.state.epoch == 5
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_with_engine_early_stopping(device)
_test_distrib_integration_engine_early_stopping(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_with_engine_early_stopping(device)
_test_distrib_integration_engine_early_stopping(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_with_engine_early_stopping, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration_engine_early_stopping, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_with_engine_early_stopping(device)
_test_distrib_integration_engine_early_stopping(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_with_engine_early_stopping(device)
_test_distrib_integration_engine_early_stopping(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_with_engine_early_stopping(device)
_test_distrib_integration_engine_early_stopping(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_with_engine_early_stopping(device)
_test_distrib_integration_engine_early_stopping(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
from unittest.mock import MagicMock
from ignite.engine import Engine, Events
from ignite.handlers import global_step_from_engine
def test_global_step_from_engine():
iteration = 12
epoch = 23
trainer = Engine(lambda e, b: None)
trainer.state.iteration = iteration
trainer.state.epoch = epoch
gst = global_step_from_engine(trainer)
assert gst(MagicMock(), Events.EPOCH_COMPLETED) == epoch
gst = global_step_from_engine(trainer, custom_event_name=Events.ITERATION_COMPLETED)
assert gst(MagicMock(), Events.EPOCH_COMPLETED) == iteration
|
import os
from typing import Any, Callable, Union
import pytest
import torch
import torch.nn as nn
from torch.nn.parallel import DataParallel, DistributedDataParallel
import ignite.distributed as idist
from ignite.engine import Engine, Events
from ignite.handlers import EMAHandler
def _get_dummy_model() -> nn.Module:
model = nn.Linear(2, 1, bias=False)
model.weight.data.fill_(1)
return model
def _unwrap_model(model):
if isinstance(model, (DataParallel, DistributedDataParallel)):
return model.module
else:
return model
@pytest.fixture(scope="module")
def get_dummy_model():
"""Returns a function since the fixture is needed multiple times in a single test"""
yield _get_dummy_model
def _get_dummy_step_fn(model: Union[nn.Module, DataParallel, DistributedDataParallel]) -> Callable:
"""Get a dummy step function, given model is a (wrapper of) dummy model returned from _get_dummy_model"""
def step_fn(engine, batch):
"""Increment the weight by 1 at each iteration"""
_unwrap_model(model).weight.data.add_(1)
return 0
return step_fn
@pytest.mark.parametrize("momentum", [-1, 2])
def test_ema_invalid_momentum(get_dummy_model, momentum):
with pytest.raises(ValueError, match="Invalid momentum"):
EMAHandler(get_dummy_model(), momentum=momentum)
def test_has_momentum_scheduler(get_dummy_model):
"""Test the handler has attribute `momentum_scheduler` and `_momentum_lambda_obj`"""
momentum_warmup = 0.0
warmup_iters = 10
ema_handler = EMAHandler(get_dummy_model(), momentum_warmup=momentum_warmup, warmup_iters=warmup_iters)
assert hasattr(ema_handler, "momentum_scheduler")
assert hasattr(ema_handler, "_momentum_lambda_obj")
def test_ema_warmup_func(get_dummy_model):
"""Test the built-in linear warmup function for the EMA momentum"""
momentum = 0.5
momentum_warmup_1 = 0.0
momentum_warmup_2 = 1.0
warmup_iters = 5
def check_ema_momentum(engine: Engine, momentum_warmup, final_momentum, warmup_iters):
if engine.state.iteration == 1:
assert engine.state.ema_momentum == momentum_warmup
elif engine.state.iteration >= 1 + warmup_iters:
assert engine.state.ema_momentum == final_momentum
else:
min_momentum = min(momentum, momentum_warmup)
max_momentum = max(momentum, momentum_warmup)
assert min_momentum <= engine.state.ema_momentum <= max_momentum
# momentum_warmup < momentum
model_1 = get_dummy_model()
engine_1 = Engine(_get_dummy_step_fn(model_1))
ema_handler_1 = EMAHandler(model_1, momentum, momentum_warmup_1, warmup_iters)
ema_handler_1.attach(engine_1)
engine_1.add_event_handler(
Events.ITERATION_COMPLETED, check_ema_momentum, momentum_warmup_1, momentum, warmup_iters
)
engine_1.run(range(10))
# momentum_warmup > momentum
model_2 = get_dummy_model()
engine_2 = Engine(_get_dummy_step_fn(model_2))
ema_handler_2 = EMAHandler(model_2, momentum, momentum_warmup_2, warmup_iters)
ema_handler_2.attach(engine_2)
engine_2.add_event_handler(
Events.ITERATION_COMPLETED, check_ema_momentum, momentum_warmup_2, momentum, warmup_iters
)
engine_2.run(range(10))
def test_ema_invalid_model():
with pytest.raises(ValueError, match="model should be an instance of nn.Module or its subclasses"):
model = "Invalid Model"
EMAHandler(model) # type: ignore
@pytest.mark.distributed
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_ema_ema_model_on_cuda(get_dummy_model):
"""Test if ema_handler.ema_model is nn.Module or nn.DataParallel and under eval mode"""
model = get_dummy_model().to(idist.device())
model = idist.auto_model(model)
ema_handler = EMAHandler(model)
ema_model = ema_handler.ema_model
assert not ema_model.training
if isinstance(model, DataParallel):
assert isinstance(ema_model, DataParallel)
else:
assert (
isinstance(ema_model, nn.Module)
and (not isinstance(ema_model, DataParallel))
and (not isinstance(ema_model, DistributedDataParallel))
)
def test_ema_load_state_dict(get_dummy_model):
model_1 = get_dummy_model()
model_1.weight.data.fill_(2)
state_dict_1 = model_1.state_dict()
model_2 = get_dummy_model()
ema_handler = EMAHandler(model_2)
ema_model = ema_handler.ema_model
ema_model.load_state_dict(state_dict_1)
assert ema_model.weight.data.allclose(model_1.weight.data)
def test_ema_get_const_momentum(get_dummy_model):
"""Test if momentum retrieved from the engine is constant and equal to the handler's momentum"""
model = get_dummy_model()
step_fn = _get_dummy_step_fn(model)
engine = Engine(step_fn)
def assert_const_momentum(engine: Engine, const_momentum):
assert engine.state.ema_momentum == const_momentum
ema_handler = EMAHandler(model, momentum=0.002)
ema_handler.attach(engine)
engine.add_event_handler(Events.ITERATION_COMPLETED, assert_const_momentum, ema_handler.momentum)
engine.run(range(10))
@pytest.mark.parametrize("handle_buffers", ["copy", "update", "ema_train", "invalid"])
def test_ema_buffer(handle_buffers):
"""Test if the tensors in buffer are also correctly updated"""
model = nn.BatchNorm2d(2)
model.running_mean.data.fill_(1.5)
model.running_var.data.fill_(1.5)
# manually register a buffer to test if it will be correctly updated
model.register_buffer("dummy_buffer", tensor=torch.tensor(1.0, dtype=torch.float32))
if handle_buffers == "invalid":
with pytest.raises(ValueError, match="handle_buffers can only"):
_ = EMAHandler(model, momentum=0.5, handle_buffers=handle_buffers)
else:
ema_handler = EMAHandler(model, momentum=0.5, handle_buffers=handle_buffers)
def _bn_step_fn(engine, batch):
x = torch.rand(4, 2, 32, 32)
_ = model(x)
# manually increment the dummy_buffer at every step
model.dummy_buffer += 1.0
return 1
engine = Engine(_bn_step_fn)
ema_handler.attach(engine)
ema_model = ema_handler.ema_model
if handle_buffers == "ema_train":
assert ema_model.training
else:
assert not ema_model.training
@engine.on(Events.ITERATION_COMPLETED)
def check_buffers():
if handle_buffers == "update":
# the buffers with torch.int64 data type should be directly copied
assert ema_model.num_batches_tracked.allclose(model.num_batches_tracked)
                # buffers with a floating point dtype will be EMA-updated rather than copied
assert not ema_model.dummy_buffer.allclose(model.dummy_buffer)
assert not ema_model.running_mean.allclose(model.running_mean)
assert not ema_model.running_var.allclose(model.running_var)
elif handle_buffers == "copy":
# the buffers with torch.int64 data type should be directly copied
assert ema_model.num_batches_tracked.allclose(model.num_batches_tracked)
assert ema_model.dummy_buffer.allclose(model.dummy_buffer)
assert ema_model.running_mean.allclose(model.running_mean)
assert ema_model.running_var.allclose(model.running_var)
else:
# buffers will not be copied or EMA updated
assert ema_model.num_batches_tracked.allclose(torch.tensor(0, dtype=torch.int64))
assert ema_model.dummy_buffer.allclose(torch.tensor(1.0, dtype=torch.float32))
# engine will run 4 iterations
engine.run([0, 1], max_epochs=2)
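        # Expected values sketch: dummy_buffer starts at 1.0 in both models and grows to 2, 3, 4, 5 in the
        # source model; with momentum=0.5 each EMA update averages the EMA buffer with the source buffer,
        # giving 1.5 -> 2.25 -> 3.125 -> 4.0625 over the 4 iterations (hence the 4.0625 below).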
if handle_buffers == "update":
assert ema_model.num_batches_tracked.allclose(model.num_batches_tracked)
assert ema_model.dummy_buffer.allclose(torch.tensor(4.0625, dtype=torch.float32))
assert not ema_model.dummy_buffer.allclose(model.dummy_buffer)
assert not ema_model.running_mean.allclose(model.running_mean)
assert not ema_model.running_var.allclose(model.running_var)
elif handle_buffers == "copy":
assert ema_model.num_batches_tracked.allclose(model.num_batches_tracked)
assert ema_model.dummy_buffer.allclose(model.dummy_buffer)
assert ema_model.running_mean.allclose(model.running_mean)
assert ema_model.running_var.allclose(model.running_var)
else:
# buffers will not be copied or EMA updated
assert ema_model.num_batches_tracked.allclose(torch.tensor(0, dtype=torch.int64))
assert ema_model.dummy_buffer.allclose(torch.tensor(1.0, dtype=torch.float32))
def test_ema_two_handlers(get_dummy_model):
"""Test when two EMA handlers are attached to a trainer"""
model_1 = get_dummy_model()
ema_handler_1 = EMAHandler(model_1, momentum=0.5)
model_2 = get_dummy_model()
ema_handler_2 = EMAHandler(model_2, momentum=0.5)
def _step_fn(engine: Engine, batch: Any):
model_1.weight.data.add_(1)
model_2.weight.data.add_(1)
return 0
engine = Engine(_step_fn)
assert not hasattr(engine.state, "ema_momentum_1")
    # handler_1 updates the EMA model of model_1 every iteration
ema_handler_1.attach(engine, "ema_momentum_1", event=Events.ITERATION_COMPLETED)
assert hasattr(engine.state, "ema_momentum_1")
    # handler_2 updates the EMA model of model_2 every 2 iterations
ema_handler_2.attach(engine, "ema_momentum_2", event=Events.ITERATION_COMPLETED(every=2))
assert hasattr(engine.state, "ema_momentum_2")
# engine will run 4 iterations
engine.run(range(2), max_epochs=2)
# explicitly cast to float32 to avoid test failure on XLA devices
ema_weight_1 = ema_handler_1.ema_model.weight.data.to(torch.float32)
ema_weight_2 = ema_handler_2.ema_model.weight.data.to(torch.float32)
assert ema_weight_1.allclose(ema_weight_1.new_full((1, 2), 4.0625))
assert ema_weight_2.allclose(ema_weight_2.new_full((1, 2), 3.5))
assert engine.state.ema_momentum_1 == 0.5
assert engine.state.ema_momentum_2 == 0.5
model_3 = get_dummy_model()
ema_handler_3 = EMAHandler(model_3)
with pytest.warns(UserWarning, match="Attribute 'ema_momentum_1' already exists"):
ema_handler_3.attach(engine, name="ema_momentum_1")
def _test_ema_final_weight(model, device=None, ddp=False, interval=1):
"""Test if final smoothed weights are correct"""
if device is None:
        # let Horovod decide the device
device = idist.device()
if isinstance(device, str):
device = torch.device(device)
model = model.to(device)
if ddp:
model = idist.auto_model(model)
step_fn = _get_dummy_step_fn(model)
engine = Engine(step_fn)
ema_handler = EMAHandler(model, momentum=0.5)
ema_handler.attach(engine, "model", event=Events.ITERATION_COMPLETED(every=interval))
# engine will run 4 iterations
engine.run(range(2), max_epochs=2)
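    # Expected values sketch: the dummy model weight starts at 1.0 and is incremented by 1 per iteration,
    # ending at 5.0. With momentum=0.5 each EMA update averages the EMA weight with the model weight, so
    # updating every iteration gives 1.5 -> 2.25 -> 3.125 -> 4.0625, and updating every 2nd iteration
    # (iterations 2 and 4) gives 2.0 -> 3.5.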
# ema_model and model can be DP or DDP
# explicitly cast to float32 to avoid test failure on XLA devices
ema_weight = _unwrap_model(ema_handler.ema_model).weight.data.to(torch.float32)
model_weight = _unwrap_model(model).weight.data.to(torch.float32)
assert ema_weight.device == device
assert model_weight.device == device
if interval == 1:
assert ema_weight.allclose(ema_weight.new_full((1, 2), 4.0625))
elif interval == 2:
assert ema_weight.allclose(ema_weight.new_full((1, 2), 3.5))
else:
pass
assert model_weight.allclose(model_weight.new_full((1, 2), 5.0))
@pytest.mark.parametrize("interval", [1, 2])
def test_ema_final_weight_cpu(get_dummy_model, interval):
device = torch.device("cpu")
_test_ema_final_weight(get_dummy_model(), device=device, ddp=False, interval=interval)
@pytest.mark.parametrize("interval", [1, 2])
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_ema_final_weight_cuda(get_dummy_model, interval):
device = torch.device("cuda:0")
_test_ema_final_weight(get_dummy_model(), device=device, ddp=False, interval=interval)
@pytest.mark.distributed
@pytest.mark.parametrize("interval", [1, 2])
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_ema_final_weight_distrib_nccl_gpu(get_dummy_model, distributed_context_single_node_nccl, interval):
device = idist.device()
_test_ema_final_weight(get_dummy_model(), device=device, ddp=True, interval=interval)
@pytest.mark.distributed
@pytest.mark.parametrize("interval", [1, 2])
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_ema_final_weight_distrib_gloo_cpu_or_gpu(get_dummy_model, distributed_context_single_node_gloo, interval):
device = idist.device()
_test_ema_final_weight(get_dummy_model(), device=device, ddp=True, interval=interval)
@pytest.mark.distributed
@pytest.mark.parametrize("interval", [1, 2])
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_ema_final_weight_distrib_hvd(get_dummy_model, gloo_hvd_executor, interval):
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
    # pass device=None to the executor. Unlike the other distributed tests, where the processes are
    # already spawned by the context, the processes here are spawned explicitly by the executor, so we
    # pass None to the function and call idist.device() inside the function to get the corresponding device
gloo_hvd_executor(_test_ema_final_weight, (get_dummy_model(), None, True, interval), np=nproc, do_init=True)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_ema_final_weight_distrib_single_device_xla(get_dummy_model):
device = idist.device()
_test_ema_final_weight(get_dummy_model(), device=device, ddp=True)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_ema_final_weight_distrib_xla_nprocs(get_dummy_model, xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
def _test_ema_final_weight_xla_nprocs(index):
device = idist.device()
_test_ema_final_weight(get_dummy_model(), device=device, ddp=True)
xmp_executor(_test_ema_final_weight_xla_nprocs, args=(), nprocs=n)
@pytest.mark.multinode_distributed
@pytest.mark.parametrize("interval", [1, 2])
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_ema_final_weight_distrib_multinode_gloo_cpu_or_gpu(
get_dummy_model, distributed_context_multi_node_gloo, interval
):
device = idist.device()
_test_ema_final_weight(get_dummy_model(), device=device, ddp=True, interval=interval)
@pytest.mark.multinode_distributed
@pytest.mark.parametrize("interval", [1, 2])
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_ema_final_weight_distrib_multinode_nccl_gpu(get_dummy_model, distributed_context_multi_node_nccl, interval):
device = idist.device()
_test_ema_final_weight(get_dummy_model(), device=device, ddp=True, interval=interval)
|
import re
from bisect import bisect_right
from pathlib import Path
from unittest.mock import patch
import pytest
import torch
import torch.nn as nn
from packaging.version import Version
from ignite.engine import Engine, Events
from ignite.handlers.state_param_scheduler import (
ExpStateScheduler,
LambdaStateScheduler,
MultiStepStateScheduler,
PiecewiseLinearStateScheduler,
StepStateScheduler,
)
config1 = (3, [(2, 0), (5, 10)], True, [0.0, 0.0, 3.3333333333333335])
expected_hist2 = [0.0] * 10 + [float(i) for i in range(1, 11)] + [10.0] * 10
config2 = (30, [(10, 0), (20, 10)], True, expected_hist2)
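# Expected histories above (sketch): PiecewiseLinearStateScheduler keeps the first milestone value before the
# first milestone, interpolates linearly between milestones, and keeps the last value afterwards; e.g. for
# config1 epoch 3 yields 0 + (3 - 2) / (5 - 2) * 10 = 10 / 3.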
config3 = (
PiecewiseLinearStateScheduler,
{"param_name": "linear_scheduled_param", "milestones_values": [(3, 12), (5, 10)], "create_new": True},
)
config4 = (
ExpStateScheduler,
{"param_name": "exp_scheduled_param", "initial_value": 10, "gamma": 0.99, "create_new": True},
)
config5 = (
MultiStepStateScheduler,
{
"param_name": "multistep_scheduled_param",
"initial_value": 10,
"gamma": 0.99,
"milestones": [3, 6],
"create_new": True,
},
)
if Version(torch.__version__) < Version("1.9.0"):
torch_testing_assert_close = torch.testing.assert_allclose
else:
torch_testing_assert_close = torch.testing.assert_close
class LambdaState:
def __init__(self, initial_value, gamma):
self.initial_value = initial_value
self.gamma = gamma
def __call__(self, event_index):
return self.initial_value * self.gamma ** (event_index % 9)
config6 = (
LambdaStateScheduler,
{
"param_name": "custom_scheduled_param",
"lambda_obj": LambdaState(initial_value=10, gamma=0.99),
"create_new": True,
},
)
config7 = (
StepStateScheduler,
{"param_name": "step_scheduled_param", "initial_value": 10, "gamma": 0.99, "step_size": 5, "create_new": True},
)
@pytest.mark.parametrize("max_epochs, milestones_values, save_history, expected_param_history", [config1, config2])
def test_pwlinear_scheduler_linear_increase_history(
max_epochs, milestones_values, save_history, expected_param_history
):
# Testing linear increase
engine = Engine(lambda e, b: None)
pw_linear_step_parameter_scheduler = PiecewiseLinearStateScheduler(
param_name="pwlinear_scheduled_param",
milestones_values=milestones_values,
save_history=save_history,
create_new=True,
)
pw_linear_step_parameter_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=max_epochs)
assert hasattr(engine.state, "param_history")
state_param = engine.state.param_history["pwlinear_scheduled_param"]
assert len(state_param) == len(expected_param_history)
assert state_param == expected_param_history
state_dict = pw_linear_step_parameter_scheduler.state_dict()
pw_linear_step_parameter_scheduler.load_state_dict(state_dict)
@pytest.mark.parametrize("max_epochs, milestones_values", [(3, [(3, 12), (5, 10)]), (5, [(10, 12), (20, 10)])])
def test_pwlinear_scheduler_step_constant(max_epochs, milestones_values):
# Testing step_constant
engine = Engine(lambda e, b: None)
linear_state_parameter_scheduler = PiecewiseLinearStateScheduler(
param_name="pwlinear_scheduled_param", milestones_values=milestones_values, create_new=True
)
linear_state_parameter_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=max_epochs)
torch_testing_assert_close(getattr(engine.state, "pwlinear_scheduled_param"), float(milestones_values[0][1]))
state_dict = linear_state_parameter_scheduler.state_dict()
linear_state_parameter_scheduler.load_state_dict(state_dict)
@pytest.mark.parametrize(
"max_epochs, milestones_values, expected_val",
[(2, [(0, 0), (3, 10)], 6.666666666666667), (10, [(0, 0), (20, 10)], 5.0)],
)
def test_pwlinear_scheduler_linear_increase(max_epochs, milestones_values, expected_val):
# Testing linear increase
engine = Engine(lambda e, b: None)
linear_state_parameter_scheduler = PiecewiseLinearStateScheduler(
param_name="pwlinear_scheduled_param", milestones_values=milestones_values, create_new=True
)
linear_state_parameter_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=max_epochs)
torch_testing_assert_close(getattr(engine.state, "pwlinear_scheduled_param"), expected_val, atol=0.001, rtol=0.0)
state_dict = linear_state_parameter_scheduler.state_dict()
linear_state_parameter_scheduler.load_state_dict(state_dict)
@pytest.mark.parametrize("max_epochs, milestones_values,", [(3, [(0, 0), (3, 10)]), (40, [(0, 0), (20, 10)])])
def test_pwlinear_scheduler_max_value(max_epochs, milestones_values):
# Testing max_value
engine = Engine(lambda e, b: None)
linear_state_parameter_scheduler = PiecewiseLinearStateScheduler(
param_name="linear_scheduled_param", milestones_values=milestones_values, create_new=True
)
linear_state_parameter_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=max_epochs)
torch_testing_assert_close(getattr(engine.state, "linear_scheduled_param"), float(milestones_values[-1][1]))
state_dict = linear_state_parameter_scheduler.state_dict()
linear_state_parameter_scheduler.load_state_dict(state_dict)
def test_piecewiselinear_asserts():
with pytest.raises(TypeError, match=r"Argument milestones_values should be a list or tuple"):
PiecewiseLinearStateScheduler(param_name="linear_scheduled_param", milestones_values=None)
with pytest.raises(ValueError, match=r"Argument milestones_values should be with at least one value"):
PiecewiseLinearStateScheduler(param_name="linear_scheduled_param", milestones_values=[])
with pytest.raises(ValueError, match=r"Argument milestones_values should be a list of pairs"):
PiecewiseLinearStateScheduler(param_name="linear_scheduled_param", milestones_values=[(0.5,)])
with pytest.raises(ValueError, match=r"Argument milestones_values should be a list of pairs"):
PiecewiseLinearStateScheduler(param_name="linear_scheduled_param", milestones_values=[(10, 0.5), (0.6,)])
with pytest.raises(ValueError, match=r"Milestones should be increasing integers"):
PiecewiseLinearStateScheduler(param_name="linear_scheduled_param", milestones_values=[(10, 0.5), (5, 0.6)])
with pytest.raises(TypeError, match=r"Value of a milestone should be integer"):
PiecewiseLinearStateScheduler(param_name="linear_scheduled_param", milestones_values=[(0.5, 1)])
@pytest.mark.parametrize("max_epochs, initial_value, gamma", [(3, 10, 0.99), (40, 5, 0.98)])
def test_exponential_scheduler(max_epochs, initial_value, gamma):
engine = Engine(lambda e, b: None)
exp_state_parameter_scheduler = ExpStateScheduler(
param_name="exp_scheduled_param", initial_value=initial_value, gamma=gamma, create_new=True
)
exp_state_parameter_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=max_epochs)
torch_testing_assert_close(getattr(engine.state, "exp_scheduled_param"), initial_value * gamma**max_epochs)
state_dict = exp_state_parameter_scheduler.state_dict()
exp_state_parameter_scheduler.load_state_dict(state_dict)
@pytest.mark.parametrize("max_epochs, initial_value, gamma, step_size", [(3, 10, 0.99, 5), (40, 5, 0.98, 22)])
def test_step_scheduler(max_epochs, initial_value, gamma, step_size):
engine = Engine(lambda e, b: None)
step_state_parameter_scheduler = StepStateScheduler(
param_name="step_scheduled_param",
initial_value=initial_value,
gamma=gamma,
step_size=step_size,
create_new=True,
)
step_state_parameter_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=max_epochs)
torch_testing_assert_close(
getattr(engine.state, "step_scheduled_param"), initial_value * gamma ** (max_epochs // step_size)
)
state_dict = step_state_parameter_scheduler.state_dict()
step_state_parameter_scheduler.load_state_dict(state_dict)
@pytest.mark.parametrize(
"max_epochs, initial_value, gamma, milestones", [(3, 10, 0.99, [3, 6]), (40, 5, 0.98, [3, 6, 9, 10, 11])]
)
def test_multistep_scheduler(max_epochs, initial_value, gamma, milestones):
engine = Engine(lambda e, b: None)
multi_step_state_parameter_scheduler = MultiStepStateScheduler(
param_name="multistep_scheduled_param",
initial_value=initial_value,
gamma=gamma,
milestones=milestones,
create_new=True,
)
multi_step_state_parameter_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=max_epochs)
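    # bisect_right(milestones, max_epochs) counts how many milestones have been passed; each passed
    # milestone multiplies the value by gamma once, hence the closed-form expectation below.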
torch_testing_assert_close(
getattr(engine.state, "multistep_scheduled_param"),
initial_value * gamma ** bisect_right(milestones, max_epochs),
)
state_dict = multi_step_state_parameter_scheduler.state_dict()
multi_step_state_parameter_scheduler.load_state_dict(state_dict)
def test_custom_scheduler():
engine = Engine(lambda e, b: None)
class LambdaState:
def __init__(self, initial_value, gamma):
self.initial_value = initial_value
self.gamma = gamma
def __call__(self, event_index):
return self.initial_value * self.gamma ** (event_index % 9)
lambda_state_parameter_scheduler = LambdaStateScheduler(
param_name="custom_scheduled_param", lambda_obj=LambdaState(initial_value=10, gamma=0.99), create_new=True
)
lambda_state_parameter_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=2)
torch_testing_assert_close(
getattr(engine.state, "custom_scheduled_param"), LambdaState(initial_value=10, gamma=0.99)(2)
)
engine.run([0] * 8, max_epochs=20)
torch_testing_assert_close(
getattr(engine.state, "custom_scheduled_param"), LambdaState(initial_value=10, gamma=0.99)(20)
)
state_dict = lambda_state_parameter_scheduler.state_dict()
lambda_state_parameter_scheduler.load_state_dict(state_dict)
def test_custom_scheduler_asserts():
class LambdaState:
def __init__(self, initial_value, gamma):
self.initial_value = initial_value
self.gamma = gamma
with pytest.raises(ValueError, match=r"Expected lambda_obj to be callable."):
lambda_state_parameter_scheduler = LambdaStateScheduler(
param_name="custom_scheduled_param", lambda_obj=LambdaState(initial_value=10, gamma=0.99), create_new=True
)
@pytest.mark.parametrize("scheduler_cls, scheduler_kwargs", [config3, config4, config5, config6])
def test_simulate_and_plot_values(scheduler_cls, scheduler_kwargs):
import matplotlib
matplotlib.use("Agg")
event = Events.EPOCH_COMPLETED
max_epochs = 2
data = [0] * 10
scheduler = scheduler_cls(**scheduler_kwargs)
trainer = Engine(lambda engine, batch: None)
scheduler.attach(trainer, event)
trainer.run(data, max_epochs=max_epochs)
# launch plot values
scheduler_cls.plot_values(num_events=len(data) * max_epochs, **scheduler_kwargs)
@pytest.mark.parametrize("save_history", [False, True])
@pytest.mark.parametrize("scheduler_cls, scheduler_kwargs", [config3, config4, config5, config6])
def test_simulate_values(scheduler_cls, scheduler_kwargs, save_history):
max_epochs = 2
data = [0] * 10
scheduler_kwargs["save_history"] = save_history
scheduler_cls.simulate_values(num_events=len(data) * max_epochs, **scheduler_kwargs)
def test_torch_save_load(dirname):
lambda_state_parameter_scheduler = LambdaStateScheduler(
param_name="custom_scheduled_param", lambda_obj=LambdaState(initial_value=10, gamma=0.99), create_new=True
)
filepath = Path(dirname) / "dummy_lambda_state_parameter_scheduler.pt"
torch.save(lambda_state_parameter_scheduler, filepath)
loaded_lambda_state_parameter_scheduler = torch.load(filepath)
engine1 = Engine(lambda e, b: None)
lambda_state_parameter_scheduler.attach(engine1, Events.EPOCH_COMPLETED)
engine1.run([0] * 8, max_epochs=2)
torch_testing_assert_close(
getattr(engine1.state, "custom_scheduled_param"), LambdaState(initial_value=10, gamma=0.99)(2)
)
engine2 = Engine(lambda e, b: None)
loaded_lambda_state_parameter_scheduler.attach(engine2, Events.EPOCH_COMPLETED)
engine2.run([0] * 8, max_epochs=2)
torch_testing_assert_close(
getattr(engine2.state, "custom_scheduled_param"), LambdaState(initial_value=10, gamma=0.99)(2)
)
torch_testing_assert_close(
getattr(engine1.state, "custom_scheduled_param"), getattr(engine2.state, "custom_scheduled_param")
)
def test_simulate_and_plot_values_no_matplotlib():
with pytest.raises(ModuleNotFoundError, match=r"This method requires matplotlib to be installed."):
with patch.dict("sys.modules", {"matplotlib.pyplot": None}):
event = Events.EPOCH_COMPLETED
max_epochs = 2
data = [0] * 10
kwargs = {
"param_name": "multistep_scheduled_param",
"initial_value": 10,
"gamma": 0.99,
"milestones": [3, 6],
"create_new": True,
}
scheduler = MultiStepStateScheduler(**kwargs)
trainer = Engine(lambda engine, batch: None)
scheduler.attach(trainer, event)
trainer.run(data, max_epochs=max_epochs)
# launch plot values
MultiStepStateScheduler.plot_values(num_events=len(data) * max_epochs, **kwargs)
def test_multiple_scheduler_with_save_history():
engine_multiple_schedulers = Engine(lambda e, b: None)
configs = [config3, config4, config5, config6, config7]
for scheduler, config in configs:
if "save_history" in config:
del config["save_history"]
_scheduler = scheduler(**config, save_history=True)
_scheduler.attach(engine_multiple_schedulers)
engine_multiple_schedulers.run([0] * 8, max_epochs=2)
for scheduler, config in configs:
engine = Engine(lambda e, b: None)
_scheduler = scheduler(**config, save_history=True)
_scheduler.attach(engine)
engine.run([0] * 8, max_epochs=2)
torch_testing_assert_close(
engine_multiple_schedulers.state.param_history[config["param_name"]],
engine.state.param_history[config["param_name"]],
)
def test_docstring_examples():
# LambdaStateScheduler
engine = Engine(lambda e, b: None)
class LambdaState:
def __init__(self, initial_value, gamma):
self.initial_value = initial_value
self.gamma = gamma
def __call__(self, event_index):
return self.initial_value * self.gamma ** (event_index % 9)
param_scheduler = LambdaStateScheduler(param_name="param", lambda_obj=LambdaState(10, 0.99), create_new=True)
param_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=2)
# PiecewiseLinearStateScheduler
engine = Engine(lambda e, b: None)
param_scheduler = PiecewiseLinearStateScheduler(
param_name="param", milestones_values=[(10, 0.5), (20, 0.45), (21, 0.3), (30, 0.1), (40, 0.1)], create_new=True
)
param_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=40)
# ExpStateScheduler
engine = Engine(lambda e, b: None)
param_scheduler = ExpStateScheduler(param_name="param", initial_value=10, gamma=0.99, create_new=True)
param_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=2)
# StepStateScheduler
engine = Engine(lambda e, b: None)
param_scheduler = StepStateScheduler(param_name="param", initial_value=10, gamma=0.99, step_size=5, create_new=True)
param_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=10)
# MultiStepStateScheduler
engine = Engine(lambda e, b: None)
param_scheduler = MultiStepStateScheduler(
param_name="param", initial_value=10, gamma=0.99, milestones=[3, 6], create_new=True
)
param_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=10)
def test_param_scheduler_attach_exception():
trainer = Engine(lambda e, b: None)
param_name = "state_param"
setattr(trainer.state, param_name, None)
save_history = True
create_new = True
param_scheduler = PiecewiseLinearStateScheduler(
param_name=param_name,
milestones_values=[(0, 0.0), (10, 0.999)],
save_history=save_history,
create_new=create_new,
)
with pytest.raises(
ValueError,
match=r"Attribute '" + re.escape(param_name) + "' already exists in the engine.state. "
r"This may be a conflict between multiple handlers. "
r"Please choose another name.",
):
param_scheduler.attach(trainer, Events.ITERATION_COMPLETED)
def test_param_scheduler_attach_warning():
trainer = Engine(lambda e, b: None)
param_name = "state_param"
save_history = True
create_new = False
param_scheduler = PiecewiseLinearStateScheduler(
param_name=param_name,
milestones_values=[(0, 0.0), (10, 0.999)],
save_history=save_history,
create_new=create_new,
)
with pytest.warns(
UserWarning,
match=r"Attribute '" + re.escape(param_name) + "' is not defined in the engine.state. "
r"PiecewiseLinearStateScheduler will create it. Remove this warning by setting create_new=True.",
):
param_scheduler.attach(trainer, Events.ITERATION_COMPLETED)
def test_param_scheduler_with_ema_handler():
from ignite.handlers import EMAHandler
model = nn.Linear(2, 1)
trainer = Engine(lambda e, b: model(b))
data = torch.rand(100, 2)
param_name = "ema_decay"
ema_handler = EMAHandler(model)
ema_handler.attach(trainer, name=param_name, event=Events.ITERATION_COMPLETED)
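    # Smoke test: the state scheduler below writes to engine.state.ema_decay, the same attribute name the
    # EMA handler was attached with, so the scheduled values are expected to drive the EMA momentum; the
    # run should complete without conflicts.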
ema_decay_scheduler = PiecewiseLinearStateScheduler(
param_name=param_name, milestones_values=[(0, 0.0), (10, 0.999)], save_history=True
)
ema_decay_scheduler.attach(trainer, Events.ITERATION_COMPLETED)
trainer.run(data, max_epochs=20)
|
import time
import pytest
from ignite.engine import Engine, Events
from ignite.handlers import TimeLimit
def test_arg_validation():
with pytest.raises(ValueError, match=r"Argument limit_sec should be a positive integer."):
TimeLimit(limit_sec=-5)
with pytest.raises(TypeError, match=r"Argument limit_sec should be an integer."):
TimeLimit(limit_sec="abc")
def _train_func(engine, batch):
time.sleep(1)
@pytest.mark.parametrize("n_iters, limit", [(20, 10), (5, 10)])
def test_terminate_on_time_limit(n_iters, limit):
started = time.time()
trainer = Engine(_train_func)
@trainer.on(Events.TERMINATE)
def _():
trainer.state.is_terminated = True
trainer.add_event_handler(Events.ITERATION_COMPLETED, TimeLimit(limit))
trainer.state.is_terminated = False
trainer.run(range(n_iters))
elapsed = round(time.time() - started)
assert elapsed <= limit + 1
assert trainer.state.is_terminated == (n_iters > limit)
|
# Needed to collect coverage data
|
from unittest.mock import MagicMock, patch
import numpy as np
import pytest
import torch
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, ExponentialLR, StepLR
from ignite.engine import Engine, Events
from ignite.handlers.param_scheduler import (
ConcatScheduler,
CosineAnnealingScheduler,
create_lr_scheduler_with_warmup,
LinearCyclicalScheduler,
LRScheduler,
ParamGroupScheduler,
ParamScheduler,
PiecewiseLinear,
ReduceLROnPlateauScheduler,
)
from tests.ignite.contrib.handlers import MockFP16DeepSpeedZeroOptimizer
try:
from torch.optim.lr_scheduler import MultiplicativeLR
except ImportError:
has_multiplicative_lr = False
else:
from packaging.version import Version
# https://github.com/pytorch/pytorch/issues/32756
has_multiplicative_lr = Version(torch.__version__) >= Version("1.5.0")
class FakeParamScheduler(ParamScheduler):
def get_param(self):
return [0]
def test_param_scheduler_asserts():
t1 = torch.zeros([1], requires_grad=True)
t2 = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([{"params": t1, "lr": 0.1}, {"params": t2, "lr": 0.1}])
lr_scheduler = FakeParamScheduler(optimizer, "lr")
with pytest.raises(ValueError, match=r"size of value is different than optimizer_param_groups"):
lr_scheduler(None)
with pytest.raises(TypeError, match=r"Argument state_dict should be a dictionary, but given"):
lr_scheduler.load_state_dict(None)
with pytest.raises(ValueError, match=r"Required state attribute 'event_index' is absent in provided state_dict"):
lr_scheduler.load_state_dict({})
with pytest.raises(TypeError, match=r"Argument optimizer should be torch.optim.Optimizer"):
FakeParamScheduler({}, "lr")
def test_linear_scheduler():
with pytest.raises(TypeError, match=r"Argument optimizer should be torch.optim.Optimizer"):
LinearCyclicalScheduler({}, "lr", 1, 0, cycle_size=0)
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0.0)
with pytest.raises(ValueError, match=r"Argument cycle_size should be positive and larger than 1"):
LinearCyclicalScheduler(optimizer, "lr", 1, 0, cycle_size=0)
with pytest.raises(ValueError, match=r"Argument cycle_size should be positive and larger than 1"):
LinearCyclicalScheduler(optimizer, "lr", 1, 0, cycle_size=1)
scheduler = LinearCyclicalScheduler(optimizer, "lr", 1, 0, 10)
state_dict = scheduler.state_dict()
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
for _ in range(2):
lrs = []
trainer.run([0] * 9, max_epochs=2)
assert lrs == list(
map(
pytest.approx,
[
# Cycle 1
1.0,
0.8,
0.6,
0.4,
0.2,
0.0,
0.2,
0.4,
0.6,
0.8,
# Cycle 2
1.0,
0.8,
0.6,
0.4,
0.2,
0.0,
0.2,
0.4, # 0.6, 0.8,
],
)
)
scheduler.load_state_dict(state_dict)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler = LinearCyclicalScheduler(optimizer, "lr", 1, 0, 10, cycle_mult=2)
state_dict = scheduler.state_dict()
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
for _ in range(2):
lrs = []
trainer.run([0] * 10, max_epochs=3)
assert lrs == list(
map(
pytest.approx,
[
# Cycle 1
1.0,
0.8,
0.6,
0.4,
0.2,
0.0,
0.2,
0.4,
0.6,
0.8,
# Cycle 2
1.0,
0.9,
0.8,
0.7,
0.6,
0.5,
0.4,
0.3,
0.2,
0.1,
0.0,
0.1,
0.2,
0.3,
0.4,
0.5,
0.6,
0.7,
0.8,
0.9,
],
)
)
scheduler.load_state_dict(state_dict)
# With float cycle_size
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler = LinearCyclicalScheduler(
optimizer, "lr", start_value=1.2, end_value=0.2, cycle_size=10.00000012, cycle_mult=1.0
)
state_dict = scheduler.state_dict()
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
for _ in range(2):
lrs = []
trainer.run([0] * 9, max_epochs=2)
assert lrs == list(
map(
pytest.approx,
[
# Cycle 1
1.2,
1.0,
0.8,
0.6,
0.4,
0.2,
0.4,
0.6,
0.8,
1.0,
# Cycle 2
1.2,
1.0,
0.8,
0.6,
0.4,
0.2,
0.4,
0.6, # 0.8, 1.0,
],
)
)
scheduler.load_state_dict(state_dict)
def test_linear_scheduler_cycle_size_two():
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler = LinearCyclicalScheduler(optimizer, "lr", 1, 0, cycle_size=2)
data = [0] * 10
max_epochs = 2
simulated_values = LinearCyclicalScheduler.simulate_values(
num_events=len(data) * max_epochs, param_name="lr", start_value=1, end_value=0, cycle_size=2
)
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
lrs = []
trainer.run(data, max_epochs=max_epochs)
assert lrs == list(
map(
pytest.approx,
[1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0],
)
)
assert lrs == pytest.approx([v for i, v in simulated_values])
def test_cosine_annealing_scheduler():
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler = CosineAnnealingScheduler(optimizer, "lr", 0, 1, 10)
state_dict = scheduler.state_dict()
data = [0] * 9
max_epochs = 2
simulated_values = CosineAnnealingScheduler.simulate_values(
num_events=len(data) * max_epochs, param_name="lr", start_value=0, end_value=1, cycle_size=10
)
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
for _ in range(2):
lrs = []
trainer.run(data, max_epochs=max_epochs)
assert lrs == list(
map(
pytest.approx,
[
0.0,
0.02447174185242318,
0.09549150281252627,
0.20610737385376332,
0.3454915028125263,
0.5,
0.6545084971874737,
0.7938926261462365,
0.9045084971874737,
0.9755282581475768,
0.0,
0.02447174185242318,
0.09549150281252627,
0.20610737385376332,
0.3454915028125263,
0.5,
0.6545084971874737,
0.7938926261462365, # 0.9045084971874737, 0.9755282581475768
],
)
)
scheduler.load_state_dict(state_dict)
assert lrs == pytest.approx([v for i, v in simulated_values])
def test_concat_scheduler_asserts():
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.0, cycle_size=10)
scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", start_value=0.0, end_value=1.0, cycle_size=10)
with pytest.raises(TypeError, match=r"Argument schedulers should be a sequence"):
ConcatScheduler(schedulers=None, durations=[])
with pytest.raises(ValueError, match=r"Argument schedulers should be of more than one parameter schedulers"):
ConcatScheduler(schedulers=[], durations=[])
with pytest.raises(ValueError, match=r"Argument schedulers should be of more than one parameter schedulers"):
ConcatScheduler(schedulers=[scheduler_1], durations=[10])
with pytest.raises(TypeError, match=r"Value at index 1 of schedulers should be a parameter scheduler"):
ConcatScheduler(schedulers=[scheduler_1, 12], durations=[10])
with pytest.raises(ValueError, match=r"Incorrect number schedulers or duration values"):
ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=[10, 5])
with pytest.raises(ValueError, match=r"Argument durations should be list/tuple of integers"):
ConcatScheduler(schedulers=[scheduler_1, scheduler_2, scheduler_2], durations=[15, 12.0])
with pytest.raises(TypeError, match=r"Argument durations should be list/tuple"):
ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations="abc")
with pytest.raises(TypeError, match=r"Argument param_names should be list or tuple"):
ConcatScheduler.simulate_values(
num_events=123, schedulers=[scheduler_1, scheduler_2], durations=[15], param_names="abc"
)
with pytest.raises(ValueError, match=r"Argument param_names should be list or tuple of strings"):
ConcatScheduler.simulate_values(
num_events=123, schedulers=[scheduler_1, scheduler_2], durations=[15], param_names=[1]
)
optimizer_2 = torch.optim.SGD([tensor], lr=0)
scheduler_3 = CosineAnnealingScheduler(optimizer_2, "lr", start_value=0.0, end_value=1.0, cycle_size=10)
with pytest.raises(ValueError, match=r"schedulers should be related to same optimizer"):
ConcatScheduler([scheduler_1, scheduler_3], durations=[30])
scheduler_4 = CosineAnnealingScheduler(optimizer, "lr2", start_value=0.0, end_value=1.0, cycle_size=10)
with pytest.raises(ValueError, match=r"schedulers should be related to same param_name"):
ConcatScheduler([scheduler_1, scheduler_4], durations=[30])
with pytest.raises(ValueError, match=r"schedulers should be related to same optimizer"):
ConcatScheduler.simulate_values(3, [scheduler_1, scheduler_3], durations=[30])
def test_concat_scheduler_state_dict():
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.0, cycle_size=10)
scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", start_value=0.0, end_value=1.0, cycle_size=10)
durations = [10]
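    # durations=[10] means scheduler_1 drives the parameter for the first 10 events, after which the
    # ConcatScheduler switches to scheduler_2.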
concat_scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=durations, save_history=False)
state_dict = concat_scheduler.state_dict()
assert state_dict["durations"] == durations
assert state_dict["_current_duration"] == durations[0]
assert state_dict["_scheduler_index"] == 0
for _ in range(20):
concat_scheduler(None, None)
concat_scheduler.load_state_dict(state_dict)
assert concat_scheduler.durations == durations
assert concat_scheduler._current_duration == durations[0]
assert id(concat_scheduler._current_scheduler) == id(scheduler_1)
with pytest.raises(ValueError, match=r"Required state attribute 'schedulers' is absent in provided state_dict"):
concat_scheduler.load_state_dict({"a": 1})
with pytest.raises(ValueError, match=r"Input state_dict contains 0 state_dicts of concatenated schedulers"):
concat_scheduler.load_state_dict({"schedulers": []})
with pytest.raises(TypeError, match=r"Argument state_dict should be a dictionary, but given"):
concat_scheduler.load_state_dict(None)
@pytest.mark.parametrize("duration_vals_as_np_int", [False, True])
def test_concat_scheduler_two_schedulers(duration_vals_as_np_int):
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.0, cycle_size=10)
scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", start_value=0.0, end_value=1.0, cycle_size=10)
durations = [10]
if duration_vals_as_np_int:
durations = [np.int64(t) for t in durations]
concat_scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=durations, save_history=True)
state_dict = concat_scheduler.state_dict()
data = [0] * 10
max_epochs = 2
simulated_values = ConcatScheduler.simulate_values(
num_events=len(data) * max_epochs, schedulers=[scheduler_1, scheduler_2], durations=durations
)
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, concat_scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
for _ in range(2):
lrs = []
trainer.run(data, max_epochs=max_epochs)
assert lrs == list(
map(
pytest.approx,
[
# Cycle 1 of the LinearCyclicalScheduler
1.0,
0.8,
0.6,
0.4,
0.2,
0.0,
0.2,
0.4,
0.6,
0.8,
# Cycle 1 of the CosineAnnealingScheduler
0.0,
0.02447174185242318,
0.09549150281252627,
0.20610737385376332,
0.3454915028125263,
0.5,
0.6545084971874737,
0.7938926261462365,
0.9045084971874737,
0.9755282581475768,
],
)
)
state_lrs = trainer.state.param_history["lr"]
assert len(state_lrs) == len(lrs)
# Unpack singleton lists
assert [group[0] for group in state_lrs] == lrs
assert lrs == pytest.approx([v for i, v in simulated_values])
concat_scheduler.load_state_dict(state_dict)
trainer.state.param_history = None
def test_concat_scheduler_two_linear():
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=0.0, end_value=0.1, cycle_size=2)
scheduler_2 = LinearCyclicalScheduler(optimizer, "lr", start_value=0.2, end_value=1.0, cycle_size=2)
durations = [5]
concat_scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=durations, save_history=True)
state_dict = concat_scheduler.state_dict()
assert concat_scheduler.get_param() == 0.0
data = [0] * 10
max_epochs = 2
simulated_values = ConcatScheduler.simulate_values(
num_events=len(data) * max_epochs, schedulers=[scheduler_1, scheduler_2], durations=durations
)
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, concat_scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
for _ in range(2):
lrs = []
trainer.run(data, max_epochs=max_epochs)
assert lrs == list(
map(
pytest.approx,
[
# first LinearCyclicalScheduler
0.0,
0.1,
0.0,
0.1,
0.0,
# second LinearCyclicalScheduler
0.2,
1.0,
0.2,
1.0,
0.2,
1.0,
0.2,
1.0,
0.2,
1.0,
0.2,
1.0,
0.2,
1.0,
0.2,
],
)
)
state_lrs = trainer.state.param_history["lr"]
assert len(state_lrs) == len(lrs)
# Unpack singleton lists
assert [group[0] for group in state_lrs] == lrs
assert lrs == pytest.approx([v for i, v in simulated_values])
concat_scheduler.load_state_dict(state_dict)
trainer.state.param_history = None
def test_concat_scheduler_3_schedulers():
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.5, cycle_size=20)
scheduler_2 = LinearCyclicalScheduler(optimizer, "lr", start_value=0.5, end_value=0.45, cycle_size=10)
scheduler_3 = LinearCyclicalScheduler(optimizer, "lr", start_value=0.5, end_value=0.0, cycle_size=20)
durations = [10, 5]
concat_scheduler = ConcatScheduler(
schedulers=[scheduler_1, scheduler_2, scheduler_3], durations=durations, save_history=True
)
state_dict = concat_scheduler.state_dict()
data = [0] * 10
max_epochs = 2
simulated_values = ConcatScheduler.simulate_values(
num_events=len(data) * max_epochs, schedulers=[scheduler_1, scheduler_2, scheduler_3], durations=durations
)
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, concat_scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
for _ in range(2):
lrs = []
trainer.run(data, max_epochs=max_epochs)
assert lrs == list(
map(
pytest.approx,
[
# Cycle 1 of the first LinearCyclicalScheduler
1.0,
0.95,
0.9,
0.85,
0.8,
0.75,
0.7,
0.65,
0.6,
0.55,
# Cycle 1 of the second LinearCyclicalScheduler
0.5,
0.49,
0.48,
0.47,
0.46,
# Cycle 1 of the third LinearCyclicalScheduler
0.5,
0.45,
0.4,
0.35,
0.3,
],
)
)
state_lrs = trainer.state.param_history["lr"]
assert len(state_lrs) == len(lrs)
# Unpack singleton lists
assert [group[0] for group in state_lrs] == lrs
assert lrs == pytest.approx([v for i, v in simulated_values])
concat_scheduler.load_state_dict(state_dict)
trainer.state.param_history = None
def test_save_param_history():
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler = LinearCyclicalScheduler(optimizer, "lr", 1, 0, 10, save_history=True)
lrs = []
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
assert not hasattr(trainer.state, "param_history")
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
trainer.run([0] * 10, max_epochs=2)
state_lrs = trainer.state.param_history["lr"]
assert len(state_lrs) == len(lrs)
# Unpack singleton lists
assert [group[0] for group in state_lrs] == lrs
def test_lr_scheduler_asserts():
err_msg = r"Argument lr_scheduler should be a subclass of torch.optim.lr_scheduler.(_LRScheduler|LRScheduler)"
with pytest.raises(TypeError, match=err_msg):
LRScheduler(123)
with pytest.raises(TypeError, match=err_msg):
LRScheduler.simulate_values(1, None)
@pytest.mark.parametrize(
"torch_lr_scheduler_cls, kwargs",
[
(StepLR, ({"step_size": 5, "gamma": 0.5})),
(ExponentialLR, ({"gamma": 0.78})),
(MultiplicativeLR if has_multiplicative_lr else None, ({"lr_lambda": lambda epoch: 0.95})),
],
)
def test_lr_scheduler(torch_lr_scheduler_cls, kwargs):
if torch_lr_scheduler_cls is None:
return
tensor = torch.zeros([1], requires_grad=True)
optimizer1 = torch.optim.SGD([tensor], lr=0.01)
optimizer2 = torch.optim.SGD([tensor], lr=0.01)
optimizer3 = torch.optim.SGD([tensor], lr=0.01)
opt_state_dict1 = optimizer1.state_dict()
opt_state_dict2 = optimizer2.state_dict()
opt_state_dict3 = optimizer3.state_dict()
torch_lr_scheduler1 = torch_lr_scheduler_cls(optimizer=optimizer1, **kwargs)
scheduler1 = LRScheduler(torch_lr_scheduler1)
state_dict1 = scheduler1.state_dict()
torch_lr_scheduler2 = torch_lr_scheduler_cls(optimizer=optimizer2, **kwargs)
with pytest.warns(UserWarning, match=r"the first lr value from the optimizer, otherwise it will be skipped"):
scheduler2 = LRScheduler(torch_lr_scheduler2, use_legacy=True)
state_dict2 = scheduler2.state_dict()
torch_lr_scheduler3 = torch_lr_scheduler_cls(optimizer=optimizer3, **kwargs)
state_dict3 = torch_lr_scheduler3.state_dict()
def dummy_update(engine, batch):
optimizer1.step()
optimizer2.step()
optimizer3.step()
trainer = Engine(dummy_update)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler1)
@trainer.on(Events.ITERATION_STARTED)
def save_lr1(engine):
lrs1.append(optimizer1.param_groups[0]["lr"])
@trainer.on(Events.ITERATION_STARTED)
def save_lr2(engine):
lrs2.append(optimizer2.param_groups[0]["lr"])
@trainer.on(Events.ITERATION_STARTED)
def save_true_lr(engine):
lrs_true.append(optimizer3.param_groups[0]["lr"])
@trainer.on(Events.ITERATION_COMPLETED)
def torch_lr_scheduler_step(engine):
torch_lr_scheduler3.step()
trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler2)
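    # scheduler1 (default behaviour) is attached at ITERATION_STARTED, while scheduler2 (use_legacy=True)
    # and the raw torch scheduler are stepped at ITERATION_COMPLETED; all three optimizers should see the
    # same lr sequence, which is what the assertions below check.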
for _ in range(2):
lrs1 = []
lrs2 = []
lrs_true = []
data = [0] * 10
max_epochs = 2
trainer.run(data, max_epochs=max_epochs)
assert lrs_true == pytest.approx(lrs1), f"{_}: {lrs_true} ({len(lrs_true)}) vs {lrs1} ({len(lrs1)})"
assert lrs_true == pytest.approx(lrs2), f"{_}: {lrs_true} ({len(lrs_true)}) vs {lrs2} ({len(lrs2)})"
optimizer1.load_state_dict(opt_state_dict1)
scheduler1.load_state_dict(state_dict1)
optimizer2.load_state_dict(opt_state_dict2)
scheduler2.load_state_dict(state_dict2)
optimizer3.load_state_dict(opt_state_dict3)
torch_lr_scheduler3.load_state_dict(state_dict3)
optimizer4 = torch.optim.SGD([tensor], lr=0.01)
torch_lr_scheduler4 = torch_lr_scheduler_cls(optimizer=optimizer4, **kwargs)
simulated_values = LRScheduler.simulate_values(num_events=len(data) * max_epochs, lr_scheduler=torch_lr_scheduler4)
assert lrs1 == pytest.approx([v for i, v in simulated_values])
assert lrs2 == pytest.approx([v for i, v in simulated_values])
def test_piecewiselinear_asserts():
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
with pytest.raises(TypeError, match=r"Argument milestones_values should be a list or tuple"):
PiecewiseLinear(optimizer, "lr", milestones_values=None)
with pytest.raises(ValueError, match=r"Argument milestones_values should be with at least one value"):
PiecewiseLinear(optimizer, "lr", milestones_values=[])
with pytest.raises(ValueError, match=r"Argument milestones_values should be a list of pairs"):
PiecewiseLinear(optimizer, "lr", milestones_values=[(0.5,)])
with pytest.raises(ValueError, match=r"Argument milestones_values should be a list of pairs"):
PiecewiseLinear(optimizer, "lr", milestones_values=[(10, 0.5), (0.6,)])
with pytest.raises(ValueError, match=r"Milestones should be increasing integers"):
PiecewiseLinear(optimizer, "lr", milestones_values=[(10, 0.5), (5, 0.6)])
with pytest.raises(TypeError, match=r"Value of a milestone should be integer"):
PiecewiseLinear(optimizer, "lr", milestones_values=[(0.5, 1)])
@pytest.mark.parametrize("milestones_as_np_int", [True, False])
def test_piecewiselinear(milestones_as_np_int):
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
milestones_values = [(5, 0.5), (15, 1.0), (25, 0.0), (35, 1.0), (40, 0.5)]
if milestones_as_np_int:
milestones_values = [(np.int64(t), v) for t, v in milestones_values]
scheduler = PiecewiseLinear(optimizer, "lr", milestones_values=milestones_values)
state_dict = scheduler.state_dict()
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
for _ in range(2):
lrs = []
trainer.run([0] * 25, max_epochs=2)
assert lrs == list(
map(
pytest.approx,
[
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.55,
0.6,
0.65,
0.7,
0.75,
0.8,
0.85,
0.9,
0.95,
1.0,
0.9,
0.8,
0.7,
0.6,
0.5,
0.4,
0.3,
0.2,
0.1,
0.0,
0.1,
0.2,
0.3,
0.4,
0.5,
0.6,
0.7,
0.8,
0.9,
1.0,
0.9,
0.8,
0.7,
0.6,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
],
)
)
scheduler.load_state_dict(state_dict)
def test_simulate_and_plot_values():
import matplotlib
matplotlib.use("Agg")
def _test(scheduler_cls, **scheduler_kwargs):
if scheduler_cls == LRScheduler:
optimizer = scheduler_kwargs["lr_scheduler"].optimizer
elif scheduler_cls == ConcatScheduler:
optimizer = scheduler_kwargs["optimizer"]
del scheduler_kwargs["optimizer"]
else:
tensor = torch.zeros([1], requires_grad=True)
scheduler_kwargs["optimizer"] = torch.optim.SGD([tensor], lr=0.1)
optimizer = scheduler_kwargs["optimizer"]
max_epochs = 2
data = [0] * 10
simulated_values = scheduler_cls.simulate_values(num_events=len(data) * max_epochs, **scheduler_kwargs)
scheduler = scheduler_cls(**scheduler_kwargs)
lrs = []
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
trainer.add_event_handler(Events.ITERATION_STARTED, save_lr)
trainer.run(data, max_epochs=max_epochs)
assert lrs == pytest.approx([v for i, v in simulated_values])
        # re-execute simulate_values to check that it does not change the scheduler's internal state
# simulated_values = scheduler_cls.simulate_values(num_events=len(data) * max_epochs,
# save_history=True, # this will be removed
# **scheduler_kwargs)
# assert lrs == pytest.approx([v for i, v in simulated_values])
# launch plot values
scheduler_cls.plot_values(num_events=len(data) * max_epochs, **scheduler_kwargs)
# LinearCyclicalScheduler
_test(LinearCyclicalScheduler, param_name="lr", start_value=1.0, end_value=0.0, cycle_size=10)
# CosineAnnealingScheduler
_test(CosineAnnealingScheduler, param_name="lr", start_value=1.0, end_value=0.0, cycle_size=10)
# LRScheduler
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0.1)
torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.5)
_test(LRScheduler, lr_scheduler=torch_lr_scheduler)
# ConcatScheduler = [LinearCyclicalScheduler, CosineAnnealingScheduler]
scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.0, cycle_size=20)
scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", start_value=0.0, end_value=1.0, cycle_size=10)
durations = [10]
_test(ConcatScheduler, optimizer=optimizer, schedulers=[scheduler_1, scheduler_2], durations=durations)
# ConcatScheduler = [LinearCyclicalScheduler, LRScheduler]
tensor = torch.ones([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0.001)
torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=1.5)
scheduler_1 = LRScheduler(torch_lr_scheduler)
scheduler_2 = LinearCyclicalScheduler(optimizer, "lr", start_value=0.1, end_value=0.0, cycle_size=10)
durations = [10]
_test(ConcatScheduler, optimizer=optimizer, schedulers=[scheduler_1, scheduler_2], durations=durations)
# PiecewiseLinear
tensor = torch.ones([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0.001)
_test(
PiecewiseLinear,
optimizer=optimizer,
param_name="lr",
milestones_values=[(10, 0.5), (20, 0.45), (21, 0.3), (30, 0.1), (40, 0.1)],
)
with pytest.raises(ModuleNotFoundError, match=r"This method requires matplotlib to be installed."):
with patch.dict("sys.modules", {"matplotlib.pyplot": None}):
_test(
PiecewiseLinear,
optimizer=optimizer,
param_name="lr",
milestones_values=[(10, 0.5), (20, 0.45), (21, 0.3), (30, 0.1), (40, 0.1)],
)
def test_create_lr_scheduler_with_warmup_asserts():
with pytest.raises(TypeError, match=r"Argument lr_scheduler should be a subclass of"):
create_lr_scheduler_with_warmup(12, warmup_start_value=0.0, warmup_end_value=0.1, warmup_duration=10)
t1 = torch.zeros([1], requires_grad=True)
# A) opt lr != warmup_end_value
optimizer = torch.optim.SGD([t1], lr=0.2)
torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.98)
with pytest.raises(ValueError, match=r"Argument warmup_duration should be at least 2 events"):
create_lr_scheduler_with_warmup(
torch_lr_scheduler, warmup_start_value=0.0, warmup_end_value=0.1, warmup_duration=1
)
with pytest.raises(TypeError, match=r"Argument warmup_duration should be integer"):
create_lr_scheduler_with_warmup(
torch_lr_scheduler, warmup_start_value=0.0, warmup_end_value=0.1, warmup_duration="abc"
)
with pytest.raises(TypeError, match=r"Argument output_simulated_values should be a list of None"):
simulated_values = ()
create_lr_scheduler_with_warmup(
torch_lr_scheduler,
warmup_start_value=0.0,
warmup_end_value=0.1,
warmup_duration=10,
output_simulated_values=simulated_values,
)
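# In the parametrization below, warmup_end_next_value is the lr expected at the first event after the warmup:
# if warmup_end_value differs from the wrapped scheduler's first value, the lr jumps to that first value
# (cases A and C); if they coincide, the duplicated point is skipped and the scheduler's second value is used
# (cases B and D); warmup_end_value=None falls back to the optimizer's lr, i.e. case B (case E).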
@pytest.mark.parametrize(
"lr_scheduler_name, warmup_start_value, warmup_end_value, warmup_duration, warmup_end_next_value",
[
# A) opt lr != warmup_end_value
("ExponentialLR", 0.01, 0.05, 10, 0.2),
("ExponentialLR", 0.01, 0.05, 2, 0.2),
# B) opt lr == warmup_end_value
("ExponentialLR", 0.01, 0.2, 10, 0.2 * 0.98),
("ExponentialLR", 0.01, 0.2, 2, 0.2 * 0.98),
# C) lr_scheduler start_value != warmup_end_value
("LinearCyclicalScheduler", 0.01, 0.05, 10, 0.8),
("LinearCyclicalScheduler", 0.01, 0.05, 2, 0.8),
# D) lr_scheduler start_value == warmup_end_value
("LinearCyclicalScheduler", 0.01, 0.8, 10, 0.8 - (0.8 / 5.0)),
("LinearCyclicalScheduler", 0.01, 0.8, 2, 0.8 - (0.8 / 5.0)),
# E) warmup_end_value is None: fall back to case B)
("ExponentialLR", 0.01, None, 10, 0.2 * 0.98),
],
)
def test_create_lr_scheduler_with_warmup(
lr_scheduler_name, warmup_start_value, warmup_end_value, warmup_duration, warmup_end_next_value
):
t1 = torch.zeros([1], requires_grad=True)
if lr_scheduler_name == "ExponentialLR":
optimizer = torch.optim.SGD([t1], lr=0.2)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.98)
elif lr_scheduler_name == "LinearCyclicalScheduler":
optimizer = torch.optim.SGD([t1], lr=0.0)
lr_scheduler = LinearCyclicalScheduler(
optimizer=optimizer, param_name="lr", start_value=0.8, end_value=0.0, cycle_size=10
)
else:
raise ValueError(f"Unknown name: {lr_scheduler_name}")
num_iterations = 10
max_epochs = 20
if warmup_end_value is None:
expected_warmup_end_value = optimizer.param_groups[0]["lr"]
else:
expected_warmup_end_value = warmup_end_value
simulated_values = [None] * (num_iterations * max_epochs)
scheduler = create_lr_scheduler_with_warmup(
lr_scheduler,
warmup_start_value=warmup_start_value,
warmup_end_value=warmup_end_value,
warmup_duration=warmup_duration,
output_simulated_values=simulated_values,
)
state_dict = scheduler.state_dict()
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
@trainer.on(Events.ITERATION_STARTED)
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
data = [0] * num_iterations
for _ in range(2):
lrs = []
trainer.run(data, max_epochs=max_epochs)
assert lrs == pytest.approx([v for _, v in simulated_values])
assert lrs[0] == pytest.approx(warmup_start_value), f"lrs={lrs[: warmup_duration + num_iterations]}"
assert lrs[warmup_duration - 1] == pytest.approx(
expected_warmup_end_value
), f"lrs={lrs[: warmup_duration + num_iterations]}"
assert lrs[warmup_duration] == pytest.approx(
warmup_end_next_value
), f"lrs={lrs[: warmup_duration + num_iterations]}"
scheduler.load_state_dict(state_dict)
@pytest.mark.parametrize("save_history", [False, True])
def test_create_lr_scheduler_with_warmup_on_combined_scheduler(save_history):
# Test with a complex scheduler
tensor = torch.ones([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0.001)
max_epochs = 25
lr_max_value = 0.4
num_iterations_per_epoch = 128
num_iterations = max_epochs * num_iterations_per_epoch
warmup_duration = 5 * num_iterations_per_epoch
cooldown_duration = 5 * num_iterations_per_epoch
scheduler_1 = LinearCyclicalScheduler(
optimizer,
"lr",
start_value=lr_max_value,
end_value=lr_max_value * 0.9,
cycle_size=(num_iterations - warmup_duration - cooldown_duration) * 2,
)
scheduler_2 = LinearCyclicalScheduler(
optimizer, "lr", start_value=lr_max_value, end_value=0.0, cycle_size=cooldown_duration * 2
)
lr_scheduler = ConcatScheduler(
schedulers=[scheduler_1, scheduler_2],
durations=[num_iterations - warmup_duration - cooldown_duration],
save_history=False,
)
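    # Overall expected shape: a linear warmup from 0 to lr_max_value over warmup_duration events, a long
    # slow decay driven by scheduler_1 (from lr_max_value towards 0.9 * lr_max_value), then a cooldown
    # driven by scheduler_2 that ends near 0.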
lr_values = [None] * num_iterations
scheduler = create_lr_scheduler_with_warmup(
lr_scheduler,
warmup_start_value=0.0,
warmup_end_value=lr_max_value,
warmup_duration=warmup_duration,
save_history=save_history,
output_simulated_values=lr_values,
)
state_dict = scheduler.state_dict()
trainer = Engine(lambda engine, batch: None)
@trainer.on(Events.ITERATION_COMPLETED)
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
data = [0] * num_iterations_per_epoch
for _ in range(2):
lrs = []
trainer.run(data, max_epochs=max_epochs)
assert lrs == pytest.approx([v for i, v in lr_values])
if save_history:
param_history = trainer.state.param_history["lr"]
assert lrs == pytest.approx([v[0] for v in param_history])
trainer.state.param_history = None
scheduler.load_state_dict(state_dict)
def test_create_lr_scheduler_with_warmup_with_real_model(dummy_model_factory):
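    # Warmup should ramp the lr from 0 to scaled_lr, after which the wrapped StepLR takes over,
    # starting from the optimizer's initial lr and decaying it by gamma every step_size events.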
model = dummy_model_factory(with_grads=False, with_frozen_layer=False)
init_lr = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=init_lr)
scaled_lr = 0.02
warmup_duration = 5
step_size = 2
gamma = 0.97
output_simulated_values = [None] * 50
create_lr_scheduler_with_warmup(
torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma),
warmup_start_value=0.0,
warmup_end_value=scaled_lr,
warmup_duration=warmup_duration,
output_simulated_values=output_simulated_values,
)
assert output_simulated_values[0] == [0, 0.0]
assert output_simulated_values[warmup_duration - 1] == [warmup_duration - 1, scaled_lr]
assert output_simulated_values[warmup_duration] == [warmup_duration, init_lr]
v = [warmup_duration + step_size, init_lr * gamma]
assert output_simulated_values[warmup_duration + step_size] == v
def test_param_group_scheduler_asserts():
t1 = torch.zeros([1], requires_grad=True)
t2 = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([{"params": t1, "lr": 0.1}, {"params": t2, "lr": 0.1}])
lr_scheduler1 = LinearCyclicalScheduler(
optimizer, "lr", param_group_index=0, start_value=1.0, end_value=0.0, cycle_size=10
)
lr_scheduler2 = LinearCyclicalScheduler(
optimizer, "lr", param_group_index=1, start_value=1.0, end_value=0.0, cycle_size=10
)
with pytest.raises(TypeError, match=r"Argument schedulers should be a list/tuple"):
ParamGroupScheduler(schedulers=None, names=["a", "b", "c"])
with pytest.raises(ValueError, match=r"Argument schedulers should be a list/tuple of parameter schedulers"):
ParamGroupScheduler(schedulers=[0, 1, 2], names=["a", "b", "c"])
with pytest.raises(ValueError, match=r"Argument schedulers should be a list/tuple of parameter schedulers"):
ParamGroupScheduler(schedulers=[lr_scheduler1, "2"], names=["a", "b"])
with pytest.raises(TypeError, match=r"Argument names should be a list/tuple"):
ParamGroupScheduler(schedulers=[lr_scheduler1, lr_scheduler2], names="ab")
with pytest.raises(ValueError, match=r"Argument names should be a list/tuple of parameter scheduler's names"):
ParamGroupScheduler(schedulers=[lr_scheduler1, lr_scheduler2], names=[1, 2])
with pytest.raises(ValueError, match=r"\d should be equal \d"):
ParamGroupScheduler(schedulers=[lr_scheduler1, lr_scheduler2], names=["a"])
scheduler = ParamGroupScheduler(schedulers=[lr_scheduler1, lr_scheduler2], names=["a", "b"])
with pytest.raises(TypeError, match=r"Argument state_dict should be a dictionary"):
scheduler.load_state_dict(None)
with pytest.raises(ValueError, match=r"Required state attribute 'schedulers' is absent in provided state_dict"):
scheduler.load_state_dict({"a": 1})
with pytest.raises(ValueError, match=r"Input state_dict contains 0 state_dicts of param group schedulers"):
scheduler.load_state_dict({"schedulers": []})
with pytest.raises(ValueError, match=r"Required state attribute 'schedulers' is absent in provided state_dict"):
scheduler.load_state_dict({})
with pytest.raises(
ValueError, match=r"Name of scheduler from input state dict does not " r"correspond to required one"
):
scheduler.load_state_dict({"schedulers": [("a", lr_scheduler1.state_dict()), ("bad_name", {})]})
@pytest.mark.parametrize("param_groups_setting", ["single_optim", "multi_optim"])
def test_param_group_scheduler(param_groups_setting):
t1 = torch.zeros([1], requires_grad=True)
t2 = torch.zeros([1], requires_grad=True)
if param_groups_setting == "single_optim":
optimizer = torch.optim.SGD([{"params": t1, "lr": 0.1}, {"params": t2, "lr": 0.1}])
lr_scheduler1 = LinearCyclicalScheduler(
optimizer, "lr", param_group_index=0, start_value=1.0, end_value=0.0, cycle_size=10
)
lr_scheduler2 = LinearCyclicalScheduler(
optimizer, "lr", param_group_index=1, start_value=1.0, end_value=0.0, cycle_size=10
)
else:
optimizer_1 = torch.optim.SGD(params=[t1], lr=0.1)
optimizer_2 = torch.optim.SGD(params=[t2], lr=0.1)
lr_scheduler1 = LinearCyclicalScheduler(optimizer_1, "lr", start_value=1.0, end_value=0.0, cycle_size=10)
lr_scheduler2 = LinearCyclicalScheduler(optimizer_2, "lr", start_value=1.0, end_value=0.0, cycle_size=10)
lr_schedulers = [lr_scheduler1, lr_scheduler2]
num_iterations = 10
max_epochs = 20
scheduler = ParamGroupScheduler(lr_schedulers, names=[f"s_{i}" for i in range(len(lr_schedulers))])
state_dict = scheduler.state_dict()
trainer = Engine(lambda engine, batch: None)
lrs = []
@trainer.on(Events.ITERATION_STARTED, lrs)
def save_lr(_, lrs):
lrs.append(scheduler.get_param())
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
data = [0] * num_iterations
for _ in range(2):
lrs.clear()
trainer.run(data, max_epochs=max_epochs)
assert [lr[0] for lr in lrs] == pytest.approx([lr[1] for lr in lrs])
scheduler.load_state_dict(state_dict)
values = ParamGroupScheduler.simulate_values(max_epochs * num_iterations, lr_schedulers)
assert [lr[1] for lr in values] == pytest.approx([lr[2] for lr in values])
assert [lr[0] for lr in lrs] == pytest.approx([lr[1] for lr in values])
@pytest.mark.parametrize(
"scheduler_cls, kwargs",
[
(LinearCyclicalScheduler, {"param_name": "lr", "start_value": 1.0, "end_value": 0.0, "cycle_size": 10}),
(
PiecewiseLinear,
{"param_name": "lr", "milestones_values": [(5, 0.5), (15, 1.0), (25, 0.0), (35, 1.0), (40, 0.5)]},
),
(CosineAnnealingScheduler, {"param_name": "lr", "start_value": 0.0, "end_value": 1.0, "cycle_size": 10}),
(ExponentialLR, {"gamma": 0.98}),
(StepLR, {"step_size": 50, "gamma": 0.5}),
],
)
def test_scheduler_with_param_groups(scheduler_cls, kwargs):
t1 = torch.zeros([1], requires_grad=True)
t2 = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([{"params": t1, "lr": 0.1}, {"params": t2, "lr": 0.1}])
lr_scheduler = scheduler_cls(optimizer, **kwargs)
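    # native torch schedulers (ExponentialLR, StepLR) are wrapped into ignite's LRScheduler
    # so that they can be attached to the engine like any other param scheduler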
if not isinstance(lr_scheduler, ParamScheduler):
lr_scheduler = LRScheduler(lr_scheduler)
num_iterations = 10
max_epochs = 20
state_dict = lr_scheduler.state_dict()
trainer = Engine(lambda engine, batch: None)
@trainer.on(Events.ITERATION_COMPLETED)
def save_lr():
lrs.append((optimizer.param_groups[0]["lr"], optimizer.param_groups[1]["lr"]))
trainer.add_event_handler(Events.ITERATION_STARTED, lr_scheduler)
data = [0] * num_iterations
for _ in range(2):
lrs = []
trainer.run(data, max_epochs=max_epochs)
assert [lr[0] for lr in lrs] == pytest.approx([lr[1] for lr in lrs])
lr_scheduler.load_state_dict(state_dict)
def test_lr_scheduling_on_non_torch_optimizers():
# tests https://github.com/pytorch/ignite/issues/1162
optimizer = MagicMock()
optimizer.param_groups = [{"params": 0}]
FakeParamScheduler(optimizer, "lr")
tensor = torch.zeros([1], requires_grad=True)
base_optimizer = torch.optim.SGD([tensor], lr=0)
optimizer = MockFP16DeepSpeedZeroOptimizer(base_optimizer)
milestones_values = [(5, 0.5), (15, 1.0)]
scheduler = PiecewiseLinear(optimizer, "lr", milestones_values=milestones_values)
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
lrs = []
trainer.run([0] * 15, max_epochs=1)
assert lrs == list(
map(pytest.approx, [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95])
)
def test_reduce_lr_on_plateau_scheduler():
tensor1 = torch.zeros([1], requires_grad=True)
tensor2 = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([{"params": [tensor1]}, {"params": [tensor2]}], lr=1)
data = [0] * 8
max_epochs = 10
trainer = Engine(lambda engine, batch: None)
@trainer.on(Events.EPOCH_COMPLETED)
def evaluate():
evaluator.run(data)
scheduler = ReduceLROnPlateauScheduler(
optimizer,
metric_name="acc",
mode="max",
factor=0.5,
patience=1,
threshold_mode="abs",
threshold=1.99,
min_lr=1e-7,
save_history=True,
trainer=trainer,
param_group_index=0,
)
evaluator = Engine(lambda engine, batch: None)
evaluator.state.metrics = {"acc": 0.0}
generate_acc = iter([3, 7, 7, 9, 10, 11, 8, 8, 4, 7])
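    # With mode="max", threshold_mode="abs" and threshold=1.99, an epoch only counts as an
    # improvement when acc exceeds the best value by more than 1.99; after patience + 1 consecutive
    # non-improving epochs the lr of param group 0 is halved, which yields the expected lrs below.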
@evaluator.on(Events.COMPLETED)
def set_acc():
evaluator.state.metrics["acc"] = next(generate_acc)
evaluator.add_event_handler(Events.COMPLETED, scheduler)
trainer.run(data, max_epochs=max_epochs)
lrs = [param[0] for param in trainer.state.param_history["lr"]]
assert lrs == list(
map(
pytest.approx,
[1, 1, 1, 1, 1, 1, 1, 0.5, 0.5, 0.25],
)
)
assert optimizer.param_groups[1]["lr"] == 1
values = ReduceLROnPlateauScheduler.simulate_values(
5, [10, 9, 9, 9, 8.1], 1.0, save_history=True, factor=0.5, patience=2, threshold=0.1
)
values = np.array(values)[:, 1].tolist()
assert values == list(
map(
pytest.approx,
[1.0, 1.0, 1.0, 0.5, 0.5],
)
)
def test_reduce_lr_on_plateau_scheduler_asserts():
tensor1 = torch.zeros([1], requires_grad=True)
tensor2 = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([{"params": [tensor1]}, {"params": [tensor2]}], lr=1)
with pytest.raises(TypeError, match=r"When param_group_index is given, min_lr should be a float, but given"):
ReduceLROnPlateauScheduler(
optimizer,
metric_name="acc",
min_lr=[1e-7, 1e-8],
param_group_index=0,
)
with pytest.raises(
ValueError, match=r"Argument engine should have in its 'state', attribute 'metrics' which itself has the metric"
):
scheduler = ReduceLROnPlateauScheduler(optimizer, metric_name="acc")
evaluator = Engine(lambda engine, batch: None)
scheduler(evaluator)
with pytest.raises(ValueError, match=r"Length of argument metric_values should be equal to num_events."):
metric_values = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
ReduceLROnPlateauScheduler.simulate_values(5, metric_values, 0.01)
@pytest.mark.parametrize("warmup_end_value", [0.23, None])
@pytest.mark.parametrize("T_0", [1, 12])
@pytest.mark.parametrize("T_mult", [1, 3])
def test_create_lr_scheduler_with_warmup_cosine(warmup_end_value, T_0, T_mult):
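    # First record the lrs of a bare CosineAnnealingWarmRestarts schedule, then check that the
    # warmup-wrapped version ramps linearly from warm_start and reproduces the same cosine values afterwards.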
lr = 0.2
steps = 200
warm_steps = 50
warm_start = 0.023
def get_optim():
t1 = torch.zeros([1], requires_grad=True)
return torch.optim.SGD([t1], lr=lr)
    def get_cos_sched():
return CosineAnnealingWarmRestarts(optimizer, T_0=T_0, T_mult=T_mult)
optimizer = get_optim()
    scheduler = get_cos_sched()
cosine_lrs = []
for i in range(steps):
cosine_lrs.append(optimizer.param_groups[0]["lr"])
scheduler.step()
optimizer = get_optim()
scheduler = create_lr_scheduler_with_warmup(
        get_cos_sched(), warmup_start_value=warm_start, warmup_end_value=warmup_end_value, warmup_duration=warm_steps
)
warm_lrs = []
real_warm_steps = warm_steps if warmup_end_value is not None else (warm_steps - 1)
for epoch in range(real_warm_steps + steps):
scheduler(None)
warm_lrs.append(optimizer.param_groups[0]["lr"])
if warmup_end_value is not None:
np.testing.assert_allclose(np.linspace(warm_start, warmup_end_value, warm_steps), warm_lrs[:warm_steps])
assert warm_lrs[real_warm_steps:] == cosine_lrs
else:
np.testing.assert_allclose(np.linspace(warm_start, lr, warm_steps), warm_lrs[:warm_steps])
assert warm_lrs[real_warm_steps:] == cosine_lrs
|
import sys
import time
from unittest.mock import patch
import pytest
from pytest import approx
from ignite.engine import Engine, EventEnum, Events
from ignite.handlers.time_profilers import BasicTimeProfiler, HandlersTimeProfiler
if sys.platform.startswith("darwin"):
pytest.skip("Skip if on MacOS", allow_module_level=True)
def _do_nothing_update_fn(engine, batch):
pass
def get_prepared_engine_for_basic_profiler(true_event_handler_time):
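    # engine whose handlers each sleep for `true_event_handler_time`, one handler per standard event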
dummy_trainer = Engine(_do_nothing_update_fn)
@dummy_trainer.on(Events.STARTED)
def delay_start(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.COMPLETED)
def delay_complete(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.EPOCH_STARTED)
def delay_epoch_start(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.EPOCH_COMPLETED)
def delay_epoch_complete(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.ITERATION_STARTED)
def delay_iter_start(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.ITERATION_COMPLETED)
def delay_iter_complete(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.GET_BATCH_STARTED)
def delay_get_batch_started(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.GET_BATCH_COMPLETED)
def delay_get_batch_completed(engine):
time.sleep(true_event_handler_time)
return dummy_trainer
def get_prepared_engine_for_handlers_profiler(true_event_handler_time):
HANDLERS_SLEEP_COUNT = 11
PROCESSING_SLEEP_COUNT = 3
class CustomEvents(EventEnum):
CUSTOM_STARTED = "custom_started"
CUSTOM_COMPLETED = "custom_completed"
def dummy_train_step(engine, batch):
engine.fire_event(CustomEvents.CUSTOM_STARTED)
time.sleep(true_event_handler_time)
engine.fire_event(CustomEvents.CUSTOM_COMPLETED)
dummy_trainer = Engine(dummy_train_step)
dummy_trainer.register_events(*CustomEvents)
@dummy_trainer.on(Events.STARTED)
def delay_start(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.COMPLETED)
def delay_complete(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.EPOCH_STARTED)
def delay_epoch_start(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.EPOCH_COMPLETED)
def delay_epoch_complete(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.ITERATION_STARTED)
def delay_iter_start(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.ITERATION_COMPLETED)
def delay_iter_complete(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.GET_BATCH_STARTED)
def delay_get_batch_started(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.GET_BATCH_COMPLETED)
def delay_get_batch_completed(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(CustomEvents.CUSTOM_STARTED)
def delay_custom_started(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(CustomEvents.CUSTOM_COMPLETED)
def delay_custom_completed(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.EPOCH_STARTED(once=1))
def do_something_once_on_1_epoch():
time.sleep(true_event_handler_time)
return dummy_trainer, HANDLERS_SLEEP_COUNT, PROCESSING_SLEEP_COUNT
def test_profilers_wrong_inputs():
profiler = BasicTimeProfiler()
with pytest.raises(TypeError, match=r"Argument engine should be ignite.engine.Engine"):
profiler.attach(None)
with pytest.raises(ModuleNotFoundError, match=r"Need pandas to write results as files"):
with patch.dict("sys.modules", {"pandas": None}):
profiler.write_results("")
profiler = HandlersTimeProfiler()
with pytest.raises(TypeError, match=r"Argument engine should be ignite.engine.Engine"):
profiler.attach(None)
with pytest.raises(ModuleNotFoundError, match=r"Need pandas to write results as files"):
with patch.dict("sys.modules", {"pandas": None}):
profiler.write_results("")
def test_dataflow_timer_basic_profiler():
true_dataflow_time_per_ele = 0.1
true_max_epochs = 1
true_num_iters = 2
def dummy_data_loader(data):
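        # simulate dataflow latency: sleep before yielding every element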
while True:
for d in data:
time.sleep(true_dataflow_time_per_ele)
yield d
dummy_data = range(true_num_iters)
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
dummy_trainer.run(dummy_data_loader(dummy_data), max_epochs=true_max_epochs, epoch_length=true_num_iters)
results = profiler.get_results()
dataflow_results = results["dataflow_stats"]
assert dataflow_results["min/index"][0] == approx(true_dataflow_time_per_ele, abs=1e-1)
assert dataflow_results["max/index"][0] == approx(true_dataflow_time_per_ele, abs=1e-1)
assert dataflow_results["mean"] == approx(true_dataflow_time_per_ele, abs=1e-1)
assert dataflow_results["std"] == approx(0.0, abs=1e-1)
assert dataflow_results["total"] == approx(true_num_iters * true_dataflow_time_per_ele, abs=1e-1)
def test_dataflow_timer_handlers_profiler():
true_dataflow_time_per_ele = 0.1
true_max_epochs = 1
true_num_iters = 2
def dummy_data_loader(data):
while True:
for d in data:
time.sleep(true_dataflow_time_per_ele)
yield d
dummy_data = range(true_num_iters)
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
dummy_trainer.run(dummy_data_loader(dummy_data), max_epochs=true_max_epochs, epoch_length=true_num_iters)
results = profiler.get_results()
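    # HandlersTimeProfiler.get_results() returns rows of the form
    # [handler name, event name, total, min (with index), max (with index), mean, std];
    # the dataflow timing is reported in the last row.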
dataflow_results = results[-1]
assert dataflow_results[0] == "Dataflow"
# event name
assert dataflow_results[1] == "None"
# total
assert dataflow_results[2] == approx(true_num_iters * true_dataflow_time_per_ele, abs=1e-1)
# min
assert dataflow_results[3][0] == approx(true_dataflow_time_per_ele, abs=1e-1)
# max
assert dataflow_results[4][0] == approx(true_dataflow_time_per_ele, abs=1e-1)
# mean
assert dataflow_results[5] == approx(true_dataflow_time_per_ele, abs=1e-1)
# stddev
assert dataflow_results[6] == approx(0.0, abs=1e-1)
def test_processing_timer_basic_profiler():
true_processing_time = 0.1
true_max_epochs = 2
true_num_iters = 2
def train_updater(engine, batch):
time.sleep(true_processing_time)
profiler = BasicTimeProfiler()
dummy_trainer = Engine(train_updater)
profiler.attach(dummy_trainer)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
processing_results = results["processing_stats"]
assert processing_results["min/index"][0] == approx(true_processing_time, abs=1e-1)
assert processing_results["max/index"][0] == approx(true_processing_time, abs=1e-1)
assert processing_results["mean"] == approx(true_processing_time, abs=1e-1)
assert processing_results["std"] == approx(0.0, abs=1e-1)
assert processing_results["total"] == approx(true_max_epochs * true_num_iters * true_processing_time, abs=1e-1)
def test_processing_timer_handlers_profiler():
true_processing_time = 0.1
true_max_epochs = 2
true_num_iters = 2
def train_updater(engine, batch):
time.sleep(true_processing_time)
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(train_updater)
profiler.attach(dummy_trainer)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
processing_results = results[-2]
assert processing_results[0] == "Processing"
# event name
assert processing_results[1] == "None"
# total
assert processing_results[2] == approx(true_max_epochs * true_num_iters * true_processing_time, abs=1e-1)
# min
assert processing_results[3][0] == approx(true_processing_time, abs=1e-1)
# max
assert processing_results[4][0] == approx(true_processing_time, abs=1e-1)
# mean
assert processing_results[5] == approx(true_processing_time, abs=1e-1)
# stddev
assert processing_results[6] == approx(0.0, abs=1e-1)
def test_event_handler_started_basic_profiler():
true_event_handler_time = 0.1
true_max_epochs = 2
true_num_iters = 2
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.STARTED)
def delay_start(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]["STARTED"]
assert event_results["total"] == approx(true_event_handler_time, abs=1e-1)
def test_event_handler_started_handlers_profiler():
true_event_handler_time = 0.1
true_max_epochs = 2
true_num_iters = 2
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.STARTED)
def delay_start(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results[0]
assert "delay_start" in event_results[0]
assert event_results[1] == "STARTED"
assert event_results[2] == approx(true_event_handler_time, abs=1e-1) # total
def test_event_handler_completed_basic_profiler():
true_event_handler_time = 0.1
true_max_epochs = 2
true_num_iters = 2
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.COMPLETED)
def delay_complete(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]["COMPLETED"]
assert event_results["total"] == approx(true_event_handler_time, abs=1e-1)
def test_event_handler_completed_handlers_profiler():
true_event_handler_time = 0.1
true_max_epochs = 2
true_num_iters = 2
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.COMPLETED)
def delay_complete(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results[0]
assert "delay_complete" in event_results[0]
assert event_results[1] == "COMPLETED"
assert event_results[2] == approx(true_event_handler_time, abs=1e-1) # total
def test_event_handler_epoch_started_basic_profiler():
true_event_handler_time = 0.1
true_max_epochs = 2
true_num_iters = 1
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.EPOCH_STARTED)
def delay_epoch_start(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]["EPOCH_STARTED"]
assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1)
assert event_results["std"] == approx(0.0, abs=1e-1)
assert event_results["total"] == approx(true_max_epochs * true_event_handler_time, abs=1e-1)
def test_event_handler_epoch_started_handlers_profiler():
true_event_handler_time = 0.1
true_max_epochs = 2
true_num_iters = 1
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.EPOCH_STARTED)
def delay_epoch_start(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results[0]
assert "delay_epoch_start" in event_results[0]
assert event_results[1] == "EPOCH_STARTED"
assert event_results[2] == approx(true_max_epochs * true_event_handler_time, abs=1e-1) # total
assert event_results[3][0] == approx(true_event_handler_time, abs=1e-1) # min
assert event_results[4][0] == approx(true_event_handler_time, abs=1e-1) # max
assert event_results[5] == approx(true_event_handler_time, abs=1e-1) # mean
assert event_results[6] == approx(0.0, abs=1e-1) # stddev
def test_event_handler_epoch_completed_basic_profiler():
true_event_handler_time = 0.1
true_max_epochs = 2
true_num_iters = 1
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.EPOCH_COMPLETED)
def delay_epoch_complete(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]["EPOCH_COMPLETED"]
assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1)
assert event_results["std"] == approx(0.0, abs=1e-1)
assert event_results["total"] == approx(true_max_epochs * true_event_handler_time, abs=1e-1)
def test_event_handler_epoch_completed_handlers_profiler():
true_event_handler_time = 0.1
true_max_epochs = 2
true_num_iters = 1
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.EPOCH_COMPLETED)
def delay_epoch_complete(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results[0]
assert "delay_epoch_complete" in event_results[0]
assert event_results[1] == "EPOCH_COMPLETED"
assert event_results[2] == approx(true_max_epochs * true_event_handler_time, abs=1e-1) # total
assert event_results[3][0] == approx(true_event_handler_time, abs=1e-1) # min
assert event_results[4][0] == approx(true_event_handler_time, abs=1e-1) # max
assert event_results[5] == approx(true_event_handler_time, abs=1e-1) # mean
assert event_results[6] == approx(0.0, abs=1e-1) # stddev
def test_event_handler_iteration_started_basic_profiler():
true_event_handler_time = 0.1
true_max_epochs = 1
true_num_iters = 2
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.ITERATION_STARTED)
def delay_iter_start(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]["ITERATION_STARTED"]
assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1)
assert event_results["std"] == approx(0.0, abs=1e-1)
assert event_results["total"] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1)
def test_event_handler_iteration_started_handlers_profiler():
true_event_handler_time = 0.1
true_max_epochs = 1
true_num_iters = 2
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.ITERATION_STARTED)
def delay_iter_start(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results[0]
assert "delay_iter_start" in event_results[0]
assert event_results[1] == "ITERATION_STARTED"
assert event_results[2] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1) # total
assert event_results[3][0] == approx(true_event_handler_time, abs=1e-1) # min
assert event_results[4][0] == approx(true_event_handler_time, abs=1e-1) # max
assert event_results[5] == approx(true_event_handler_time, abs=1e-1) # mean
assert event_results[6] == approx(0.0, abs=1e-1) # stddev
def test_event_handler_iteration_completed_basic_profiler():
true_event_handler_time = 0.1
true_max_epochs = 1
true_num_iters = 2
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.ITERATION_COMPLETED)
def delay_iter_complete(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]["ITERATION_COMPLETED"]
assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1)
assert event_results["std"] == approx(0.0, abs=1e-1)
assert event_results["total"] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1)
def test_event_handler_iteration_completed_handlers_profiler():
true_event_handler_time = 0.1
true_max_epochs = 1
true_num_iters = 2
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.ITERATION_COMPLETED)
def delay_iter_complete(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results[0]
assert "delay_iter_complete" in event_results[0]
assert event_results[1] == "ITERATION_COMPLETED"
assert event_results[2] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1) # total
assert event_results[3][0] == approx(true_event_handler_time, abs=1e-1) # min
assert event_results[4][0] == approx(true_event_handler_time, abs=1e-1) # max
assert event_results[5] == approx(true_event_handler_time, abs=1e-1) # mean
assert event_results[6] == approx(0.0, abs=1e-1) # stddev
def test_event_handler_get_batch_started_basic_profiler():
true_event_handler_time = 0.1
true_max_epochs = 1
true_num_iters = 2
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.GET_BATCH_STARTED)
def delay_get_batch_started(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]["GET_BATCH_STARTED"]
assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1)
assert event_results["std"] == approx(0.0, abs=1e-1)
assert event_results["total"] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1)
def test_event_handler_get_batch_started_handlers_profiler():
true_event_handler_time = 0.1
true_max_epochs = 1
true_num_iters = 2
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.GET_BATCH_STARTED)
def delay_get_batch_started(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results[0]
assert "delay_get_batch_started" in event_results[0]
assert event_results[1] == "GET_BATCH_STARTED"
assert event_results[2] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1) # total
assert event_results[3][0] == approx(true_event_handler_time, abs=1e-1) # min
assert event_results[4][0] == approx(true_event_handler_time, abs=1e-1) # max
assert event_results[5] == approx(true_event_handler_time, abs=1e-1) # mean
assert event_results[6] == approx(0.0, abs=1e-1) # stddev
def test_event_handler_get_batch_completed():
true_event_handler_time = 0.1
true_max_epochs = 1
true_num_iters = 2
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.GET_BATCH_COMPLETED)
def delay_get_batch_completed(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]["GET_BATCH_COMPLETED"]
assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1)
assert event_results["std"] == approx(0.0, abs=1e-1)
assert event_results["total"] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1)
def test_event_handler_get_batch_completed_handlers_profiler():
true_event_handler_time = 0.1
true_max_epochs = 1
true_num_iters = 2
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.GET_BATCH_COMPLETED)
def delay_get_batch_completed(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results[0]
assert "delay_get_batch_completed" in event_results[0]
assert event_results[1] == "GET_BATCH_COMPLETED"
assert event_results[2] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1) # total
assert event_results[3][0] == approx(true_event_handler_time, abs=1e-1) # min
assert event_results[4][0] == approx(true_event_handler_time, abs=1e-1) # max
assert event_results[5] == approx(true_event_handler_time, abs=1e-1) # mean
assert event_results[6] == approx(0.0, abs=1e-1) # stddev
def test_neg_event_filter_threshold_handlers_profiler():
true_event_handler_time = 0.1
true_max_epochs = 1
true_num_iters = 1
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.EPOCH_STARTED(once=2))
def do_something_once_on_2_epoch():
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results[0]
assert "do_something_once_on_2_epoch" in event_results[0]
assert event_results[1] == "EPOCH_STARTED"
assert event_results[2] == "not triggered"
def test_pos_event_filter_threshold_handlers_profiler():
true_event_handler_time = HandlersTimeProfiler.EVENT_FILTER_THESHOLD_TIME
true_max_epochs = 2
true_num_iters = 1
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.EPOCH_STARTED(once=2))
def do_something_once_on_2_epoch():
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results[0]
assert "do_something_once_on_2_epoch" in event_results[0]
assert event_results[1] == "EPOCH_STARTED"
assert event_results[2] == approx(
(true_max_epochs * true_num_iters * true_event_handler_time) / 2, abs=1e-1
) # total
def test_custom_event_with_arg_handlers_profiler():
true_event_handler_time = 0.1
true_max_epochs = 1
true_num_iters = 2
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
dummy_trainer.register_events("custom_event")
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.ITERATION_COMPLETED(every=1))
def trigger_custom_event():
dummy_trainer.fire_event("custom_event")
args = [122, 324]
@dummy_trainer.on("custom_event", args)
def on_custom_event(args):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = None
for row in results:
if row[1] == "custom_event":
event_results = row
break
assert event_results is not None
assert "on_custom_event" in event_results[0]
assert event_results[2] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1) # total
assert event_results[3][0] == approx(true_event_handler_time, abs=1e-1) # min
assert event_results[4][0] == approx(true_event_handler_time, abs=1e-1) # max
assert event_results[5] == approx(true_event_handler_time, abs=1e-1) # mean
assert event_results[6] == approx(0.0, abs=1e-1) # stddev
def test_event_handler_total_time_basic_profiler():
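    # eight delayed handlers, one per standard event, so for a single-iteration run
    # the aggregated handler time should be roughly 8 * true_event_handler_time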
true_event_handler_time = 0.125
true_max_epochs = 1
true_num_iters = 1
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.STARTED)
def delay_start(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.COMPLETED)
def delay_complete(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.EPOCH_STARTED)
def delay_epoch_start(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.EPOCH_COMPLETED)
def delay_epoch_complete(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.ITERATION_STARTED)
def delay_iter_start(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.ITERATION_COMPLETED)
def delay_iter_complete(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.GET_BATCH_STARTED)
def delay_get_batch_started(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.GET_BATCH_COMPLETED)
def delay_get_batch_completed(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]
assert event_results["total_time"].item() == approx(true_event_handler_time * 8, abs=1e-1)
def test_event_handler_total_time_handlers_profiler():
true_event_handler_time = 0.125
true_max_epochs = 1
true_num_iters = 1
profiler = HandlersTimeProfiler()
dummy_trainer, handlers_sleep_count, processing_sleep_count = get_prepared_engine_for_handlers_profiler(
true_event_handler_time
)
profiler.attach(dummy_trainer)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
total_handler_stats = results[-3] # total result row
total_processing_stats = results[-2] # processing result row
assert total_handler_stats[2] == approx(true_event_handler_time * handlers_sleep_count, abs=1e-1) # total time
assert total_processing_stats[2] == approx(true_event_handler_time * processing_sleep_count, abs=1e-1) # total time
def test_write_results_basic_profiler(dirname):
true_event_handler_time = 0.125
true_max_epochs = 3
true_num_iters = 2
profiler = BasicTimeProfiler()
dummy_trainer = get_prepared_engine_for_basic_profiler(true_event_handler_time)
profiler.attach(dummy_trainer)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
fp = dirname / "test_log.csv"
profiler.write_results(fp)
assert fp.is_file()
file_length = 0
with open(fp) as f:
for _ in f:
file_length += 1
assert file_length == (true_max_epochs * true_num_iters) + 1
def test_write_results_handlers_profiler(dirname):
true_event_handler_time = 0.125
true_max_epochs = 3
true_num_iters = 2
profiler = HandlersTimeProfiler()
dummy_trainer, _, _ = get_prepared_engine_for_handlers_profiler(true_event_handler_time)
profiler.attach(dummy_trainer)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
fp = dirname / "test_log.csv"
profiler.write_results(fp)
assert fp.is_file()
file_length = 0
with open(fp) as f:
for _ in f:
file_length += 1
assert file_length == (true_max_epochs * true_num_iters) + 1
def test_print_results_basic_profiler(capsys):
true_max_epochs = 1
true_num_iters = 5
profiler = BasicTimeProfiler()
dummy_trainer = get_prepared_engine_for_basic_profiler(true_event_handler_time=0.0125)
profiler.attach(dummy_trainer)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
BasicTimeProfiler.print_results(profiler.get_results())
captured = capsys.readouterr()
out = captured.out
assert "BasicTimeProfiler._" not in out
assert "nan" not in out
def test_print_results_handlers_profiler(capsys):
true_max_epochs = 1
true_num_iters = 5
profiler = HandlersTimeProfiler()
dummy_trainer, _, _ = get_prepared_engine_for_handlers_profiler(true_event_handler_time=0.0125)
profiler.attach(dummy_trainer)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
HandlersTimeProfiler.print_results(profiler.get_results())
captured = capsys.readouterr()
out = captured.out
assert "HandlersTimeProfiler." not in out
assert "Timer." not in out
def test_get_intermediate_results_during_run_basic_profiler(capsys):
true_event_handler_time = 0.0645
true_max_epochs = 2
true_num_iters = 5
profiler = BasicTimeProfiler()
dummy_trainer = get_prepared_engine_for_basic_profiler(true_event_handler_time)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.ITERATION_COMPLETED(every=3))
def log_results(_):
results = profiler.get_results()
profiler.print_results(results)
captured = capsys.readouterr()
out = captured.out
assert "BasicTimeProfiler._" not in out
assert "nan" not in out
assert " min/index: (0.0, " not in out, out
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
|
import copy
import os
from pathlib import Path
from unittest.mock import MagicMock
import matplotlib
import pytest
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
import ignite.distributed as idist
from ignite.contrib.handlers import FastaiLRFinder
from ignite.engine import create_supervised_trainer, Engine, Events
matplotlib.use("agg")
@pytest.fixture
def no_site_packages():
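    # temporarily drop matplotlib from sys.modules and site-packages from sys.path so that
    # importing matplotlib fails inside the test; both are restored afterwards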
import sys
matplotlib = sys.modules["matplotlib"]
del sys.modules["matplotlib"]
prev_path = list(sys.path)
sys.path = [p for p in sys.path if "site-packages" not in p]
yield "no_site_packages"
sys.path = prev_path
sys.modules["matplotlib"] = matplotlib
class DummyModel(nn.Module):
def __init__(self, n_channels=10, out_channels=1, flatten_input=False):
super(DummyModel, self).__init__()
self.net = nn.Sequential(nn.Flatten() if flatten_input else nn.Identity(), nn.Linear(n_channels, out_channels))
def forward(self, x):
return self.net(x)
class DummyModelMulipleParamGroups(nn.Module):
def __init__(self):
super(DummyModelMulipleParamGroups, self).__init__()
self.fc1 = nn.Linear(10, 20)
self.fc2 = nn.Linear(20, 10)
self.fc3 = nn.Linear(10, 10)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
return self.fc3(x)
@pytest.fixture
def model():
model = DummyModel(out_channels=10)
yield model
@pytest.fixture
def model_multiple_param_groups():
model_multiple_param_groups = DummyModelMulipleParamGroups()
yield model_multiple_param_groups
@pytest.fixture
def mnist_model():
model = DummyModel(n_channels=784, out_channels=10, flatten_input=True)
yield model
@pytest.fixture
def optimizer(model):
yield SGD(model.parameters(), lr=1e-4, momentum=0.0)
@pytest.fixture
def optimizer_multiple_param_groups(model_multiple_param_groups):
optimizer_multiple_param_groups = SGD(
[
{"params": model_multiple_param_groups.fc1.parameters(), "lr": 4e-1},
{"params": model_multiple_param_groups.fc2.parameters(), "lr": 3e-2},
{"params": model_multiple_param_groups.fc3.parameters(), "lr": 3e-3},
]
)
yield optimizer_multiple_param_groups
@pytest.fixture
def mnist_optimizer(mnist_model):
yield SGD(mnist_model.parameters(), lr=1e-4, momentum=0.0)
@pytest.fixture
def to_save(model, optimizer):
yield {"model": model, "optimizer": optimizer}
@pytest.fixture
def mnist_to_save(mnist_model, mnist_optimizer):
yield {"model": mnist_model, "optimizer": mnist_optimizer}
@pytest.fixture
def to_save_mulitple_param_groups(model_multiple_param_groups, optimizer_multiple_param_groups):
yield {"model": model_multiple_param_groups, "optimizer": optimizer_multiple_param_groups}
@pytest.fixture
def lr_finder():
yield FastaiLRFinder()
@pytest.fixture
def dummy_engine(model, optimizer):
engine = create_supervised_trainer(model, optimizer, nn.MSELoss())
yield engine
@pytest.fixture
def dummy_engine_mnist(mnist_model, mnist_optimizer):
mnist_engine = create_supervised_trainer(mnist_model, mnist_optimizer, nn.CrossEntropyLoss())
yield mnist_engine
@pytest.fixture
def dummy_engine_mulitple_param_groups(model_multiple_param_groups, optimizer_multiple_param_groups):
engine_multiple_param_groups = create_supervised_trainer(
model_multiple_param_groups, optimizer_multiple_param_groups, nn.MSELoss()
)
yield engine_multiple_param_groups
@pytest.fixture
def dataloader():
yield torch.rand(100, 2, 10)
@pytest.fixture
def dataloader_plot():
yield torch.rand(500, 2, 10)
@pytest.fixture
def mnist_dataloader():
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root="/tmp", transform=data_transform, train=True), batch_size=256, shuffle=True
)
yield train_loader
def test_attach_incorrect_input_args(lr_finder, dummy_engine, model, optimizer, dataloader):
with pytest.raises(TypeError, match=r"Argument to_save should be a mapping"):
with lr_finder.attach(dummy_engine, to_save=123):
pass
with pytest.raises(TypeError, match=r"Object <class 'int'> should have `state_dict` method"):
with lr_finder.attach(dummy_engine, to_save={1: 2}):
pass
with pytest.raises(ValueError, match=r"Mapping to_save should contain 'optimizer' key"):
with lr_finder.attach(dummy_engine, to_save={"model": model}):
pass
to_save = {"model": model, "optimizer": optimizer}
with pytest.raises(ValueError, match=r"smooth_f is outside the range \[0, 1\]"):
with lr_finder.attach(dummy_engine, to_save=to_save, smooth_f=234):
pass
with pytest.raises(ValueError, match=r"diverge_th should be larger than 1"):
with lr_finder.attach(dummy_engine, to_save=to_save, diverge_th=0.0):
pass
with pytest.raises(TypeError, match=r"if provided, num_iter should be an integer"):
with lr_finder.attach(dummy_engine, to_save=to_save, num_iter=0.0):
pass
with pytest.raises(ValueError, match=r"if provided, num_iter should be positive"):
with lr_finder.attach(dummy_engine, to_save=to_save, num_iter=0):
pass
with pytest.raises(TypeError, match=r"Object to_save\['optimizer'] should be torch optimizer"):
with lr_finder.attach(dummy_engine, {"model": to_save["model"], "optimizer": to_save["model"]}):
pass
with pytest.raises(ValueError, match=r"step_mode should be 'exp' or 'linear'"):
with lr_finder.attach(dummy_engine, to_save=to_save, step_mode="abc"):
pass
with lr_finder.attach(dummy_engine, to_save) as trainer_with_finder:
trainer_with_finder.run(dataloader)
with pytest.raises(ValueError, match=r"skip_start cannot be negative"):
lr_finder.plot(skip_start=-1)
with pytest.raises(ValueError, match=r"skip_end cannot be negative"):
lr_finder.plot(skip_end=-1)
with pytest.raises(ValueError, match=r"Number of values of start_lr should be equal to optimizer values."):
with lr_finder.attach(dummy_engine, to_save, start_lr=[0.1, 0.1]):
pass
with pytest.raises(ValueError, match=r"Number of values of end_lr should be equal to optimizer values."):
with lr_finder.attach(dummy_engine, to_save, end_lr=[0.1, 0.1]):
pass
with pytest.raises(TypeError, match=r"start_lr should be a float or list of floats"):
with lr_finder.attach(dummy_engine, to_save, start_lr=1):
pass
with pytest.raises(TypeError, match=r"end_lr should be a float or list of floats"):
with lr_finder.attach(dummy_engine, to_save, end_lr=1):
pass
def test_attach_without_with(lr_finder, dummy_engine, to_save):
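    # attach() used outside a `with` block must not leave any handlers registered on the engine,
    # and the finder must refuse to give suggestions or plots before a run has happened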
_ = lr_finder.attach(dummy_engine, to_save=to_save)
for event in dummy_engine._event_handlers:
assert len(dummy_engine._event_handlers[event]) == 0
with lr_finder.attach(dummy_engine, to_save=to_save) as _:
assert any([len(dummy_engine._event_handlers[event]) != 0 for event in dummy_engine._event_handlers])
with pytest.raises(
RuntimeError, match=r"learning rate finder didn't run yet so lr_suggestion can't be returned"
):
lr_finder.lr_suggestion()
with pytest.raises(RuntimeError, match=r"learning rate finder didn't run yet so results can't be plotted"):
lr_finder.plot()
def test_with_attach(lr_finder, to_save, dummy_engine, dataloader):
with lr_finder.attach(dummy_engine, to_save=to_save) as trainer_with_finder:
trainer_with_finder.run(dataloader)
assert lr_finder.get_results() is not None
for event in dummy_engine._event_handlers:
assert len(dummy_engine._event_handlers[event]) == 0
def test_wrong_values_start_lr_and_end_lr(
lr_finder, dummy_engine, to_save, dummy_engine_mulitple_param_groups, to_save_mulitple_param_groups
):
with pytest.raises(ValueError, match=r"start_lr must be less than end_lr"):
with lr_finder.attach(dummy_engine, to_save=to_save, start_lr=10.0, end_lr=1.0):
pass
with pytest.raises(ValueError, match=r"start_lr must be less than end_lr"):
with lr_finder.attach(
dummy_engine_mulitple_param_groups,
to_save=to_save_mulitple_param_groups,
start_lr=[1.0, 10.0, 5.0],
end_lr=[10.0, 10.0, 10.0],
):
pass
def test_model_optimizer_reset(lr_finder, to_save, dummy_engine, dataloader):
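    # after the lr finder run, the model, optimizer and trainer state must be restored
    # to what they were before attaching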
optimizer = to_save["optimizer"]
model = to_save["model"]
init_optimizer_sd = copy.deepcopy(optimizer.state_dict())
init_model_sd = copy.deepcopy(model.state_dict())
init_trainer_sd = copy.deepcopy(dummy_engine.state_dict())
with pytest.warns(UserWarning, match=r"Run completed without loss diverging"):
with lr_finder.attach(dummy_engine, to_save=to_save, diverge_th=float("inf")) as trainer_with_finder:
trainer_with_finder.run(dataloader)
assert init_optimizer_sd == optimizer.state_dict()
for tensor1, tensor2 in zip(init_model_sd.values(), model.state_dict().values()):
assert torch.all(torch.eq(tensor1, tensor2))
assert init_trainer_sd == dummy_engine.state_dict()
def test_lr_policy(lr_finder, to_save, dummy_engine, dataloader):
with lr_finder.attach(dummy_engine, to_save=to_save, step_mode="linear") as trainer_with_finder:
trainer_with_finder.run(dataloader)
lr = lr_finder.get_results()["lr"]
assert all([lr[i - 1] < lr[i] for i in range(1, len(lr))])
with lr_finder.attach(dummy_engine, to_save=to_save, step_mode="exp") as trainer_with_finder:
trainer_with_finder.run(dataloader)
lr = lr_finder.get_results()["lr"]
assert all([lr[i - 1] < lr[i] for i in range(1, len(lr))])
@pytest.mark.parametrize("step_mode", ["exp", "linear"])
def test_multiple_optimizers(
lr_finder, dummy_engine_mulitple_param_groups, to_save_mulitple_param_groups, dataloader, step_mode
):
start_lr = [0.1, 0.1, 0.01]
end_lr = [1.0, 1.0, 1.0]
with lr_finder.attach(
dummy_engine_mulitple_param_groups,
to_save_mulitple_param_groups,
start_lr=start_lr,
end_lr=end_lr,
step_mode=step_mode,
) as trainer:
trainer.run(dataloader)
groups_lrs = lr_finder.get_results()["lr"]
    # every param group's lr should increase monotonically over the iterations
    for group_idx in range(len(start_lr)):
        assert all(groups_lrs[i - 1][group_idx] < groups_lrs[i][group_idx] for i in range(1, len(groups_lrs)))
def assert_output_sizes(lr_finder, dummy_engine):
iteration = dummy_engine.state.iteration
lr_finder_results = lr_finder.get_results()
lr, loss = lr_finder_results["lr"], lr_finder_results["loss"]
assert len(lr) == len(loss) == iteration
def test_num_iter_is_none(lr_finder, to_save, dummy_engine, dataloader):
with pytest.warns(UserWarning, match=r"Run completed without loss diverging"):
with lr_finder.attach(dummy_engine, to_save=to_save, diverge_th=float("inf")) as trainer_with_finder:
trainer_with_finder.run(dataloader)
assert_output_sizes(lr_finder, dummy_engine)
assert dummy_engine.state.iteration == len(dataloader)
def test_num_iter_is_enough(lr_finder, to_save, dummy_engine, dataloader):
with pytest.warns(UserWarning, match=r"Run completed without loss diverging"):
with lr_finder.attach(
dummy_engine, to_save=to_save, num_iter=50, diverge_th=float("inf")
) as trainer_with_finder:
trainer_with_finder.run(dataloader)
assert_output_sizes(lr_finder, dummy_engine)
# -1 because it terminates when state.iteration > num_iter
assert dummy_engine.state.iteration - 1 == 50
def test_num_iter_is_not_enough(lr_finder, to_save, dummy_engine, dataloader):
with lr_finder.attach(dummy_engine, to_save, num_iter=150, diverge_th=float("inf")) as trainer_with_finder:
with pytest.warns(UserWarning):
trainer_with_finder.run(dataloader)
assert_output_sizes(lr_finder, dummy_engine)
assert dummy_engine.state.iteration != len(dataloader)
assert dummy_engine.state.iteration == 150
def test_detach_terminates(lr_finder, to_save, dummy_engine, dataloader):
with lr_finder.attach(dummy_engine, to_save, end_lr=100.0, diverge_th=2) as trainer_with_finder:
trainer_with_finder.run(dataloader)
dummy_engine.run(dataloader, max_epochs=3)
assert dummy_engine.state.epoch == 3
def test_different_num_iters(lr_finder, to_save, dummy_engine, dataloader):
with pytest.warns(UserWarning, match=r"Run completed without loss diverging"):
with lr_finder.attach(dummy_engine, to_save, num_iter=200, diverge_th=float("inf")) as trainer_with_finder:
trainer_with_finder.run(dataloader)
assert trainer_with_finder.state.iteration == 200 # num_iter
with pytest.warns(UserWarning, match=r"Run completed without loss diverging"):
with lr_finder.attach(dummy_engine, to_save, num_iter=1000, diverge_th=float("inf")) as trainer_with_finder:
trainer_with_finder.run(dataloader)
assert trainer_with_finder.state.iteration == 1000 # num_iter
@pytest.mark.parametrize("step_mode", ["exp", "linear"])
def test_start_lr(lr_finder, to_save, dummy_engine, dataloader, step_mode):
with lr_finder.attach(
dummy_engine, to_save, start_lr=0.01, end_lr=10.0, num_iter=5, step_mode=step_mode, diverge_th=1
) as trainer_with_finder:
trainer_with_finder.run(dataloader)
history = lr_finder.get_results()
if step_mode == "exp":
assert 0.01 < history["lr"][0] < 0.16
else:
assert pytest.approx(history["lr"][0]) == 0.01
def test_engine_output_type(lr_finder, dummy_engine, optimizer):
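    # _log_lr_and_loss accepts only floats or single-element torch tensors as engine output;
    # lists, tuples and multi-element tensors must raise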
from ignite.handlers.param_scheduler import PiecewiseLinear
dummy_engine.state.iteration = 1
dummy_engine.state.output = [10]
with pytest.raises(TypeError, match=r"output of the engine should be of type float or 0d torch.Tensor"):
lr_finder._log_lr_and_loss(dummy_engine, output_transform=lambda x: x, smooth_f=0, diverge_th=1)
dummy_engine.state.output = (10, 5)
with pytest.raises(TypeError, match=r"output of the engine should be of type float or 0d torch.Tensor"):
lr_finder._log_lr_and_loss(dummy_engine, output_transform=lambda x: x, smooth_f=0, diverge_th=1)
dummy_engine.state.output = torch.tensor([1, 2], dtype=torch.float32)
with pytest.raises(ValueError, match=r"if output of the engine is torch.Tensor"):
lr_finder._log_lr_and_loss(dummy_engine, output_transform=lambda x: x, smooth_f=0, diverge_th=1)
lr_finder._lr_schedule = PiecewiseLinear(
optimizer, param_name="lr", milestones_values=[(0, optimizer.param_groups[0]["lr"]), (100, 10)]
)
dummy_engine.state.output = torch.tensor(10.0, dtype=torch.float32)
lr_finder._history = {"lr": [], "loss": []}
lr_finder._log_lr_and_loss(dummy_engine, output_transform=lambda x: x, smooth_f=0, diverge_th=1)
loss = lr_finder._history["loss"][-1]
assert type(loss) is float
dummy_engine.state.output = torch.tensor([10.0], dtype=torch.float32)
lr_finder._history = {"lr": [], "loss": []}
lr_finder._log_lr_and_loss(dummy_engine, output_transform=lambda x: x, smooth_f=0, diverge_th=1)
loss = lr_finder._history["loss"][-1]
assert type(loss) is float
def test_lr_suggestion_unexpected_curve(lr_finder, to_save, dummy_engine, dataloader):
with lr_finder.attach(dummy_engine, to_save) as trainer_with_finder:
trainer_with_finder.run(dataloader)
lr_finder._history["loss"].insert(0, 0)
with pytest.raises(
RuntimeError, match=r"FastaiLRFinder got unexpected curve shape, the curve should be somehow U-shaped"
):
lr_finder.lr_suggestion()
def test_lr_suggestion_single_param_group(lr_finder):
import numpy as np
noise = 0.05
lr_finder._history["loss"] = np.linspace(-5.0, 5.0, num=100) ** 2 + noise
lr_finder._history["lr"] = np.linspace(0.01, 10, num=100)
    # lr_finder.lr_suggestion() normally returns a plain value, but since loss and lr are
    # assigned here as arrays instead of lists, it returns an array/tensor element (hence .item() below)
suggested_lr = lr_finder.lr_suggestion()
assert pytest.approx(suggested_lr.item()) == 0.110909089
def test_lr_suggestion_multiple_param_groups(lr_finder):
import numpy as np
noise = 0.06
lr_finder._history["loss"] = np.linspace(-5.0, 5, num=50) ** 2 + noise
# 2 param_groups
lr_finder._history["lr"] = np.linspace(0.01, 10, num=100).reshape(50, 2)
    # lr_finder.lr_suggestion() normally returns one value per param group, but since loss and lr
    # are assigned here as arrays instead of lists, it returns array/tensor elements (hence .item() below)
suggested_lrs = lr_finder.lr_suggestion()
assert pytest.approx(suggested_lrs[0].item()) == 0.21181818
assert pytest.approx(suggested_lrs[1].item()) == 0.31272727
def test_lr_suggestion_mnist(lr_finder, mnist_to_save, dummy_engine_mnist, mnist_dataloader):
max_iters = 50
with lr_finder.attach(dummy_engine_mnist, mnist_to_save, diverge_th=2, step_mode="linear") as trainer_with_finder:
with trainer_with_finder.add_event_handler(
Events.ITERATION_COMPLETED(once=max_iters), lambda _: trainer_with_finder.terminate()
):
trainer_with_finder.run(mnist_dataloader)
assert 1e-4 <= lr_finder.lr_suggestion() <= 2
def test_apply_suggested_lr_unmatched_optimizers(
lr_finder, mnist_to_save, dummy_engine_mnist, optimizer_multiple_param_groups, mnist_dataloader
):
with lr_finder.attach(dummy_engine_mnist, mnist_to_save) as trainer_with_finder:
trainer_with_finder.run(mnist_dataloader)
sug_lr = lr_finder.lr_suggestion()
with pytest.raises(RuntimeError, match=r"The number of parameter groups does not match"):
lr_finder.apply_suggested_lr(optimizer_multiple_param_groups)
def test_apply_suggested_lr_single_param_groups(
lr_finder, mnist_to_save, dummy_engine_mnist, mnist_optimizer, mnist_dataloader
):
with lr_finder.attach(dummy_engine_mnist, mnist_to_save) as trainer_with_finder:
trainer_with_finder.run(mnist_dataloader)
sug_lr = lr_finder.lr_suggestion()
lr_finder.apply_suggested_lr(mnist_optimizer)
assert mnist_optimizer.param_groups[0]["lr"] == sug_lr
def test_apply_suggested_lr_multiple_param_groups(
lr_finder,
to_save_mulitple_param_groups,
dummy_engine_mulitple_param_groups,
optimizer_multiple_param_groups,
dataloader_plot,
):
with lr_finder.attach(dummy_engine_mulitple_param_groups, to_save_mulitple_param_groups) as trainer_with_finder:
trainer_with_finder.run(dataloader_plot)
sug_lr = lr_finder.lr_suggestion()
lr_finder.apply_suggested_lr(optimizer_multiple_param_groups)
for i in range(len(sug_lr)):
assert optimizer_multiple_param_groups.param_groups[i]["lr"] == sug_lr[i]
def test_no_matplotlib(no_site_packages, lr_finder):
with pytest.raises(ModuleNotFoundError, match=r"This method requires matplotlib to be installed"):
lr_finder.plot()
def test_plot_single_param_group(dirname, lr_finder, mnist_to_save, dummy_engine_mnist, mnist_dataloader):
with lr_finder.attach(dummy_engine_mnist, mnist_to_save, end_lr=20.0, smooth_f=0.04) as trainer_with_finder:
trainer_with_finder.run(mnist_dataloader)
def _test(ax):
assert ax is not None
assert ax.get_xscale() == "log"
assert ax.get_xlabel() == "Learning rate"
assert ax.get_ylabel() == "Loss"
filepath = Path(dirname) / "dummy.jpg"
ax.figure.savefig(filepath)
assert filepath.exists()
filepath.unlink()
lr_finder.plot()
ax = lr_finder.plot(skip_end=0)
_test(ax)
# Passing axes object
from matplotlib import pyplot as plt
_, ax = plt.subplots()
lr_finder.plot(skip_end=0, ax=ax)
_test(ax)
def test_plot_multiple_param_groups(
dirname, lr_finder, to_save_mulitple_param_groups, dummy_engine_mulitple_param_groups, dataloader_plot
):
with lr_finder.attach(
dummy_engine_mulitple_param_groups, to_save_mulitple_param_groups, end_lr=20.0, smooth_f=0.04
) as trainer_with_finder:
trainer_with_finder.run(dataloader_plot)
def _test(ax):
assert ax is not None
assert ax.get_xscale() == "log"
assert ax.get_xlabel() == "Learning rate"
assert ax.get_ylabel() == "Loss"
filepath = Path(dirname) / "dummy_muliple_param_groups.jpg"
ax.figure.savefig(filepath)
assert filepath.exists()
filepath.unlink()
ax = lr_finder.plot(skip_start=0, skip_end=0)
_test(ax)
# Passing axes object
from matplotlib import pyplot as plt
_, ax = plt.subplots()
lr_finder.plot(skip_start=0, skip_end=0, ax=ax)
_test(ax)
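# Each rank reports a different loss value; _log_lr_and_loss is expected to all-reduce it, so the recorded loss should equal idist.all_reduce(loss).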
def _test_distrib_log_lr_and_loss(device):
from ignite.handlers import ParamScheduler
lr_finder = FastaiLRFinder()
_lr_schedule = MagicMock(spec=ParamScheduler)
# minimal setup for lr_finder to make _log_lr_and_loss work
rank = idist.get_rank()
loss = 0.01 * (rank + 1)
engine = Engine(lambda e, b: None)
engine.state.output = loss
engine.state.iteration = 1
lr_finder._lr_schedule = _lr_schedule
lr_finder._history["loss"] = []
lr_finder._history["lr"] = []
lr_finder._log_lr_and_loss(engine, output_transform=lambda x: x, smooth_f=0.1, diverge_th=10.0)
expected_loss = idist.all_reduce(loss)
assert pytest.approx(lr_finder._history["loss"][-1]) == expected_loss
def _test_distrib_integration_mnist(dirname, device):
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root="/tmp", transform=data_transform, train=True), batch_size=256, shuffle=True
)
class DummyModel(nn.Module):
def __init__(self, n_channels=10, out_channels=1, flatten_input=False):
super(DummyModel, self).__init__()
self.net = nn.Sequential(
nn.Flatten() if flatten_input else nn.Identity(), nn.Linear(n_channels, out_channels)
)
def forward(self, x):
return self.net(x)
model = DummyModel(n_channels=784, out_channels=10, flatten_input=True)
model = model.to(device)
optimizer = SGD(model.parameters(), lr=1e-4, momentum=0.0)
to_save = {"model": model, "optimizer": optimizer}
engine = create_supervised_trainer(model, optimizer, nn.CrossEntropyLoss(), device=device)
lr_finder = FastaiLRFinder()
with lr_finder.attach(engine, to_save) as trainer_with_finder:
trainer_with_finder.run(train_loader)
lr_finder.plot()
if idist.get_rank() == 0:
ax = lr_finder.plot(skip_end=0)
filepath = Path(dirname) / "distrib_dummy.jpg"
ax.figure.savefig(filepath)
assert filepath.exists()
sug_lr = lr_finder.lr_suggestion()
assert 1e-3 <= sug_lr <= 1
lr_finder.apply_suggested_lr(optimizer)
assert optimizer.param_groups[0]["lr"] == sug_lr
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(dirname, distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_log_lr_and_loss(device)
_test_distrib_integration_mnist(dirname, device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(dirname, distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_log_lr_and_loss(device)
_test_distrib_integration_mnist(dirname, device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Not on TPU device")
def test_distrib_single_device_xla(dirname):
device = idist.device()
assert "xla" in device.type
_test_distrib_log_lr_and_loss(device)
_test_distrib_integration_mnist(dirname, device)
def _test_distrib_log_lr_and_loss_xla_nprocs(index, dirname):
device = idist.device()
_test_distrib_log_lr_and_loss(device)
_test_distrib_integration_mnist(dirname, device)
import time
# hack to let all processes properly sync:
time.sleep(1)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Not on TPU device")
def test_distrib_xla_nprocs(dirname, xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_log_lr_and_loss_xla_nprocs, args=(dirname,), nprocs=n)
|
import pytest
from ignite.engine.engine import Engine, Events
from ignite.handlers import EpochOutputStore
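# EpochOutputStore collects engine.state.output for every iteration of an epoch; the tests below cover attaching, the optional output_transform, reset and exposing the data on the engine state.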
@pytest.fixture
def dummy_evaluator():
def dummy_process_function(engine, batch):
return 1, 0
dummy_evaluator = Engine(dummy_process_function)
return dummy_evaluator
@pytest.fixture
def eos():
return EpochOutputStore()
def test_no_transform(dummy_evaluator, eos):
eos.attach(dummy_evaluator)
dummy_evaluator.run(range(1))
assert eos.data == [(1, 0)]
def test_transform(dummy_evaluator):
eos = EpochOutputStore(output_transform=lambda x: x[0])
eos.attach(dummy_evaluator)
dummy_evaluator.run(range(1))
assert eos.data == [1]
def test_reset(dummy_evaluator, eos):
eos.attach(dummy_evaluator)
dummy_evaluator.run(range(2))
eos.reset()
assert eos.data == []
def test_update_one_iteration(dummy_evaluator, eos):
eos.attach(dummy_evaluator)
dummy_evaluator.run(range(1))
assert len(eos.data) == 1
def test_update_five_iterations(dummy_evaluator, eos):
eos.attach(dummy_evaluator)
dummy_evaluator.run(range(5))
assert len(eos.data) == 5
def test_attach(dummy_evaluator, eos):
eos.attach(dummy_evaluator)
assert dummy_evaluator.has_event_handler(eos.reset, Events.EPOCH_STARTED)
assert dummy_evaluator.has_event_handler(eos.update, Events.ITERATION_COMPLETED)
def test_store_data(dummy_evaluator, eos):
eos.attach(dummy_evaluator, name="eval_data")
dummy_evaluator.run(range(1))
assert dummy_evaluator.state.eval_data == eos.data
|
import numpy as np
import pytest
import torch
from ignite.engine import Engine, Events, State
from ignite.handlers import TerminateOnNan
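# Each case pairs an engine output with the expected should_terminate flag: a NaN/Inf anywhere in the output (including nested tuples/lists) should trigger termination, plain finite values should not.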
@pytest.mark.parametrize(
"state_output,should_terminate",
[
(1.0, False),
(torch.tensor(123.45), False),
(torch.asin(torch.tensor([1.0, 2.0, 0.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0])), True),
(torch.asin(torch.randn(4, 4)), True),
((10.0, 1.0 / torch.tensor([1.0, 2.0, 0.0, 3.0]), 1.0), True),
((1.0, torch.tensor(1.0), "abc"), False),
(1.0 / torch.randint(0, 2, size=(4, 4)).type(torch.float), True),
((float("nan"), 10.0), True),
(float("inf"), True),
([float("nan"), 10.0], True),
(np.array([1.0, 2.0]), False),
],
)
def test_terminate_on_nan_and_inf(state_output, should_terminate):
torch.manual_seed(12)
def update_fn(engine, batch):
pass
trainer = Engine(update_fn)
trainer.state = State()
h = TerminateOnNan()
trainer.state.output = state_output
if isinstance(state_output, np.ndarray):
h._output_transform = lambda x: x.tolist()
h(trainer)
assert trainer.should_terminate == should_terminate
def test_with_terminate_on_nan():
torch.manual_seed(12)
data = [1.0, 0.8, (torch.rand(4, 4), torch.rand(4, 4)), torch.rand(5), torch.asin(torch.randn(4, 4)), 0.0, 1.0]
def update_fn(engine, batch):
return batch
trainer = Engine(update_fn)
h = TerminateOnNan()
trainer.add_event_handler(Events.ITERATION_COMPLETED, h)
trainer.run(data, max_epochs=2)
assert trainer.state.iteration == 5
def test_with_terminate_on_inf():
torch.manual_seed(12)
data = [
1.0,
0.8,
torch.rand(4, 4),
(1.0 / torch.randint(0, 2, size=(4,)).type(torch.float), torch.tensor(1.234)),
torch.rand(5),
torch.asin(torch.randn(4, 4)),
0.0,
1.0,
]
def update_fn(engine, batch):
return batch
trainer = Engine(update_fn)
h = TerminateOnNan()
trainer.add_event_handler(Events.ITERATION_COMPLETED, h)
trainer.run(data, max_epochs=2)
assert trainer.state.iteration == 4
def test_without_terminate_on_nan_inf():
data = [1.0, 0.8, torch.rand(4, 4), (torch.rand(5), torch.rand(5, 4)), 0.0, 1.0]
def update_fn(engine, batch):
return batch
trainer = Engine(update_fn)
h = TerminateOnNan()
trainer.add_event_handler(Events.ITERATION_COMPLETED, h)
trainer.run(data, max_epochs=2)
assert trainer.state.iteration == len(data) * 2
|
import os
import stat
import warnings
from collections import OrderedDict
from collections.abc import Mapping
from pathlib import Path
from unittest.mock import MagicMock
import pytest
import torch
import torch.nn as nn
from packaging.version import Version
import ignite.distributed as idist
from ignite.engine import Engine, Events, State
from ignite.handlers import Checkpoint, DiskSaver, EarlyStopping, global_step_from_engine, ModelCheckpoint
from ignite.handlers.checkpoint import BaseSaveHandler
_PREFIX = "PREFIX"
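# Shared dummy modules plus a module-level model/optimizer pair reused across the checkpoint tests below.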
class DummyModel(nn.Module):
def __init__(self):
super(DummyModel, self).__init__()
self.net = nn.Linear(1, 1)
def forward(self, x):
return self.net(x)
model = DummyModel()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
class DummyPretrainedModel(nn.Module):
def __init__(self):
super(DummyPretrainedModel, self).__init__()
self.features = nn.Linear(4, 2, bias=False)
self.fc = nn.Linear(2, 1)
def forward(self, x):
x = self.features(x)
x = self.fc(x)
return x
def test_checkpoint_wrong_input():
with pytest.raises(TypeError, match=r"Argument `to_save` should be a dictionary"):
Checkpoint(12, lambda x: x, "prefix")
with pytest.raises(TypeError, match=r"Argument `to_save` should be a dictionary"):
Checkpoint([12], lambda x: x, "prefix")
to_save = {"model": model}
with pytest.raises(
TypeError,
match=r"Argument `save_handler` should be a string or Path object or callable or inherit from BaseSaveHandler",
):
Checkpoint(to_save, 12, "prefix")
with pytest.raises(TypeError, match=r"global_step_transform should be a function."):
Checkpoint(to_save, lambda x: x, score_function=lambda e: 123, score_name="acc", global_step_transform=123)
with pytest.raises(ValueError, match=r"Cannot have key 'checkpointer' if `include_self` is True"):
Checkpoint({"checkpointer": model}, lambda x: x, include_self=True)
class ImmutableMapping(Mapping):
def __getitem__(self, key):
return to_save[key]
def __iter__(self):
return iter(to_save)
def __len__(self):
return len(to_save)
with pytest.raises(TypeError, match="If `include_self` is True, then `to_save` must be mutable"):
Checkpoint(ImmutableMapping(), lambda x: x, include_self=True)
checkpoint = Checkpoint(to_save, lambda x: x)
with pytest.raises(AttributeError, match="Checkpoint's `save_handler` should be of type `DiskSaver`"):
checkpoint.reload_objects(to_save)
def test_save_handler_as_str(dirname):
to_save = {"model": model}
checkpointer = Checkpoint(to_save, save_handler=dirname)
assert isinstance(checkpointer.save_handler, DiskSaver)
def test_checkpoint_score_function_wrong_output():
to_save = {"model": model}
checkpointer = Checkpoint(to_save, lambda x: x, score_function=lambda e: {"1": 1}, score_name="acc")
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
with pytest.raises(ValueError, match=r"Output of score_function should be a number"):
checkpointer(trainer)
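# The parametrization below covers a single saved object and a model+optimizer bundle; `obj` is the payload the save handler should receive and `name` the expected checkpoint basename.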
@pytest.mark.parametrize(
"to_save, obj, name",
[
({"model": model}, model.state_dict(), "model"),
(
{"model": model, "optimizer": optimizer},
{"model": model.state_dict(), "optimizer": optimizer.state_dict()},
"checkpoint",
),
],
)
def test_checkpoint_default(to_save, obj, name):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(to_save, save_handler=save_handler)
assert checkpointer.last_checkpoint is None
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
checkpointer(trainer)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": None, "priority": 0}
save_handler.assert_called_with(obj, f"{name}_0.pt", metadata)
trainer.state.epoch = 12
trainer.state.iteration = 1234
checkpointer(trainer)
assert save_handler.call_count == 2
metadata["priority"] = 1234
save_handler.assert_called_with(obj, f"{name}_1234.pt", metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with(f"{name}_0.pt")
assert checkpointer.last_checkpoint == f"{name}_1234.pt"
@pytest.mark.parametrize(
"to_save, obj, name",
[
({"model": model}, model.state_dict(), "model"),
(
{"model": model, "optimizer": optimizer},
{"model": model.state_dict(), "optimizer": optimizer.state_dict()},
"checkpoint",
),
],
)
def test_checkpoint_include_self_state_dict(to_save, obj, name):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(to_save, save_handler=save_handler, include_self=True)
assert checkpointer.last_checkpoint is None
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
checkpointer(trainer)
assert save_handler.call_count == 1
fname = f"{name}_0.pt"
obj["checkpointer"] = OrderedDict([("saved", [(0, fname)])])
metadata = {"basename": name, "score_name": None, "priority": 0}
save_handler.assert_called_with(obj, fname, metadata)
# Swap object, state should be maintained
checkpointer2 = Checkpoint(to_save, save_handler=save_handler, include_self=True)
checkpointer2.load_state_dict(checkpointer.state_dict())
assert checkpointer2.last_checkpoint == fname
trainer.state.epoch = 12
trainer.state.iteration = 1234
checkpointer2(trainer)
assert save_handler.call_count == 2
metadata["priority"] = 1234
# This delete only happens if state was restored correctly.
save_handler.remove.assert_called_with(f"{name}_0.pt")
fname = f"{name}_1234.pt"
obj["checkpointer"] = OrderedDict([("saved", [(1234, fname)])])
save_handler.assert_called_with(obj, fname, metadata)
assert save_handler.remove.call_count == 1
assert checkpointer2.last_checkpoint == fname
def test_checkpoint_with_dp():
dp_model = nn.DataParallel(model)
to_save = {"model": dp_model}
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(to_save, save_handler=save_handler)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
checkpointer(trainer)
assert save_handler.call_count == 1
metadata = {"basename": "model", "score_name": None, "priority": 0}
save_handler.assert_called_with(model.state_dict(), "model_0.pt", metadata)
@pytest.mark.parametrize("filename_prefix", ["", "dummytask"])
@pytest.mark.parametrize(
"to_save, obj, name",
[
({"model": model}, model.state_dict(), "model"),
(
{"model": model, "optimizer": optimizer},
{"model": model.state_dict(), "optimizer": optimizer.state_dict()},
"checkpoint",
),
],
)
def test_checkpoint_with_global_step_transform(filename_prefix, to_save, obj, name):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(
to_save,
save_handler=save_handler,
filename_prefix=filename_prefix,
global_step_transform=lambda e, _: e.state.epoch,
)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=2, iteration=1)
checkpointer(trainer)
assert save_handler.call_count == 1
if len(filename_prefix) > 0:
filename_prefix += "_"
metadata = {"basename": f"{filename_prefix}{name}", "score_name": None, "priority": 2}
save_handler.assert_called_with(obj, f"{filename_prefix}{name}_2.pt", metadata)
trainer.state.epoch = 12
trainer.state.iteration = 1234
checkpointer(trainer)
assert save_handler.call_count == 2
metadata["priority"] = 12
save_handler.assert_called_with(obj, f"{filename_prefix}{name}_12.pt", metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with(f"{filename_prefix}{name}_2.pt")
assert checkpointer.last_checkpoint == f"{filename_prefix}{name}_12.pt"
@pytest.mark.parametrize(
"to_save, obj, name",
[
({"model": model}, model.state_dict(), "model"),
(
{"model": model, "optimizer": optimizer},
{"model": model.state_dict(), "optimizer": optimizer.state_dict()},
"checkpoint",
),
],
)
def test_checkpoint_with_score_function(to_save, obj, name):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(to_save, save_handler=save_handler, score_function=lambda e: e.state.score)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=1, iteration=1, score=0.77)
checkpointer(trainer)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": None, "priority": 0.77}
save_handler.assert_called_with(obj, f"{name}_0.7700.pt", metadata)
trainer.state.epoch = 12
trainer.state.iteration = 1234
trainer.state.score = 0.78
checkpointer(trainer)
assert save_handler.call_count == 2
metadata["priority"] = 0.78
save_handler.assert_called_with(obj, f"{name}_0.7800.pt", metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with(f"{name}_0.7700.pt")
assert checkpointer.last_checkpoint == f"{name}_0.7800.pt"
def test_checkpoint_with_score_name_only():
to_save = {"model": model}
obj = model.state_dict()
name = "model"
save_handler = MagicMock(spec=BaseSaveHandler)
trainer = Engine(lambda e, b: None)
evaluator = Engine(lambda e, b: None)
trainer.state = State(epoch=11, iteration=1)
checkpointer = Checkpoint(
to_save,
save_handler=save_handler,
global_step_transform=lambda _1, _2: trainer.state.epoch,
score_name="val_acc",
)
evaluator.state = State(epoch=1, iteration=1000, metrics={"val_acc": 0.77})
checkpointer(evaluator)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": "val_acc", "priority": 0.77}
save_handler.assert_called_with(obj, f"{name}_11_val_acc=0.7700.pt", metadata)
trainer.state.epoch = 12
evaluator.state.metrics["val_acc"] = 0.78
checkpointer(evaluator)
assert save_handler.call_count == 2
metadata["priority"] = 0.78
save_handler.assert_called_with(obj, f"{name}_12_val_acc=0.7800.pt", metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with(f"{name}_11_val_acc=0.7700.pt")
assert checkpointer.last_checkpoint == f"{name}_12_val_acc=0.7800.pt"
@pytest.mark.parametrize(
"to_save, obj, name",
[
({"model": model}, model.state_dict(), "model"),
(
{"model": model, "optimizer": optimizer},
{"model": model.state_dict(), "optimizer": optimizer.state_dict()},
"checkpoint",
),
],
)
def test_checkpoint_with_score_name_and_function(to_save, obj, name):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(
to_save, save_handler=save_handler, score_name="loss", score_function=lambda e: e.state.score
)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=1, iteration=1, score=-0.77)
checkpointer(trainer)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": "loss", "priority": -0.77}
save_handler.assert_called_with(obj, f"{name}_loss=-0.7700.pt", metadata)
trainer.state.epoch = 12
trainer.state.iteration = 1234
trainer.state.score = -0.76
checkpointer(trainer)
assert save_handler.call_count == 2
metadata["priority"] = -0.76
save_handler.assert_called_with(obj, f"{name}_loss=-0.7600.pt", metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with(f"{name}_loss=-0.7700.pt")
assert checkpointer.last_checkpoint == f"{name}_loss=-0.7600.pt"
def test_checkpoint_with_int_score():
def _test(to_save, obj, name, score_name=None):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(
to_save, save_handler=save_handler, score_name=score_name, score_function=lambda e: e.state.epoch
)
if score_name is None:
score_name = ""
else:
score_name += "="
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=1, iteration=1)
checkpointer(trainer)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": score_name[:-1] if len(score_name) > 0 else None, "priority": 1}
save_handler.assert_called_with(obj, f"{name}_{score_name}1.pt", metadata)
trainer.state.epoch = 12
trainer.state.iteration = 1234
checkpointer(trainer)
assert save_handler.call_count == 2
metadata["priority"] = 12
save_handler.assert_called_with(obj, f"{name}_{score_name}12.pt", metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with(f"{name}_{score_name}1.pt")
assert checkpointer.last_checkpoint == f"{name}_{score_name}12.pt"
model = DummyModel()
to_save = {"model": model}
_test(to_save, model.state_dict(), "model")
_test(to_save, model.state_dict(), "model", "epoch")
model = DummyModel()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
to_save = {"model": model, "optimizer": optimizer}
_test(to_save, {"model": model.state_dict(), "optimizer": optimizer.state_dict()}, "checkpoint")
_test(to_save, {"model": model.state_dict(), "optimizer": optimizer.state_dict()}, "checkpoint", "epoch")
def test_checkpoint_with_score_function_and_trainer_epoch():
to_save = {"model": model}
obj = model.state_dict()
name = "model"
save_handler = MagicMock(spec=BaseSaveHandler)
trainer = Engine(lambda e, b: None)
evaluator = Engine(lambda e, b: None)
trainer.state = State(epoch=11, iteration=1)
checkpointer = Checkpoint(
to_save,
save_handler=save_handler,
global_step_transform=lambda _1, _2: trainer.state.epoch,
score_function=lambda e: e.state.metrics["val_acc"],
)
evaluator.state = State(epoch=1, iteration=1000, metrics={"val_acc": 0.77})
checkpointer(evaluator)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": None, "priority": 0.77}
save_handler.assert_called_with(obj, f"{name}_11_0.7700.pt", metadata)
trainer.state.epoch = 12
evaluator.state.metrics["val_acc"] = 0.78
checkpointer(evaluator)
assert save_handler.call_count == 2
metadata["priority"] = 0.78
save_handler.assert_called_with(obj, f"{name}_12_0.7800.pt", metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with(f"{name}_11_0.7700.pt")
assert checkpointer.last_checkpoint == f"{name}_12_0.7800.pt"
def test_checkpoint_with_score_name_and_function_and_trainer_epoch():
to_save = {"model": model}
obj = model.state_dict()
name = "model"
save_handler = MagicMock(spec=BaseSaveHandler)
trainer = Engine(lambda e, b: None)
evaluator = Engine(lambda e, b: None)
trainer.state = State(epoch=11, iteration=1)
checkpointer = Checkpoint(
to_save,
save_handler=save_handler,
global_step_transform=lambda _1, _2: trainer.state.epoch,
score_name="val_acc",
score_function=lambda e: e.state.metrics["val_acc"],
)
evaluator.state = State(epoch=1, iteration=1000, metrics={"val_acc": 0.77})
checkpointer(evaluator)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": "val_acc", "priority": 0.77}
save_handler.assert_called_with(obj, f"{name}_11_val_acc=0.7700.pt", metadata)
trainer.state.epoch = 12
evaluator.state.metrics["val_acc"] = 0.78
checkpointer(evaluator)
assert save_handler.call_count == 2
metadata["priority"] = 0.78
save_handler.assert_called_with(obj, f"{name}_12_val_acc=0.7800.pt", metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with(f"{name}_11_val_acc=0.7700.pt")
assert checkpointer.last_checkpoint == f"{name}_12_val_acc=0.7800.pt"
def test_checkpoint_last_checkpoint():
save_handler = MagicMock(spec=BaseSaveHandler)
to_save = {"model": DummyModel()}
checkpointer = Checkpoint(to_save, save_handler=save_handler, n_saved=None)
trainer = Engine(lambda e, b: None)
for i in range(10):
trainer.state = State(epoch=1, iteration=i)
checkpointer(trainer)
assert save_handler.call_count == 10
assert checkpointer.last_checkpoint == "model_9.pt"
def test_checkpoint_last_checkpoint_on_score():
save_handler = MagicMock(spec=BaseSaveHandler)
to_save = {"model": DummyModel()}
checkpointer = Checkpoint(
to_save,
save_handler=save_handler,
n_saved=None,
score_name="val_acc",
score_function=lambda e: e.state.metrics["val_acc"],
)
trainer = Engine(lambda e, b: None)
val_acc = 0.0
for i in range(10):
val_acc = i * 0.1
trainer.state = State(epoch=1, iteration=i, metrics={"val_acc": val_acc})
checkpointer(trainer)
assert save_handler.call_count == 10
assert checkpointer.last_checkpoint == "model_val_acc=0.9000.pt"
def test_checkpoint_save_handler_callable():
def save_handler(c, f):
assert f == "model_12.pt"
to_save = {"model": DummyModel()}
checkpointer = Checkpoint(to_save, save_handler=save_handler)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=1, iteration=12)
checkpointer(trainer)
def test_model_checkpoint_args_validation(dirname):
existing = dirname / "existing_dir"
nonempty = dirname / "nonempty"
existing.mkdir(parents=True)
nonempty.mkdir(parents=True)
with open(nonempty / f"{_PREFIX}_name_0.pt", "w"):
pass
with pytest.raises(ValueError, match=r"with extension '.pt' are already present "):
ModelCheckpoint(nonempty, _PREFIX)
with pytest.raises(ValueError, match=r"Directory path '\S+' is not found"):
ModelCheckpoint(dirname / "non_existing_dir", _PREFIX, create_dir=False)
with pytest.raises(TypeError, match=r"global_step_transform should be a function"):
ModelCheckpoint(existing, _PREFIX, create_dir=False, global_step_transform=1234)
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False)
assert h.last_checkpoint is None
with pytest.raises(RuntimeError, match=r"No objects to checkpoint found."):
h(None, [])
def test_model_checkpoint_simple_recovery(dirname):
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=1)
model = DummyModel()
to_save = {"model": model}
h(engine, to_save)
fname = h.last_checkpoint
assert isinstance(fname, Path)
assert str(dirname / _PREFIX) in str(fname)
assert fname.exists()
loaded_objects = torch.load(fname)
assert loaded_objects == model.state_dict()
to_load = {"model": DummyModel()}
h.reload_objects(to_load=to_load, global_step=1)
assert to_load["model"].state_dict() == model.state_dict()
@pytest.mark.parametrize("ext, require_empty", [(".txt", True), (".pt", False)])
def test_model_checkpoint_simple_recovery_from_existing_non_empty(ext, require_empty, dirname):
previous_fname = dirname / f"{_PREFIX}_obj_{1}{ext}"
with open(previous_fname, "w") as f:
f.write("test")
h = ModelCheckpoint(dirname, _PREFIX, create_dir=True, require_empty=require_empty)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=1)
to_save = {"model": model}
h(engine, to_save)
fname = h.last_checkpoint
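# the saved checkpoint always uses the .pt extension, regardless of the pre-existing file's extension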
ext = ".pt"
assert isinstance(fname, Path)
assert dirname / f"{_PREFIX}_model_{1}{ext}" == fname
assert fname.exists()
assert previous_fname.exists()
loaded_objects = torch.load(fname)
assert loaded_objects == model.state_dict()
to_load = {"model": DummyModel()}
h.reload_objects(to_load=to_load, global_step=1)
assert to_load["model"].state_dict() == model.state_dict()
fname.unlink()
def test_model_checkpoint_invalid_save_handler(dirname):
h = ModelCheckpoint(dirname, _PREFIX)
to_save = {"model": DummyModel()}
# Redefine save_handler
h.save_handler = lambda x, y: None
h(Engine(lambda x, y: None), to_save)
with pytest.raises(
RuntimeError, match=rf"Internal error, save_handler should be DiskSaver, but has {type(h.save_handler)}."
):
h.last_checkpoint
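# With atomic=True a failing save (non-serializable object) should not leave a file behind, while atomic=False creates the file regardless; successful saves should produce a file readable by user, group and others (see the permission check referencing issue #1876).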
def test_disk_saver_atomic(dirname):
model = DummyModel()
to_save_serializable = {"model": model}
to_save_non_serializable = {"model": lambda x: x}
def _test_existence(atomic, _to_save, expected):
saver = DiskSaver(dirname, atomic=atomic, create_dir=False, require_empty=False)
fname = "test.pt"
try:
with warnings.catch_warnings():
# Ignore torch/serialization.py:292: UserWarning: Couldn't retrieve source code for container of type
# DummyModel. It won't be checked for correctness upon loading.
warnings.simplefilter("ignore", category=UserWarning)
saver(_to_save, fname)
except Exception:
pass
fp = saver.dirname / fname
assert fp.exists() == expected
if expected:
# related to https://github.com/pytorch/ignite/issues/1876
mode = stat.filemode(fp.stat().st_mode)
assert [mode[1], mode[4], mode[7]] == ["r", "r", "r"], mode
if expected:
saver.remove(fname)
_test_existence(atomic=False, _to_save=to_save_serializable, expected=True)
_test_existence(atomic=False, _to_save=to_save_non_serializable, expected=True)
_test_existence(atomic=True, _to_save=to_save_serializable, expected=True)
_test_existence(atomic=True, _to_save=to_save_non_serializable, expected=False)
@pytest.mark.skipif(
Version(torch.__version__) < Version("1.4.0"), reason="Zipfile serialization was introduced in 1.4.0"
)
def test_disk_saver_zipfile_serialization_keyword(dirname):
model = DummyModel()
to_save = {"model": model}
saver = DiskSaver(dirname, create_dir=False, _use_new_zipfile_serialization=False)
fname = "test.pt"
saver(to_save, fname)
fp = saver.dirname / fname
assert fp.exists()
saver.remove(fname)
def test_disk_saver_unknown_keyword(dirname):
model = DummyModel()
to_save = {"model": model}
saver = DiskSaver(dirname, create_dir=False, unknown_keyword="")
fname = "test.pt"
with pytest.raises(TypeError, match=r"got an unexpected keyword argument 'unknown_keyword'"):
saver(to_save, fname)
def test_last_k(dirname):
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=0)
model = DummyModel()
to_save = {"model": model}
h(engine, to_save)
for i in range(1, 9):
engine.state.iteration = i
h(engine, to_save)
expected = [f"{_PREFIX}_model_{i}.pt" for i in [7, 8]]
assert sorted(os.listdir(dirname)) == expected, f"{sorted(os.listdir(dirname))} vs {expected}"
def test_disabled_n_saved(dirname):
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=None)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=0)
model = DummyModel()
to_save = {"model": model}
num_iters = 100
for i in range(num_iters):
engine.state.iteration = i
h(engine, to_save)
saved_files = sorted(os.listdir(dirname))
assert len(saved_files) == num_iters, f"{saved_files}"
expected = sorted([f"{_PREFIX}_model_{i}.pt" for i in range(num_iters)])
assert saved_files == expected, f"{saved_files} vs {expected}"
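# With n_saved=2 and a score_function, only the two best-scoring checkpoints (scores 1.2 and 3.1 here) should remain on disk.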
def test_best_k(dirname):
scores = iter([1.2, -2.0, 3.1, -4.0])
def score_function(_):
return next(scores)
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2, score_function=score_function)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=0)
model = DummyModel()
to_save = {"model": model}
for _ in range(4):
h(engine, to_save)
expected = [f"{_PREFIX}_model_{i:.4f}.pt" for i in [1.2, 3.1]]
assert sorted(os.listdir(dirname)) == expected
def test_best_k_with_suffix(dirname):
scores = [0.3456789, 0.1234, 0.4567, 0.134567]
scores_iter = iter(scores)
def score_function(engine):
return next(scores_iter)
h = ModelCheckpoint(
dirname, _PREFIX, create_dir=False, n_saved=2, score_function=score_function, score_name="val_loss"
)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=0)
model = DummyModel()
to_save = {"model": model}
for _ in range(4):
engine.state.epoch += 1
h(engine, to_save)
expected = [f"{_PREFIX}_model_val_loss={scores[e - 1]:.4}.pt" for e in [1, 3]]
assert sorted(os.listdir(dirname)) == expected
def test_removes_each_score_at_most_once(dirname):
scores = [0, 1, 1, 2, 3]
scores_iter = iter(scores)
def score_function(_):
return next(scores_iter)
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2, score_function=score_function)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=0)
model = DummyModel()
to_save = {"model": model}
for _ in range(len(scores)):
h(engine, to_save)
# If a score was removed multiple times, the code above would have raised a
# FileNotFoundError. So this just tests the absence of such a failure
# without further assertions.
def test_with_engine(dirname):
def update_fn(_1, _2):
pass
name = "model"
engine = Engine(update_fn)
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2)
model = DummyModel()
to_save = {"model": model}
engine.add_event_handler(Events.EPOCH_COMPLETED, handler, to_save)
engine.run([0, 1], max_epochs=4)
expected = sorted([f"{_PREFIX}_{name}_{i}.pt" for i in [3 * 2, 4 * 2]])
assert sorted(os.listdir(dirname)) == expected
def test_with_state_dict(dirname):
def update_fn(_1, _2):
pass
engine = Engine(update_fn)
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
model = DummyModel()
to_save = {"model": model}
engine.add_event_handler(Events.EPOCH_COMPLETED, handler, to_save)
engine.run([0, 1, 2], max_epochs=4)
saved_model = dirname / os.listdir(dirname)[0]
load_model = torch.load(saved_model)
assert not isinstance(load_model, DummyModel)
assert isinstance(load_model, dict)
model_state_dict = model.state_dict()
loaded_model_state_dict = load_model
for key in model_state_dict.keys():
assert key in loaded_model_state_dict
model_value = model_state_dict[key]
loaded_model_value = loaded_model_state_dict[key]
assert model_value.numpy() == loaded_model_value.numpy()
def test_valid_state_dict_save(dirname):
model = DummyModel()
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=0)
to_save = {"name": 42}
with pytest.raises(TypeError, match=r"should have `state_dict` method"):
h(engine, to_save)
to_save = {"name": model}
try:
h(engine, to_save)
except ValueError:
pytest.fail("Unexpected ValueError")
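# Trains a tiny model for a few epochs, checkpoints model/optimizer/lr_scheduler state dicts via ModelCheckpoint, then reloads the saved file and compares every entry against the live objects.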
def _test_save_model_optimizer_lr_scheduler_with_state_dict(device, dirname, just_on_zero_rank=False):
torch.manual_seed(23)
model = DummyModel().to(device)
optim = torch.optim.SGD(model.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=0.5)
def update_fn(engine, batch):
x = torch.rand((4, 1)).to(device)
optim.zero_grad()
y = model(x)
# Below code raises: RuntimeError: torch_xla/csrc/tensor_impl.cpp:144 : XLA tensors do not have storage
# Probably related to https://github.com/pytorch/xla/issues/2576
# loss = y.pow(2.0).sum()
loss = y.sum()
print(loss.device, y.device, x.device)
loss.backward()
if idist.has_xla_support:
import torch_xla.core.xla_model as xm
xm.optimizer_step(optim, barrier=True)
else:
optim.step()
lr_scheduler.step()
engine = Engine(update_fn)
if (not just_on_zero_rank) or (just_on_zero_rank and idist.get_rank() == 0):
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=True, n_saved=1)
engine.add_event_handler(
Events.EPOCH_COMPLETED, handler, {"model": model, "optimizer": optim, "lr_scheduler": lr_scheduler}
)
engine.run([0, 1, 2], max_epochs=4)
idist.barrier()
saved_objects = sorted(os.listdir(dirname))
# saved object is ['PREFIX_checkpoint_3.pt', ]
saved_checkpoint = dirname / saved_objects[0]
if idist.has_xla_support:
device = "cpu"
loaded_obj = torch.load(saved_checkpoint, map_location=device)
for f in ["model", "optimizer", "lr_scheduler"]:
assert f in loaded_obj
loaded_model_state_dict = loaded_obj["model"]
loaded_optimizer_state_dict = loaded_obj["optimizer"]
loaded_lr_scheduler_state_dict = loaded_obj["lr_scheduler"]
assert isinstance(loaded_model_state_dict, dict)
assert isinstance(loaded_optimizer_state_dict, dict)
assert isinstance(loaded_lr_scheduler_state_dict, dict)
# Explicitly move the model to CPU before comparing state dicts
model_state_dict = model.cpu().state_dict()
for key in model_state_dict.keys():
assert key in loaded_model_state_dict
model_value = model_state_dict[key]
loaded_model_value = loaded_model_state_dict[key]
assert model_value.cpu().numpy() == loaded_model_value.cpu().numpy()
optim_state_dict = optim.state_dict()
for key in optim_state_dict.keys():
assert key in loaded_optimizer_state_dict
optim_value = optim_state_dict[key]
loaded_optim_value = loaded_optimizer_state_dict[key]
if idist.get_rank() == 0:
assert optim_value == loaded_optim_value
lr_scheduler_state_dict = lr_scheduler.state_dict()
for key in lr_scheduler_state_dict.keys():
assert key in loaded_lr_scheduler_state_dict
lr_scheduler_value = lr_scheduler_state_dict[key]
loaded_lr_scheduler_value = loaded_lr_scheduler_state_dict[key]
assert lr_scheduler_value == loaded_lr_scheduler_value
def test_save_model_optimizer_lr_scheduler_with_state_dict(dirname):
_test_save_model_optimizer_lr_scheduler_with_state_dict("cpu", dirname)
def _test_save_model_optimizer_lr_scheduler_with_validation(device, dirname, just_on_zero_rank=False):
torch.manual_seed(23)
def _build_objects(acc_list):
model = DummyModel().to(device)
optim = torch.optim.SGD(model.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=0.5)
def update_fn(engine, batch):
x = torch.rand((4, 1)).to(device)
optim.zero_grad()
y = model(x)
loss = y.pow(2.0).sum()
loss.backward()
if idist.has_xla_support:
import torch_xla.core.xla_model as xm
xm.optimizer_step(optim, barrier=True)
else:
optim.step()
lr_scheduler.step()
trainer = Engine(update_fn)
evaluator = Engine(lambda e, b: None)
acc_iter = iter(acc_list)
@evaluator.on(Events.EPOCH_COMPLETED)
def setup_result():
evaluator.state.metrics["accuracy"] = next(acc_iter)
@trainer.on(Events.EPOCH_COMPLETED)
def run_eval():
evaluator.run([0, 1, 2])
def score_function(engine):
return engine.state.metrics["accuracy"]
save_handler = DiskSaver(dirname, create_dir=True, require_empty=False)
early_stop = EarlyStopping(score_function=score_function, patience=2, trainer=trainer)
evaluator.add_event_handler(Events.COMPLETED, early_stop)
checkpointer = Checkpoint(
{
"trainer": trainer,
"model": model,
"optim": optim,
"lr_scheduler": lr_scheduler,
"early_stop": early_stop,
},
save_handler,
include_self=True,
global_step_transform=global_step_from_engine(trainer),
)
evaluator.add_event_handler(Events.COMPLETED, checkpointer)
return trainer, evaluator, model, optim, lr_scheduler, early_stop, checkpointer
trainer, evaluator, model, optim, scheduler, early, checkpointer = _build_objects([0.2, 0.3, 0.2])
trainer.run([0, 1, 2], max_epochs=3)
saved_objects = sorted(os.listdir(dirname))
saved_checkpoint = dirname / saved_objects[0]
loaded_obj = torch.load(saved_checkpoint, map_location=device)
for f in ["trainer", "model", "optim", "lr_scheduler", "early_stop", "checkpointer"]:
assert f in loaded_obj
trainer2, evaluator2, model2, optim2, scheduler2, early2, checkpointer2 = _build_objects([0.1, 0.1, 0.1])
Checkpoint.load_objects(
{
"trainer": trainer2,
"model": model2,
"optim": optim2,
"lr_scheduler": scheduler2,
"early_stop": early2,
"checkpointer": checkpointer2,
},
loaded_obj,
)
assert checkpointer2.last_checkpoint == checkpointer.last_checkpoint
model_state_dict = model.cpu().state_dict()
loaded_model_state_dict = model2.cpu().state_dict()
for key in model_state_dict.keys():
assert key in loaded_model_state_dict
model_value = model_state_dict[key]
loaded_model_value = loaded_model_state_dict[key]
assert model_value.cpu().numpy() == loaded_model_value.cpu().numpy()
optim_state_dict = optim.state_dict()
loaded_optimizer_state_dict = optim2.state_dict()
# "params" contains tensor IDs, which are different
del optim_state_dict["param_groups"][0]["params"]
del loaded_optimizer_state_dict["param_groups"][0]["params"]
for key in optim_state_dict.keys():
assert key in loaded_optimizer_state_dict
optim_value = optim_state_dict[key]
loaded_optim_value = loaded_optimizer_state_dict[key]
if idist.get_rank() == 0:
assert optim_value == loaded_optim_value
def _check_state_dict(original, loaded):
original_state_dict = original.state_dict()
loaded_state_dict = loaded.state_dict()
for key in original_state_dict.keys():
assert key in loaded_state_dict
original_value = original_state_dict[key]
loaded_value = loaded_state_dict[key]
assert original_value == loaded_value
_check_state_dict(trainer, trainer2)
_check_state_dict(scheduler, scheduler2)
_check_state_dict(early, early2)
_check_state_dict(checkpointer, checkpointer2)
trainer2.run([0, 1, 2], max_epochs=6)
# early stopping should have triggered
assert trainer2.state.epoch == 4
# If Checkpoint's state was restored correctly, it should continue to respect n_saved
# and delete old checkpoints, and have the correct last_checkpoint.
assert os.listdir(dirname) == ["checkpoint_4.pt"]
assert checkpointer2.last_checkpoint == dirname / "checkpoint_4.pt"
def test_save_model_optimizer_lr_scheduler_with_validation(dirname):
_test_save_model_optimizer_lr_scheduler_with_validation("cpu", dirname)
def test_checkpoint_load_objects():
with pytest.raises(TypeError, match=r"Argument checkpoint should be a string or a dictionary"):
Checkpoint.load_objects({}, [])
with pytest.raises(TypeError, match=r"should have `load_state_dict` method"):
Checkpoint.load_objects({"a": None}, {"a": None})
model = DummyModel()
to_load = {"model": model, "another_model": model}
with pytest.raises(ValueError, match=r"from `to_load` is not found in the checkpoint"):
Checkpoint.load_objects(to_load, {})
model = DummyModel()
to_load = {"model": model}
model2 = DummyModel()
chkpt = {"model": model2.state_dict()}
Checkpoint.load_objects(to_load, chkpt)
assert model.state_dict() == model2.state_dict()
def test_checkpoint_load_objects_from_saved_file(dirname):
def _get_single_obj_to_save():
model = DummyModel()
to_save = {"model": model}
return to_save
def _get_multiple_objs_to_save():
model = DummyModel()
optim = torch.optim.SGD(model.parameters(), lr=0.001)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=0.5)
to_save = {"model": model, "optimizer": optim, "lr_scheduler": lr_scheduler}
return to_save
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
# case: load from filepath
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
to_save = _get_multiple_objs_to_save()
handler(trainer, to_save)
fname = handler.last_checkpoint
assert isinstance(fname, Path)
assert str(dirname / _PREFIX) in str(fname)
assert fname.exists()
Checkpoint.load_objects(to_save, str(fname))
Checkpoint.load_objects(to_save, fname)
fname.unlink()
# case: load multiple objects from an already-loaded checkpoint dict
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
to_save = _get_multiple_objs_to_save()
handler(trainer, to_save)
fname = handler.last_checkpoint
assert isinstance(fname, Path)
assert str(dirname / _PREFIX) in str(fname)
assert fname.exists()
loaded_objects = torch.load(fname)
Checkpoint.load_objects(to_save, loaded_objects)
fname.unlink()
# case: saved multiple objects, loaded single object
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
to_save = _get_multiple_objs_to_save()
handler(trainer, to_save)
fname = handler.last_checkpoint
assert isinstance(fname, Path)
assert str(dirname / _PREFIX) in str(fname)
assert fname.exists()
loaded_objects = torch.load(fname)
to_load = {"model": to_save["model"]}
Checkpoint.load_objects(to_load, loaded_objects)
fname.unlink()
# case: single object
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
to_save = _get_single_obj_to_save()
handler(trainer, to_save)
fname = handler.last_checkpoint
assert isinstance(fname, Path)
assert str(dirname / _PREFIX) in str(fname)
assert fname.exists()
loaded_objects = torch.load(fname)
Checkpoint.load_objects(to_save, loaded_objects)
fname.unlink()
def test_load_checkpoint_with_different_num_classes(dirname):
model = DummyPretrainedModel()
to_save_single_object = {"model": model}
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
handler(trainer, to_save_single_object)
fname = handler.last_checkpoint
loaded_checkpoint = torch.load(fname)
to_load_single_object = {"pretrained_features": model.features}
with pytest.raises(RuntimeError):
Checkpoint.load_objects(to_load_single_object, loaded_checkpoint)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
Checkpoint.load_objects(to_load_single_object, loaded_checkpoint, strict=False, blah="blah")
loaded_weights = to_load_single_object["pretrained_features"].state_dict()["weight"]
assert torch.all(model.state_dict()["features.weight"].eq(loaded_weights))
def test_disksaver_wrong_input(dirname):
with pytest.raises(ValueError, match=r"Directory path '\S+' is not found"):
DiskSaver("/tmp/non-existing-folder", create_dir=False)
def _test(ext):
previous_fname = dirname / f"{_PREFIX}_obj_{1}{ext}"
with open(previous_fname, "w") as f:
f.write("test")
with pytest.raises(ValueError, match=r"with extension '.pt' are already present"):
DiskSaver(dirname, require_empty=True)
_test(".pt")
def _test_checkpoint_with_ddp(device):
torch.manual_seed(0)
model = DummyModel().to(device)
device_ids = None if "cpu" in device.type else [device]
ddp_model = nn.parallel.DistributedDataParallel(model, device_ids=device_ids)
to_save = {"model": ddp_model}
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(to_save, save_handler=save_handler)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
checkpointer(trainer)
assert save_handler.call_count == 1
metadata = {"basename": "model", "score_name": None, "priority": 0}
save_handler.assert_called_with(model.state_dict(), "model_0.pt", metadata)
def _test_checkpoint_load_objects_ddp(device):
model = DummyModel().to(device)
device_ids = None if "cpu" in device.type else [device]
ddp_model = nn.parallel.DistributedDataParallel(model, device_ids=device_ids)
opt = torch.optim.SGD(ddp_model.parameters(), lr=0.01)
# single object:
to_load = {"model": ddp_model}
checkpoint = ddp_model.module.state_dict()
Checkpoint.load_objects(to_load, checkpoint)
# multiple objects:
to_load = {"model": ddp_model, "opt": opt}
checkpoint = {"model": ddp_model.module.state_dict(), "opt": opt.state_dict()}
Checkpoint.load_objects(to_load, checkpoint)
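# With ZeroRedundancyOptimizer the optimizer state is sharded across ranks; Checkpoint(save_on_rank=1) is expected to consolidate it onto rank 1 before saving, hence the consolidate_state_dict(to=1) assertion.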
def _test_checkpoint_with_ZeRO(device, dirname, local_rank):
from torch.distributed.optim import ZeroRedundancyOptimizer
model = DummyModel().to(device)
opt = ZeroRedundancyOptimizer(model.parameters(), torch.optim.SGD, lr=0.01)
mocked_opt = MagicMock(ZeroRedundancyOptimizer, wraps=opt)
# A `step` call is needed so that the optimizer state gets populated.
out = model(torch.tensor([1.0], device=device))
out.backward()
mocked_opt.step()
to_save = {"model": model, "optim": mocked_opt}
checkpointer = Checkpoint(to_save, dirname, save_on_rank=1)
engine = Engine(lambda e, b: None)
checkpointer(engine)
mocked_opt.consolidate_state_dict.assert_called_once_with(to=1)
if local_rank == 1:
loaded_state_dict = torch.load(dirname / "checkpoint_0.pt", map_location=device)["optim"]
state_dict = opt.state_dict()
assert loaded_state_dict == state_dict
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo, dirname, get_rank_zero_dirname, local_rank):
device = idist.device()
rank_zero_dirname = get_rank_zero_dirname()
_test_save_model_optimizer_lr_scheduler_with_state_dict(device, rank_zero_dirname / "1")
_test_save_model_optimizer_lr_scheduler_with_state_dict(device, rank_zero_dirname / "2", just_on_zero_rank=True)
_test_checkpoint_with_ddp(device)
_test_checkpoint_load_objects_ddp(device)
from ignite.handlers.checkpoint import HAVE_ZERO
if HAVE_ZERO:
_test_checkpoint_with_ZeRO(device, dirname, local_rank)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl, get_rank_zero_dirname):
device = idist.device()
dirname = get_rank_zero_dirname()
_test_save_model_optimizer_lr_scheduler_with_state_dict(device, dirname / "1")
_test_save_model_optimizer_lr_scheduler_with_state_dict("cpu", dirname / "2", just_on_zero_rank=True)
_test_checkpoint_with_ddp(device=device)
_test_checkpoint_load_objects_ddp(device=device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor, get_rank_zero_dirname):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
dirname = get_rank_zero_dirname()
gloo_hvd_executor(
_test_save_model_optimizer_lr_scheduler_with_state_dict,
(device, dirname / "1"),
np=nproc,
do_init=True,
)
gloo_hvd_executor(
_test_save_model_optimizer_lr_scheduler_with_state_dict,
("cpu", dirname / "2", True),
np=nproc,
do_init=True,
)
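# On XLA/TPU, ModelCheckpoint is expected to save CPU tensors: the loaded checkpoint should equal model.cpu().state_dict().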
def _test_tpu_saves_to_cpu(device, dirname):
torch.manual_seed(0)
h = ModelCheckpoint(dirname, _PREFIX)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=1)
model = DummyModel().to(device)
to_save = {"model": model}
h(engine, to_save)
idist.barrier()
fname = h.last_checkpoint
assert isinstance(fname, Path)
assert str(dirname / _PREFIX) in str(fname)
assert fname.exists()
loaded_objects = torch.load(fname)
assert loaded_objects == model.cpu().state_dict()
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Not on TPU device")
def test_distrib_single_device_xla(dirname):
assert "xla" in idist.device().type
_test_tpu_saves_to_cpu(idist.device(), dirname / "1")
_test_save_model_optimizer_lr_scheduler_with_state_dict(idist.device(), dirname / "2")
def _test_tpu_saves_to_cpu_nprocs(index, dirname):
device = idist.device()
_test_tpu_saves_to_cpu(device, dirname / "1")
_test_save_model_optimizer_lr_scheduler_with_state_dict(device, dirname / "2")
import time
# hack to let all processes properly sync:
time.sleep(1)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Not on TPU device")
def test_distrib_xla_nprocs(xmp_executor, dirname):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_tpu_saves_to_cpu_nprocs, args=(dirname,), nprocs=n)
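# Two helpers exercise the same filename-pattern scenarios through Checkpoint and ModelCheckpoint respectively, so test_checkpoint_filename_pattern can assert identical naming for both classes.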
def _test_checkpoint_filename_pattern_helper(
to_save,
filename_prefix="",
score_function=None,
score_name=None,
global_step_transform=None,
filename_pattern=None,
dirname=None,
):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(
to_save,
save_handler=save_handler,
filename_prefix=filename_prefix,
score_function=score_function,
score_name=score_name,
global_step_transform=global_step_transform,
filename_pattern=filename_pattern,
)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=12, iteration=203, score=0.9999)
checkpointer(trainer)
return checkpointer.last_checkpoint
def _test_model_checkpoint_filename_pattern_helper(
to_save,
filename_prefix="",
score_function=None,
score_name=None,
global_step_transform=None,
filename_pattern=None,
dirname=None,
):
checkpointer = ModelCheckpoint(
dirname=dirname,
filename_prefix=filename_prefix,
score_function=score_function,
score_name=score_name,
global_step_transform=global_step_transform,
filename_pattern=filename_pattern,
require_empty=False,
)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=12, iteration=203, score=0.9999)
checkpointer(trainer, to_save)
return Path(checkpointer.last_checkpoint).name
@pytest.mark.parametrize("test_class", ["checkpoint", "model_checkpoint"])
def test_checkpoint_filename_pattern(test_class, dirname):
if test_class == "checkpoint":
_test = _test_checkpoint_filename_pattern_helper
elif test_class == "model_checkpoint":
_test = _test_model_checkpoint_filename_pattern_helper
model = DummyModel()
to_save = {"model": model}
assert _test(to_save, dirname=dirname) == "model_203.pt"
assert _test(to_save, "best", dirname=dirname) == "best_model_203.pt"
assert _test(to_save, score_function=lambda e: e.state.score, dirname=dirname) == "model_0.9999.pt"
res = _test(
to_save,
score_function=lambda e: e.state.score,
global_step_transform=lambda e, _: e.state.epoch,
dirname=dirname,
)
assert res == "model_12_0.9999.pt"
assert (
_test(to_save, score_function=lambda e: e.state.score, score_name="acc", dirname=dirname)
== "model_acc=0.9999.pt"
)
res = _test(
to_save,
score_function=lambda e: e.state.score,
score_name="acc",
global_step_transform=lambda e, _: e.state.epoch,
dirname=dirname,
)
assert res == "model_12_acc=0.9999.pt"
assert _test(to_save, "best", score_function=lambda e: e.state.score, dirname=dirname) == "best_model_0.9999.pt"
res = _test(
to_save,
"best",
score_function=lambda e: e.state.score,
global_step_transform=lambda e, _: e.state.epoch,
dirname=dirname,
)
assert res == "best_model_12_0.9999.pt"
res = _test(to_save, "best", score_function=lambda e: e.state.score, score_name="acc", dirname=dirname)
assert res == "best_model_acc=0.9999.pt"
res = _test(
to_save,
"best",
score_function=lambda e: e.state.score,
score_name="acc",
global_step_transform=lambda e, _: e.state.epoch,
dirname=dirname,
)
assert res == "best_model_12_acc=0.9999.pt"
pattern = "{name}.{ext}"
assert _test(to_save, filename_pattern=pattern, dirname=dirname) == "model.pt"
pattern = "chk-{name}--{global_step}.{ext}"
assert _test(to_save, filename_pattern=pattern, dirname=dirname) == "chk-model--203.pt"
pattern = "chk-{filename_prefix}--{name}--{global_step}.{ext}"
assert _test(to_save, "best", filename_pattern=pattern, dirname=dirname) == "chk-best--model--203.pt"
pattern = "chk-{name}--{score}.{ext}"
assert (
_test(to_save, score_function=lambda e: e.state.score, filename_pattern=pattern, dirname=dirname)
== "chk-model--0.9999.pt"
)
pattern = "{global_step}-{name}-{score}.chk.{ext}"
res = _test(
to_save,
score_function=lambda e: e.state.score,
global_step_transform=lambda e, _: e.state.epoch,
filename_pattern=pattern,
dirname=dirname,
)
assert res == "12-model-0.9999.chk.pt"
pattern = "chk-{name}--{score_name}--{score}.{ext}"
res = _test(
to_save, score_function=lambda e: e.state.score, score_name="acc", filename_pattern=pattern, dirname=dirname
)
assert res == "chk-model--acc--0.9999.pt"
pattern = "chk-{name}-{global_step}-{score_name}-{score}.{ext}"
res = _test(
to_save,
score_function=lambda e: e.state.score,
score_name="acc",
global_step_transform=lambda e, _: e.state.epoch,
filename_pattern=pattern,
dirname=dirname,
)
assert res == "chk-model-12-acc-0.9999.pt"
pattern = "{filename_prefix}-{name}-{score}.chk"
res = _test(to_save, "best", score_function=lambda e: e.state.score, filename_pattern=pattern, dirname=dirname)
assert res == "best-model-0.9999.chk"
pattern = "resnet-{filename_prefix}-{name}-{global_step}-{score}.chk"
res = _test(
to_save,
"best",
score_function=lambda e: e.state.score,
global_step_transform=lambda e, _: e.state.epoch,
filename_pattern=pattern,
dirname=dirname,
)
assert res == "resnet-best-model-12-0.9999.chk"
pattern = "{filename_prefix}-{name}-{score_name}-{score}.chk"
res = _test(
to_save,
"best",
score_function=lambda e: e.state.score,
score_name="acc",
filename_pattern=pattern,
dirname=dirname,
)
assert res == "best-model-acc-0.9999.chk"
pattern = "{global_step}-{filename_prefix}-{name}-{score_name}-{score}"
res = _test(
to_save,
"best",
score_function=lambda e: e.state.score,
score_name="acc",
global_step_transform=lambda e, _: e.state.epoch,
filename_pattern=pattern,
dirname=dirname,
)
assert res == "12-best-model-acc-0.9999"
pattern = "SAVE-{name}-{score_name}-{score}.pth"
res = _test(
to_save,
"best",
score_function=lambda e: e.state.score,
score_name="acc",
global_step_transform=lambda e, _: e.state.epoch,
filename_pattern=pattern,
dirname=dirname,
)
assert res == "SAVE-model-acc-0.9999.pth"
pattern = "{global_step}-chk-{filename_prefix}-{name}-{score_name}-{score}.{ext}"
assert _test(to_save, filename_pattern=pattern, dirname=dirname) == "203-chk--model-None-None.pt"
with pytest.raises(KeyError, match=r"random_key"):
pattern = "SAVE-{random_key}.{ext}"
_test(to_save, filename_pattern=pattern, dirname=dirname)
def test_setup_filename_pattern():
# default filename pattern
assert Checkpoint.setup_filename_pattern() == "{filename_prefix}_{name}_{global_step}_{score_name}={score}.{ext}"
assert Checkpoint.setup_filename_pattern(False) == "{name}_{global_step}_{score_name}={score}.{ext}"
assert Checkpoint.setup_filename_pattern(False, False, False) == "{name}_{global_step}.{ext}"
assert Checkpoint.setup_filename_pattern(False, True, False) == "{name}_{global_step}_{score}.{ext}"
assert Checkpoint.setup_filename_pattern(False, True, False, False) == "{name}_{score}.{ext}"
assert Checkpoint.setup_filename_pattern(False, True, True, False) == "{name}_{score_name}={score}.{ext}"
with pytest.raises(ValueError, match=r"At least one of with_score and with_global_step should be True."):
Checkpoint.setup_filename_pattern(False, False, False, False)
with pytest.raises(ValueError, match=r"If with_score_name is True, with_score should be also True"):
Checkpoint.setup_filename_pattern(True, False, True, True)
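# Illustrative sketch (not part of the test suite's assertions): a pattern returned by
# Checkpoint.setup_filename_pattern is a plain str.format template, so a concrete filename
# can be previewed directly; the values used below are made up for the example.
def _example_render_filename_pattern():
    pattern = Checkpoint.setup_filename_pattern(False, True, True, False)
    # pattern == "{name}_{score_name}={score}.{ext}"
    return pattern.format(name="model", score_name="acc", score="0.9999", ext="pt")
    # -> "model_acc=0.9999.pt"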
def _setup_checkpoint():
save_handler = MagicMock(spec=BaseSaveHandler)
model = DummyModel()
to_save = {"model": model}
checkpointer = Checkpoint(to_save, save_handler=save_handler, n_saved=None)
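    # n_saved=None keeps every checkpoint, so all three calls below remain in checkpointer._saved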
assert checkpointer.last_checkpoint is None
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
checkpointer(trainer)
trainer.state.iteration = 10
checkpointer(trainer)
trainer.state.iteration = 20
checkpointer(trainer)
assert save_handler.call_count == 3
return checkpointer
def test_checkpoint_state_dict():
checkpointer = _setup_checkpoint()
sd = checkpointer.state_dict()
assert "saved" in sd
assert isinstance(sd["saved"], list) and len(sd["saved"]) == len(checkpointer._saved)
for saved_item, true_item in zip(sd["saved"], checkpointer._saved):
assert saved_item[0] == true_item.priority
assert saved_item[1] == true_item.filename
def test_checkpoint_load_state_dict():
true_checkpointer = _setup_checkpoint()
save_handler = MagicMock(spec=BaseSaveHandler)
model = DummyModel()
to_save = {"model": model}
checkpointer = Checkpoint(to_save, save_handler=save_handler, n_saved=None)
sd = {"saved": [(0, "model_0.pt"), (10, "model_10.pt"), (20, "model_20.pt")]}
checkpointer.load_state_dict(sd)
assert checkpointer._saved == true_checkpointer._saved
def test_checkpoint_fixed_filename():
model = DummyModel()
to_save = {"model": model}
def _test(n_saved):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(to_save, save_handler=save_handler, n_saved=n_saved, filename_pattern="{name}.{ext}")
trainer = Engine(lambda e, b: None)
for i in range(10):
trainer.state = State(epoch=i, iteration=i)
checkpointer(trainer)
assert save_handler.call_count == i + 1
metadata = {"basename": "model", "score_name": None, "priority": i}
save_handler.assert_called_with(model.state_dict(), "model.pt", metadata)
_test(None)
_test(1)
_test(3)
def test_checkpoint_reset():
model = DummyModel()
to_save = {"model": model}
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(to_save, save_handler=save_handler, n_saved=2)
assert checkpointer.last_checkpoint is None
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=123)
checkpointer(trainer)
trainer.state.iteration = 234
checkpointer(trainer)
assert save_handler.call_count == 2
assert checkpointer.last_checkpoint == "model_234.pt"
assert len(checkpointer._saved) == 2
assert sorted([item.filename for item in checkpointer._saved]) == sorted(["model_123.pt", "model_234.pt"])
checkpointer.reset()
assert len(checkpointer._saved) == 0
trainer.state.iteration = 124
checkpointer(trainer)
assert save_handler.call_count == 3
assert checkpointer.last_checkpoint == "model_124.pt"
assert len(checkpointer._saved) == 1
assert sorted([item.filename for item in checkpointer._saved]) == sorted(["model_124.pt"])
def test_checkpoint_reset_with_engine(dirname):
name = "model"
engine = Engine(lambda e, b: None)
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2)
model = DummyModel()
to_save = {"model": model}
engine.add_event_handler(Events.EPOCH_COMPLETED, handler, to_save)
engine.run([0, 1], max_epochs=10)
expected = sorted([f"{_PREFIX}_{name}_{i}.pt" for i in [9 * 2, 10 * 2]])
assert sorted(os.listdir(dirname)) == expected
assert "PREFIX_model_20.pt" in str(handler.last_checkpoint)
handler.reset()
engine.state.max_epochs = None
engine.run([0, 1], max_epochs=2)
expected += [f"{_PREFIX}_{name}_{i}.pt" for i in [1 * 2, 2 * 2]]
assert sorted(os.listdir(dirname)) == sorted(expected)
assert "PREFIX_model_4.pt" in str(handler.last_checkpoint)
def test_greater_or_equal():
scores = iter([1, 2, 2, 2])
def score_function(_):
return next(scores)
class Saver:
def __init__(self):
self.counter = 0
def __call__(self, c, f, m):
if self.counter == 0:
assert f == "model_1.pt"
else:
assert f == "model_2.pt"
self.counter += 1
handler = Saver()
checkpointer = Checkpoint(
to_save={"model": DummyModel()},
save_handler=handler,
score_function=score_function,
n_saved=2,
greater_or_equal=True,
)
trainer = Engine(lambda e, b: None)
for _ in range(4):
checkpointer(trainer)
assert handler.counter == 4
def test_greater_or_equal_model_checkpoint(dirname):
scores = iter([1, 2, 2, 2])
def score_function(_):
return next(scores)
checkpointer = ModelCheckpoint(
dirname,
score_function=score_function,
n_saved=2,
greater_or_equal=True,
)
trainer = Engine(lambda e, b: None)
to_save = {"model": DummyModel()}
for i in range(4):
checkpointer(trainer, to_save)
if i == 0:
assert Path(checkpointer.last_checkpoint).name == "model_1.pt"
else:
assert Path(checkpointer.last_checkpoint).name == "model_2.pt"
def test_get_default_score_fn():
with pytest.raises(ValueError, match=r"Argument score_sign should be 1 or -1"):
Checkpoint.get_default_score_fn("acc", 2.0)
engine = Engine(lambda e, b: None)
engine.state.metrics["acc"] = 0.9
engine.state.metrics["loss"] = 0.123
score_fn = Checkpoint.get_default_score_fn("acc")
score = score_fn(engine)
assert score == 0.9
score_fn = Checkpoint.get_default_score_fn("loss", -1)
score = score_fn(engine)
assert score == -0.123
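# Usage sketch (illustrative only; the evaluator, model and the "/tmp/ckpts" path below are
# assumptions, not part of this test): the default score function is typically passed to a
# Checkpoint so the best models by a metric are kept, e.g.
#
#     handler = Checkpoint(
#         {"model": model},
#         DiskSaver("/tmp/ckpts", require_empty=False),
#         score_function=Checkpoint.get_default_score_fn("acc"),
#         score_name="acc",
#         n_saved=2,
#     )
#     evaluator.add_event_handler(Events.COMPLETED, handler)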
@pytest.mark.parametrize("obj_to_save", ["optim", "trainer"])
def test_load_single_object(obj_to_save, dirname):
# Checks https://github.com/pytorch/ignite/issues/2479
trainer = Engine(lambda e, b: None)
if obj_to_save == "optim":
t = torch.tensor(0.0)
optim = torch.optim.SGD([t], lr=0.1)
to_save = {"optim": optim}
elif obj_to_save == "trainer":
to_save = {"trainer": trainer}
c = Checkpoint(to_save, save_handler=dirname)
c(trainer)
checkpoint_fp = dirname / c.last_checkpoint
Checkpoint.load_objects(to_load=to_save, checkpoint=str(checkpoint_fp))
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.parametrize("atomic", [False, True])
def test_disksaver_distrib(distributed_context_single_node_gloo, dirname, local_rank, atomic):
saver = DiskSaver(dirname, atomic, save_on_rank=1)
mocked_saver = MagicMock(wraps=saver)
mocked_saver(checkpoint={}, filename="test_disksaver_distrib.pt")
if local_rank == 1:
assert (dirname / "test_disksaver_distrib.pt").exists()
else:
mocked_saver._save_func.assert_not_called()
|
import pytest
from ignite.base import Serializable
def test_state_dict():
s = Serializable()
with pytest.raises(NotImplementedError):
s.state_dict()
def test_load_state_dict():
s = Serializable()
s.load_state_dict({})
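# Sketch of the intended subclass contract (illustrative; the Counter class below is not part
# of ignite): concrete serializable objects override both methods and may call the base
# load_state_dict for validation of the incoming mapping.
#
#     class Counter(Serializable):
#         def __init__(self):
#             self.count = 0
#
#         def state_dict(self):
#             return {"count": self.count}
#
#         def load_state_dict(self, state_dict):
#             super().load_state_dict(state_dict)
#             self.count = state_dict.get("count", 0)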
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import shutil
import sys
sys.path.insert(0, os.path.abspath("../.."))
from datetime import datetime
import pytorch_sphinx_theme
import ignite
# -- Project information -----------------------------------------------------
project = "PyTorch-Ignite"
author = "PyTorch-Ignite Contributors"
copyright = f"{datetime.now().year}, {author}"
# The short X.Y version
try:
version = os.environ["code_version"]
except KeyError:
version = ignite.__version__
# The full version, including alpha/beta/rc tags
release = version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinxcontrib.katex",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"sphinx.ext.autosectionlabel",
"sphinx_copybutton",
"sphinx_togglebutton",
"sphinx_design",
]
# toggle button hint text
togglebutton_hint = "Show default setup"
togglebutton_hint_hide = "Hide default setup"
# Copy defaults.rst to source/generated to be discoverable in docstrings
# Skip this step for previous versions of ignite
if os.path.exists("defaults.rst"):
src_folder = os.path.dirname(__file__)
gen_folder = os.path.join(src_folder, "generated")
os.makedirs(gen_folder, exist_ok=True)
shutil.copy(os.path.join(src_folder, "defaults.rst"), gen_folder)
# katex options
katex_prerender = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_title = f"{project} {version} Documentation"
html_theme = "pytorch_sphinx_theme"
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
"canonical_url": "https://pytorch.org/ignite/",
"collapse_navigation": False,
"display_version": True,
"logo_only": True,
"navigation_with_keys": True,
}
html_logo = "_templates/_static/img/ignite_logo.svg"
html_favicon = "_templates/_static/img/ignite_logomark.svg"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static", "_templates/_static"]
html_context = {
"extra_css_files": [
# 'https://fonts.googleapis.com/css?family=Lato',
# '_static/css/pytorch_theme.css'
"_static/css/ignite_theme.css",
"https://cdn.jsdelivr.net/npm/@docsearch/css@3",
],
}
html_last_updated_fmt = "%m/%d/%Y, %X"
html_permalinks = True
html_permalinks_icon = "#"
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "ignitedoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "ignite.tex", "ignite Documentation", "Torch Contributors", "manual"),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "ignite", "ignite Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"ignite",
"ignite Documentation",
author,
"ignite",
"One line description of project.",
"Miscellaneous",
),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"torch": ("https://pytorch.org/docs/stable/", None),
}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Type hints configs ------------------------------------------------------
autodoc_inherit_docstrings = True
autoclass_content = "both"
autodoc_typehints = "description"
napoleon_attr_annotations = True
# -- Autosummary patch to get list of a classes, funcs automatically ----------
from importlib import import_module
from inspect import getmembers, isclass, isfunction
from docutils.parsers.rst import directives
from docutils.statemachine import StringList
from sphinx.ext.autosummary import Autosummary
class AutolistAutosummary(Autosummary):
"""Autosummary with autolisting for modules.
By default it tries to import all public names (__all__),
otherwise import all classes and/or functions in a module.
Options:
- :autolist: option to get list of classes and functions from currentmodule.
- :autolist-classes: option to get list of classes from currentmodule.
- :autolist-functions: option to get list of functions from currentmodule.
Example Usage:
.. currentmodule:: ignite.metrics
.. autosummary::
:nosignatures:
:autolist:
"""
# Add new option
_option_spec = Autosummary.option_spec.copy()
_option_spec.update(
{
"autolist": directives.unchanged,
"autolist-classes": directives.unchanged,
"autolist-functions": directives.unchanged,
}
)
option_spec = _option_spec
def run(self):
for auto in ("autolist", "autolist-classes", "autolist-functions"):
if auto in self.options:
# Get current module name
module_name = self.env.ref_context.get("py:module")
# Import module
module = import_module(module_name)
# Get public names (if possible)
try:
names = getattr(module, "__all__")
except AttributeError:
# Get classes defined in the module
cls_names = [
name[0]
for name in getmembers(module, isclass)
if name[-1].__module__ == module_name and not (name[0].startswith("_"))
]
# Get functions defined in the module
fn_names = [
name[0]
for name in getmembers(module, isfunction)
if (name[-1].__module__ == module_name) and not (name[0].startswith("_"))
]
names = cls_names + fn_names
                    # It may happen that the module doesn't define any class or function
if not names:
names = [name[0] for name in getmembers(module)]
# Filter out members w/o doc strings
names = [name for name in names if getattr(module, name).__doc__ is not None]
if auto == "autolist":
# Get list of all classes and functions inside module
names = [
name for name in names if (isclass(getattr(module, name)) or isfunction(getattr(module, name)))
]
else:
if auto == "autolist-classes":
# Get only classes
check = isclass
elif auto == "autolist-functions":
# Get only functions
check = isfunction
else:
raise NotImplementedError
names = [name for name in names if check(getattr(module, name))]
# Update content
self.content = StringList(names)
return super().run()
# --- autosummary config -----------------------------------------------------
autosummary_generate = True
# --- nitpicky config : check internal links are correct or not --------------
nitpicky = True
# ignore links which can't be referenced
nitpick_ignore = [
("py:class", ".."),
("py:class", "TextIO"),
("py:class", "torch.device"),
("py:class", "_MpDeviceLoader"),
("py:class", "torch.nn.modules.module.Module"),
("py:class", "torch.optim.optimizer.Optimizer"),
("py:class", "torch.utils.data.dataset.Dataset"),
("py:class", "torch.utils.data.sampler.BatchSampler"),
("py:class", "torch.cuda.amp.grad_scaler.GradScaler"),
("py:class", "torch.optim.lr_scheduler._LRScheduler"),
("py:class", "torch.optim.lr_scheduler.LRScheduler"),
("py:class", "torch.utils.data.dataloader.DataLoader"),
]
linkcheck_ignore = [
"https://github.com/fossasia/visdom#visdom-arguments-python-only",
"https://github.com/pytorch/ignite/tree/master/examples/cifar10#check-resume-training",
"https://github.com/pytorch/ignite/tree/master/examples/mnist#training-save--resume",
]
def setup(app):
app.add_directive("autosummary", AutolistAutosummary, override=True)
|
"""
MNIST example with training and validation monitoring using Neptune.
Requirements:
Neptune: `pip install neptune`
Usage:
Run the example:
```bash
python mnist_with_neptune_logger.py
```
Go to https://neptune.ai and explore your run.
Note:
You can view example runs here:
https://app.neptune.ai/o/common/org/pytorch-ignite-integration/
"""
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from ignite.contrib.handlers.neptune_logger import (
global_step_from_engine,
GradsScalarHandler,
NeptuneLogger,
NeptuneSaver,
WeightsScalarHandler,
)
from ignite.engine import create_supervised_evaluator, create_supervised_trainer, Events
from ignite.handlers import Checkpoint
from ignite.metrics import Accuracy, Loss
from ignite.utils import setup_logger
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True
)
val_loader = DataLoader(
MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False
)
return train_loader, val_loader
def run(train_batch_size, val_batch_size, epochs, lr, momentum):
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
model.to(device) # Move model before creating optimizer
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
criterion = nn.CrossEntropyLoss()
trainer = create_supervised_trainer(model, optimizer, criterion, device=device)
trainer.logger = setup_logger("Trainer")
metrics = {"accuracy": Accuracy(), "loss": Loss(criterion)}
train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
train_evaluator.logger = setup_logger("Train Evaluator")
validation_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
validation_evaluator.logger = setup_logger("Val Evaluator")
@trainer.on(Events.EPOCH_COMPLETED)
def compute_metrics(engine):
train_evaluator.run(train_loader)
validation_evaluator.run(val_loader)
npt_logger = NeptuneLogger(
api_token="ANONYMOUS",
project="common/pytorch-ignite-integration",
name="ignite-mnist-example",
)
npt_logger.experiment["params"] = {
"train_batch_size": train_batch_size,
"val_batch_size": val_batch_size,
"epochs": epochs,
"lr": lr,
"momentum": momentum,
}
npt_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED(every=100),
tag="training",
output_transform=lambda loss: {"batchloss": loss},
)
for tag, evaluator in [("training", train_evaluator), ("validation", validation_evaluator)]:
npt_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag=tag,
metric_names=["loss", "accuracy"],
global_step_transform=global_step_from_engine(trainer),
)
npt_logger.attach_opt_params_handler(trainer, event_name=Events.ITERATION_COMPLETED(every=100), optimizer=optimizer)
npt_logger.attach(
trainer, log_handler=WeightsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100)
)
npt_logger.attach(trainer, log_handler=GradsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100))
def score_function(engine):
return engine.state.metrics["accuracy"]
handler = Checkpoint(
{"model": model},
NeptuneSaver(npt_logger),
n_saved=2,
filename_prefix="best",
score_function=score_function,
score_name="validation_accuracy",
global_step_transform=global_step_from_engine(trainer),
)
validation_evaluator.add_event_handler(Events.COMPLETED, handler)
# kick everything off
trainer.run(train_loader, max_epochs=epochs)
npt_logger.close()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)")
parser.add_argument(
"--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)"
)
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)")
parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)")
parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)")
args = parser.parse_args()
run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum)
|
"""
MNIST example with training and validation monitoring using TensorboardX and Tensorboard.
Requirements:
Optionally TensorboardX (https://github.com/lanpa/tensorboard-pytorch): `pip install tensorboardX`
Tensorboard: `pip install tensorflow` (or just install tensorboard without the rest of tensorflow)
Usage:
Start tensorboard:
```bash
tensorboard --logdir=/tmp/tensorboard_logs/
```
Run the example:
```bash
python mnist_with_tensorboard_logger.py --log_dir=/tmp/tensorboard_logs
```
"""
import sys
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from ignite.contrib.handlers.tensorboard_logger import (
global_step_from_engine,
GradsHistHandler,
GradsScalarHandler,
TensorboardLogger,
WeightsHistHandler,
WeightsScalarHandler,
)
from ignite.engine import create_supervised_evaluator, create_supervised_trainer, Events
from ignite.handlers import ModelCheckpoint
from ignite.metrics import Accuracy, Loss
from ignite.utils import setup_logger
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True
)
val_loader = DataLoader(
MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False
)
return train_loader, val_loader
def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_dir):
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
model.to(device) # Move model before creating optimizer
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
criterion = nn.CrossEntropyLoss()
trainer = create_supervised_trainer(model, optimizer, criterion, device=device)
trainer.logger = setup_logger("Trainer")
if sys.version_info > (3,):
from ignite.contrib.metrics.gpu_info import GpuInfo
try:
GpuInfo().attach(trainer)
except RuntimeError:
print(
"INFO: By default, in this example it is possible to log GPU information (used memory, utilization). "
"As there is no pynvml python package installed, GPU information won't be logged. Otherwise, please "
"install it : `pip install pynvml`"
)
metrics = {"accuracy": Accuracy(), "loss": Loss(criterion)}
train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
train_evaluator.logger = setup_logger("Train Evaluator")
validation_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
validation_evaluator.logger = setup_logger("Val Evaluator")
@trainer.on(Events.EPOCH_COMPLETED)
def compute_metrics(engine):
train_evaluator.run(train_loader)
validation_evaluator.run(val_loader)
tb_logger = TensorboardLogger(log_dir=log_dir)
tb_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED(every=100),
tag="training",
output_transform=lambda loss: {"batchloss": loss},
metric_names="all",
)
for tag, evaluator in [("training", train_evaluator), ("validation", validation_evaluator)]:
tb_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag=tag,
metric_names=["loss", "accuracy"],
global_step_transform=global_step_from_engine(trainer),
)
tb_logger.attach_opt_params_handler(trainer, event_name=Events.ITERATION_COMPLETED(every=100), optimizer=optimizer)
tb_logger.attach(
trainer,
log_handler=WeightsScalarHandler(model, whitelist=["fc1"]),
event_name=Events.ITERATION_COMPLETED(every=100),
)
def is_conv(n, _):
return "conv" in n
tb_logger.attach(
trainer,
log_handler=WeightsHistHandler(model, whitelist=is_conv),
event_name=Events.ITERATION_COMPLETED(every=100),
)
tb_logger.attach(trainer, log_handler=GradsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100))
tb_logger.attach(
trainer,
log_handler=GradsHistHandler(model, whitelist=["fc2.weight"]),
event_name=Events.ITERATION_COMPLETED(every=100),
)
def score_function(engine):
return engine.state.metrics["accuracy"]
model_checkpoint = ModelCheckpoint(
log_dir,
n_saved=2,
filename_prefix="best",
score_function=score_function,
score_name="validation_accuracy",
global_step_transform=global_step_from_engine(trainer),
)
validation_evaluator.add_event_handler(Events.COMPLETED, model_checkpoint, {"model": model})
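    # The handler above keeps the n_saved=2 best checkpoints (ranked by validation accuracy)
    # in log_dir and runs each time the validation evaluator completes.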
# kick everything off
trainer.run(train_loader, max_epochs=epochs)
tb_logger.close()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)")
parser.add_argument(
"--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)"
)
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)")
parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)")
parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)")
parser.add_argument(
"--log_dir", type=str, default="tensorboard_logs", help="log directory for Tensorboard log output"
)
args = parser.parse_args()
run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum, args.log_dir)
|
"""
MNIST example with training and validation monitoring using ClearML.
Requirements:
ClearML: `pip install clearml`
Usage:
Run the example:
```bash
python mnist_with_clearml_logger.py
```
"""
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from ignite.contrib.handlers.clearml_logger import (
ClearMLLogger,
ClearMLSaver,
global_step_from_engine,
GradsHistHandler,
GradsScalarHandler,
WeightsHistHandler,
WeightsScalarHandler,
)
from ignite.engine import create_supervised_evaluator, create_supervised_trainer, Events
from ignite.handlers import Checkpoint
from ignite.metrics import Accuracy, Loss
from ignite.utils import setup_logger
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True
)
val_loader = DataLoader(
MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False
)
return train_loader, val_loader
def run(train_batch_size, val_batch_size, epochs, lr, momentum):
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
model.to(device) # Move model before creating optimizer
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
criterion = nn.CrossEntropyLoss()
trainer = create_supervised_trainer(model, optimizer, criterion, device=device)
trainer.logger = setup_logger("Trainer")
metrics = {"accuracy": Accuracy(), "loss": Loss(criterion)}
train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
train_evaluator.logger = setup_logger("Train Evaluator")
validation_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
validation_evaluator.logger = setup_logger("Val Evaluator")
@trainer.on(Events.EPOCH_COMPLETED)
def compute_metrics(engine):
train_evaluator.run(train_loader)
validation_evaluator.run(val_loader)
clearml_logger = ClearMLLogger(project_name="examples", task_name="ignite")
clearml_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED(every=100),
tag="training",
output_transform=lambda loss: {"batchloss": loss},
)
for tag, evaluator in [("training metrics", train_evaluator), ("validation metrics", validation_evaluator)]:
clearml_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag=tag,
metric_names=["loss", "accuracy"],
global_step_transform=global_step_from_engine(trainer),
)
clearml_logger.attach_opt_params_handler(
trainer, event_name=Events.ITERATION_COMPLETED(every=100), optimizer=optimizer
)
clearml_logger.attach(
trainer,
log_handler=WeightsScalarHandler(model, whitelist=["fc1"]),
event_name=Events.ITERATION_COMPLETED(every=100),
)
def is_conv(n, _):
return "conv" in n
clearml_logger.attach(
trainer,
log_handler=WeightsHistHandler(model, whitelist=is_conv),
event_name=Events.ITERATION_COMPLETED(every=100),
)
clearml_logger.attach(
trainer, log_handler=GradsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100)
)
clearml_logger.attach(
trainer,
log_handler=GradsHistHandler(model, whitelist=["fc2.weight"]),
event_name=Events.ITERATION_COMPLETED(every=100),
)
handler = Checkpoint(
{"model": model},
ClearMLSaver(),
n_saved=1,
score_function=lambda e: e.state.metrics["accuracy"],
score_name="val_acc",
filename_prefix="best",
global_step_transform=global_step_from_engine(trainer),
)
validation_evaluator.add_event_handler(Events.EPOCH_COMPLETED, handler)
# kick everything off
trainer.run(train_loader, max_epochs=epochs)
clearml_logger.close()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)")
parser.add_argument(
"--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)"
)
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)")
parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)")
parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)")
args = parser.parse_args()
run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum)
|
"""
MNIST example with training and validation monitoring using Tensorboard on TPU
Requirements:
- PyTorch >= 1.5
- PyTorch XLA >= 1.5
- Tensorboard: `pip install tensorflow` (or just install tensorboard without the rest of tensorflow)
Usage:
Start tensorboard:
```bash
tensorboard --logdir=/tmp/tensorboard_logs/
```
Run the example:
```bash
python mnist_with_tensorboard_on_tpu.py --log_dir=/tmp/tensorboard_logs
```
"""
from argparse import ArgumentParser
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from ignite.engine import create_supervised_evaluator, create_supervised_trainer, Events
from ignite.metrics import Accuracy, Loss, RunningAverage
try:
import torch_xla.core.xla_model as xm
except ImportError:
raise ModuleNotFoundError(
"In order to run PyTorch on TPU we need to install PyTorch XLA:"
"\n\t- curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o xla-setup.py"
"\n\t- python xla-setup.py --version 1.5"
)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True
)
val_loader = DataLoader(
MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False
)
return train_loader, val_loader
def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval, log_dir):
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
writer = SummaryWriter(log_dir=log_dir)
# Use TPU device
device = xm.xla_device()
model.to(device) # Move model before creating optimizer
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
criterion = nn.NLLLoss()
# Create trainer and evaluator
trainer = create_supervised_trainer(
model, optimizer, criterion, device=device, output_transform=lambda x, y, y_pred, loss: [loss.item()]
)
val_metrics = {"accuracy": Accuracy(), "nll": Loss(criterion)}
evaluator = create_supervised_evaluator(model, metrics=val_metrics, device=device)
tracker = xm.RateTracker()
# Add RateTracker as an output of the training step
@trainer.on(Events.ITERATION_COMPLETED)
def add_rate_tracker(engine):
tracker.add(len(engine.state.batch))
engine.state.output.append(tracker.global_rate())
# Setup output values of the training step as EMA metrics
RunningAverage(output_transform=lambda x: x[0]).attach(trainer, "batch_loss")
RunningAverage(output_transform=lambda x: x[1]).attach(trainer, "global_rate")
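    # Both running averages are exposed as engine.state.metrics["batch_loss"] / ["global_rate"]
    # and refreshed on every iteration.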
# Let's log the EMA metrics every `log_interval` iterations
@trainer.on(Events.ITERATION_COMPLETED(every=log_interval))
def log_training_loss(engine):
writer.add_scalar("training/batch_loss", engine.state.metrics["batch_loss"], engine.state.iteration)
writer.add_scalar("training/global_rate", engine.state.metrics["global_rate"], engine.state.iteration)
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
evaluator.run(train_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
print(
f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
writer.add_scalar("training/avg_loss", avg_nll, engine.state.epoch)
writer.add_scalar("training/avg_accuracy", avg_accuracy, engine.state.epoch)
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
evaluator.run(val_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
print(
f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
writer.add_scalar("valdation/avg_loss", avg_nll, engine.state.epoch)
writer.add_scalar("valdation/avg_accuracy", avg_accuracy, engine.state.epoch)
# kick everything off
trainer.run(train_loader, max_epochs=epochs)
writer.close()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)")
parser.add_argument(
"--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)"
)
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)")
parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)")
parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)")
parser.add_argument(
"--log_interval", type=int, default=10, help="how many batches to wait before logging training status"
)
parser.add_argument(
"--log_dir", type=str, default="tensorboard_logs", help="log directory for Tensorboard log output"
)
args = parser.parse_args()
run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum, args.log_interval, args.log_dir)
|
from argparse import ArgumentParser
from pathlib import Path
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from tqdm import tqdm
from ignite.engine import create_supervised_evaluator, create_supervised_trainer, Events
from ignite.handlers import Checkpoint, DiskSaver
from ignite.metrics import Accuracy, Loss
from ignite.utils import manual_seed
try:
from tensorboardX import SummaryWriter
except ImportError:
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
raise ModuleNotFoundError(
"This module requires either tensorboardX or torch >= 1.2.0. "
"You may install tensorboardX with command: \n pip install tensorboardX \n"
"or upgrade PyTorch using your package manager of choice (pip or conda)."
)
# Basic model definition
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
"""Method to setup data loaders: train_loader and val_loader"""
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root=".", transform=data_transform, train=True),
batch_size=train_batch_size,
shuffle=True,
num_workers=4,
)
val_loader = DataLoader(
MNIST(download=False, root=".", transform=data_transform, train=False),
batch_size=val_batch_size,
shuffle=False,
num_workers=4,
)
return train_loader, val_loader
def log_model_weights(engine, model=None, fp=None, **kwargs):
"""Helper method to log norms of model weights: print and dump into a file"""
assert model and fp
output = {"total": 0.0}
max_counter = 5
for name, p in model.named_parameters():
name = name.replace(".", "/")
n = torch.norm(p)
if max_counter > 0:
output[name] = n
output["total"] += n
max_counter -= 1
output_items = " - ".join([f"{m}:{v:.4f}" for m, v in output.items()])
msg = f"{engine.state.epoch} | {engine.state.iteration}: {output_items}"
with open(fp, "a") as h:
h.write(msg)
h.write("\n")
def log_model_grads(engine, model=None, fp=None, **kwargs):
"""Helper method to log norms of model gradients: print and dump into a file"""
assert model and fp
output = {"grads/total": 0.0}
max_counter = 5
for name, p in model.named_parameters():
if p.grad is None:
continue
name = name.replace(".", "/")
n = torch.norm(p.grad)
if max_counter > 0:
output[f"grads/{name}"] = n
output["grads/total"] += n
max_counter -= 1
output_items = " - ".join([f"{m}:{v:.4f}" for m, v in output.items()])
msg = f"{engine.state.epoch} | {engine.state.iteration}: {output_items}"
with open(fp, "a") as h:
h.write(msg)
h.write("\n")
def log_data_stats(engine, fp=None, **kwargs):
"""Helper method to log mean/std of input batch of images and median of batch of targets."""
assert fp
x, y = engine.state.batch
output = {
"batch xmean": x.mean().item(),
"batch xstd": x.std().item(),
"batch ymedian": y.median().item(),
}
output_items = " - ".join([f"{m}:{v:.4f}" for m, v in output.items()])
msg = f"{engine.state.epoch} | {engine.state.iteration}: {output_items}"
with open(fp, "a") as h:
h.write(msg)
h.write("\n")
def run(
train_batch_size,
val_batch_size,
epochs,
lr,
momentum,
log_interval,
log_dir,
checkpoint_every,
resume_from,
crash_iteration=-1,
deterministic=False,
):
    # Set the seed so that the model is initialized identically across runs:
manual_seed(75)
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
writer = SummaryWriter(log_dir=log_dir)
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
model.to(device) # Move model before creating optimizer
criterion = nn.NLLLoss()
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
lr_scheduler = StepLR(optimizer, step_size=1, gamma=0.5)
# Setup trainer and evaluator
if deterministic:
tqdm.write("Setup deterministic trainer")
trainer = create_supervised_trainer(model, optimizer, criterion, device=device, deterministic=deterministic)
evaluator = create_supervised_evaluator(
model, metrics={"accuracy": Accuracy(), "nll": Loss(criterion)}, device=device
)
# Apply learning rate scheduling
@trainer.on(Events.EPOCH_COMPLETED)
def lr_step(engine):
lr_scheduler.step()
pbar = tqdm(initial=0, leave=False, total=len(train_loader), desc=f"Epoch {0} - loss: {0:.4f} - lr: {lr:.4f}")
@trainer.on(Events.ITERATION_COMPLETED(every=log_interval))
def log_training_loss(engine):
lr = optimizer.param_groups[0]["lr"]
pbar.desc = f"Epoch {engine.state.epoch} - loss: {engine.state.output:.4f} - lr: {lr:.4f}"
pbar.update(log_interval)
writer.add_scalar("training/loss", engine.state.output, engine.state.iteration)
writer.add_scalar("lr", lr, engine.state.iteration)
if crash_iteration > 0:
@trainer.on(Events.ITERATION_COMPLETED(once=crash_iteration))
def _(engine):
raise Exception(f"STOP at {engine.state.iteration}")
if resume_from is not None:
@trainer.on(Events.STARTED)
def _(engine):
pbar.n = engine.state.iteration % engine.state.epoch_length
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
pbar.refresh()
evaluator.run(train_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
tqdm.write(
f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
writer.add_scalar("training/avg_loss", avg_nll, engine.state.epoch)
writer.add_scalar("training/avg_accuracy", avg_accuracy, engine.state.epoch)
# Compute and log validation metrics
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
evaluator.run(val_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
tqdm.write(
f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
pbar.n = pbar.last_print_n = 0
writer.add_scalar("valdation/avg_loss", avg_nll, engine.state.epoch)
writer.add_scalar("valdation/avg_accuracy", avg_accuracy, engine.state.epoch)
# Setup object to checkpoint
objects_to_checkpoint = {"trainer": trainer, "model": model, "optimizer": optimizer, "lr_scheduler": lr_scheduler}
training_checkpoint = Checkpoint(
to_save=objects_to_checkpoint,
save_handler=DiskSaver(log_dir, require_empty=False),
n_saved=None,
global_step_transform=lambda *_: trainer.state.epoch,
)
trainer.add_event_handler(Events.EPOCH_COMPLETED(every=checkpoint_every), training_checkpoint)
    # Setup handlers that dump model weights, model grads and data stats into a file:
    # - on the first 3 iterations
    # - on the checkpoint iteration and the 4 iterations that follow it
    # This makes it possible to compare a resumed run against the original checkpointed run.
def log_event_filter(e, event):
if event in [1, 2, 3]:
return True
elif 0 <= (event % (checkpoint_every * e.state.epoch_length)) < 5:
return True
return False
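    # Example (hypothetical numbers): with checkpoint_every=1 and epoch_length=100, the filter
    # passes iterations 1-3 via the first branch and 4, 100-104, 200-204, ... via the modulo branch.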
fp = Path(log_dir) / ("run.log" if resume_from is None else "resume_run.log")
fp = fp.as_posix()
for h in [log_data_stats, log_model_weights, log_model_grads]:
trainer.add_event_handler(Events.ITERATION_COMPLETED(event_filter=log_event_filter), h, model=model, fp=fp)
if resume_from is not None:
tqdm.write(f"Resume from the checkpoint: {resume_from}")
checkpoint = torch.load(resume_from)
Checkpoint.load_objects(to_load=objects_to_checkpoint, checkpoint=checkpoint)
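        # load_objects restores the state of every entry in objects_to_checkpoint, including
        # the trainer itself, so the run below continues from the saved epoch/iteration.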
try:
# Synchronize random states
manual_seed(15)
trainer.run(train_loader, max_epochs=epochs)
    except Exception:
import traceback
print(traceback.format_exc())
pbar.close()
writer.close()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)")
parser.add_argument(
"--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)"
)
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)")
parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)")
parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)")
parser.add_argument(
"--log_interval", type=int, default=10, help="how many batches to wait before logging training status"
)
parser.add_argument(
"--log_dir", type=str, default="/tmp/mnist_save_resume", help="log directory for Tensorboard log output"
)
parser.add_argument("--checkpoint_every", type=int, default=1, help="Checkpoint training every X epochs")
parser.add_argument(
"--resume_from", type=str, default=None, help="Path to the checkpoint .pt file to resume training from"
)
parser.add_argument("--crash_iteration", type=int, default=-1, help="Iteration at which to raise an exception")
parser.add_argument(
"--deterministic", action="store_true", help="Deterministic training with dataflow synchronization"
)
args = parser.parse_args()
run(
args.batch_size,
args.val_batch_size,
args.epochs,
args.lr,
args.momentum,
args.log_interval,
args.log_dir,
args.checkpoint_every,
args.resume_from,
args.crash_iteration,
args.deterministic,
)
|
from argparse import ArgumentParser
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from ignite.engine import create_supervised_evaluator, create_supervised_trainer, Events
from ignite.metrics import Accuracy, Loss
try:
import visdom
except ImportError:
raise ModuleNotFoundError("No visdom package is found. Please install it with command: \n pip install visdom")
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True
)
val_loader = DataLoader(
MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False
)
return train_loader, val_loader
def create_plot_window(vis, xlabel, ylabel, title):
return vis.line(X=np.array([1]), Y=np.array([np.nan]), opts=dict(xlabel=xlabel, ylabel=ylabel, title=title))
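# The single NaN point above only creates an empty window; the handlers below extend each
# line with vis.line(..., update="append").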
def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval):
vis = visdom.Visdom()
# if not vis.check_connection():
# raise RuntimeError("Visdom server not running. Please run python -m visdom.server")
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
model.to(device) # Move model before creating optimizer
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
trainer = create_supervised_trainer(model, optimizer, F.nll_loss, device=device)
evaluator = create_supervised_evaluator(
model, metrics={"accuracy": Accuracy(), "nll": Loss(F.nll_loss)}, device=device
)
train_loss_window = create_plot_window(vis, "#Iterations", "Loss", "Training Loss")
train_avg_loss_window = create_plot_window(vis, "#Iterations", "Loss", "Training Average Loss")
train_avg_accuracy_window = create_plot_window(vis, "#Iterations", "Accuracy", "Training Average Accuracy")
val_avg_loss_window = create_plot_window(vis, "#Epochs", "Loss", "Validation Average Loss")
val_avg_accuracy_window = create_plot_window(vis, "#Epochs", "Accuracy", "Validation Average Accuracy")
@trainer.on(Events.ITERATION_COMPLETED(every=log_interval))
def log_training_loss(engine):
print(
f"Epoch[{engine.state.epoch}] Iteration[{engine.state.iteration}/{len(train_loader)}] "
f"Loss: {engine.state.output:.2f}"
)
vis.line(
X=np.array([engine.state.iteration]),
Y=np.array([engine.state.output]),
update="append",
win=train_loss_window,
)
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
evaluator.run(train_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
print(
f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
vis.line(
X=np.array([engine.state.epoch]), Y=np.array([avg_accuracy]), win=train_avg_accuracy_window, update="append"
)
vis.line(X=np.array([engine.state.epoch]), Y=np.array([avg_nll]), win=train_avg_loss_window, update="append")
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
evaluator.run(val_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
print(
f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
vis.line(
X=np.array([engine.state.epoch]), Y=np.array([avg_accuracy]), win=val_avg_accuracy_window, update="append"
)
vis.line(X=np.array([engine.state.epoch]), Y=np.array([avg_nll]), win=val_avg_loss_window, update="append")
# kick everything off
trainer.run(train_loader, max_epochs=epochs)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)")
parser.add_argument(
"--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)"
)
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)")
parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)")
parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)")
parser.add_argument(
"--log_interval", type=int, default=10, help="how many batches to wait before logging training status"
)
parser.add_argument("--log_file", type=str, default=None, help="log file to log output to")
args = parser.parse_args()
run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum, args.log_interval)
|
"""
MNIST example with training and validation monitoring using Weights & Biases
Requirements:
Weights & Biases: `pip install wandb`
Usage:
Make sure you are logged into Weights & Biases (use the `wandb` command).
Run the example:
```bash
python mnist_with_wandb_logger.py
```
Go to https://wandb.com and explore your experiment.
"""
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from ignite.contrib.handlers.wandb_logger import global_step_from_engine, WandBLogger
from ignite.engine import create_supervised_evaluator, create_supervised_trainer, Events
from ignite.handlers import ModelCheckpoint
from ignite.metrics import Accuracy, Loss
from ignite.utils import setup_logger
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True
)
val_loader = DataLoader(
MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False
)
return train_loader, val_loader
def run(train_batch_size, val_batch_size, epochs, lr, momentum):
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
model.to(device) # Move model before creating optimizer
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
criterion = nn.CrossEntropyLoss()
trainer = create_supervised_trainer(model, optimizer, criterion, device=device)
trainer.logger = setup_logger("Trainer")
metrics = {"accuracy": Accuracy(), "loss": Loss(criterion)}
train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
train_evaluator.logger = setup_logger("Train Evaluator")
validation_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
validation_evaluator.logger = setup_logger("Val Evaluator")
@trainer.on(Events.EPOCH_COMPLETED)
def compute_metrics(engine):
train_evaluator.run(train_loader)
validation_evaluator.run(val_loader)
wandb_logger = WandBLogger(
project="pytorch-ignite-integration",
name="ignite-mnist-example",
config={
"train_batch_size": train_batch_size,
"val_batch_size": val_batch_size,
"epochs": epochs,
"lr": lr,
"momentum": momentum,
},
)
wandb_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED(every=100),
tag="training",
output_transform=lambda loss: {"batchloss": loss},
)
for tag, evaluator in [("training", train_evaluator), ("validation", validation_evaluator)]:
wandb_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag=tag,
metric_names=["loss", "accuracy"],
global_step_transform=lambda *_: trainer.state.iteration,
)
wandb_logger.attach_opt_params_handler(
trainer, event_name=Events.ITERATION_COMPLETED(every=100), optimizer=optimizer
)
wandb_logger.watch(model, log="all")
def score_function(engine):
return engine.state.metrics["accuracy"]
model_checkpoint = ModelCheckpoint(
wandb_logger.run.dir,
n_saved=2,
filename_prefix="best",
score_function=score_function,
score_name="validation_accuracy",
global_step_transform=global_step_from_engine(trainer),
)
validation_evaluator.add_event_handler(Events.COMPLETED, model_checkpoint, {"model": model})
# kick everything off
trainer.run(train_loader, max_epochs=epochs)
wandb_logger.close()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)")
parser.add_argument(
"--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)"
)
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)")
parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)")
parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)")
args = parser.parse_args()
run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum)
|
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from ignite.contrib.handlers import ProgressBar
from ignite.engine import create_supervised_evaluator, create_supervised_trainer, Events
from ignite.metrics import Accuracy, Loss, RunningAverage
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True
)
val_loader = DataLoader(
MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False
)
return train_loader, val_loader
def run(train_batch_size, val_batch_size, epochs, lr, momentum, display_gpu_info):
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
model.to(device) # Move model before creating optimizer
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
trainer = create_supervised_trainer(model, optimizer, F.nll_loss, device=device)
evaluator = create_supervised_evaluator(
model, metrics={"accuracy": Accuracy(), "nll": Loss(F.nll_loss)}, device=device
)
RunningAverage(output_transform=lambda x: x).attach(trainer, "loss")
if display_gpu_info:
from ignite.contrib.metrics import GpuInfo
GpuInfo().attach(trainer, name="gpu")
pbar = ProgressBar(persist=True)
pbar.attach(trainer, metric_names="all")
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
evaluator.run(train_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
pbar.log_message(
f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
evaluator.run(val_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
pbar.log_message(
f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
pbar.n = pbar.last_print_n = 0
trainer.run(train_loader, max_epochs=epochs)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)")
parser.add_argument(
"--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)"
)
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)")
parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)")
parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)")
parser.add_argument(
"--display_gpu_info",
action="store_true",
help="Display gpu usage info. This needs python 3.X and pynvml package",
)
args = parser.parse_args()
run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum, args.display_gpu_info)
|
"""
MNIST example with training and validation monitoring using Visdom.
Requirements:
Visdom (https://github.com/facebookresearch/visdom.git):
`pip install git+https://github.com/facebookresearch/visdom.git`
Usage:
Start visdom server:
```bash
visdom -logging_level 30
```
Run the example:
```bash
python mnist_with_visdom_logger.py
```
"""
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from ignite.contrib.handlers.visdom_logger import (
global_step_from_engine,
GradsScalarHandler,
VisdomLogger,
WeightsScalarHandler,
)
from ignite.engine import create_supervised_evaluator, create_supervised_trainer, Events
from ignite.handlers import ModelCheckpoint
from ignite.metrics import Accuracy, Loss
from ignite.utils import setup_logger
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True
)
val_loader = DataLoader(
MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False
)
return train_loader, val_loader
def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_dir):
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
model.to(device) # Move model before creating optimizer
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
criterion = nn.CrossEntropyLoss()
trainer = create_supervised_trainer(model, optimizer, criterion, device=device)
trainer.logger = setup_logger("Trainer")
metrics = {"accuracy": Accuracy(), "loss": Loss(criterion)}
train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
train_evaluator.logger = setup_logger("Train Evaluator")
validation_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
validation_evaluator.logger = setup_logger("Val Evaluator")
@trainer.on(Events.EPOCH_COMPLETED)
def compute_metrics(engine):
train_evaluator.run(train_loader)
validation_evaluator.run(val_loader)
vd_logger = VisdomLogger(env="mnist_training")
vd_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED(every=100),
tag="training",
output_transform=lambda loss: {"batchloss": loss},
)
for tag, evaluator in [("training", train_evaluator), ("validation", validation_evaluator)]:
vd_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag=tag,
metric_names=["loss", "accuracy"],
global_step_transform=global_step_from_engine(trainer),
)
vd_logger.attach_opt_params_handler(trainer, event_name=Events.ITERATION_COMPLETED(every=100), optimizer=optimizer)
vd_logger.attach(trainer, log_handler=WeightsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100))
vd_logger.attach(trainer, log_handler=GradsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100))
def score_function(engine):
return engine.state.metrics["accuracy"]
model_checkpoint = ModelCheckpoint(
log_dir,
n_saved=2,
filename_prefix="best",
score_function=score_function,
score_name="validation_accuracy",
global_step_transform=global_step_from_engine(trainer),
)
validation_evaluator.add_event_handler(Events.COMPLETED, model_checkpoint, {"model": model})
# kick everything off
trainer.run(train_loader, max_epochs=epochs)
vd_logger.close()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)")
parser.add_argument(
"--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)"
)
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)")
parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)")
parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)")
parser.add_argument("--log_dir", type=str, default="mnist_visdom_logs", help="log directory for training output")
args = parser.parse_args()
run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum, args.log_dir)
|
"""
MNIST example with training and validation monitoring using Tensorboard.
Requirements:
TensorboardX (https://github.com/lanpa/tensorboard-pytorch): `pip install tensorboardX`
or PyTorch >= 1.2 which supports Tensorboard
Tensorboard: `pip install tensorflow` (or just install tensorboard without the rest of tensorflow)
Usage:
Start tensorboard:
```bash
tensorboard --logdir=/tmp/tensorboard_logs/
```
Run the example:
```bash
python mnist_with_tensorboard.py --log_dir=/tmp/tensorboard_logs
```
"""
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from ignite.engine import create_supervised_evaluator, create_supervised_trainer, Events
from ignite.metrics import Accuracy, Loss
try:
from tensorboardX import SummaryWriter
except ImportError:
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
raise ModuleNotFoundError(
"This module requires either tensorboardX or torch >= 1.2.0. "
"You may install tensorboardX with command: \n pip install tensorboardX \n"
"or upgrade PyTorch using your package manager of choice (pip or conda)."
)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True
)
val_loader = DataLoader(
MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False
)
return train_loader, val_loader
def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval, log_dir):
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
writer = SummaryWriter(log_dir=log_dir)
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
model.to(device) # Move model before creating optimizer
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
criterion = nn.NLLLoss()
trainer = create_supervised_trainer(model, optimizer, criterion, device=device)
val_metrics = {"accuracy": Accuracy(), "nll": Loss(criterion)}
evaluator = create_supervised_evaluator(model, metrics=val_metrics, device=device)
@trainer.on(Events.ITERATION_COMPLETED(every=log_interval))
def log_training_loss(engine):
print(
f"Epoch[{engine.state.epoch}] Iteration[{engine.state.iteration}/{len(train_loader)}] "
f"Loss: {engine.state.output:.2f}"
)
writer.add_scalar("training/loss", engine.state.output, engine.state.iteration)
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
evaluator.run(train_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
print(
f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
writer.add_scalar("training/avg_loss", avg_nll, engine.state.epoch)
writer.add_scalar("training/avg_accuracy", avg_accuracy, engine.state.epoch)
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
evaluator.run(val_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
print(
f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
writer.add_scalar("validation/avg_loss", avg_nll, engine.state.epoch)
writer.add_scalar("validation/avg_accuracy", avg_accuracy, engine.state.epoch)
# kick everything off
trainer.run(train_loader, max_epochs=epochs)
writer.close()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)")
parser.add_argument(
"--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)"
)
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)")
parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)")
parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)")
parser.add_argument(
"--log_interval", type=int, default=10, help="how many batches to wait before logging training status"
)
parser.add_argument(
"--log_dir", type=str, default="tensorboard_logs", help="log directory for Tensorboard log output"
)
args = parser.parse_args()
run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum, args.log_interval, args.log_dir)
|
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from tqdm import tqdm
from ignite.engine import create_supervised_evaluator, create_supervised_trainer, Events
from ignite.metrics import Accuracy, Loss
from ignite.utils import setup_logger
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True
)
val_loader = DataLoader(
MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False
)
return train_loader, val_loader
def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval):
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
model.to(device) # Move model before creating optimizer
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
criterion = nn.NLLLoss()
trainer = create_supervised_trainer(model, optimizer, criterion, device=device)
trainer.logger = setup_logger("trainer")
val_metrics = {"accuracy": Accuracy(), "nll": Loss(criterion)}
evaluator = create_supervised_evaluator(model, metrics=val_metrics, device=device)
evaluator.logger = setup_logger("evaluator")
pbar = tqdm(initial=0, leave=False, total=len(train_loader), desc=f"ITERATION - loss: {0:.2f}")
@trainer.on(Events.ITERATION_COMPLETED(every=log_interval))
def log_training_loss(engine):
pbar.desc = f"ITERATION - loss: {engine.state.output:.2f}"
pbar.update(log_interval)
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
pbar.refresh()
evaluator.run(train_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
tqdm.write(
f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
evaluator.run(val_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
tqdm.write(
f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
pbar.n = pbar.last_print_n = 0
@trainer.on(Events.EPOCH_COMPLETED | Events.COMPLETED)
def log_time(engine):
tqdm.write(f"{trainer.last_event_name.name} took {trainer.state.times[trainer.last_event_name.name]} seconds")
trainer.run(train_loader, max_epochs=epochs)
pbar.close()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)")
parser.add_argument(
"--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)"
)
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)")
parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)")
parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)")
parser.add_argument(
"--log_interval", type=int, default=10, help="how many batches to wait before logging training status"
)
args = parser.parse_args()
run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum, args.log_interval)
|
import argparse
import os
import random
import warnings
from pathlib import Path
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
from ignite.contrib.handlers import ProgressBar
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint, Timer
from ignite.metrics import RunningAverage
try:
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
except ImportError:
raise ModuleNotFoundError(
"Please install torchvision to run this example, for example "
"via conda by running 'conda install -c pytorch torchvision'. "
)
PRINT_FREQ = 100
FAKE_IMG_FNAME = "fake_sample_epoch_{:04d}.png"
REAL_IMG_FNAME = "real_sample_epoch_{:04d}.png"
LOGS_FNAME = "logs.tsv"
PLOT_FNAME = "plot.svg"
SAMPLES_FNAME = "samples.svg"
CKPT_PREFIX = "networks"
class Net(nn.Module):
"""A base class for both the generator and the discriminator.
Provides a common weight initialization scheme.
"""
def weights_init(self):
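# DCGAN-style initialization: conv weights are drawn from N(0, 0.02),
# batch-norm weights from N(1, 0.02), and batch-norm biases are set to 0.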
for m in self.modules():
classname = m.__class__.__name__
if "Conv" in classname:
m.weight.data.normal_(0.0, 0.02)
elif "BatchNorm" in classname:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def forward(self, x):
return x
class Generator(Net):
"""Generator network.
Args:
z_dim (int): Size of the latent z vector
nf (int): Number of filters in the second-to-last deconv layer
nc (int): Number of output image channels
"""
def __init__(self, z_dim, nf, nc):
super(Generator, self).__init__()
self.net = nn.Sequential(
# input is Z, going into a convolution
nn.ConvTranspose2d(in_channels=z_dim, out_channels=nf * 8, kernel_size=4, stride=1, padding=0, bias=False),
nn.BatchNorm2d(nf * 8),
nn.ReLU(inplace=True),
# state size. (nf*8) x 4 x 4
nn.ConvTranspose2d(in_channels=nf * 8, out_channels=nf * 4, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(nf * 4),
nn.ReLU(inplace=True),
# state size. (nf*4) x 8 x 8
nn.ConvTranspose2d(in_channels=nf * 4, out_channels=nf * 2, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(nf * 2),
nn.ReLU(inplace=True),
# state size. (nf*2) x 16 x 16
nn.ConvTranspose2d(in_channels=nf * 2, out_channels=nf, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(nf),
nn.ReLU(inplace=True),
# state size. (nf) x 32 x 32
nn.ConvTranspose2d(in_channels=nf, out_channels=nc, kernel_size=4, stride=2, padding=1, bias=False),
nn.Tanh()
# state size. (nc) x 64 x 64
)
self.weights_init()
def forward(self, x):
return self.net(x)
class Discriminator(Net):
"""Discriminator network.
Args:
nc (int): Number of input image channels.
nf (int): Number of filters in the first conv layer.
"""
def __init__(self, nc, nf):
super(Discriminator, self).__init__()
self.net = nn.Sequential(
# input is (nc) x 64 x 64
nn.Conv2d(in_channels=nc, out_channels=nf, kernel_size=4, stride=2, padding=1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nf) x 32 x 32
nn.Conv2d(in_channels=nf, out_channels=nf * 2, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(nf * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nf*2) x 16 x 16
nn.Conv2d(in_channels=nf * 2, out_channels=nf * 4, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(nf * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nf*4) x 8 x 8
nn.Conv2d(in_channels=nf * 4, out_channels=nf * 8, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(nf * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nf*8) x 4 x 4
nn.Conv2d(in_channels=nf * 8, out_channels=1, kernel_size=4, stride=1, padding=0, bias=False),
nn.Sigmoid(),
)
self.weights_init()
def forward(self, x):
output = self.net(x)
return output.view(-1, 1).squeeze(1)
def check_manual_seed(seed):
"""If manual seed is not specified, choose a random one and communicate it to the user."""
seed = seed or random.randint(1, 10000)
random.seed(seed)
torch.manual_seed(seed)
print(f"Using manual seed: {seed}")
def check_dataset(dataset, dataroot):
"""
Args:
dataset (str): Name of the dataset to use. See CLI help for details
dataroot (str): root directory where the dataset will be stored.
Returns:
dataset (data.Dataset): torchvision Dataset object
"""
resize = transforms.Resize(64)
crop = transforms.CenterCrop(64)
to_tensor = transforms.ToTensor()
normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
if dataset in {"imagenet", "folder", "lfw"}:
dataset = dset.ImageFolder(root=dataroot, transform=transforms.Compose([resize, crop, to_tensor, normalize]))
nc = 3
elif dataset == "lsun":
dataset = dset.LSUN(
root=dataroot, classes=["bedroom_train"], transform=transforms.Compose([resize, crop, to_tensor, normalize])
)
nc = 3
elif dataset == "cifar10":
dataset = dset.CIFAR10(
root=dataroot, download=True, transform=transforms.Compose([resize, to_tensor, normalize])
)
nc = 3
elif dataset == "mnist":
dataset = dset.MNIST(root=dataroot, download=True, transform=transforms.Compose([resize, to_tensor, normalize]))
nc = 1
elif dataset == "fake":
dataset = dset.FakeData(size=256, image_size=(3, 64, 64), transform=to_tensor)
nc = 3
else:
raise RuntimeError(f"Invalid dataset name: {dataset}")
return dataset, nc
def main(
dataset,
dataroot,
z_dim,
g_filters,
d_filters,
batch_size,
epochs,
learning_rate,
beta_1,
saved_G,
saved_D,
seed,
n_workers,
device,
alpha,
output_dir,
):
# seed
check_manual_seed(seed)
# data
dataset, num_channels = check_dataset(dataset, dataroot)
loader = data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=n_workers, drop_last=True)
# networks
netG = Generator(z_dim, g_filters, num_channels).to(device)
netD = Discriminator(num_channels, d_filters).to(device)
# criterion
bce = nn.BCELoss()
# optimizers
optimizerG = optim.Adam(netG.parameters(), lr=learning_rate, betas=(beta_1, 0.999))
optimizerD = optim.Adam(netD.parameters(), lr=learning_rate, betas=(beta_1, 0.999))
# load pre-trained models
if saved_G:
netG.load_state_dict(torch.load(saved_G))
if saved_D:
netD.load_state_dict(torch.load(saved_D))
# misc
real_labels = torch.ones(batch_size, device=device)
fake_labels = torch.zeros(batch_size, device=device)
fixed_noise = torch.randn(batch_size, z_dim, 1, 1, device=device)
def get_noise():
return torch.randn(batch_size, z_dim, 1, 1, device=device)
# The main function, processing a batch of examples
def step(engine, batch):
# unpack the batch. It comes from a dataset, so we have <images, labels> pairs. Discard labels.
real, _ = batch
real = real.to(device)
# -----------------------------------------------------------
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
netD.zero_grad()
# train with real
output = netD(real)
errD_real = bce(output, real_labels)
D_x = output.mean().item()
errD_real.backward()
# get fake image from generator
noise = get_noise()
fake = netG(noise)
# train with fake
output = netD(fake.detach())
errD_fake = bce(output, fake_labels)
D_G_z1 = output.mean().item()
errD_fake.backward()
# gradient update
errD = errD_real + errD_fake
optimizerD.step()
# -----------------------------------------------------------
# (2) Update G network: maximize log(D(G(z)))
netG.zero_grad()
# Update generator. We want to make a step that will make it more likely that discriminator outputs "real"
output = netD(fake)
errG = bce(output, real_labels)
D_G_z2 = output.mean().item()
errG.backward()
# gradient update
optimizerG.step()
return {"errD": errD.item(), "errG": errG.item(), "D_x": D_x, "D_G_z1": D_G_z1, "D_G_z2": D_G_z2}
# ignite objects
trainer = Engine(step)
checkpoint_handler = ModelCheckpoint(output_dir, CKPT_PREFIX, n_saved=10, require_empty=False)
timer = Timer(average=True)
# attach running average metrics
monitoring_metrics = ["errD", "errG", "D_x", "D_G_z1", "D_G_z2"]
RunningAverage(alpha=alpha, output_transform=lambda x: x["errD"]).attach(trainer, "errD")
RunningAverage(alpha=alpha, output_transform=lambda x: x["errG"]).attach(trainer, "errG")
RunningAverage(alpha=alpha, output_transform=lambda x: x["D_x"]).attach(trainer, "D_x")
RunningAverage(alpha=alpha, output_transform=lambda x: x["D_G_z1"]).attach(trainer, "D_G_z1")
RunningAverage(alpha=alpha, output_transform=lambda x: x["D_G_z2"]).attach(trainer, "D_G_z2")
# attach progress bar
pbar = ProgressBar()
pbar.attach(trainer, metric_names=monitoring_metrics)
@trainer.on(Events.ITERATION_COMPLETED(every=PRINT_FREQ))
def print_logs(engine):
fname = output_dir / LOGS_FNAME
columns = ["iteration"] + list(engine.state.metrics.keys())
values = [str(engine.state.iteration)] + [str(round(value, 5)) for value in engine.state.metrics.values()]
with open(fname, "a") as f:
if f.tell() == 0:
print("\t".join(columns), file=f)
print("\t".join(values), file=f)
message = f"[{engine.state.epoch}/{epochs}][{engine.state.iteration % len(loader)}/{len(loader)}]"
for name, value in zip(columns, values):
message += f" | {name}: {value}"
pbar.log_message(message)
# adding handlers using `trainer.on` decorator API
@trainer.on(Events.EPOCH_COMPLETED)
def save_fake_example(engine):
fake = netG(fixed_noise)
path = output_dir / FAKE_IMG_FNAME.format(engine.state.epoch)
vutils.save_image(fake.detach(), path, normalize=True)
# adding handlers using `trainer.on` decorator API
@trainer.on(Events.EPOCH_COMPLETED)
def save_real_example(engine):
img, y = engine.state.batch
path = output_dir / REAL_IMG_FNAME.format(engine.state.epoch)
vutils.save_image(img, path, normalize=True)
# adding handlers using `trainer.add_event_handler` method API
trainer.add_event_handler(
event_name=Events.EPOCH_COMPLETED, handler=checkpoint_handler, to_save={"netG": netG, "netD": netD}
)
# automatically adding handlers via a special `attach` method of `Timer` handler
timer.attach(
trainer,
start=Events.EPOCH_STARTED,
resume=Events.ITERATION_STARTED,
pause=Events.ITERATION_COMPLETED,
step=Events.ITERATION_COMPLETED,
)
# adding handlers using `trainer.on` decorator API
@trainer.on(Events.EPOCH_COMPLETED)
def print_times(engine):
pbar.log_message(f"Epoch {engine.state.epoch} done. Time per batch: {timer.value():.3f}[s]")
timer.reset()
# adding handlers using `trainer.on` decorator API
@trainer.on(Events.EPOCH_COMPLETED)
def create_plots(engine):
try:
import matplotlib as mpl
mpl.use("agg")
import matplotlib.pyplot as plt
import pandas as pd
except ImportError:
warnings.warn("Loss plots will not be generated -- pandas or matplotlib not found")
else:
df = pd.read_csv(output_dir / LOGS_FNAME, delimiter="\t", index_col="iteration")
_ = df.plot(subplots=True, figsize=(20, 20))
_ = plt.xlabel("Iteration number")
fig = plt.gcf()
path = output_dir / PLOT_FNAME
fig.savefig(path)
# adding handlers using `trainer.on` decorator API
@trainer.on(Events.EXCEPTION_RAISED)
def handle_exception(engine, e):
if isinstance(e, KeyboardInterrupt) and (engine.state.iteration > 1):
engine.terminate()
warnings.warn("KeyboardInterrupt caught. Exiting gracefully.")
create_plots(engine)
checkpoint_handler(engine, {"netG_exception": netG, "netD_exception": netD})
else:
raise e
# Setup is done. Now let's run the training
trainer.run(loader, epochs)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--dataset",
required=True,
choices={"cifar10", "lsun", "imagenet", "folder", "lfw", "fake", "mnist"},
help="Type of the dataset to be used.",
)
parser.add_argument("--dataroot", required=True, help="path to dataset")
parser.add_argument("--workers", type=int, default=2, help="number of data loading workers")
parser.add_argument("--batch-size", type=int, default=64, help="input batch size")
parser.add_argument("--z-dim", type=int, default=100, help="size of the latent z vector")
parser.add_argument(
"--g-filters", type=int, default=64, help="Number of filters in the second-to-last generator deconv layer"
)
parser.add_argument("--d-filters", type=int, default=64, help="Number of filters in first discriminator conv layer")
parser.add_argument("--epochs", type=int, default=25, help="number of epochs to train for")
parser.add_argument("--lr", type=float, default=0.0002, help="learning rate")
parser.add_argument("--beta-1", type=float, default=0.5, help="beta_1 for adam")
parser.add_argument("--no-cuda", action="store_true", help="disables cuda")
parser.add_argument("--saved-G", default="", help="path to pickled generator (to continue training)")
parser.add_argument("--saved-D", default="", help="path to pickled discriminator (to continue training)")
parser.add_argument("--output-dir", default=".", help="directory to output images and model checkpoints")
parser.add_argument("--seed", type=int, help="manual seed")
parser.add_argument("--alpha", type=float, default=0.98, help="smoothing constant for exponential moving averages")
args = parser.parse_args()
dev = "cpu" if (not torch.cuda.is_available() or args.no_cuda) else "cuda:0"
args.output_dir = Path(args.output_dir)
try:
args.output_dir.mkdir(parents=True)
except FileExistsError:
if (not args.output_dir.is_dir()) or (len(os.listdir(args.output_dir)) > 0):
raise FileExistsError("Please provide a path to a non-existing or empty directory.")
main(
dataset=args.dataset,
dataroot=args.dataroot,
z_dim=args.z_dim,
g_filters=args.g_filters,
d_filters=args.d_filters,
batch_size=args.batch_size,
epochs=args.epochs,
learning_rate=args.lr,
beta_1=args.beta_1,
saved_D=args.saved_D,
saved_G=args.saved_G,
seed=args.seed,
device=dev,
n_workers=args.workers,
alpha=args.alpha,
output_dir=args.output_dir,
)
|
import fire
import torch
from torch.cuda.amp import autocast, GradScaler
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from torchvision.models import wide_resnet50_2
from utils import get_train_eval_loaders
from ignite.contrib.handlers import ProgressBar
from ignite.engine import convert_tensor, create_supervised_evaluator, Engine, Events
from ignite.handlers import Timer
from ignite.metrics import Accuracy, Loss
def main(dataset_path, batch_size=256, max_epochs=10):
assert torch.cuda.is_available()
assert torch.backends.cudnn.enabled, "torch.cuda.amp requires cudnn backend to be enabled."
torch.backends.cudnn.benchmark = True
device = "cuda"
train_loader, test_loader, eval_train_loader = get_train_eval_loaders(dataset_path, batch_size=batch_size)
model = wide_resnet50_2(num_classes=100).to(device)
optimizer = SGD(model.parameters(), lr=0.01)
criterion = CrossEntropyLoss().to(device)
scaler = GradScaler()
def train_step(engine, batch):
x = convert_tensor(batch[0], device, non_blocking=True)
y = convert_tensor(batch[1], device, non_blocking=True)
optimizer.zero_grad()
# Runs the forward pass with autocasting.
with autocast():
y_pred = model(x)
loss = criterion(y_pred, y)
# Scales loss. Calls backward() on scaled loss to create scaled gradients.
# Backward passes under autocast are not recommended.
# Backward ops run in the same precision that autocast used for corresponding forward ops.
scaler.scale(loss).backward()
# scaler.step() first unscales the gradients of the optimizer's assigned params.
# If these gradients do not contain infs or NaNs, optimizer.step() is then called,
# otherwise, optimizer.step() is skipped.
scaler.step(optimizer)
# Updates the scale for next iteration.
scaler.update()
return loss.item()
trainer = Engine(train_step)
timer = Timer(average=True)
timer.attach(trainer, step=Events.EPOCH_COMPLETED)
ProgressBar(persist=True).attach(trainer, output_transform=lambda out: {"batch loss": out})
metrics = {"Accuracy": Accuracy(), "Loss": Loss(criterion)}
evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)
def log_metrics(engine, title):
for name in metrics:
print(f"\t{title} {name}: {engine.state.metrics[name]:.2f}")
@trainer.on(Events.COMPLETED)
def run_validation(_):
print(f"- Mean elapsed time for 1 epoch: {timer.value()}")
print("- Metrics:")
with evaluator.add_event_handler(Events.COMPLETED, log_metrics, "Train"):
evaluator.run(eval_train_loader)
with evaluator.add_event_handler(Events.COMPLETED, log_metrics, "Test"):
evaluator.run(test_loader)
trainer.run(train_loader, max_epochs=max_epochs)
if __name__ == "__main__":
fire.Fire(main)
|
import fire
import torch
from apex import amp
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from torchvision.models import wide_resnet50_2
from utils import get_train_eval_loaders
from ignite.contrib.handlers import ProgressBar
from ignite.engine import convert_tensor, create_supervised_evaluator, Engine, Events
from ignite.handlers import Timer
from ignite.metrics import Accuracy, Loss
def main(dataset_path, batch_size=256, max_epochs=10, opt="O1"):
assert torch.cuda.is_available()
assert torch.backends.cudnn.enabled, "NVIDIA/Apex:Amp requires cudnn backend to be enabled."
torch.backends.cudnn.benchmark = True
device = "cuda"
train_loader, test_loader, eval_train_loader = get_train_eval_loaders(dataset_path, batch_size=batch_size)
model = wide_resnet50_2(num_classes=100).to(device)
optimizer = SGD(model.parameters(), lr=0.01)
criterion = CrossEntropyLoss().to(device)
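# opt_level selects the Apex AMP mode: "O0" keeps pure FP32, "O1" patches functions for
# mixed precision (the usual choice), "O2" casts the model to FP16 with FP32 master weights,
# and "O3" is pure FP16; see the Apex documentation for details.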
model, optimizer = amp.initialize(model, optimizer, opt_level=opt)
def train_step(engine, batch):
x = convert_tensor(batch[0], device, non_blocking=True)
y = convert_tensor(batch[1], device, non_blocking=True)
optimizer.zero_grad()
y_pred = model(x)
loss = criterion(y_pred, y)
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
return loss.item()
trainer = Engine(train_step)
timer = Timer(average=True)
timer.attach(trainer, step=Events.EPOCH_COMPLETED)
ProgressBar(persist=True).attach(trainer, output_transform=lambda out: {"batch loss": out})
metrics = {"Accuracy": Accuracy(), "Loss": Loss(criterion)}
evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)
def log_metrics(engine, title):
for name in metrics:
print(f"\t{title} {name}: {engine.state.metrics[name]:.2f}")
@trainer.on(Events.COMPLETED)
def run_validation(_):
print(f"- Mean elapsed time for 1 epoch: {timer.value()}")
print("- Metrics:")
with evaluator.add_event_handler(Events.COMPLETED, log_metrics, "Train"):
evaluator.run(eval_train_loader)
with evaluator.add_event_handler(Events.COMPLETED, log_metrics, "Test"):
evaluator.run(test_loader)
trainer.run(train_loader, max_epochs=max_epochs)
if __name__ == "__main__":
fire.Fire(main)
|
import random
from torch.utils.data import DataLoader, Subset
from torchvision.datasets.cifar import CIFAR100
from torchvision.transforms import Compose, Normalize, Pad, RandomCrop, RandomErasing, RandomHorizontalFlip, ToTensor
def get_train_eval_loaders(path, batch_size=256):
"""Setup the dataflow:
- load CIFAR100 train and test datasets
- setup train/test image transforms: training images are padded, randomly cropped,
randomly flipped horizontally and augmented with random erasing (a cutout-like augmentation)
- setup train/test data loaders using mini-batches of ``batch_size`` examples (256 by default)
Returns:
train_loader, test_loader, eval_train_loader
"""
train_transform = Compose(
[
Pad(4),
RandomCrop(32),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
RandomErasing(),
]
)
test_transform = Compose([ToTensor(), Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
train_dataset = CIFAR100(root=path, train=True, transform=train_transform, download=True)
test_dataset = CIFAR100(root=path, train=False, transform=test_transform, download=False)
train_eval_indices = [random.randint(0, len(train_dataset) - 1) for _ in range(len(test_dataset))]
train_eval_dataset = Subset(train_dataset, train_eval_indices)
train_loader = DataLoader(
train_dataset, batch_size=batch_size, num_workers=12, shuffle=True, drop_last=True, pin_memory=True
)
test_loader = DataLoader(
test_dataset, batch_size=batch_size, num_workers=12, shuffle=False, drop_last=False, pin_memory=True
)
eval_train_loader = DataLoader(
train_eval_dataset, batch_size=batch_size, num_workers=12, shuffle=False, drop_last=False, pin_memory=True
)
return train_loader, test_loader, eval_train_loader
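# Example usage (hypothetical path; CIFAR100 is downloaded on the first call):
# train_loader, test_loader, eval_train_loader = get_train_eval_loaders("/tmp/cifar100", batch_size=128)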
|
import fire
import torch
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from torchvision.models import wide_resnet50_2
from utils import get_train_eval_loaders
from ignite.contrib.handlers import ProgressBar
from ignite.engine import convert_tensor, create_supervised_evaluator, Engine, Events
from ignite.handlers import Timer
from ignite.metrics import Accuracy, Loss
def main(dataset_path, batch_size=256, max_epochs=10):
assert torch.cuda.is_available()
assert torch.backends.cudnn.enabled, "This benchmark requires cudnn backend to be enabled."
torch.backends.cudnn.benchmark = True
device = "cuda"
train_loader, test_loader, eval_train_loader = get_train_eval_loaders(dataset_path, batch_size=batch_size)
model = wide_resnet50_2(num_classes=100).to(device)
optimizer = SGD(model.parameters(), lr=0.01)
criterion = CrossEntropyLoss().to(device)
def train_step(engine, batch):
x = convert_tensor(batch[0], device, non_blocking=True)
y = convert_tensor(batch[1], device, non_blocking=True)
optimizer.zero_grad()
y_pred = model(x)
loss = criterion(y_pred, y)
loss.backward()
optimizer.step()
return loss.item()
trainer = Engine(train_step)
timer = Timer(average=True)
timer.attach(trainer, step=Events.EPOCH_COMPLETED)
ProgressBar(persist=True).attach(trainer, output_transform=lambda out: {"batch loss": out})
metrics = {"Accuracy": Accuracy(), "Loss": Loss(criterion)}
evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)
def log_metrics(engine, title):
for name in metrics:
print(f"\t{title} {name}: {engine.state.metrics[name]:.2f}")
@trainer.on(Events.COMPLETED)
def run_validation(_):
print(f"- Mean elapsed time for 1 epoch: {timer.value()}")
print("- Metrics:")
with evaluator.add_event_handler(Events.COMPLETED, log_metrics, "Train"):
evaluator.run(eval_train_loader)
with evaluator.add_event_handler(Events.COMPLETED, log_metrics, "Test"):
evaluator.run(test_loader)
trainer.run(train_loader, max_epochs=max_epochs)
if __name__ == "__main__":
fire.Fire(main)
|
import os
from pathlib import Path
import brevitas.nn as qnn
import torch
import torch.nn as nn
from pact import PACTReLU
from torchvision import datasets, models
from torchvision.transforms import Compose, Normalize, Pad, RandomCrop, RandomHorizontalFlip, ToTensor
train_transform = Compose(
[
Pad(4),
RandomCrop(32, fill=128),
RandomHorizontalFlip(),
ToTensor(),
Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
]
)
test_transform = Compose([ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
def get_train_test_datasets(path):
path = Path(path)
if not path.exists():
path.mkdir(parents=True)
download = True
else:
download = len(os.listdir(path)) < 1
train_ds = datasets.CIFAR10(root=path, train=True, download=download, transform=train_transform)
test_ds = datasets.CIFAR10(root=path, train=False, download=False, transform=test_transform)
return train_ds, test_ds
def get_model(name):
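# Resolve the model constructor either from torchvision.models or from the
# quantization-aware factories defined below (resnet18_QAT_8b/6b/5b/4b).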
__dict__ = globals()
if name in models.__dict__:
fn = models.__dict__[name]
elif name in ["resnet18_QAT_8b", "resnet18_QAT_6b", "resnet18_QAT_5b", "resnet18_QAT_4b"]:
fn = __dict__[name]
else:
raise RuntimeError("Unknown model name {}".format(name))
return fn(num_classes=10)
# The code below is taken from https://discuss.pytorch.org/t/evaluator-returns-nan/107972/3
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1, weight_bit_width=8):
"""3x3 convolution with padding"""
return qnn.QuantConv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation,
weight_bit_width=weight_bit_width,
)
def conv1x1(in_planes, out_planes, stride=1, weight_bit_width=8):
"""1x1 convolution"""
return qnn.QuantConv2d(
in_planes, out_planes, kernel_size=1, stride=stride, bias=False, weight_bit_width=weight_bit_width
)
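# Swap the default activation implementation of QuantReLU for PACT (parameterized
# clipping activation), which learns its clipping threshold during training;
# PACTReLU is imported from the accompanying pact.py module.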
def make_PACT_relu(bit_width=8):
relu = qnn.QuantReLU(bit_width=bit_width)
relu.act_impl = PACTReLU()
return relu
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None,
bit_width=8,
):
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride, weight_bit_width=bit_width)
self.bn1 = norm_layer(planes)
self.relu = make_PACT_relu(bit_width=bit_width)
self.conv2 = conv3x3(planes, planes, weight_bit_width=bit_width)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None,
bit_width=8,
):
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.0)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width, weight_bit_width=bit_width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation, weight_bit_width=bit_width)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion, weight_bit_width=bit_width)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = make_PACT_relu(bit_width=bit_width)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet_QAT_Xb(nn.Module):
def __init__(
self,
block,
layers,
num_classes=1000,
zero_init_residual=False,
groups=1,
width_per_group=64,
replace_stride_with_dilation=None,
norm_layer=None,
bit_width=8,
):
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation)
)
self.groups = groups
self.base_width = width_per_group
self.conv1 = qnn.QuantConv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = make_PACT_relu()
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], bit_width=bit_width)
self.layer2 = self._make_layer(
block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0], bit_width=bit_width
)
self.layer3 = self._make_layer(
block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1], bit_width=bit_width
)
self.layer4 = self._make_layer(
block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2], bit_width=bit_width
)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d): # qnn.QuantConv2d includes nn.Conv2d inside.
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False, bit_width=8):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride, weight_bit_width=bit_width),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(
block(
self.inplanes,
planes,
stride,
downsample,
self.groups,
self.base_width,
previous_dilation,
norm_layer,
bit_width=bit_width,
)
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer,
bit_width=bit_width,
)
)
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x):
return self._forward_impl(x)
def _resnet_QAT_Xb(block, layers, **kwargs):
model = ResNet_QAT_Xb(block, layers, **kwargs)
return model
def resnet18_QAT_8b(*args, **kwargs):
return _resnet_QAT_Xb(BasicBlock, [2, 2, 2, 2], **kwargs)
def resnet18_QAT_6b(*args, **kwargs):
return _resnet_QAT_Xb(BasicBlock, [2, 2, 2, 2], bit_width=6, **kwargs)
def resnet18_QAT_5b(*args, **kwargs):
return _resnet_QAT_Xb(BasicBlock, [2, 2, 2, 2], bit_width=5, **kwargs)
def resnet18_QAT_4b(*args, **kwargs):
return _resnet_QAT_Xb(BasicBlock, [2, 2, 2, 2], bit_width=4, **kwargs)
|
from datetime import datetime
from pathlib import Path
import fire
import torch
import torch.nn as nn
import torch.optim as optim
import utils
from torch.cuda.amp import autocast, GradScaler
import ignite
import ignite.distributed as idist
from ignite.contrib.engines import common
from ignite.contrib.handlers import PiecewiseLinear
from ignite.engine import create_supervised_evaluator, Engine, Events
from ignite.handlers import Checkpoint, DiskSaver, global_step_from_engine
from ignite.metrics import Accuracy, Loss
from ignite.utils import manual_seed, setup_logger
def training(local_rank, config):
rank = idist.get_rank()
manual_seed(config["seed"] + rank)
device = idist.device()
logger = setup_logger(name="CIFAR10-QAT-Training", distributed_rank=local_rank)
log_basic_info(logger, config)
output_path = config["output_path"]
if rank == 0:
now = datetime.now().strftime("%Y%m%d-%H%M%S")
folder_name = f"{config['model']}_backend-{idist.backend()}-{idist.get_world_size()}_{now}"
output_path = Path(output_path) / folder_name
if not output_path.exists():
output_path.mkdir(parents=True)
config["output_path"] = output_path.as_posix()
logger.info(f"Output path: {config['output_path']}")
if "cuda" in device.type:
config["cuda device name"] = torch.cuda.get_device_name(local_rank)
if config["with_clearml"]:
from clearml import Task
task = Task.init("CIFAR10-Training", task_name=output_path.stem)
task.connect_configuration(config)
# Log hyper parameters
hyper_params = [
"model",
"batch_size",
"momentum",
"weight_decay",
"num_epochs",
"learning_rate",
"num_warmup_epochs",
]
task.connect({k: config[k] for k in hyper_params})
# Setup dataflow, model, optimizer, criterion
train_loader, test_loader = get_dataflow(config)
config["num_iters_per_epoch"] = len(train_loader)
model, optimizer, criterion, lr_scheduler = initialize(config)
# Create trainer for current task
trainer = create_trainer(model, optimizer, criterion, lr_scheduler, train_loader.sampler, config, logger)
# Let's now setup evaluator engine to perform model's validation and compute metrics
metrics = {
"Accuracy": Accuracy(),
"Loss": Loss(criterion),
}
# We define two evaluators, as they won't play exactly the same role:
# - `evaluator` runs on the test set and is used to save the best models by test accuracy
# - `train_evaluator` only computes and logs metrics on the training set
evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)
train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)
def run_validation(engine):
epoch = trainer.state.epoch
state = train_evaluator.run(train_loader)
log_metrics(logger, epoch, state.times["COMPLETED"], "Train", state.metrics)
state = evaluator.run(test_loader)
log_metrics(logger, epoch, state.times["COMPLETED"], "Test", state.metrics)
trainer.add_event_handler(Events.EPOCH_COMPLETED(every=config["validate_every"]) | Events.COMPLETED, run_validation)
if rank == 0:
# Setup TensorBoard logging on trainer and evaluators. Logged values are:
# - Training metrics, e.g. running average loss values
# - Learning rate
# - Evaluation train/test metrics
evaluators = {"training": train_evaluator, "test": evaluator}
tb_logger = common.setup_tb_logging(output_path, trainer, optimizer, evaluators=evaluators)
# Store 2 best models by validation accuracy starting from num_epochs / 2:
best_model_handler = Checkpoint(
{"model": model},
get_save_handler(config),
filename_prefix="best",
n_saved=2,
global_step_transform=global_step_from_engine(trainer),
score_name="test_accuracy",
score_function=Checkpoint.get_default_score_fn("Accuracy"),
)
evaluator.add_event_handler(
Events.COMPLETED(lambda *_: trainer.state.epoch > config["num_epochs"] // 2), best_model_handler
)
try:
trainer.run(train_loader, max_epochs=config["num_epochs"])
except Exception as e:
logger.exception("")
raise e
if rank == 0:
tb_logger.close()
def run(
seed=543,
data_path="/tmp/cifar10",
output_path="/tmp/output-cifar10/",
model="resnet18_QAT_8b",
batch_size=512,
momentum=0.9,
weight_decay=1e-4,
num_workers=12,
num_epochs=24,
learning_rate=0.4,
num_warmup_epochs=4,
validate_every=3,
checkpoint_every=1000,
backend=None,
resume_from=None,
log_every_iters=15,
nproc_per_node=None,
with_clearml=False,
with_amp=False,
**spawn_kwargs,
):
"""Main entry point to train a model on the CIFAR10 dataset.
Args:
seed (int): random state seed to set. Default, 543.
data_path (str): input dataset path. Default, "/tmp/cifar10".
output_path (str): output path. Default, "/tmp/output-cifar10".
model (str): model name (from torchvision or a QAT variant defined in utils) to setup model to train. Default, "resnet18_QAT_8b".
batch_size (int): total batch size. Default, 512.
momentum (float): optimizer's momentum. Default, 0.9.
weight_decay (float): weight decay. Default, 1e-4.
num_workers (int): number of workers in the data loader. Default, 12.
num_epochs (int): number of epochs to train the model. Default, 24.
learning_rate (float): peak of piecewise linear learning rate scheduler. Default, 0.4.
num_warmup_epochs (int): number of warm-up epochs before learning rate decay. Default, 4.
validate_every (int): run model's validation every ``validate_every`` epochs. Default, 3.
checkpoint_every (int): store training checkpoint every ``checkpoint_every`` iterations. Default, 1000.
backend (str, optional): backend to use for distributed configuration. Possible values: None, "nccl", "xla-tpu",
"gloo" etc. Default, None.
nproc_per_node (int, optional): optional argument to setup number of processes per node. It is useful,
when main python process is spawning training as child processes.
resume_from (str, optional): path to checkpoint to use to resume the training from. Default, None.
log_every_iters (int): argument to log batch loss every ``log_every_iters`` iterations.
It can be 0 to disable it. Default, 15.
with_clearml (bool): if True, experiment ClearML logger is setup. Default, False.
with_amp (bool): if True, enables native automatic mixed precision. Default, False.
**spawn_kwargs: Other kwargs to spawn run in child processes: master_addr, master_port, node_rank, nnodes
"""
# check that num_epochs is strictly greater than num_warmup_epochs
if num_warmup_epochs >= num_epochs:
raise ValueError(
"num_epochs cannot be less than or equal to num_warmup_epochs, please increase num_epochs or decrease "
"num_warmup_epochs"
)
# catch all local parameters
config = locals()
config.update(config["spawn_kwargs"])
del config["spawn_kwargs"]
spawn_kwargs["nproc_per_node"] = nproc_per_node
with idist.Parallel(backend=backend, **spawn_kwargs) as parallel:
parallel.run(training, config)
def get_dataflow(config):
# - Get train/test datasets
with idist.one_rank_first(local=True):
train_dataset, test_dataset = utils.get_train_test_datasets(config["data_path"])
# Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu
train_loader = idist.auto_dataloader(
train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True
)
test_loader = idist.auto_dataloader(
test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False
)
return train_loader, test_loader
def initialize(config):
model = utils.get_model(config["model"])
# Adapt model for distributed settings if configured
model = idist.auto_model(model, find_unused_parameters=True)
optimizer = optim.SGD(
model.parameters(),
lr=config["learning_rate"],
momentum=config["momentum"],
weight_decay=config["weight_decay"],
nesterov=True,
)
optimizer = idist.auto_optim(optimizer)
criterion = nn.CrossEntropyLoss().to(idist.device())
le = config["num_iters_per_epoch"]
milestones_values = [
(0, 0.0),
(le * config["num_warmup_epochs"], config["learning_rate"]),
(le * config["num_epochs"], 0.0),
]
lr_scheduler = PiecewiseLinear(optimizer, param_name="lr", milestones_values=milestones_values)
return model, optimizer, criterion, lr_scheduler
def log_metrics(logger, epoch, elapsed, tag, metrics):
metrics_output = "\n".join([f"\t{k}: {v}" for k, v in metrics.items()])
logger.info(f"\nEpoch {epoch} - Evaluation time (seconds): {elapsed:.2f} - {tag} metrics:\n {metrics_output}")
def log_basic_info(logger, config):
logger.info(f"Quantization Aware Training {config['model']} on CIFAR10")
logger.info(f"- PyTorch version: {torch.__version__}")
logger.info(f"- Ignite version: {ignite.__version__}")
if torch.cuda.is_available():
# explicitly import cudnn as
# torch.backends.cudnn can not be pickled with hvd spawning procs
from torch.backends import cudnn
logger.info(f"- GPU Device: {torch.cuda.get_device_name(idist.get_local_rank())}")
logger.info(f"- CUDA version: {torch.version.cuda}")
logger.info(f"- CUDNN version: {cudnn.version()}")
logger.info("\n")
logger.info("Configuration:")
for key, value in config.items():
logger.info(f"\t{key}: {value}")
logger.info("\n")
if idist.get_world_size() > 1:
logger.info("\nDistributed setting:")
logger.info(f"\tbackend: {idist.backend()}")
logger.info(f"\tworld size: {idist.get_world_size()}")
logger.info("\n")
def create_trainer(model, optimizer, criterion, lr_scheduler, train_sampler, config, logger):
device = idist.device()
# Setup Ignite trainer:
# - let's define training step
# - add other common handlers:
# - TerminateOnNan,
# - handler to setup learning rate scheduling,
# - ModelCheckpoint
# - `RunningAverage` on `train_step` output
# - Two progress bars on epochs and optionally on iterations
with_amp = config["with_amp"]
scaler = GradScaler(enabled=with_amp)
def train_step(engine, batch):
x, y = batch[0], batch[1]
if x.device != device:
x = x.to(device, non_blocking=True)
y = y.to(device, non_blocking=True)
model.train()
with autocast(enabled=with_amp):
y_pred = model(x)
loss = criterion(y_pred, y)
optimizer.zero_grad()
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
return {
"batch loss": loss.item(),
}
trainer = Engine(train_step)
trainer.logger = logger
to_save = {"trainer": trainer, "model": model, "optimizer": optimizer, "lr_scheduler": lr_scheduler}
metric_names = [
"batch loss",
]
common.setup_common_training_handlers(
trainer=trainer,
train_sampler=train_sampler,
to_save=to_save,
save_every_iters=config["checkpoint_every"],
save_handler=get_save_handler(config),
lr_scheduler=lr_scheduler,
output_names=metric_names if config["log_every_iters"] > 0 else None,
with_pbars=False,
clear_cuda_cache=False,
)
resume_from = config["resume_from"]
if resume_from is not None:
checkpoint_fp = Path(resume_from)
assert checkpoint_fp.exists(), f"Checkpoint '{checkpoint_fp.as_posix()}' is not found"
logger.info(f"Resume from a checkpoint: {checkpoint_fp.as_posix()}")
checkpoint = torch.load(checkpoint_fp.as_posix(), map_location="cpu")
Checkpoint.load_objects(to_load=to_save, checkpoint=checkpoint)
return trainer
def get_save_handler(config):
if config["with_clearml"]:
from ignite.contrib.handlers.clearml_logger import ClearMLSaver
return ClearMLSaver(dirname=config["output_path"])
return DiskSaver(config["output_path"], require_empty=False)
if __name__ == "__main__":
fire.Fire({"run": run})
|
# Implementation taken from https://discuss.pytorch.org/t/evaluator-returns-nan/107972/3
# Ref: https://arxiv.org/abs/1805.06085
import torch
import torch.nn as nn
class PACTClip(torch.autograd.Function):
@staticmethod
def forward(ctx, x, alpha):
ctx.save_for_backward(x, alpha)
return torch.clamp(x, 0, alpha.data)
@staticmethod
def backward(ctx, dy):
x, alpha = ctx.saved_tensors
# gradient w.r.t. the input: non-zero only inside the clipping range [0, alpha]
dx = dy.clone()
dx[x < 0] = 0
dx[x > alpha] = 0
# alpha receives the summed gradient from the inputs clipped at the upper bound (x > alpha)
dalpha = dy.clone()
dalpha[x <= alpha] = 0
return dx, torch.sum(dalpha)
class PACTReLU(nn.Module):
def __init__(self, alpha=6.0):
super().__init__()
self.alpha = nn.Parameter(torch.tensor(alpha))
def forward(self, x):
return PACTClip.apply(x, self.alpha)
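# --- Illustrative usage sketch (not part of the original module) ---
# PACTReLU is a drop-in replacement for nn.ReLU with a learnable clipping threshold alpha.
# A minimal forward/backward check of the classes defined above:
if __name__ == "__main__":
    act = PACTReLU(alpha=6.0)
    x = torch.randn(4, 8, requires_grad=True)
    act(x).sum().backward()
    # gradients flow both to the input and to the clipping threshold
    print(x.grad.shape, act.alpha.grad)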
|
import torch.nn as nn
import torch.nn.init as init
class Net(nn.Module):
def __init__(self, upscale_factor):
super(Net, self).__init__()
self.relu = nn.ReLU()
self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2))
self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1))
self.conv4 = nn.Conv2d(32, upscale_factor**2, (3, 3), (1, 1), (1, 1))
self.pixel_shuffle = nn.PixelShuffle(upscale_factor)
self._initialize_weights()
def forward(self, x):
x = self.relu(self.conv1(x))
x = self.relu(self.conv2(x))
x = self.relu(self.conv3(x))
x = self.pixel_shuffle(self.conv4(x))
return x
def _initialize_weights(self):
init.orthogonal_(self.conv1.weight, init.calculate_gain("relu"))
init.orthogonal_(self.conv2.weight, init.calculate_gain("relu"))
init.orthogonal_(self.conv3.weight, init.calculate_gain("relu"))
init.orthogonal_(self.conv4.weight)
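# --- Illustrative shape check (not part of the original module) ---
# The network maps a single-channel low-resolution patch of size (H, W) to a patch of
# size (H * upscale_factor, W * upscale_factor) via PixelShuffle:
if __name__ == "__main__":
    import torch
    net = Net(upscale_factor=3)
    out = net(torch.rand(1, 1, 64, 64))
    print(out.shape)  # expected: torch.Size([1, 1, 192, 192])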
|
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from model import Net
from torch.utils.data import DataLoader
from torchvision.transforms.functional import center_crop, resize, to_tensor
from ignite.contrib.handlers import ProgressBar
from ignite.engine import Engine, Events
from ignite.handlers import BasicTimeProfiler
from ignite.metrics import PSNR
# Training settings
parser = argparse.ArgumentParser(description="PyTorch Super Res Example")
parser.add_argument("--crop_size", type=int, default=256, help="cropped size of the images for training")
parser.add_argument("--upscale_factor", type=int, required=True, help="super resolution upscale factor")
parser.add_argument("--batch_size", type=int, default=64, help="training batch size")
parser.add_argument("--test_batch_size", type=int, default=10, help="testing batch size")
parser.add_argument("--n_epochs", type=int, default=2, help="number of epochs to train for")
parser.add_argument("--lr", type=float, default=0.01, help="Learning Rate. Default=0.01")
parser.add_argument("--cuda", action="store_true", help="use cuda?")
parser.add_argument("--mps", action="store_true", default=False, help="enables macOS GPU training")
parser.add_argument("--threads", type=int, default=4, help="number of threads for data loader to use")
parser.add_argument("--seed", type=int, default=123, help="random seed to use. Default=123")
parser.add_argument("--debug", action="store_true", help="use debug")
opt = parser.parse_args()
print(opt)
if opt.cuda and not torch.cuda.is_available():
raise Exception("No GPU found, please run without --cuda")
if not opt.mps and torch.backends.mps.is_available():
raise Exception("Found mps device, please run with --mps to enable macOS GPU")
torch.manual_seed(opt.seed)
use_mps = opt.mps and torch.backends.mps.is_available()
if opt.cuda:
device = torch.device("cuda")
elif use_mps:
device = torch.device("mps")
else:
device = torch.device("cpu")
print("===> Loading datasets")
class SRDataset(torch.utils.data.Dataset):
def __init__(self, dataset, scale_factor, crop_size=256):
self.dataset = dataset
self.scale_factor = scale_factor
self.crop_size = crop_size
def __getitem__(self, index):
image, _ = self.dataset[index]
img = image.convert("YCbCr")
hr_image, _, _ = img.split()
hr_image = center_crop(hr_image, self.crop_size)
lr_image = hr_image.copy()
if self.scale_factor != 1:
size = self.crop_size // self.scale_factor
lr_image = resize(lr_image, [size, size])
hr_image = to_tensor(hr_image)
lr_image = to_tensor(lr_image)
return lr_image, hr_image
def __len__(self):
return len(self.dataset)
trainset = torchvision.datasets.Caltech101(root="./data", download=True)
testset = torchvision.datasets.Caltech101(root="./data", download=False)
trainset_sr = SRDataset(trainset, scale_factor=opt.upscale_factor, crop_size=opt.crop_size)
testset_sr = SRDataset(testset, scale_factor=opt.upscale_factor, crop_size=opt.crop_size)
training_data_loader = DataLoader(dataset=trainset_sr, num_workers=opt.threads, batch_size=opt.batch_size, shuffle=True)
testing_data_loader = DataLoader(dataset=testset_sr, num_workers=opt.threads, batch_size=opt.test_batch_size)
print("===> Building model")
model = Net(upscale_factor=opt.upscale_factor).to(device)
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=opt.lr)
def train_step(engine, batch):
model.train()
input, target = batch[0].to(device), batch[1].to(device)
optimizer.zero_grad()
loss = criterion(model(input), target)
loss.backward()
optimizer.step()
return loss.item()
def validation_step(engine, batch):
model.eval()
with torch.no_grad():
x, y = batch[0].to(device), batch[1].to(device)
y_pred = model(x)
return y_pred, y
trainer = Engine(train_step)
evaluator = Engine(validation_step)
psnr = PSNR(data_range=1)
psnr.attach(evaluator, "psnr")
validate_every = 1
if opt.debug:
epoch_length = 10
validate_epoch_length = 1
else:
epoch_length = len(training_data_loader)
validate_epoch_length = len(testing_data_loader)
@trainer.on(Events.EPOCH_COMPLETED(every=validate_every))
def log_validation():
evaluator.run(testing_data_loader, epoch_length=validate_epoch_length)
metrics = evaluator.state.metrics
print(f"Epoch: {trainer.state.epoch}, Avg. PSNR: {metrics['psnr']} dB")
@trainer.on(Events.EPOCH_COMPLETED)
def checkpoint():
model_out_path = "model_epoch_{}.pth".format(trainer.state.epoch)
torch.save(model, model_out_path)
print("Checkpoint saved to {}".format(model_out_path))
# Attach basic profiler
basic_profiler = BasicTimeProfiler()
basic_profiler.attach(trainer)
ProgressBar().attach(trainer, output_transform=lambda x: {"loss": x})
trainer.run(training_data_loader, opt.n_epochs, epoch_length=epoch_length)
results = basic_profiler.get_results()
basic_profiler.print_results(results)
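# Illustrative invocation (the file name main.py and the flag values are assumptions /
# examples only; --upscale_factor is the only required flag):
#   python main.py --upscale_factor 3 --crop_size 256 --batch_size 16 --n_epochs 10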
|
import argparse
import numpy as np
import torch
from PIL import Image
from torchvision.transforms.functional import to_tensor
# Training settings
parser = argparse.ArgumentParser(description="PyTorch Super Res Example")
parser.add_argument("--input_image", type=str, required=True, help="input image to use")
parser.add_argument("--model", type=str, required=True, help="model file to use")
parser.add_argument("--output_filename", type=str, help="where to save the output image")
parser.add_argument("--cuda", action="store_true", help="use cuda")
opt = parser.parse_args()
print(opt)
img = Image.open(opt.input_image).convert("YCbCr")
y, cb, cr = img.split()
model = torch.load(opt.model)
input = to_tensor(y).view(1, -1, y.size[1], y.size[0])
if opt.cuda:
model = model.cuda()
input = input.cuda()
model.eval()
with torch.no_grad():
out = model(input)
out = out.cpu()
out_img_y = out[0].detach().numpy()
out_img_y *= 255.0
out_img_y = out_img_y.clip(0, 255)
out_img_y = Image.fromarray(np.uint8(out_img_y[0]), mode="L")
out_img_cb = cb.resize(out_img_y.size, Image.BICUBIC)
out_img_cr = cr.resize(out_img_y.size, Image.BICUBIC)
out_img = Image.merge("YCbCr", [out_img_y, out_img_cb, out_img_cr]).convert("RGB")
out_img.save(opt.output_filename)
print("output image saved to ", opt.output_filename)
|
from typing import Callable, Optional
import numpy as np
import torch
try:
from image_dataset_viz import render_datapoint
except ImportError:
raise ModuleNotFoundError(
"Please install image-dataset-viz via pip install --upgrade git+https://github.com/vfdev-5/ImageDatasetViz.git"
)
def tensor_to_numpy(t: torch.Tensor) -> np.ndarray:
img = t.cpu().numpy().transpose((1, 2, 0))
return img.astype(np.uint8)
def make_grid(
batch_img: torch.Tensor,
batch_preds: torch.Tensor,
img_denormalize_fn: Callable,
batch_gt: Optional[torch.Tensor] = None,
):
"""Create a grid from batch image and mask as
i+l1+gt1 | i+l2+gt2 | i+l3+gt3 | i+l4+gt4 | ...
where i+l+gt = image + predicted label + ground truth
Args:
batch_img (torch.Tensor) batch of images of any type
batch_preds (torch.Tensor) batch of masks
img_denormalize_fn (Callable): function to denormalize batch of images
batch_gt (torch.Tensor, optional): batch of ground truth masks.
"""
assert isinstance(batch_img, torch.Tensor) and isinstance(batch_preds, torch.Tensor)
assert len(batch_img) == len(batch_preds), f"{len(batch_img)} vs {len(batch_preds)}"
assert batch_preds.ndim == 1, f"{batch_preds.ndim}"
if batch_gt is not None:
assert isinstance(batch_gt, torch.Tensor)
assert len(batch_preds) == len(batch_gt)
assert batch_gt.ndim == 1, f"{batch_gt.ndim}"
b = batch_img.shape[0]
h, w = batch_img.shape[2:]
le = 1
out_image = np.zeros((h * le, w * b, 3), dtype="uint8")
for i in range(b):
img = batch_img[i]
y_preds = batch_preds[i]
img = img_denormalize_fn(img)
img = tensor_to_numpy(img)
pred_label = y_preds.cpu().item()
target = f"p={pred_label}"
if batch_gt is not None:
gt_label = batch_gt[i]
gt_label = gt_label.cpu().item()
target += f" | gt={gt_label}"
out_image[0:h, i * w : (i + 1) * w, :] = render_datapoint(img, target, text_size=12)
return out_image
def predictions_gt_images_handler(img_denormalize_fn, n_images=None, another_engine=None, prefix_tag=None):
def wrapper(engine, logger, event_name):
batch = engine.state.batch
output = engine.state.output
x, y = batch
y_pred = output[0]
if y.shape == y_pred.shape and y.ndim == 4:
# Case of y of shape (B, C, H, W)
y = torch.argmax(y, dim=1)
y_pred = torch.argmax(y_pred, dim=1).byte()
if n_images is not None:
x = x[:n_images, ...]
y = y[:n_images, ...]
y_pred = y_pred[:n_images, ...]
grid_pred_gt = make_grid(x, y_pred, img_denormalize_fn, batch_gt=y)
state = engine.state if another_engine is None else another_engine.state
global_step = state.get_event_attrib_value(event_name)
tag = "predictions_with_gt"
if prefix_tag is not None:
tag = f"{prefix_tag}: {tag}"
logger.writer.add_image(tag=tag, img_tensor=grid_pred_gt, global_step=global_step, dataformats="HWC")
return wrapper
|
import torch
import ignite
import ignite.distributed as idist
from ignite.handlers import DiskSaver
def initialize(config):
device = idist.device()
model = config.model.to(device)
optimizer = config.optimizer
# Adapt model to dist config
model = idist.auto_model(model)
optimizer = idist.auto_optim(optimizer)
criterion = config.criterion.to(device)
return model, optimizer, criterion
def log_basic_info(logger, config):
logger.info(f"- PyTorch version: {torch.__version__}")
logger.info(f"- Ignite version: {ignite.__version__}")
if torch.cuda.is_available():
# explicitly import cudnn as
# torch.backends.cudnn can not be pickled with hvd spawning procs
from torch.backends import cudnn
logger.info(f"- GPU Device: {torch.cuda.get_device_name(idist.get_local_rank())}")
logger.info(f"- CUDA version: {torch.version.cuda}")
logger.info(f"- CUDNN version: {cudnn.version()}")
logger.info("\n")
logger.info("Configuration:")
for key, value in config.items():
logger.info(f"\t{key}: {value}")
logger.info("\n")
if idist.get_world_size() > 1:
logger.info("\nDistributed setting:")
logger.info(f"\tbackend: {idist.backend()}")
logger.info(f"\tworld size: {idist.get_world_size()}")
logger.info("\n")
def log_metrics(logger, epoch, elapsed, tag, metrics):
metrics_output = "\n".join([f"\t{k}: {v}" for k, v in metrics.items()])
logger.info(f"\nEpoch {epoch} - Evaluation time (seconds): {elapsed:.2f} - {tag} metrics:\n {metrics_output}")
def get_save_handler(output_path, with_clearml):
if with_clearml:
from ignite.contrib.handlers.clearml_logger import ClearMLSaver
return ClearMLSaver(dirname=output_path)
return DiskSaver(output_path)
|
from pathlib import Path
from typing import Callable, Optional, Tuple
import cv2
import torch
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Subset
from torchvision.datasets import ImageFolder
import ignite.distributed as idist
from ignite.utils import convert_tensor
def opencv_loader(path):
img = cv2.imread(path)
assert img is not None, f"Image at '{path}' has a problem"
return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
def get_dataloader(dataset, sampler=None, shuffle=False, limit_num_samples=None, **kwargs):
if limit_num_samples is not None:
g = torch.Generator().manual_seed(limit_num_samples)
indices = torch.randperm(len(dataset), generator=g)[:limit_num_samples]
dataset = Subset(dataset, indices)
return idist.auto_dataloader(dataset, sampler=sampler, shuffle=(sampler is None) and shuffle, **kwargs)
def get_train_val_loaders(
root_path: str,
train_transforms: Callable,
val_transforms: Callable,
batch_size: int = 16,
num_workers: int = 8,
val_batch_size: Optional[int] = None,
limit_train_num_samples: Optional[int] = None,
limit_val_num_samples: Optional[int] = None,
) -> Tuple[DataLoader, DataLoader, DataLoader]:
train_ds = ImageFolder(
Path(root_path) / "train",
transform=lambda sample: train_transforms(image=sample)["image"],
loader=opencv_loader,
)
val_ds = ImageFolder(
Path(root_path) / "val", transform=lambda sample: val_transforms(image=sample)["image"], loader=opencv_loader
)
if len(val_ds) < len(train_ds):
g = torch.Generator().manual_seed(len(train_ds))
train_eval_indices = torch.randperm(len(train_ds), generator=g)[: len(val_ds)]
train_eval_ds = Subset(train_ds, train_eval_indices)
else:
train_eval_ds = train_ds
val_batch_size = batch_size * 4 if val_batch_size is None else val_batch_size
train_loader = get_dataloader(
train_ds,
shuffle=True,
batch_size=batch_size,
num_workers=num_workers,
drop_last=True,
limit_num_samples=limit_train_num_samples,
)
val_loader = get_dataloader(
val_ds,
shuffle=False,
batch_size=val_batch_size,
num_workers=num_workers,
drop_last=False,
limit_num_samples=limit_val_num_samples,
)
train_eval_loader = get_dataloader(
train_eval_ds,
shuffle=False,
batch_size=val_batch_size,
num_workers=num_workers,
drop_last=False,
limit_num_samples=limit_val_num_samples,
)
return train_loader, val_loader, train_eval_loader
def denormalize(t, mean, std, max_pixel_value=255):
assert isinstance(t, torch.Tensor), f"{type(t)}"
assert t.ndim == 3
d = t.device
mean = torch.tensor(mean, device=d).unsqueeze(-1).unsqueeze(-1)
std = torch.tensor(std, device=d).unsqueeze(-1).unsqueeze(-1)
tensor = std * t + mean
tensor *= max_pixel_value
return tensor
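# Illustrative round-trip check (not part of the original module): denormalize inverts an
# ImageNet-style normalization t = (img / max_pixel_value - mean) / std, e.g.
#   t = (img / 255.0 - torch.tensor(mean).view(3, 1, 1)) / torch.tensor(std).view(3, 1, 1)
#   assert torch.allclose(denormalize(t, mean, std), img, atol=1e-4)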
def prepare_batch(batch, device, non_blocking):
x, y = batch[0], batch[1]
x = convert_tensor(x, device, non_blocking=non_blocking)
y = convert_tensor(y, device, non_blocking=non_blocking)
return x, y
|
import os
from functools import partial
from pathlib import Path
import fire
import torch
try:
from torch.cuda.amp import autocast, GradScaler
except ImportError:
raise RuntimeError("Please, use recent PyTorch version, e.g. >=1.6.0")
import dataflow as data
import utils
import vis
from py_config_runner import ConfigObject, get_params, InferenceConfigSchema, TrainvalConfigSchema
import ignite.distributed as idist
from ignite.contrib.engines import common
from ignite.engine import Engine, Events
from ignite.handlers import Checkpoint, Timer
from ignite.metrics import Accuracy, Frequency, TopKCategoricalAccuracy
from ignite.utils import manual_seed, setup_logger
def training(local_rank, config, logger, with_clearml):
rank = idist.get_rank()
manual_seed(config.seed + local_rank)
train_loader = config.train_loader
val_loader = config.val_loader
train_eval_loader = config.train_eval_loader
model, optimizer, criterion = utils.initialize(config)
# Setup trainer for this specific task
trainer = create_trainer(model, optimizer, criterion, train_loader.sampler, config, logger, with_clearml)
# Setup evaluators
accuracy = Accuracy()
val_metrics = {
"Accuracy": accuracy,
"Top-5 Accuracy": TopKCategoricalAccuracy(k=5),
"Error": (1.0 - accuracy) * 100,
}
if ("val_metrics" in config) and isinstance(config.val_metrics, dict):
val_metrics.update(config.val_metrics)
evaluator = create_evaluator(model, val_metrics, config, with_clearml, tag="val")
train_evaluator = create_evaluator(model, val_metrics, config, with_clearml, tag="train")
val_interval = config.get("val_interval", 1)
# Run validation every val_interval epochs, at the end of the training
# and at the beginning if config.start_by_validation is True
event = Events.EPOCH_COMPLETED(every=val_interval)
if config.num_epochs % val_interval != 0:
event |= Events.COMPLETED
if config.get("start_by_validation", False):
event |= Events.STARTED
@trainer.on(event)
def run_validation():
epoch = trainer.state.epoch
state = train_evaluator.run(train_eval_loader)
utils.log_metrics(logger, epoch, state.times["COMPLETED"], "Train", state.metrics)
state = evaluator.run(val_loader)
utils.log_metrics(logger, epoch, state.times["COMPLETED"], "Test", state.metrics)
score_metric_name = "Accuracy"
if "es_patience" in config:
common.add_early_stopping_by_val_score(config.es_patience, evaluator, trainer, metric_name=score_metric_name)
# Store 2 best models by validation accuracy:
common.gen_save_best_models_by_val_score(
save_handler=utils.get_save_handler(config.output_path.as_posix(), with_clearml),
evaluator=evaluator,
models=model,
metric_name=score_metric_name,
n_saved=2,
trainer=trainer,
tag="val",
)
# Setup Tensorboard logger
if rank == 0:
tb_logger = common.setup_tb_logging(
config.output_path.as_posix(),
trainer,
optimizer,
evaluators={"training": train_evaluator, "validation": evaluator},
)
# Log validation predictions as images
# We define a custom event filter to log images less frequently (to reduce storage size)
# - we plot images with predictions of the first validation batch
# - once every 3 validations and
# - at the end of the training
def custom_event_filter(_, val_iteration):
c1 = val_iteration == 1
c2 = trainer.state.epoch % (config.get("val_interval", 1) * 3) == 0
c2 |= trainer.state.epoch == config.num_epochs
return c1 and c2
# Image denormalization function to plot predictions with images
mean = config.get("mean", (0.485, 0.456, 0.406))
std = config.get("std", (0.229, 0.224, 0.225))
img_denormalize = partial(data.denormalize, mean=mean, std=std)
tb_logger.attach(
evaluator,
log_handler=vis.predictions_gt_images_handler(
img_denormalize_fn=img_denormalize, n_images=12, another_engine=trainer, prefix_tag="validation"
),
event_name=Events.ITERATION_COMPLETED(event_filter=custom_event_filter),
)
tb_logger.attach(
train_evaluator,
log_handler=vis.predictions_gt_images_handler(
img_denormalize_fn=img_denormalize, n_images=12, another_engine=trainer, prefix_tag="training"
),
event_name=Events.ITERATION_COMPLETED(event_filter=custom_event_filter),
)
trainer.run(train_loader, max_epochs=config.num_epochs)
if idist.get_rank() == 0:
tb_logger.close()
def create_trainer(model, optimizer, criterion, train_sampler, config, logger, with_clearml):
device = config.device
prepare_batch = data.prepare_batch
# Setup trainer
accumulation_steps = config.get("accumulation_steps", 1)
model_output_transform = config.get("model_output_transform", lambda x: x)
with_amp = config.get("with_amp", True)
scaler = GradScaler(enabled=with_amp)
def training_step(engine, batch):
model.train()
x, y = prepare_batch(batch, device=device, non_blocking=True)
with autocast(enabled=with_amp):
y_pred = model(x)
y_pred = model_output_transform(y_pred)
loss = criterion(y_pred, y) / accumulation_steps
output = {"supervised batch loss": loss.item(), "num_samples": len(x)}
scaler.scale(loss).backward()
if engine.state.iteration % accumulation_steps == 0:
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
return output
trainer = Engine(training_step)
trainer.logger = logger
throughput_metric = Frequency(output_transform=lambda x: x["num_samples"])
throughput_metric.attach(trainer, name="Throughput")
timer = Timer(average=True)
timer.attach(
trainer,
resume=Events.ITERATION_STARTED,
pause=Events.ITERATION_COMPLETED,
step=Events.ITERATION_COMPLETED,
)
@trainer.on(Events.ITERATION_COMPLETED(every=20))
def log_progress():
metrics = dict(trainer.state.metrics)
epoch_length = trainer.state.epoch_length
metrics["ETA (seconds)"] = int((epoch_length - (trainer.state.iteration % epoch_length)) * timer.value())
metrics_str = ", ".join([f"{k}: {v}" for k, v in metrics.items()])
metrics_format = (
f"[{trainer.state.epoch}/{trainer.state.max_epochs}] "
+ f"Iter={trainer.state.iteration % epoch_length}/{epoch_length}: "
+ f"{metrics_str}"
)
trainer.logger.info(metrics_format)
output_names = [
"supervised batch loss",
]
lr_scheduler = config.lr_scheduler
to_save = {
"model": model,
"optimizer": optimizer,
"lr_scheduler": lr_scheduler,
"trainer": trainer,
"amp": scaler,
}
save_every_iters = config.get("save_every_iters", 1000)
common.setup_common_training_handlers(
trainer,
train_sampler,
to_save=to_save,
save_every_iters=save_every_iters,
save_handler=utils.get_save_handler(config.output_path.as_posix(), with_clearml),
lr_scheduler=lr_scheduler,
output_names=output_names,
# with_pbars=not with_clearml,
with_pbars=False,
log_every_iters=1,
)
resume_from = config.get("resume_from", None)
if resume_from is not None:
checkpoint_fp = Path(resume_from)
assert checkpoint_fp.exists(), f"Checkpoint '{checkpoint_fp.as_posix()}' is not found"
logger.info(f"Resume from a checkpoint: {checkpoint_fp.as_posix()}")
checkpoint = torch.load(checkpoint_fp.as_posix(), map_location="cpu")
Checkpoint.load_objects(to_load=to_save, checkpoint=checkpoint)
return trainer
def create_evaluator(model, metrics, config, with_clearml, tag="val"):
model_output_transform = config.get("model_output_transform", lambda x: x)
with_amp = config.get("with_amp", True)
prepare_batch = data.prepare_batch
@torch.no_grad()
def evaluate_step(engine, batch):
model.eval()
with autocast(enabled=with_amp):
x, y = prepare_batch(batch, device=config.device, non_blocking=True)
y_pred = model(x)
y_pred = model_output_transform(y_pred)
return y_pred, y
evaluator = Engine(evaluate_step)
for name, metric in metrics.items():
metric.attach(evaluator, name)
if idist.get_rank() == 0 and (not with_clearml):
common.ProgressBar(desc=f"Evaluation ({tag})", persist=False).attach(evaluator)
return evaluator
def setup_experiment_tracking(config, with_clearml, task_type="training"):
from datetime import datetime
assert task_type in ("training", "testing"), task_type
output_path = ""
if idist.get_rank() == 0:
if with_clearml:
from clearml import Task
schema = TrainvalConfigSchema if task_type == "training" else InferenceConfigSchema
task = Task.init("ImageNet Training", config.config_filepath.stem, task_type=task_type)
task.connect_configuration(config.config_filepath.as_posix())
task.upload_artifact(config.script_filepath.name, config.script_filepath.as_posix())
task.upload_artifact(config.config_filepath.name, config.config_filepath.as_posix())
task.connect(get_params(config, schema))
output_path = Path(os.environ.get("CLEARML_OUTPUT_PATH", "/tmp"))
output_path = output_path / "clearml" / datetime.now().strftime("%Y%m%d-%H%M%S")
else:
import shutil
output_path = Path(os.environ.get("OUTPUT_PATH", "/tmp/output-imagenet"))
output_path = output_path / task_type / config.config_filepath.stem
output_path = output_path / datetime.now().strftime("%Y%m%d-%H%M%S")
output_path.mkdir(parents=True, exist_ok=True)
shutil.copyfile(config.script_filepath.as_posix(), output_path / config.script_filepath.name)
shutil.copyfile(config.config_filepath.as_posix(), output_path / config.config_filepath.name)
output_path = output_path.as_posix()
return Path(idist.broadcast(output_path, src=0))
def run_training(config_filepath, backend="nccl", with_clearml=True):
"""Main entry to run training experiment
Args:
config_filepath (str): training configuration .py file
backend (str): distributed backend: nccl, gloo or None to run without distributed config
with_clearml (bool): if True, uses ClearML as experiment tracking system
"""
assert torch.cuda.is_available(), torch.cuda.is_available()
assert torch.backends.cudnn.enabled
torch.backends.cudnn.benchmark = True
config_filepath = Path(config_filepath)
assert config_filepath.exists(), f"File '{config_filepath.as_posix()}' is not found"
with idist.Parallel(backend=backend) as parallel:
logger = setup_logger(name="ImageNet Training", distributed_rank=idist.get_rank())
config = ConfigObject(config_filepath)
TrainvalConfigSchema.validate(config)
config.script_filepath = Path(__file__)
output_path = setup_experiment_tracking(config, with_clearml=with_clearml)
config.output_path = output_path
utils.log_basic_info(logger, get_params(config, TrainvalConfigSchema))
try:
parallel.run(training, config, logger=logger, with_clearml=with_clearml)
except KeyboardInterrupt:
logger.info("Catched KeyboardInterrupt -> exit")
except Exception as e: # noqa
logger.exception("")
raise e
def get_model_weights(config, logger, with_clearml):
path = ""
if with_clearml:
from clearml import Model
if idist.get_rank() > 0:
idist.barrier()
else:
model_id = config.weights_path
logger.info(f"Loading trained model: {model_id}")
model = Model(model_id)
assert model is not None, f"{model_id}"
path = model.get_local_copy()
idist.barrier()
path = idist.broadcast(path, src=0)
else:
path = config.weights_path
logger.info(f"Loading {path}")
assert Path(path).exists(), f"{path} is not found"
return torch.load(path)
def evaluation(local_rank, config, logger, with_clearml):
rank = idist.get_rank()
device = idist.device()
manual_seed(config.seed + local_rank)
data_loader = config.data_loader
model = config.model.to(device)
# Load weights:
state_dict = get_model_weights(config, logger, with_clearml)
model.load_state_dict(state_dict)
# Adapt model to dist config
model = idist.auto_model(model)
# Setup evaluators
val_metrics = {
"Accuracy": Accuracy(),
"Top-5 Accuracy": TopKCategoricalAccuracy(k=5),
}
if ("val_metrics" in config) and isinstance(config.val_metrics, dict):
val_metrics.update(config.val_metrics)
evaluator = create_evaluator(model, val_metrics, config, with_clearml, tag="val")
# Setup Tensorboard logger
if rank == 0:
tb_logger = common.TensorboardLogger(log_dir=config.output_path.as_posix())
tb_logger.attach_output_handler(evaluator, event_name=Events.COMPLETED, tag="validation", metric_names="all")
state = evaluator.run(data_loader)
utils.log_metrics(logger, 0, state.times["COMPLETED"], "Validation", state.metrics)
if idist.get_rank() == 0:
tb_logger.close()
def run_evaluation(config_filepath, backend="nccl", with_clearml=True):
"""Main entry to run model's evaluation:
- compute validation metrics
Args:
config_filepath (str): evaluation configuration .py file
backend (str): distributed backend: nccl, gloo, horovod or None to run without distributed config
with_clearml (bool): if True, uses ClearML as experiment tracking system
"""
assert torch.cuda.is_available(), torch.cuda.is_available()
assert torch.backends.cudnn.enabled
torch.backends.cudnn.benchmark = True
config_filepath = Path(config_filepath)
assert config_filepath.exists(), f"File '{config_filepath.as_posix()}' is not found"
with idist.Parallel(backend=backend) as parallel:
logger = setup_logger(name="ImageNet Evaluation", distributed_rank=idist.get_rank())
config = ConfigObject(config_filepath)
InferenceConfigSchema.validate(config)
config.script_filepath = Path(__file__)
output_path = setup_experiment_tracking(config, with_clearml=with_clearml, task_type="testing")
config.output_path = output_path
utils.log_basic_info(logger, get_params(config, InferenceConfigSchema))
try:
parallel.run(evaluation, config, logger=logger, with_clearml=with_clearml)
except KeyboardInterrupt:
logger.info("Catched KeyboardInterrupt -> exit")
except Exception as e: # noqa
logger.exception("")
raise e
if __name__ == "__main__":
fire.Fire({"training": run_training, "eval": run_evaluation})
|
# Basic training configuration
import os
from functools import partial
import albumentations as A
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
from albumentations.pytorch import ToTensorV2 as ToTensor
from dataflow import denormalize, get_train_val_loaders
from torchvision.models.resnet import resnet50
import ignite.distributed as idist
# ##############################
# Global configs
# ##############################
seed = 19
device = "cuda"
debug = True
# config to measure the time needed to prepare batches and report it before training starts
benchmark_dataflow = True
benchmark_dataflow_num_iters = 100
train_crop_size = 224
val_crop_size = 320
batch_size = 64 * idist.get_world_size() # total batch size
num_workers = 8
val_interval = 2
start_by_validation = True
# ##############################
# Setup Dataflow
# ##############################
assert "DATASET_PATH" in os.environ
data_path = os.environ["DATASET_PATH"]
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
train_transforms = A.Compose(
[
A.RandomResizedCrop(train_crop_size, train_crop_size, scale=(0.08, 1.0)),
A.HorizontalFlip(),
A.CoarseDropout(max_height=32, max_width=32),
A.HueSaturationValue(),
A.Normalize(mean=mean, std=std),
ToTensor(),
]
)
val_transforms = A.Compose(
[
# https://github.com/facebookresearch/FixRes/blob/b27575208a7c48a3a6e0fa9efb57baa4021d1305/imnet_resnet50_scratch/transforms.py#L76
A.Resize(int((256 / 224) * val_crop_size), int((256 / 224) * val_crop_size)),
A.CenterCrop(val_crop_size, val_crop_size),
A.Normalize(mean=mean, std=std),
ToTensor(),
]
)
train_loader, val_loader, train_eval_loader = get_train_val_loaders(
data_path,
train_transforms=train_transforms,
val_transforms=val_transforms,
batch_size=batch_size,
num_workers=num_workers,
val_batch_size=batch_size,
limit_train_num_samples=batch_size * 6 if debug else None,
limit_val_num_samples=batch_size * 6 if debug else None,
)
# Image denormalization function to plot predictions with images
img_denormalize = partial(denormalize, mean=mean, std=std)
# ##############################
# Setup Model
# ##############################
model = resnet50(weights=None)
# ##############################
# Setup Solver
# ##############################
num_epochs = 2
criterion = nn.CrossEntropyLoss()
le = len(train_loader)
base_lr = 0.1 * (batch_size / 256.0)
optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=1e-4)
lr_scheduler = lrs.MultiStepLR(optimizer, milestones=[30 * le, 60 * le, 90 * le, 100 * le], gamma=0.1)
|
# Basic training configuration
import os
from functools import partial
import albumentations as A
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
from albumentations.pytorch import ToTensorV2 as ToTensor
from dataflow import denormalize, get_train_val_loaders
from torchvision.models.resnet import resnet50
import ignite.distributed as idist
# ##############################
# Global configs
# ##############################
seed = 19
device = "cuda"
debug = False
# config to measure the time needed to prepare batches and report it before training starts
benchmark_dataflow = True
benchmark_dataflow_num_iters = 100
train_crop_size = 224
val_crop_size = 320
batch_size = 64 * idist.get_world_size() # total batch size
num_workers = 8
val_interval = 2
# ##############################
# Setup Dataflow
# ##############################
assert "DATASET_PATH" in os.environ
data_path = os.environ["DATASET_PATH"]
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
train_transforms = A.Compose(
[
A.RandomResizedCrop(train_crop_size, train_crop_size, scale=(0.08, 1.0)),
A.HorizontalFlip(),
A.CoarseDropout(max_height=32, max_width=32),
A.HueSaturationValue(),
A.Normalize(mean=mean, std=std),
ToTensor(),
]
)
val_transforms = A.Compose(
[
# https://github.com/facebookresearch/FixRes/blob/b27575208a7c48a3a6e0fa9efb57baa4021d1305/imnet_resnet50_scratch/transforms.py#L76
A.Resize(int((256 / 224) * val_crop_size), int((256 / 224) * val_crop_size)),
A.CenterCrop(val_crop_size, val_crop_size),
A.Normalize(mean=mean, std=std),
ToTensor(),
]
)
train_loader, val_loader, train_eval_loader = get_train_val_loaders(
data_path,
train_transforms=train_transforms,
val_transforms=val_transforms,
batch_size=batch_size,
num_workers=num_workers,
val_batch_size=batch_size,
limit_train_num_samples=batch_size * 6 if debug else None,
limit_val_num_samples=batch_size * 6 if debug else None,
)
# Image denormalization function to plot predictions with images
img_denormalize = partial(denormalize, mean=mean, std=std)
# ##############################
# Setup Model
# ##############################
model = resnet50(weights=None)
# ##############################
# Setup Solver
# ##############################
num_epochs = 105
criterion = nn.CrossEntropyLoss()
le = len(train_loader)
base_lr = 0.1 * (batch_size / 256.0)
optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=1e-4)
lr_scheduler = lrs.MultiStepLR(optimizer, milestones=[30 * le, 60 * le, 90 * le, 100 * le], gamma=0.1)
|
import numpy as np
import torch
from PIL import Image
try:
from image_dataset_viz import render_datapoint
except ImportError:
raise ModuleNotFoundError(
"Please install image-dataset-viz via pip install --upgrade git+https://github.com/vfdev-5/ImageDatasetViz.git"
)
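# Build the standard Pascal VOC color palette: the three lowest bits of each class index
# are spread across the R, G and B channels, one bit position per round, producing the
# familiar VOC segmentation colors as a flat [r0, g0, b0, r1, g1, b1, ...] list.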
def _getvocpallete(num_cls):
n = num_cls
pallete = [0] * (n * 3)
for j in range(0, n):
lab = j
pallete[j * 3 + 0] = 0
pallete[j * 3 + 1] = 0
pallete[j * 3 + 2] = 0
i = 0
while lab > 0:
pallete[j * 3 + 0] |= ((lab >> 0) & 1) << (7 - i)
pallete[j * 3 + 1] |= ((lab >> 1) & 1) << (7 - i)
pallete[j * 3 + 2] |= ((lab >> 2) & 1) << (7 - i)
i = i + 1
lab >>= 3
return pallete
vocpallete = _getvocpallete(256)
def render_mask(mask):
if isinstance(mask, np.ndarray):
mask = Image.fromarray(mask)
mask.putpalette(vocpallete)
mask = mask.convert(mode="RGB")
return mask
def tensor_to_rgb(t):
img = t.cpu().numpy().transpose((1, 2, 0))
return img.astype(np.uint8)
def make_grid(batch_img, batch_mask, img_denormalize_fn, batch_gt_mask=None):
"""Create a grid from batch image and mask as
img1 | img2 | img3 | img4 | ...
i+m1 | i+m2 | i+m3 | i+m4 | ...
mask1 | mask2 | mask3 | mask4 | ...
i+M1 | i+M2 | i+M3 | i+M4 | ...
Mask1 | Mask2 | Mask3 | Mask4 | ...
i+m = image + mask blended with alpha=0.4
- maskN is predicted mask
- MaskN is ground-truth mask if given
Args:
batch_img (torch.Tensor): batch of images of any type
batch_mask (torch.Tensor): batch of masks
img_denormalize_fn (Callable): function to denormalize batch of images
batch_gt_mask (torch.Tensor, optional): batch of ground truth masks.
"""
assert isinstance(batch_img, torch.Tensor) and isinstance(batch_mask, torch.Tensor)
assert len(batch_img) == len(batch_mask)
if batch_gt_mask is not None:
assert isinstance(batch_gt_mask, torch.Tensor)
assert len(batch_mask) == len(batch_gt_mask)
b = batch_img.shape[0]
h, w = batch_img.shape[2:]
le = 3 if batch_gt_mask is None else 3 + 2
out_image = np.zeros((h * le, w * b, 3), dtype="uint8")
for i in range(b):
img = batch_img[i]
mask = batch_mask[i]
img = img_denormalize_fn(img)
img = tensor_to_rgb(img)
mask = mask.cpu().numpy()
mask = render_mask(mask)
out_image[0:h, i * w : (i + 1) * w, :] = img
out_image[1 * h : 2 * h, i * w : (i + 1) * w, :] = render_datapoint(img, mask, blend_alpha=0.4)
out_image[2 * h : 3 * h, i * w : (i + 1) * w, :] = mask
if batch_gt_mask is not None:
gt_mask = batch_gt_mask[i]
gt_mask = gt_mask.cpu().numpy()
gt_mask = render_mask(gt_mask)
out_image[3 * h : 4 * h, i * w : (i + 1) * w, :] = render_datapoint(img, gt_mask, blend_alpha=0.4)
out_image[4 * h : 5 * h, i * w : (i + 1) * w, :] = gt_mask
return out_image
def predictions_gt_images_handler(img_denormalize_fn, n_images=None, another_engine=None, prefix_tag=None):
def wrapper(engine, logger, event_name):
batch = engine.state.batch
output = engine.state.output
x = batch["image"]
y = batch["mask"]
y_pred = output[0]
if y.shape == y_pred.shape and y.ndim == 4:
# Case of y of shape (B, C, H, W)
y = torch.argmax(y, dim=1)
y_pred = torch.argmax(y_pred, dim=1).byte()
if n_images is not None:
x = x[:n_images, ...]
y = y[:n_images, ...]
y_pred = y_pred[:n_images, ...]
grid_pred_gt = make_grid(x, y_pred, img_denormalize_fn, batch_gt_mask=y)
state = engine.state if another_engine is None else another_engine.state
global_step = state.epoch
tag = "predictions_with_gt"
if prefix_tag is not None:
tag = f"{prefix_tag}: {tag} - epoch={global_step}"
logger.writer.add_image(tag=tag, img_tensor=grid_pred_gt, global_step=global_step, dataformats="HWC")
return wrapper
|
import torch
import ignite
import ignite.distributed as idist
from ignite.handlers import DiskSaver
def initialize(config):
device = idist.device()
model = config.model.to(device)
optimizer = config.optimizer
# Adapt model to dist config
model = idist.auto_model(model)
optimizer = idist.auto_optim(optimizer)
criterion = config.criterion.to(device)
return model, optimizer, criterion
def log_basic_info(logger, config):
logger.info(f"- PyTorch version: {torch.__version__}")
logger.info(f"- Ignite version: {ignite.__version__}")
if torch.cuda.is_available():
# explicitly import cudnn as
# torch.backends.cudnn can not be pickled with hvd spawning procs
from torch.backends import cudnn
logger.info(f"- GPU Device: {torch.cuda.get_device_name(idist.get_local_rank())}")
logger.info(f"- CUDA version: {torch.version.cuda}")
logger.info(f"- CUDNN version: {cudnn.version()}")
logger.info("\n")
logger.info("Configuration:")
for key, value in config.items():
logger.info(f"\t{key}: {value}")
logger.info("\n")
if idist.get_world_size() > 1:
logger.info("\nDistributed setting:")
logger.info(f"\tbackend: {idist.backend()}")
logger.info(f"\tworld size: {idist.get_world_size()}")
logger.info("\n")
def log_metrics(logger, epoch, elapsed, tag, metrics):
metrics_output = "\n".join([f"\t{k}: {v}" for k, v in metrics.items()])
logger.info(f"\nEpoch {epoch} - Evaluation time (seconds): {elapsed:.2f} - {tag} metrics:\n {metrics_output}")
def get_save_handler(output_path, with_clearml):
if with_clearml:
from ignite.contrib.handlers.clearml_logger import ClearMLSaver
return ClearMLSaver(dirname=output_path)
return DiskSaver(output_path)
|
import cv2
import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset
from torch.utils.data.dataset import Subset
from torchvision.datasets.sbd import SBDataset
from torchvision.datasets.voc import VOCSegmentation
import ignite.distributed as idist
from ignite.utils import convert_tensor
class TransformedDataset(Dataset):
def __init__(self, ds, transform_fn):
assert isinstance(ds, Dataset)
assert callable(transform_fn)
self.ds = ds
self.transform_fn = transform_fn
def __len__(self):
return len(self.ds)
def __getitem__(self, index):
dp = self.ds[index]
return self.transform_fn(**dp)
class VOCSegmentationOpencv(VOCSegmentation):
target_names = [
"background",
"aeroplane",
"bicycle",
"bird",
"boat",
"bottle",
"bus",
"car",
"cat",
"chair",
"cow",
"diningtable",
"dog",
"horse",
"motorbike",
"person",
"plant",
"sheep",
"sofa",
"train",
"tv/monitor",
]
def __init__(self, *args, return_meta=False, **kwargs):
super(VOCSegmentationOpencv, self).__init__(*args, **kwargs)
self.return_meta = return_meta
def __getitem__(self, index):
img = cv2.imread(self.images[index])
assert img is not None, f"Image at '{self.images[index]}' has a problem"
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
mask = np.asarray(Image.open(self.masks[index]))
if self.return_meta:
return {
"image": img,
"mask": mask,
"meta": {"index": index, "image_path": self.images[index], "mask_path": self.masks[index]},
}
return {"image": img, "mask": mask}
class SBDatasetOpencv(SBDataset):
def __init__(self, *args, return_meta=False, **kwargs):
super(SBDatasetOpencv, self).__init__(*args, **kwargs)
assert self.mode == "segmentation", "SBDatasetOpencv should be in segmentation mode only"
self.return_meta = return_meta
def _get_segmentation_target(self, filepath):
mat = self._loadmat(filepath)
return mat["GTcls"][0]["Segmentation"][0]
def __getitem__(self, index):
img = cv2.imread(self.images[index])
assert img is not None, f"Image at '{self.images[index]}' has a problem"
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
mask = self._get_target(self.masks[index])
if self.return_meta:
return {
"image": img,
"mask": mask,
"meta": {"index": index, "image_path": self.images[index], "mask_path": self.masks[index]},
}
return {"image": img, "mask": mask}
def get_train_dataset(root_path, return_meta=False):
return VOCSegmentationOpencv(
root=root_path, year="2012", image_set="train", download=False, return_meta=return_meta
)
def get_val_dataset(root_path, return_meta=False):
return VOCSegmentationOpencv(root=root_path, year="2012", image_set="val", download=False, return_meta=return_meta)
def get_train_noval_sbdataset(root_path, return_meta=False):
return SBDatasetOpencv(root_path, image_set="train_noval", mode="segmentation", return_meta=return_meta)
def get_dataloader(dataset, sampler=None, shuffle=False, limit_num_samples=None, **kwargs):
if limit_num_samples is not None:
g = torch.Generator().manual_seed(limit_num_samples)
indices = torch.randperm(len(dataset), generator=g)[:limit_num_samples]
dataset = Subset(dataset, indices)
return idist.auto_dataloader(dataset, sampler=sampler, shuffle=(sampler is None) and shuffle, **kwargs)
def get_train_val_loaders(
root_path,
train_transforms,
val_transforms,
batch_size=16,
num_workers=8,
train_sampler=None,
val_batch_size=None,
sbd_path=None,
limit_train_num_samples=None,
limit_val_num_samples=None,
):
train_ds = get_train_dataset(root_path)
val_ds = get_val_dataset(root_path)
if sbd_path is not None:
sbd_train_ds = get_train_noval_sbdataset(sbd_path)
train_ds = train_ds + sbd_train_ds
if len(val_ds) < len(train_ds):
g = torch.Generator().manual_seed(len(train_ds))
train_eval_indices = torch.randperm(len(train_ds), generator=g)[: len(val_ds)]
train_eval_ds = Subset(train_ds, train_eval_indices)
else:
train_eval_ds = train_ds
train_ds = TransformedDataset(train_ds, transform_fn=train_transforms)
val_ds = TransformedDataset(val_ds, transform_fn=val_transforms)
train_eval_ds = TransformedDataset(train_eval_ds, transform_fn=val_transforms)
val_batch_size = batch_size * 4 if val_batch_size is None else val_batch_size
train_loader = get_dataloader(
train_ds,
shuffle=True,
sampler=train_sampler,
batch_size=batch_size,
num_workers=num_workers,
drop_last=True,
limit_num_samples=limit_train_num_samples,
)
val_loader = get_dataloader(
val_ds,
shuffle=False,
batch_size=val_batch_size,
num_workers=num_workers,
drop_last=False,
limit_num_samples=limit_val_num_samples,
)
train_eval_loader = get_dataloader(
train_eval_ds,
shuffle=False,
batch_size=val_batch_size,
num_workers=num_workers,
drop_last=False,
limit_num_samples=limit_val_num_samples,
)
return train_loader, val_loader, train_eval_loader
def get_inference_dataloader(
root_path, mode, transforms, batch_size=16, num_workers=8, pin_memory=True, limit_num_samples=None
):
assert mode in ("train", "test"), "Mode should be 'train' or 'test'"
get_dataset_fn = get_train_dataset if mode == "train" else get_val_dataset
dataset = get_dataset_fn(root_path, return_meta=True)
dataset = TransformedDataset(dataset, transform_fn=transforms)
return get_dataloader(
dataset,
limit_num_samples=limit_num_samples,
shuffle=False,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=pin_memory,
drop_last=False,
)
def ignore_mask_boundaries(**kwargs):
assert "mask" in kwargs, "Input should contain 'mask'"
mask = kwargs["mask"]
mask[mask == 255] = 0
kwargs["mask"] = mask
return kwargs
def denormalize(t, mean, std, max_pixel_value=255):
assert isinstance(t, torch.Tensor), f"{type(t)}"
assert t.ndim == 3
d = t.device
mean = torch.tensor(mean, device=d).unsqueeze(-1).unsqueeze(-1)
std = torch.tensor(std, device=d).unsqueeze(-1).unsqueeze(-1)
tensor = std * t + mean
tensor *= max_pixel_value
return tensor
def prepare_image_mask(batch, device, non_blocking):
x, y = batch["image"], batch["mask"]
x = convert_tensor(x, device, non_blocking=non_blocking)
y = convert_tensor(y, device, non_blocking=non_blocking).long()
return x, y
|
import os
from functools import partial
from pathlib import Path
import fire
import torch
try:
from torch.cuda.amp import autocast, GradScaler
except ImportError:
raise RuntimeError("Please, use recent PyTorch version, e.g. >=1.6.0")
import dataflow as data
import utils
import vis
from py_config_runner import ConfigObject, get_params, InferenceConfigSchema, TrainvalConfigSchema
import ignite.distributed as idist
from ignite.contrib.engines import common
from ignite.engine import Engine, Events
from ignite.handlers import Checkpoint
from ignite.metrics import ConfusionMatrix, IoU, mIoU
from ignite.utils import manual_seed, setup_logger
def download_datasets(output_path):
"""Helper tool to download datasets
Args:
output_path (str): path where to download and unzip the dataset
"""
from torchvision.datasets.sbd import SBDataset
from torchvision.datasets.voc import VOCSegmentation
output_path = Path(output_path)
output_path.mkdir(parents=True, exist_ok=True)
print("Download Pascal VOC 2012 - Training")
VOCSegmentation(output_path.as_posix(), image_set="train", download=True)
print("Download Pascal VOC 2012 - Validation")
VOCSegmentation(output_path.as_posix(), image_set="val", download=True)
print("Download SBD - Training without Pascal VOC validation part")
sbd_path = output_path / "SBD"
sbd_path.mkdir(exist_ok=True)
SBDataset(sbd_path.as_posix(), image_set="train_noval", mode="segmentation", download=True)
print("Done")
print(f"Pascal VOC 2012 is at : {(output_path / 'VOCdevkit').as_posix()}")
print(f"SBD is at : {sbd_path.as_posix()}")
def training(local_rank, config, logger, with_clearml):
rank = idist.get_rank()
manual_seed(config.seed + local_rank)
train_loader = config.train_loader
val_loader = config.val_loader
train_eval_loader = config.train_eval_loader
model, optimizer, criterion = utils.initialize(config)
# Setup trainer for this specific task
trainer = create_trainer(model, optimizer, criterion, train_loader.sampler, config, logger, with_clearml)
# Setup evaluators
num_classes = config.num_classes
cm_metric = ConfusionMatrix(num_classes=num_classes)
val_metrics = {
"IoU": IoU(cm_metric),
"mIoU_bg": mIoU(cm_metric),
}
if ("val_metrics" in config) and isinstance(config.val_metrics, dict):
val_metrics.update(config.val_metrics)
evaluator = create_evaluator(model, val_metrics, config, with_clearml, tag="val")
train_evaluator = create_evaluator(model, val_metrics, config, with_clearml, tag="train")
val_interval = config.get("val_interval", 1)
# Run validation every val_interval epochs, at the end of the training
# and at the beginning if config.start_by_validation is True
event = Events.EPOCH_COMPLETED(every=val_interval)
if config.num_epochs % val_interval != 0:
event |= Events.COMPLETED
if config.get("start_by_validation", False):
event |= Events.STARTED
@trainer.on(event)
def run_validation():
epoch = trainer.state.epoch
state = train_evaluator.run(train_eval_loader)
utils.log_metrics(logger, epoch, state.times["COMPLETED"], "Train", state.metrics)
state = evaluator.run(val_loader)
utils.log_metrics(logger, epoch, state.times["COMPLETED"], "Test", state.metrics)
score_metric_name = "mIoU_bg"
if "es_patience" in config:
common.add_early_stopping_by_val_score(config.es_patience, evaluator, trainer, metric_name=score_metric_name)
# Store 2 best models by validation mIoU:
common.gen_save_best_models_by_val_score(
save_handler=utils.get_save_handler(config.output_path.as_posix(), with_clearml),
evaluator=evaluator,
models=model,
metric_name=score_metric_name,
n_saved=2,
trainer=trainer,
tag="val",
)
# Setup Tensorboard logger
if rank == 0:
tb_logger = common.setup_tb_logging(
config.output_path.as_posix(),
trainer,
optimizer,
evaluators={"training": train_evaluator, "validation": evaluator},
)
# Log validation predictions as images
# We define a custom event filter to log images less frequently (to reduce storage size)
# - we plot images with masks of the middle validation batch
# - once every 3 validations and
# - at the end of the training
def custom_event_filter(_, val_iteration):
c1 = val_iteration == len(val_loader) // 2
c2 = trainer.state.epoch % (config.get("val_interval", 1) * 3) == 0
c2 |= trainer.state.epoch == config.num_epochs
return c1 and c2
# Image denormalization function to plot predictions with images
mean = config.get("mean", (0.485, 0.456, 0.406))
std = config.get("std", (0.229, 0.224, 0.225))
img_denormalize = partial(data.denormalize, mean=mean, std=std)
tb_logger.attach(
evaluator,
log_handler=vis.predictions_gt_images_handler(
img_denormalize_fn=img_denormalize, n_images=8, another_engine=trainer, prefix_tag="validation"
),
event_name=Events.ITERATION_COMPLETED(event_filter=custom_event_filter),
)
# Log confusion matrix to ClearML:
if with_clearml:
trainer.add_event_handler(Events.COMPLETED, compute_and_log_cm, cm_metric, trainer.state.iteration)
trainer.run(train_loader, max_epochs=config.num_epochs)
if idist.get_rank() == 0:
tb_logger.close()
def compute_and_log_cm(cm_metric, iteration):
cm = cm_metric.compute()
# CM: values are normalized such that diagonal values represent class recalls
cm = ConfusionMatrix.normalize(cm, "recall").cpu().numpy()
if idist.get_rank() == 0:
from clearml import Task
clearml_logger = Task.current_task().get_logger()
try:
clearml_logger.report_confusion_matrix(
title="Final Confusion Matrix",
matrix=cm,
iteration=iteration,
xlabels=data.VOCSegmentationOpencv.target_names,
ylabels=data.VOCSegmentationOpencv.target_names,
extra_layout=None,
)
except NameError:
# Temporary clearml bug work-around:
# https://github.com/allegroai/clearml/pull/936
pass
def create_trainer(model, optimizer, criterion, train_sampler, config, logger, with_clearml):
device = config.device
prepare_batch = data.prepare_image_mask
# Setup trainer
accumulation_steps = config.get("accumulation_steps", 1)
model_output_transform = config.get("model_output_transform", lambda x: x)
with_amp = config.get("with_amp", True)
scaler = GradScaler(enabled=with_amp)
def forward_pass(batch):
model.train()
x, y = prepare_batch(batch, device=device, non_blocking=True)
with autocast(enabled=with_amp):
y_pred = model(x)
y_pred = model_output_transform(y_pred)
loss = criterion(y_pred, y) / accumulation_steps
return loss
def amp_backward_pass(engine, loss):
scaler.scale(loss).backward()
if engine.state.iteration % accumulation_steps == 0:
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
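# With Horovod, the gradient allreduce normally happens inside optimizer.step(). Since
# GradScaler may skip the step on inf/nan gradients, the allreduce is triggered explicitly
# with optimizer.synchronize() and scaler.step() runs inside skip_synchronize(), following
# Horovod's recommended pattern for AMP.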
def hvd_amp_backward_pass(engine, loss):
scaler.scale(loss).backward()
optimizer.synchronize()
with optimizer.skip_synchronize():
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
if idist.backend() == "horovod" and with_amp:
backward_pass = hvd_amp_backward_pass
else:
backward_pass = amp_backward_pass
def training_step(engine, batch):
loss = forward_pass(batch)
output = {"supervised batch loss": loss.item()}
backward_pass(engine, loss)
return output
trainer = Engine(training_step)
trainer.logger = logger
output_names = [
"supervised batch loss",
]
lr_scheduler = config.lr_scheduler
to_save = {
"model": model,
"optimizer": optimizer,
"lr_scheduler": lr_scheduler,
"trainer": trainer,
"amp": scaler,
}
save_every_iters = config.get("save_every_iters", 1000)
common.setup_common_training_handlers(
trainer,
train_sampler,
to_save=to_save,
save_every_iters=save_every_iters,
save_handler=utils.get_save_handler(config.output_path.as_posix(), with_clearml),
lr_scheduler=lr_scheduler,
output_names=output_names,
with_pbars=not with_clearml,
log_every_iters=1,
)
resume_from = config.get("resume_from", None)
if resume_from is not None:
checkpoint_fp = Path(resume_from)
assert checkpoint_fp.exists(), f"Checkpoint '{checkpoint_fp.as_posix()}' is not found"
logger.info(f"Resume from a checkpoint: {checkpoint_fp.as_posix()}")
checkpoint = torch.load(checkpoint_fp.as_posix(), map_location="cpu")
Checkpoint.load_objects(to_load=to_save, checkpoint=checkpoint)
return trainer
def create_evaluator(model, metrics, config, with_clearml, tag="val"):
model_output_transform = config.get("model_output_transform", lambda x: x)
with_amp = config.get("with_amp", True)
prepare_batch = data.prepare_image_mask
@torch.no_grad()
def evaluate_step(engine, batch):
model.eval()
with autocast(enabled=with_amp):
x, y = prepare_batch(batch, device=config.device, non_blocking=True)
y_pred = model(x)
y_pred = model_output_transform(y_pred)
return y_pred, y
evaluator = Engine(evaluate_step)
for name, metric in metrics.items():
metric.attach(evaluator, name)
if idist.get_rank() == 0 and (not with_clearml):
common.ProgressBar(desc=f"Evaluation ({tag})", persist=False).attach(evaluator)
return evaluator
def setup_experiment_tracking(config, with_clearml, task_type="training"):
from datetime import datetime
assert task_type in ("training", "testing"), task_type
output_path = ""
if idist.get_rank() == 0:
if with_clearml:
from clearml import Task
schema = TrainvalConfigSchema if task_type == "training" else InferenceConfigSchema
task = Task.init("Pascal-VOC12 Training", config.config_filepath.stem, task_type=task_type)
task.connect_configuration(config.config_filepath.as_posix())
task.upload_artifact(config.script_filepath.name, config.script_filepath.as_posix())
task.upload_artifact(config.config_filepath.name, config.config_filepath.as_posix())
task.connect(get_params(config, schema))
output_path = Path(os.environ.get("CLEARML_OUTPUT_PATH", "/tmp"))
output_path = output_path / "clearml" / datetime.now().strftime("%Y%m%d-%H%M%S")
else:
import shutil
output_path = Path(os.environ.get("OUTPUT_PATH", "/tmp/output-pascal-voc12"))
output_path = output_path / task_type / config.config_filepath.stem
output_path = output_path / datetime.now().strftime("%Y%m%d-%H%M%S")
output_path.mkdir(parents=True, exist_ok=True)
shutil.copyfile(config.script_filepath.as_posix(), output_path / config.script_filepath.name)
shutil.copyfile(config.config_filepath.as_posix(), output_path / config.config_filepath.name)
output_path = output_path.as_posix()
return Path(idist.broadcast(output_path, src=0))
def run_training(config_filepath, backend="nccl", with_clearml=True):
"""Main entry to run training experiment
Args:
config_filepath (str): training configuration .py file
backend (str): distributed backend: nccl, gloo, horovod or None to run without distributed config
with_clearml (bool): if True, uses ClearML as experiment tracking system
"""
assert torch.cuda.is_available(), torch.cuda.is_available()
assert torch.backends.cudnn.enabled
torch.backends.cudnn.benchmark = True
config_filepath = Path(config_filepath)
assert config_filepath.exists(), f"File '{config_filepath.as_posix()}' is not found"
with idist.Parallel(backend=backend) as parallel:
logger = setup_logger(name="Pascal-VOC12 Training", distributed_rank=idist.get_rank())
config = ConfigObject(config_filepath)
TrainvalConfigSchema.validate(config)
config.script_filepath = Path(__file__)
output_path = setup_experiment_tracking(config, with_clearml=with_clearml)
config.output_path = output_path
utils.log_basic_info(logger, get_params(config, TrainvalConfigSchema))
try:
parallel.run(training, config, logger=logger, with_clearml=with_clearml)
except KeyboardInterrupt:
logger.info("Catched KeyboardInterrupt -> exit")
except Exception as e: # noqa
logger.exception("")
raise e
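# Example invocation through the ``fire`` CLI defined at the bottom of this script
# (file and config paths are illustrative, not part of the original example):
#   python -u main.py training /path/to/train_config.py --backend="nccl"
#   python -u main.py eval /path/to/eval_config.py --backend="nccl"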
def get_model_weights(config, logger, with_clearml):
path = ""
if with_clearml:
from clearml import Model
if idist.get_rank() > 0:
idist.barrier()
else:
model_id = config.weights_path
logger.info(f"Loading trained model: {model_id}")
model = Model(model_id)
assert model is not None, f"{model_id}"
path = model.get_local_copy()
idist.barrier()
path = idist.broadcast(path, src=0)
else:
path = config.weights_path
logger.info(f"Loading {path}")
assert Path(path).exists(), f"{path} is not found"
return torch.load(path)
def evaluation(local_rank, config, logger, with_clearml):
rank = idist.get_rank()
device = idist.device()
manual_seed(config.seed + local_rank)
data_loader = config.data_loader
model = config.model.to(device)
# Load weights:
state_dict = get_model_weights(config, logger, with_clearml)
model.load_state_dict(state_dict)
# Adapt model to dist config
model = idist.auto_model(model)
# Setup evaluators
num_classes = config.num_classes
cm_metric = ConfusionMatrix(num_classes=num_classes)
val_metrics = {
"IoU": IoU(cm_metric),
"mIoU_bg": mIoU(cm_metric),
}
if ("val_metrics" in config) and isinstance(config.val_metrics, dict):
val_metrics.update(config.val_metrics)
evaluator = create_evaluator(model, val_metrics, config, with_clearml, tag="val")
# Setup Tensorboard logger
if rank == 0:
tb_logger = common.TensorboardLogger(log_dir=config.output_path.as_posix())
tb_logger.attach_output_handler(evaluator, event_name=Events.COMPLETED, tag="validation", metric_names="all")
# Log confusion matrix to ClearML:
if with_clearml:
evaluator.add_event_handler(Events.COMPLETED, compute_and_log_cm, cm_metric, evaluator.state.iteration)
state = evaluator.run(data_loader)
utils.log_metrics(logger, 0, state.times["COMPLETED"], "Validation", state.metrics)
if idist.get_rank() == 0:
tb_logger.close()
def run_evaluation(config_filepath, backend="nccl", with_clearml=True):
"""Main entry to run model's evaluation:
- compute validation metrics
Args:
config_filepath (str): evaluation configuration .py file
backend (str): distributed backend: nccl, gloo, horovod or None to run without distributed config
with_clearml (bool): if True, uses ClearML as experiment tracking system
"""
assert torch.cuda.is_available(), torch.cuda.is_available()
assert torch.backends.cudnn.enabled
torch.backends.cudnn.benchmark = True
config_filepath = Path(config_filepath)
assert config_filepath.exists(), f"File '{config_filepath.as_posix()}' is not found"
with idist.Parallel(backend=backend) as parallel:
logger = setup_logger(name="Pascal-VOC12 Evaluation", distributed_rank=idist.get_rank())
config = ConfigObject(config_filepath)
InferenceConfigSchema.validate(config)
config.script_filepath = Path(__file__)
output_path = setup_experiment_tracking(config, with_clearml=with_clearml, task_type="testing")
config.output_path = output_path
utils.log_basic_info(logger, get_params(config, InferenceConfigSchema))
try:
parallel.run(evaluation, config, logger=logger, with_clearml=with_clearml)
except KeyboardInterrupt:
logger.info("Catched KeyboardInterrupt -> exit")
except Exception as e: # noqa
logger.exception("")
raise e
if __name__ == "__main__":
fire.Fire({"download": download_datasets, "training": run_training, "eval": run_evaluation})
|
# Basic training configuration
import os
from functools import partial
import albumentations as A
import cv2
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
from albumentations.pytorch import ToTensorV2 as ToTensor
from dataflow import get_train_val_loaders, ignore_mask_boundaries
from torchvision.models.segmentation import deeplabv3_resnet101
# ##############################
# Global configs
# ##############################
seed = 21
device = "cuda"
debug = False
# Use AMP with torch native
with_amp = True
num_classes = 21
batch_size = 18 # total batch size
val_batch_size = batch_size * 2
num_workers = 12 # total num workers per node
val_interval = 3
# grads accumulation:
accumulation_steps = 4
val_img_size = 513
train_img_size = 480
# ##############################
# Setup Dataflow
# ##############################
assert "DATASET_PATH" in os.environ
data_path = os.environ["DATASET_PATH"]
assert "SBD_DATASET_PATH" in os.environ
sbd_data_path = os.environ["SBD_DATASET_PATH"]
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
train_transforms = A.Compose(
[
A.RandomScale(scale_limit=(0.0, 1.5), interpolation=cv2.INTER_LINEAR, p=1.0),
A.PadIfNeeded(val_img_size, val_img_size, border_mode=cv2.BORDER_CONSTANT),
A.RandomCrop(train_img_size, train_img_size),
A.HorizontalFlip(),
A.Blur(blur_limit=3),
A.Normalize(mean=mean, std=std),
ignore_mask_boundaries,
ToTensor(),
]
)
val_transforms = A.Compose(
[
A.PadIfNeeded(val_img_size, val_img_size, border_mode=cv2.BORDER_CONSTANT),
A.Normalize(mean=mean, std=std),
ignore_mask_boundaries,
ToTensor(),
]
)
train_loader, val_loader, train_eval_loader = get_train_val_loaders(
root_path=data_path,
train_transforms=train_transforms,
val_transforms=val_transforms,
batch_size=batch_size,
num_workers=num_workers,
val_batch_size=val_batch_size,
sbd_path=sbd_data_path,
limit_train_num_samples=100 if debug else None,
limit_val_num_samples=100 if debug else None,
)
# ##############################
# Setup model
# ##############################
num_classes = 21
model = deeplabv3_resnet101(num_classes=num_classes)
def model_output_transform(output):
return output["out"]
# ##############################
# Setup solver
# ##############################
save_every_iters = len(train_loader)
num_epochs = 100
criterion = nn.CrossEntropyLoss()
lr = 0.007
weight_decay = 5e-4
momentum = 0.9
nesterov = False
optimizer = optim.SGD(
[{"params": model.backbone.parameters()}, {"params": model.classifier.parameters()}],
lr=1.0,
momentum=momentum,
weight_decay=weight_decay,
nesterov=nesterov,
)
le = len(train_loader)
def lambda_lr_scheduler(iteration, lr0, n, a):
return lr0 * pow((1.0 - 1.0 * iteration / n), a)
lr_scheduler = lrs.LambdaLR(
optimizer,
lr_lambda=[
partial(lambda_lr_scheduler, lr0=lr, n=num_epochs * le, a=0.9),
partial(lambda_lr_scheduler, lr0=lr * 10.0, n=num_epochs * le, a=0.9),
],
)
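# Worked example of the poly decay above (comments only, nothing is executed): with
# N = num_epochs * le total iterations and a = 0.9,
#   lambda_lr_scheduler(0, lr0=0.007, n=N, a=0.9)      == 0.007           (start)
#   lambda_lr_scheduler(N // 2, lr0=0.007, n=N, a=0.9) ~= 0.007 * 0.536   (halfway, since 0.5 ** 0.9 ~= 0.536)
#   lambda_lr_scheduler(N, lr0=0.007, n=N, a=0.9)      == 0.0             (end)
# Since the optimizer's base lr is 1.0, LambdaLR uses these values directly as the learning
# rates of the two parameter groups (the classifier group follows the same curve scaled by 10).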
|
# Basic training configuration
import os
from functools import partial
import albumentations as A
import cv2
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
from albumentations.pytorch import ToTensorV2 as ToTensor
from dataflow import get_train_val_loaders, ignore_mask_boundaries
from torchvision.models.segmentation import deeplabv3_resnet101
# ##############################
# Global configs
# ##############################
seed = 21
device = "cuda"
debug = False
# Use AMP with torch native
with_amp = True
num_classes = 21
batch_size = 18 # total batch size
val_batch_size = batch_size * 2
num_workers = 12 # total num workers per node
val_interval = 3
# grads accumulation:
accumulation_steps = 4
val_img_size = 513
train_img_size = 480
# ##############################
# Setup Dataflow
# ##############################
assert "DATASET_PATH" in os.environ
data_path = os.environ["DATASET_PATH"]
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
train_transforms = A.Compose(
[
A.RandomScale(scale_limit=(0.0, 1.5), interpolation=cv2.INTER_LINEAR, p=1.0),
A.PadIfNeeded(val_img_size, val_img_size, border_mode=cv2.BORDER_CONSTANT),
A.RandomCrop(train_img_size, train_img_size),
A.HorizontalFlip(),
A.Blur(blur_limit=3),
A.Normalize(mean=mean, std=std),
ignore_mask_boundaries,
ToTensor(),
]
)
val_transforms = A.Compose(
[
A.PadIfNeeded(val_img_size, val_img_size, border_mode=cv2.BORDER_CONSTANT),
A.Normalize(mean=mean, std=std),
ignore_mask_boundaries,
ToTensor(),
]
)
train_loader, val_loader, train_eval_loader = get_train_val_loaders(
root_path=data_path,
train_transforms=train_transforms,
val_transforms=val_transforms,
batch_size=batch_size,
num_workers=num_workers,
val_batch_size=val_batch_size,
limit_train_num_samples=100 if debug else None,
limit_val_num_samples=100 if debug else None,
)
# ##############################
# Setup model
# ##############################
num_classes = 21
model = deeplabv3_resnet101(num_classes=num_classes)
def model_output_transform(output):
return output["out"]
# ##############################
# Setup solver
# ##############################
save_every_iters = len(train_loader)
num_epochs = 100
criterion = nn.CrossEntropyLoss()
lr = 0.007
weight_decay = 5e-4
momentum = 0.9
nesterov = False
optimizer = optim.SGD(
[{"params": model.backbone.parameters()}, {"params": model.classifier.parameters()}],
lr=1.0,
momentum=momentum,
weight_decay=weight_decay,
nesterov=nesterov,
)
le = len(train_loader)
def lambda_lr_scheduler(iteration, lr0, n, a):
return lr0 * pow((1.0 - 1.0 * iteration / n), a)
lr_scheduler = lrs.LambdaLR(
optimizer,
lr_lambda=[
partial(lambda_lr_scheduler, lr0=lr, n=num_epochs * le, a=0.9),
partial(lambda_lr_scheduler, lr0=lr * 10.0, n=num_epochs * le, a=0.9),
],
)
|
# Basic training configuration
import os
import albumentations as A
import cv2
from albumentations.pytorch import ToTensorV2 as ToTensor
from dataflow import get_inference_dataloader, ignore_mask_boundaries
from torchvision.models.segmentation import deeplabv3_resnet101
# ##############################
# Global configs
# ##############################
seed = 21
device = "cuda"
debug = False
# Use AMP with torch native
with_amp = True
num_classes = 21
batch_size = 9 # total batch size
num_workers = 8 # total num workers per node
val_img_size = 513
# ##############################
# Setup Dataflow
# ##############################
assert "DATASET_PATH" in os.environ
data_path = os.environ["DATASET_PATH"]
assert "SBD_DATASET_PATH" in os.environ
sbd_data_path = os.environ["SBD_DATASET_PATH"]
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
val_transforms = A.Compose(
[
A.PadIfNeeded(val_img_size, val_img_size, border_mode=cv2.BORDER_CONSTANT),
A.Normalize(mean=mean, std=std),
ignore_mask_boundaries,
ToTensor(),
]
)
data_loader = get_inference_dataloader(
root_path=data_path,
mode="test",
transforms=val_transforms,
batch_size=batch_size,
num_workers=num_workers,
limit_num_samples=batch_size * 5 if debug else None,
)
# ##############################
# Setup model
# ##############################
num_classes = 21
model = deeplabv3_resnet101(num_classes=num_classes)
def model_output_transform(output):
return output["out"]
# baseline_dplv3_resnet101_sbd: best_model_78_val_miou_bg=0.6871.pt
weights_path = "d8b4687d86cf445a944853fdd6a6b999"
# or can specify a path
# weights_path = "/path/to/best_model.pt"
|
import argparse
from collections import deque, namedtuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
from ignite.engine import Engine, Events
try:
import gymnasium as gym
except ImportError:
raise ModuleNotFoundError("Please install gymnasium: pip install gymnasium")
SavedAction = namedtuple("SavedAction", ["log_prob", "value"])
eps = np.finfo(np.float32).eps.item()
class Policy(nn.Module):
"""
implements both actor and critic in one model
"""
def __init__(self):
super(Policy, self).__init__()
self.affine1 = nn.Linear(4, 128)
# actor's layer
self.action_head = nn.Linear(128, 2)
# critic's layer
self.value_head = nn.Linear(128, 1)
# action & reward buffer
self.saved_actions = []
self.rewards = []
def forward(self, x):
"""
forward of both actor and critic
"""
x = F.relu(self.affine1(x))
# actor: chooses action to take from state s_t
# by returning probability of each action
action_prob = F.softmax(self.action_head(x), dim=-1)
# critic: evaluates the value of being in the state s_t
state_values = self.value_head(x)
# return values for both actor and critic as a tuple of 2 values:
# 1. a list with the probability of each action over the action space
# 2. the value from state s_t
return action_prob, state_values
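# A minimal sketch (added for illustration, never called by the script): a single CartPole
# observation is a vector of 4 floats; the policy returns 2 action probabilities that sum
# to 1 and a scalar state-value estimate.
def _policy_shapes_example():
    probs, value = Policy()(torch.rand(4))
    assert probs.shape == (2,) and value.shape == (1,)
    assert torch.isclose(probs.sum(), torch.tensor(1.0))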
def select_action(policy, observation):
observation = torch.from_numpy(observation).float()
probs, observation_value = policy(observation)
# create a categorical distribution over the list of probabilities of actions
m = Categorical(probs)
# and sample an action using the distribution
action = m.sample()
# save to action buffer
policy.saved_actions.append(SavedAction(m.log_prob(action), observation_value))
# the action to take (left or right)
return action.item()
def finish_episode(policy, optimizer, gamma):
"""
Training code. Calculates actor and critic loss and performs backprop.
"""
R = 0
saved_actions = policy.saved_actions
policy_losses = [] # list to save actor (policy) loss
value_losses = [] # list to save critic (value) loss
returns = deque() # list to save the true values
# calculate the true value using rewards returned from the environment
for r in policy.rewards[::-1]:
# calculate the discounted value
R = r + gamma * R
returns.appendleft(R)
returns = torch.tensor(returns)
returns = (returns - returns.mean()) / (returns.std() + eps)
for (log_prob, value), R in zip(saved_actions, returns):
advantage = R - value.item()
# calculate actor (policy) loss
policy_losses.append(-log_prob * advantage)
# calculate critic (value) loss using L1 smooth loss
value_losses.append(F.smooth_l1_loss(value, torch.tensor([R])))
# reset gradients
optimizer.zero_grad()
# sum up all the values of policy_losses and value_losses
loss = torch.stack(policy_losses).sum() + torch.stack(value_losses).sum()
# perform backprop
loss.backward()
optimizer.step()
# reset rewards and action buffer
del policy.rewards[:]
del policy.saved_actions[:]
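# A worked example of the reverse return accumulation above (illustrative helper, never
# called by the script): with rewards [1, 1, 1] and gamma = 0.99 the discounted returns
# are [2.9701, 1.99, 1.0] before standardization.
def _discounted_returns_example(gamma=0.99):
    R, returns = 0.0, deque()
    for r in [1.0, 1.0, 1.0][::-1]:
        R = r + gamma * R
        returns.appendleft(R)
    return list(returns)  # [2.9701..., 1.99, 1.0]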
EPISODE_STARTED = Events.EPOCH_STARTED
EPISODE_COMPLETED = Events.EPOCH_COMPLETED
def main(env, args):
policy = Policy()
optimizer = optim.Adam(policy.parameters(), lr=3e-2)
timesteps = range(10000)
def run_single_timestep(engine, timestep):
observation = engine.state.observation
# select action from policy
action = select_action(policy, observation)
# take the action
engine.state.observation, reward, done, _, _ = env.step(action)
if args.render:
env.render()
policy.rewards.append(reward)
engine.state.ep_reward += reward
if done:
engine.terminate_epoch()
engine.state.timestep = timestep
trainer = Engine(run_single_timestep)
trainer.state.running_reward = 10
@trainer.on(EPISODE_STARTED)
def reset_environment_state():
# reset environment and episode reward
torch.manual_seed(args.seed + trainer.state.epoch)
trainer.state.observation, _ = env.reset(seed=args.seed + trainer.state.epoch)
trainer.state.ep_reward = 0
@trainer.on(EPISODE_COMPLETED)
def update_model():
# update cumulative reward
t = trainer.state.timestep
trainer.state.running_reward = 0.05 * trainer.state.ep_reward + (1 - 0.05) * trainer.state.running_reward
# perform backprop
finish_episode(policy, optimizer, args.gamma)
@trainer.on(EPISODE_COMPLETED(every=args.log_interval))
def log_episode():
i_episode = trainer.state.epoch
print(
f"Episode {i_episode}\tLast reward: {trainer.state.ep_reward:.2f}"
f"\tAverage reward: {trainer.state.running_reward:.2f}"
)
@trainer.on(EPISODE_COMPLETED)
def should_finish_training():
# check if we have "solved" the cart pole problem
running_reward = trainer.state.running_reward
if running_reward > env.spec.reward_threshold:
print(
f"Solved! Running reward is now {running_reward} and "
f"the last episode runs to {trainer.state.timestep} time steps!"
)
trainer.should_terminate = True
trainer.run(timesteps, max_epochs=args.max_episodes)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Ignite actor-critic example")
parser.add_argument("--gamma", type=float, default=0.99, metavar="G", help="discount factor (default: 0.99)")
parser.add_argument("--seed", type=int, default=543, metavar="N", help="random seed (default: 1)")
parser.add_argument("--render", action="store_true", help="render the environment")
parser.add_argument(
"--log-interval", type=int, default=10, metavar="N", help="interval between training status logs (default: 10)"
)
parser.add_argument(
"--max-episodes",
type=int,
default=1000000,
metavar="N",
help="Number of episodes for the training (default: 1000000)",
)
args = parser.parse_args()
env = gym.make("CartPole-v1")
main(env, args)
|
import argparse
from collections import deque
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
from ignite.engine import Engine, Events
try:
import gymnasium as gym
except ImportError:
raise ModuleNotFoundError("Please install gymnasium: pip install gymnasium")
eps = np.finfo(np.float32).eps.item()
class Policy(nn.Module):
def __init__(self):
super(Policy, self).__init__()
self.affine1 = nn.Linear(4, 128)
self.dropout = nn.Dropout(p=0.6)
self.affine2 = nn.Linear(128, 2)
self.saved_log_probs = []
self.rewards = []
def forward(self, x):
x = self.affine1(x)
x = self.dropout(x)
x = F.relu(x)
action_scores = self.affine2(x)
return F.softmax(action_scores, dim=1)
def select_action(policy, observation):
state = torch.from_numpy(observation).float().unsqueeze(0)
probs = policy(state)
m = Categorical(probs)
action = m.sample()
policy.saved_log_probs.append(m.log_prob(action))
return action.item()
def finish_episode(policy, optimizer, gamma):
R = 0
policy_loss = []
returns = deque()
for r in policy.rewards[::-1]:
R = r + gamma * R
returns.appendleft(R)
returns = torch.tensor(returns)
returns = (returns - returns.mean()) / (returns.std() + eps)
for log_prob, R in zip(policy.saved_log_probs, returns):
policy_loss.append(-log_prob * R)
optimizer.zero_grad()
policy_loss = torch.cat(policy_loss).sum()
policy_loss.backward()
optimizer.step()
del policy.rewards[:]
del policy.saved_log_probs[:]
EPISODE_STARTED = Events.EPOCH_STARTED
EPISODE_COMPLETED = Events.EPOCH_COMPLETED
def main(env, args):
policy = Policy()
optimizer = optim.Adam(policy.parameters(), lr=1e-2)
timesteps = range(10000)
def run_single_timestep(engine, timestep):
observation = engine.state.observation
action = select_action(policy, observation)
engine.state.observation, reward, done, _, _ = env.step(action)
if args.render:
env.render()
policy.rewards.append(reward)
engine.state.ep_reward += reward
if done:
engine.terminate_epoch()
engine.state.timestep = timestep
trainer = Engine(run_single_timestep)
trainer.state.running_reward = 10
@trainer.on(EPISODE_STARTED)
def reset_environment_state():
torch.manual_seed(args.seed + trainer.state.epoch)
trainer.state.observation, _ = env.reset(seed=args.seed + trainer.state.epoch)
trainer.state.ep_reward = 0
@trainer.on(EPISODE_COMPLETED)
def update_model():
trainer.state.running_reward = 0.05 * trainer.state.ep_reward + (1 - 0.05) * trainer.state.running_reward
finish_episode(policy, optimizer, args.gamma)
@trainer.on(EPISODE_COMPLETED(every=args.log_interval))
def log_episode():
i_episode = trainer.state.epoch
print(
f"Episode {i_episode}\tLast reward: {trainer.state.ep_reward:.2f}"
f"\tAverage length: {trainer.state.running_reward:.2f}"
)
@trainer.on(EPISODE_COMPLETED)
def should_finish_training():
running_reward = trainer.state.running_reward
if running_reward > env.spec.reward_threshold:
print(
f"Solved! Running reward is now {running_reward} and "
f"the last episode runs to {trainer.state.timestep} time steps!"
)
trainer.should_terminate = True
trainer.run(timesteps, max_epochs=args.max_episodes)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="PyTorch REINFORCE example")
parser.add_argument("--gamma", type=float, default=0.99, metavar="G", help="discount factor (default: 0.99)")
parser.add_argument("--seed", type=int, default=543, metavar="N", help="random seed (default: 543)")
parser.add_argument("--render", action="store_true", help="render the environment")
parser.add_argument(
"--log-interval", type=int, default=10, metavar="N", help="interval between training status logs (default: 10)"
)
parser.add_argument(
"--max-episodes",
type=int,
default=1000000,
metavar="N",
help="Number of episodes for the training (default: 1000000)",
)
args = parser.parse_args()
env = gym.make("CartPole-v1")
main(env, args)
|
import torch
class TransformerNet(torch.nn.Module):
def __init__(self):
super(TransformerNet, self).__init__()
# Initial convolution layers
self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1)
self.in1 = torch.nn.InstanceNorm2d(32, affine=True)
self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2)
self.in2 = torch.nn.InstanceNorm2d(64, affine=True)
self.conv3 = ConvLayer(64, 128, kernel_size=3, stride=2)
self.in3 = torch.nn.InstanceNorm2d(128, affine=True)
# Residual layers
self.res1 = ResidualBlock(128)
self.res2 = ResidualBlock(128)
self.res3 = ResidualBlock(128)
self.res4 = ResidualBlock(128)
self.res5 = ResidualBlock(128)
# Upsampling Layers
self.deconv1 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2)
self.in4 = torch.nn.InstanceNorm2d(64, affine=True)
self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2)
self.in5 = torch.nn.InstanceNorm2d(32, affine=True)
self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1)
# Non-linearities
self.relu = torch.nn.ReLU()
def forward(self, X):
y = self.relu(self.in1(self.conv1(X)))
y = self.relu(self.in2(self.conv2(y)))
y = self.relu(self.in3(self.conv3(y)))
y = self.res1(y)
y = self.res2(y)
y = self.res3(y)
y = self.res4(y)
y = self.res5(y)
y = self.relu(self.in4(self.deconv1(y)))
y = self.relu(self.in5(self.deconv2(y)))
y = self.deconv3(y)
return y
class ConvLayer(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayer, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
class ResidualBlock(torch.nn.Module):
"""ResidualBlock
introduced in: https://arxiv.org/abs/1512.03385
recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html
"""
def __init__(self, channels):
super(ResidualBlock, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in1 = torch.nn.InstanceNorm2d(channels, affine=True)
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in2 = torch.nn.InstanceNorm2d(channels, affine=True)
self.relu = torch.nn.ReLU()
def forward(self, x):
residual = x
out = self.relu(self.in1(self.conv1(x)))
out = self.in2(self.conv2(out))
out = out + residual
return out
class UpsampleConvLayer(torch.nn.Module):
"""UpsampleConvLayer
Upsamples the input and then does a convolution. This method gives better results
compared to ConvTranspose2d.
ref: http://distill.pub/2016/deconv-checkerboard/
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None):
super(UpsampleConvLayer, self).__init__()
self.upsample = upsample
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
x_in = x
if self.upsample:
x_in = torch.nn.functional.interpolate(x_in, mode="nearest", scale_factor=self.upsample)
out = self.reflection_pad(x_in)
out = self.conv2d(out)
return out
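# Shape sketch for the layer above (illustrative helper, not used by the model): with
# ``upsample=2`` a 64-channel 16x16 feature map is first interpolated to 32x32 and then
# reflection-padded and convolved down to 32 channels at the same spatial size.
def _upsample_conv_shape_example():
    layer = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2)
    out = layer(torch.rand(1, 64, 16, 16))
    assert out.shape == (1, 32, 32, 32)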
|
from collections import namedtuple
import torch
from torchvision import models
from torchvision.models.vgg import VGG16_Weights
class Vgg16(torch.nn.Module):
def __init__(self, requires_grad=False):
super(Vgg16, self).__init__()
vgg_pretrained_features = models.vgg16(weights=VGG16_Weights.IMAGENET1K_V1).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
for x in range(4):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(4, 9):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(9, 16):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(16, 23):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h = self.slice1(X)
h_relu1_2 = h
h = self.slice2(h)
h_relu2_2 = h
h = self.slice3(h)
h_relu3_3 = h
h = self.slice4(h)
h_relu4_3 = h
vgg_outputs = namedtuple("VggOutputs", ["relu1_2", "relu2_2", "relu3_3", "relu4_3"])
out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3)
return out
|
import sys
class Progbar(object):
def __init__(self, loader, metrics):
self.num_iterations = len(loader)
self.output_stream = sys.stdout
self.metrics = metrics
self.alpha = 0.98
def _calc_running_avg(self, engine):
for k, v in engine.state.output.items():
old_v = self.metrics.get(k, v)
new_v = self.alpha * old_v + (1 - self.alpha) * v
self.metrics[k] = new_v
def __call__(self, engine):
self._calc_running_avg(engine)
num_seen = engine.state.iteration - self.num_iterations * (engine.state.epoch - 1)
percent_seen = 100 * float(num_seen) / self.num_iterations
equal_to = int(percent_seen / 10)
done = int(percent_seen) == 100
bar = "[" + "=" * equal_to + ">" * (not done) + " " * (10 - equal_to) + "]"
message = f"Epoch {engine.state.epoch} | {percent_seen:.2f}% | {bar}"
for key, value in self.metrics.items():
message += f" | {key}: {value:.2e}"
message += "\r"
self.output_stream.write(message)
self.output_stream.flush()
if done:
self.output_stream.write("\n")
|
# coding: utf-8
import argparse
import random
from collections import OrderedDict
from pathlib import Path
import numpy as np
import torch
import utils
from handlers import Progbar
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from transformer_net import TransformerNet
from vgg import Vgg16
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint
def check_paths(args):
try:
if args.checkpoint_model_dir is not None and not (Path(args.checkpoint_model_dir).exists()):
Path(args.checkpoint_model_dir).mkdir(parents=True)
except OSError as e:
raise OSError(e)
def check_manual_seed(args):
seed = args.seed or random.randint(1, 10000)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
def check_dataset(args):
transform = transforms.Compose(
[
transforms.Resize(args.image_size),
transforms.CenterCrop(args.image_size),
transforms.ToTensor(),
transforms.Lambda(lambda x: x.mul(255)),
]
)
if args.dataset in {"folder", "mscoco"}:
train_dataset = datasets.ImageFolder(args.dataroot, transform)
elif args.dataset == "test":
train_dataset = datasets.FakeData(
size=args.batch_size, image_size=(3, 32, 32), num_classes=1, transform=transform
)
else:
raise RuntimeError(f"Invalid dataset name: {args.dataset}")
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0)
return train_loader
def train(args):
device = torch.device("cuda" if args.cuda else "cpu")
train_loader = check_dataset(args)
transformer = TransformerNet().to(device)
optimizer = Adam(transformer.parameters(), args.lr)
mse_loss = torch.nn.MSELoss()
vgg = Vgg16(requires_grad=False).to(device)
style_transform = transforms.Compose([transforms.ToTensor(), transforms.Lambda(lambda x: x.mul(255))])
style = utils.load_image(args.style_image, size=args.style_size)
style = style_transform(style)
style = style.repeat(args.batch_size, 1, 1, 1).to(device)
features_style = vgg(utils.normalize_batch(style))
gram_style = [utils.gram_matrix(y) for y in features_style]
running_avgs = OrderedDict()
def step(engine, batch):
x, _ = batch
x = x.to(device)
n_batch = len(x)
optimizer.zero_grad()
y = transformer(x)
x = utils.normalize_batch(x)
y = utils.normalize_batch(y)
features_x = vgg(x)
features_y = vgg(y)
content_loss = args.content_weight * mse_loss(features_y.relu2_2, features_x.relu2_2)
style_loss = 0.0
for ft_y, gm_s in zip(features_y, gram_style):
gm_y = utils.gram_matrix(ft_y)
style_loss += mse_loss(gm_y, gm_s[:n_batch, :, :])
style_loss *= args.style_weight
total_loss = content_loss + style_loss
total_loss.backward()
optimizer.step()
return {"content_loss": content_loss.item(), "style_loss": style_loss.item(), "total_loss": total_loss.item()}
trainer = Engine(step)
checkpoint_handler = ModelCheckpoint(
args.checkpoint_model_dir, "checkpoint", n_saved=10, require_empty=False, create_dir=True
)
progress_bar = Progbar(loader=train_loader, metrics=running_avgs)
trainer.add_event_handler(
event_name=Events.EPOCH_COMPLETED(every=args.checkpoint_interval),
handler=checkpoint_handler,
to_save={"net": transformer},
)
trainer.add_event_handler(event_name=Events.ITERATION_COMPLETED, handler=progress_bar)
trainer.run(train_loader, max_epochs=args.epochs)
def stylize(args):
device = torch.device("cuda" if args.cuda else "cpu")
content_transform = transforms.Compose([transforms.ToTensor(), transforms.Lambda(lambda x: x.mul(255))])
content_image = utils.load_image(args.content_image, scale=args.content_scale)
content_image = content_transform(content_image)
content_image = content_image.unsqueeze(0).to(device)
with torch.no_grad():
style_model = torch.load(args.model)
style_model.to(device)
output = style_model(content_image).cpu()
utils.save_image(args.output_image, output[0])
def main():
main_arg_parser = argparse.ArgumentParser(description="parser for fast-neural-style")
subparsers = main_arg_parser.add_subparsers(title="subcommands", dest="subcommand")
train_arg_parser = subparsers.add_parser("train", help="parser for training arguments")
train_arg_parser.add_argument("--epochs", type=int, default=2, help="number of training epochs, default is 2")
train_arg_parser.add_argument("--batch_size", type=int, default=8, help="batch size for training, default is 8")
train_arg_parser.add_argument(
"--dataset", type=str, required=True, choices={"test", "folder", "mscoco"}, help="type of dataset to be used."
)
train_arg_parser.add_argument(
"--dataroot",
type=str,
required=True,
help="path to training dataset, the path should point to a folder "
"containing another folder with all the training images",
)
train_arg_parser.add_argument("--style_image", type=str, default="test", help="path to style-image")
train_arg_parser.add_argument("--test_image", type=str, default="test", help="path to test-image")
train_arg_parser.add_argument(
"--checkpoint_model_dir",
type=str,
default="/tmp/checkpoints",
help="path to folder where checkpoints of trained models will be saved",
)
train_arg_parser.add_argument(
"--checkpoint_interval",
type=int,
default=1,
help="number of batches after which a checkpoint of trained model will be created",
)
train_arg_parser.add_argument(
"--image_size", type=int, default=256, help="size of training images, default is 256 X 256"
)
train_arg_parser.add_argument(
"--style_size", type=int, default=None, help="size of style-image, default is the original size of style image"
)
train_arg_parser.add_argument("--cuda", type=int, default=1, help="set it to 1 for running on GPU, 0 for CPU")
train_arg_parser.add_argument("--seed", type=int, default=42, help="random seed for training")
train_arg_parser.add_argument(
"--content_weight", type=float, default=1e5, help="weight for content-loss, default is 1e5"
)
train_arg_parser.add_argument(
"--style_weight", type=float, default=1e10, help="weight for style-loss, default is 1e10"
)
train_arg_parser.add_argument("--lr", type=float, default=1e-3, help="learning rate, default is 1e-3")
eval_arg_parser = subparsers.add_parser("eval", help="parser for evaluation/stylizing arguments")
eval_arg_parser.add_argument(
"--content_image", type=str, required=True, help="path to content image you want to stylize"
)
eval_arg_parser.add_argument(
"--content_scale", type=float, default=None, help="factor for scaling down the content image"
)
eval_arg_parser.add_argument("--output_image", type=str, required=True, help="path for saving the output image")
eval_arg_parser.add_argument(
"--model", type=str, required=True, help="saved model to be used for stylizing the image."
)
eval_arg_parser.add_argument("--cuda", type=int, required=True, help="set it to 1 for running on GPU, 0 for CPU")
args = main_arg_parser.parse_args()
if args.subcommand is None:
raise ValueError("ERROR: specify either train or eval")
if args.cuda and not torch.cuda.is_available():
raise ValueError("ERROR: cuda is not available, try running on CPU")
if args.subcommand == "train":
check_manual_seed(args)
check_paths(args)
train(args)
else:
stylize(args)
if __name__ == "__main__":
main()
|
from PIL import Image
def load_image(filename, size=None, scale=None):
img = Image.open(filename)
if size is not None:
img = img.resize((size, size), Image.LANCZOS)
elif scale is not None:
img = img.resize((int(img.size[0] / scale), int(img.size[1] / scale)), Image.LANCZOS)
return img
def save_image(filename, data):
img = data.clone().clamp(0, 255).numpy()
img = img.transpose(1, 2, 0).astype("uint8")
img = Image.fromarray(img)
img.save(filename)
def gram_matrix(y):
(b, ch, h, w) = y.size()
features = y.view(b, ch, w * h)
features_t = features.transpose(1, 2)
gram = features.bmm(features_t) / (ch * h * w)
return gram
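# A minimal sketch of what ``gram_matrix`` produces (illustrative helper, not used by the
# training script): for a batch of feature maps of shape (b, ch, h, w) the result has shape
# (b, ch, ch) and holds channel-to-channel correlations normalized by ch * h * w.
def _gram_matrix_shape_example():
    import torch
    g = gram_matrix(torch.rand(4, 64, 32, 32))
    assert g.shape == (4, 64, 64)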
def normalize_batch(batch):
# normalize using imagenet mean and std
mean = batch.new_tensor([0.485, 0.456, 0.406]).view(-1, 1, 1)
std = batch.new_tensor([0.229, 0.224, 0.225]).view(-1, 1, 1)
batch = batch.div_(255.0)
return (batch - mean) / std
|
import torch.nn as nn
from transformers import AutoConfig, AutoModelForSequenceClassification
class TransformerModel(nn.Module):
def __init__(self, model_name, model_dir, dropout, n_fc, n_classes):
super(TransformerModel, self).__init__()
self.config = AutoConfig.from_pretrained(
model_name,
num_labels=n_classes,
output_hidden_states=n_fc,
classifier_dropout=dropout,
output_attentions=True,
)
self.transformer = AutoModelForSequenceClassification.from_pretrained(
model_name, cache_dir=model_dir, config=self.config
)
def forward(self, inputs):
output = self.transformer(**inputs)["logits"]
return output
|
import torch
class TransformerDataset(torch.utils.data.Dataset):
def __init__(self, texts, labels, tokenizer, max_length):
self.texts = texts
self.labels = labels
self.tokenizer = tokenizer
self.max_length = max_length
def __getitem__(self, idx):
text = str(self.texts[idx])
text = " ".join(text.split())
inputs = self.tokenizer(
text,
None,
add_special_tokens=True,
max_length=self.max_length,
truncation=True,
padding="max_length",
return_tensors="pt",
)
inputs = {k: v.type(torch.long).squeeze(0) for k, v in inputs.items()}
labels_pt = torch.tensor(self.labels[idx], dtype=torch.float)
return inputs, labels_pt
def __len__(self):
return len(self.labels)
|
import torch
from dataset import TransformerDataset
from datasets import load_dataset
from model import TransformerModel
from transformers import AutoTokenizer
from ignite.handlers import DiskSaver
def get_tokenizer(tokenizer_name, tokenizer_dir):
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, cache_dir=tokenizer_dir, do_lower_case=True)
return tokenizer
def get_model(model_name, model_dir, drop_out, n_fc, num_classes):
model = TransformerModel(model_name, model_dir, drop_out, n_fc, num_classes)
return model
def get_dataset(cache_dir, tokenizer_name, tokenizer_dir, max_length):
train_dataset, test_dataset = load_dataset("imdb", split=["train", "test"], cache_dir=cache_dir)
tokenizer = get_tokenizer(tokenizer_name, tokenizer_dir)
train_texts, train_labels = train_dataset["text"], train_dataset["label"]
test_texts, test_labels = test_dataset["text"], test_dataset["label"]
train_dataset = TransformerDataset(train_texts, train_labels, tokenizer, max_length)
test_dataset = TransformerDataset(test_texts, test_labels, tokenizer, max_length)
return train_dataset, test_dataset
def thresholded_output_transform(output):
y_pred, y = output
return torch.round(torch.sigmoid(y_pred)), y
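# A minimal sketch of the transform above (illustrative helper, not used elsewhere): raw
# logits are squashed with a sigmoid and rounded to hard 0/1 predictions before Accuracy
# is computed, e.g. logits (-2.0, 0.1, 3.0) become predictions (0., 1., 1.).
def _thresholded_output_example():
    y_pred, y = thresholded_output_transform((torch.tensor([-2.0, 0.1, 3.0]), torch.tensor([0.0, 1.0, 1.0])))
    assert torch.equal(y_pred, torch.tensor([0.0, 1.0, 1.0]))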
def get_save_handler(config):
if config["with_clearml"]:
from ignite.contrib.handlers.clearml_logger import ClearMLSaver
return ClearMLSaver(dirname=config["output_dir"])
return DiskSaver(config["output_dir"], require_empty=False)
|
import os
from datetime import datetime
from pathlib import Path
import fire
import torch
import torch.nn as nn
import torch.optim as optim
import utils
from torch.cuda.amp import autocast, GradScaler
import ignite
import ignite.distributed as idist
from ignite.contrib.engines import common
from ignite.contrib.handlers import PiecewiseLinear
from ignite.engine import Engine, Events
from ignite.handlers import Checkpoint, global_step_from_engine
from ignite.metrics import Accuracy, Loss
from ignite.utils import manual_seed, setup_logger
os.environ["TOKENIZERS_PARALLELISM"] = "false" # remove tokenizer paralleism warning
def training(local_rank, config):
rank = idist.get_rank()
manual_seed(config["seed"] + rank)
device = idist.device()
logger = setup_logger(name="IMDB-Training", distributed_rank=local_rank)
log_basic_info(logger, config)
output_path = config["output_dir"]
if rank == 0:
now = datetime.now().strftime("%Y%m%d-%H%M%S")
folder_name = f"{config['model']}_backend-{idist.backend()}-{idist.get_world_size()}_{now}"
output_path = Path(output_path) / folder_name
if not output_path.exists():
output_path.mkdir(parents=True)
config["output_dir"] = output_path.as_posix()
logger.info(f"Output path: {config['output_dir']}")
if "cuda" in device.type:
config["cuda device name"] = torch.cuda.get_device_name(local_rank)
if config["with_clearml"]:
from clearml import Task
task = Task.init("IMDB-Training", task_name=output_path.stem)
task.connect_configuration(config)
# Log hyper parameters
hyper_params = [
"model",
"dropout",
"n_fc",
"batch_size",
"max_length",
"weight_decay",
"num_epochs",
"learning_rate",
"num_warmup_epochs",
]
task.connect({k: config[k] for k in hyper_params})
# Setup dataflow, model, optimizer, criterion
train_loader, test_loader = get_dataflow(config)
config["num_iters_per_epoch"] = len(train_loader)
model, optimizer, criterion, lr_scheduler = initialize(config)
# Create trainer for current task
trainer = create_trainer(model, optimizer, criterion, lr_scheduler, train_loader.sampler, config, logger)
# Let's now setup evaluator engine to perform model's validation and compute metrics
metrics = {
"Accuracy": Accuracy(output_transform=utils.thresholded_output_transform),
"Loss": Loss(criterion),
}
# We define two evaluators as they won't have exactly the same role:
# - `evaluator` runs on the test set and is used to save the best model based on the validation score
# - `train_evaluator` runs on the training set to monitor metrics on data seen during training
evaluator = create_evaluator(model, metrics, config, tag="val")
train_evaluator = create_evaluator(model, metrics, config, tag="train")
def run_validation(engine):
epoch = trainer.state.epoch
state = train_evaluator.run(train_loader)
log_metrics(logger, epoch, state.times["COMPLETED"], "Train", state.metrics)
state = evaluator.run(test_loader)
log_metrics(logger, epoch, state.times["COMPLETED"], "Test", state.metrics)
trainer.add_event_handler(
Events.EPOCH_COMPLETED(every=config["validate_every"]) | Events.COMPLETED | Events.STARTED, run_validation
)
if rank == 0:
# Setup TensorBoard logging on trainer and evaluators. Logged values are:
# - Training metrics, e.g. running average loss values
# - Learning rate
# - Evaluation train/test metrics
evaluators = {"training": train_evaluator, "test": evaluator}
tb_logger = common.setup_tb_logging(
output_path, trainer, optimizer, evaluators=evaluators, log_every_iters=config["log_every_iters"]
)
# Store 2 best models by validation accuracy starting from num_epochs / 2:
best_model_handler = Checkpoint(
{"model": model},
utils.get_save_handler(config),
filename_prefix="best",
n_saved=2,
global_step_transform=global_step_from_engine(trainer),
score_name="test_accuracy",
score_function=Checkpoint.get_default_score_fn("Accuracy"),
)
evaluator.add_event_handler(
Events.COMPLETED(lambda *_: trainer.state.epoch > config["num_epochs"] // 2), best_model_handler
)
try:
trainer.run(train_loader, max_epochs=config["num_epochs"])
except Exception as e:
logger.exception("")
raise e
if rank == 0:
tb_logger.close()
def run(
seed=543,
data_dir="/tmp/data",
output_dir="/tmp/output-imdb/",
model="bert-base-uncased",
model_dir="/tmp/model",
tokenizer_dir="/tmp/tokenizer",
num_classes=1,
dropout=0.3,
n_fc=768,
max_length=256,
batch_size=32,
weight_decay=0.01,
num_workers=4,
num_epochs=3,
learning_rate=5e-5,
num_warmup_epochs=0,
validate_every=1,
checkpoint_every=1000,
backend=None,
resume_from=None,
log_every_iters=15,
nproc_per_node=None,
with_clearml=False,
with_amp=False,
**spawn_kwargs,
):
"""Main entry to fintune a transformer model on the IMDB dataset for sentiment classification.
Args:
seed (int): random state seed to set. Default, 543.
data_dir (str): dataset cache directory. Default, "/tmp/data".
output_dir (str): output directory. Default, "/tmp/output-imdb/".
model (str): model name (from transformers) to setup model,tokenize and config to train. Default,
"bert-base-uncased".
model_dir (str): cache directory to download the pretrained model. Default, "/tmp/model".
tokenizer_dir (str) : tokenizer cache directory. Default, "/tmp/tokenizer".
num_classes (int) : number of target classes. Default, 1 (binary classification).
dropout (float) : dropout probability. Default, 0.3.
n_fc (int) : number of neurons in the last fully connected layer. Default, 768.
max_length (int) : maximum number of tokens for the inputs to the transformer model. Default, 256.
batch_size (int): total batch size. Default, 32.
weight_decay (float): weight decay. Default, 0.01.
num_workers (int): number of workers in the data loader. Default, 4.
num_epochs (int): number of epochs to train the model. Default, 3.
learning_rate (float): peak of piecewise linear learning rate scheduler. Default, 5e-5.
num_warmup_epochs (int): number of warm-up epochs before learning rate decay. Default, 0.
validate_every (int): run model's validation every ``validate_every`` epochs. Default, 1.
checkpoint_every (int): store training checkpoint every ``checkpoint_every`` iterations. Default, 1000.
backend (str, optional): backend to use for distributed configuration. Possible values: None, "nccl", "xla-tpu",
"gloo" etc. Default, None.
nproc_per_node (int, optional): optional argument to setup number of processes per node. It is useful,
when main python process is spawning training as child processes.
resume_from (str, optional): path to checkpoint to use to resume the training from. Default, None.
log_every_iters (int): argument to log batch loss every ``log_every_iters`` iterations.
It can be 0 to disable it. Default, 15.
with_clearml (bool): if True, experiment ClearML logger is setup. Default, False.
with_amp (bool): if True, enables native automatic mixed precision. Default, False.
**spawn_kwargs: Other kwargs to spawn run in child processes: master_addr, master_port, node_rank, nnodes
"""
# ensure that num_epochs is strictly greater than num_warmup_epochs
if num_warmup_epochs >= num_epochs:
raise ValueError(
"num_epochs cannot be less than or equal to num_warmup_epochs, please increase num_epochs or decrease "
"num_warmup_epochs"
)
# catch all local parameters
config = locals()
config.update(config["spawn_kwargs"])
del config["spawn_kwargs"]
spawn_kwargs["nproc_per_node"] = nproc_per_node
with idist.Parallel(backend=backend, **spawn_kwargs) as parallel:
parallel.run(training, config)
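# Example invocation through the ``fire`` CLI defined at the bottom of this script
# (values are illustrative, not part of the original example):
#   python -u main.py run --backend="nccl" --nproc_per_node=2 --with_amp=True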
def get_dataflow(config):
# - Get train/test datasets
if idist.get_local_rank() > 0:
# Ensure that only local rank 0 downloads the dataset,
# so that each node downloads exactly one copy of the dataset
idist.barrier()
train_dataset, test_dataset = utils.get_dataset(
config["data_dir"], config["model"], config["tokenizer_dir"], config["max_length"]
)
if idist.get_local_rank() == 0:
# Local rank 0 has finished downloading: release the other ranks
idist.barrier()
# Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu
train_loader = idist.auto_dataloader(
train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True
)
test_loader = idist.auto_dataloader(
test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False
)
return train_loader, test_loader
def initialize(config):
model = utils.get_model(
config["model"], config["model_dir"], config["dropout"], config["n_fc"], config["num_classes"]
)
config["learning_rate"] *= idist.get_world_size()
# Adapt model for distributed settings if configured
model = idist.auto_model(model)
optimizer = optim.AdamW(model.parameters(), lr=config["learning_rate"], weight_decay=config["weight_decay"])
optimizer = idist.auto_optim(optimizer)
criterion = nn.BCEWithLogitsLoss()
le = config["num_iters_per_epoch"]
milestones_values = [
(0, 0.0),
(le * config["num_warmup_epochs"], config["learning_rate"]),
(le * config["num_epochs"], 0.0),
]
lr_scheduler = PiecewiseLinear(optimizer, param_name="lr", milestones_values=milestones_values)
return model, optimizer, criterion, lr_scheduler
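# Worked example of the schedule above (comments only, nothing is executed): with the
# hypothetical values le = 100 iterations/epoch, num_warmup_epochs = 1, num_epochs = 3 and
# learning_rate = 5e-5, the milestones become [(0, 0.0), (100, 5e-5), (300, 0.0)], i.e. a
# linear warm-up to the peak over the first epoch followed by a linear decay to 0, so at
# iteration 200 the learning rate is roughly 2.5e-5.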
def log_metrics(logger, epoch, elapsed, tag, metrics):
metrics_output = "\n".join([f"\t{k}: {v}" for k, v in metrics.items()])
logger.info(f"\nEpoch {epoch} - Evaluation time (seconds): {elapsed:.2f} - {tag} metrics:\n {metrics_output}")
def log_basic_info(logger, config):
logger.info(f"Train {config['model']} on IMDB")
logger.info(f"- PyTorch version: {torch.__version__}")
logger.info(f"- Ignite version: {ignite.__version__}")
if torch.cuda.is_available():
# explicitly import cudnn as
# torch.backends.cudnn can not be pickled with hvd spawning procs
from torch.backends import cudnn
logger.info(f"- GPU Device: {torch.cuda.get_device_name(idist.get_local_rank())}")
logger.info(f"- CUDA version: {torch.version.cuda}")
logger.info(f"- CUDNN version: {cudnn.version()}")
logger.info("\n")
logger.info("Configuration:")
for key, value in config.items():
logger.info(f"\t{key}: {value}")
logger.info("\n")
if idist.get_world_size() > 1:
logger.info("\nDistributed setting:")
logger.info(f"\tbackend: {idist.backend()}")
logger.info(f"\tworld size: {idist.get_world_size()}")
logger.info("\n")
def create_trainer(model, optimizer, criterion, lr_scheduler, train_sampler, config, logger):
device = idist.device()
# Setup Ignite trainer:
# - let's define training step
# - add other common handlers:
# - TerminateOnNan,
# - handler to setup learning rate scheduling,
# - ModelCheckpoint
# - RunningAverage` on `train_step` output
# - Two progress bars on epochs and optionally on iterations
with_amp = config["with_amp"]
scaler = GradScaler(enabled=with_amp)
def train_step(engine, batch):
input_batch = batch[0]
labels = batch[1].view(-1, 1)
if labels.device != device:
input_batch = {k: v.to(device, non_blocking=True, dtype=torch.long) for k, v in batch[0].items()}
labels = labels.to(device, non_blocking=True, dtype=torch.float)
model.train()
with autocast(enabled=with_amp):
y_pred = model(input_batch)
loss = criterion(y_pred, labels)
optimizer.zero_grad()
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
return {
"batch loss": loss.item(),
}
trainer = Engine(train_step)
trainer.logger = logger
to_save = {"trainer": trainer, "model": model, "optimizer": optimizer, "lr_scheduler": lr_scheduler}
metric_names = [
"batch loss",
]
if config["log_every_iters"] == 0:
# Disable logging training metrics:
metric_names = None
config["log_every_iters"] = 15
common.setup_common_training_handlers(
trainer=trainer,
train_sampler=train_sampler,
to_save=to_save,
save_every_iters=config["checkpoint_every"],
save_handler=utils.get_save_handler(config),
lr_scheduler=lr_scheduler,
output_names=metric_names,
log_every_iters=config["log_every_iters"],
with_pbars=not config["with_clearml"],
clear_cuda_cache=False,
)
resume_from = config["resume_from"]
if resume_from is not None:
checkpoint_fp = Path(resume_from)
assert checkpoint_fp.exists(), f"Checkpoint '{checkpoint_fp.as_posix()}' is not found"
logger.info(f"Resume from a checkpoint: {checkpoint_fp.as_posix()}")
checkpoint = torch.load(checkpoint_fp.as_posix(), map_location="cpu")
Checkpoint.load_objects(to_load=to_save, checkpoint=checkpoint)
return trainer
def create_evaluator(model, metrics, config, tag="val"):
with_amp = config["with_amp"]
device = idist.device()
@torch.no_grad()
def evaluate_step(engine, batch):
model.eval()
input_batch = batch[0]
labels = batch[1].view(-1, 1)
if labels.device != device:
input_batch = {k: v.to(device, non_blocking=True, dtype=torch.long) for k, v in batch[0].items()}
labels = labels.to(device, non_blocking=True, dtype=torch.float)
with autocast(enabled=with_amp):
output = model(input_batch)
return output, labels
evaluator = Engine(evaluate_step)
for name, metric in metrics.items():
metric.attach(evaluator, name)
if idist.get_rank() == 0 and (not config["with_clearml"]):
common.ProgressBar(desc=f"Evaluation ({tag})", persist=False).attach(evaluator)
return evaluator
if __name__ == "__main__":
fire.Fire({"run": run})
|
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets
from ignite.contrib.handlers import ProgressBar
from ignite.engine import Engine, Events
from ignite.handlers.param_scheduler import LRScheduler
from ignite.metrics import Accuracy, RunningAverage
from ignite.utils import manual_seed
class SiameseNetwork(nn.Module):
# Siamese network implementation adapted to the CIFAR10 dataset used below
"""
Siamese network for image similarity estimation.
The network consists of identical weight-sharing branches, one per input image;
here three inputs (anchor, positive, negative) are passed through the same branch.
Each branch extracts features with a ResNet backbone and projects them to an
embedding through a small fully connected head.
`"FaceNet" <https://arxiv.org/pdf/1503.03832.pdf>`_ is a variant of the Siamese network.
This implementation varies from FaceNet as we use the `ResNet-34` model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`
as our feature extractor.
In addition we use the CIFAR10 dataset along with TripletMarginLoss.
"""
def __init__(self):
super(SiameseNetwork, self).__init__()
# get resnet model
self.resnet = torchvision.models.resnet34(weights=None)
fc_in_features = self.resnet.fc.in_features
# replace the FC layer of the resnet model with an identity mapping so it returns raw features
self.resnet.fc = nn.Identity()
# add linear layers that project the extracted features into the embedding space used for comparison
self.fc = nn.Sequential(
nn.Linear(fc_in_features, 256),
nn.ReLU(inplace=True),
nn.Linear(256, 10),
nn.ReLU(inplace=True),
)
# initialise relu activation
self.relu = nn.ReLU()
# initialize the weights
self.resnet.apply(self.init_weights)
self.fc.apply(self.init_weights)
def init_weights(self, m):
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
def forward_once(self, x):
output = self.resnet(x)
output = output.view(output.size()[0], -1)
return output
def forward(self, input1, input2, input3):
# pass the input through resnet
output1 = self.forward_once(input1)
output2 = self.forward_once(input2)
output3 = self.forward_once(input3)
# map the resnet features to embeddings with the fully connected head
output1 = self.fc(output1)
output2 = self.fc(output2)
output3 = self.fc(output3)
return output1, output2, output3
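# A minimal sketch of the triplet forward pass above (illustrative helper, never called
# during training): three batches of CIFAR10-sized images produce three 10-dimensional
# embeddings per sample, which are later compared with a triplet margin loss.
def _siamese_shapes_example():
    net = SiameseNetwork()
    anchor, positive, negative = (torch.rand(2, 3, 32, 32) for _ in range(3))
    out_a, out_p, out_n = net(anchor, positive, negative)
    assert out_a.shape == out_p.shape == out_n.shape == (2, 10)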
class MatcherDataset(Dataset):
# following class implements data downloading and handles preprocessing
def __init__(self, root, train, download=False):
super(MatcherDataset, self).__init__()
# get CIFAR10 dataset
self.dataset = datasets.CIFAR10(root, train=train, download=download)
# convert data from numpy array to Tensor
self.data = torch.from_numpy(self.dataset.data)
# shift the dimensions of dataset to match the initial input layer dimensions
self.data = torch.movedim(self.data, (0, 1, 2, 3), (0, 2, 3, 1))
# convert targets list to torch Tensor
self.dataset.targets = torch.tensor(self.dataset.targets)
self.group_examples()
def group_examples(self):
"""
To make it easy to access examples by class, `group_examples` groups the dataset
indices by class label. The class labels have already been mapped to numeric values,
which are also the target outputs for each training input.
Every key in `grouped_examples` corresponds to a class of the CIFAR10 dataset, and
its value is the array of all dataset indices whose examples belong to that class.
"""
# get the targets from CIFAR10 dataset
np_arr = np.array(self.dataset.targets)
# group examples based on class
self.grouped_examples = {}
for i in range(0, 10):
self.grouped_examples[i] = np.where((np_arr == i))[0]
def __len__(self):
return self.data.shape[0]
def __getitem__(self, index):
"""
For every sample in the batch we select 3 images. The first one is the anchor image,
which is the image at the current index; we also obtain the label of the anchor image.
We then select two random images: one belonging to the same class as the anchor image
(the positive_image) and one belonging to a different class (the negative_image).
We return the anchor image, positive image, negative image and anchor label.
"""
# obtain the anchor image
anchor_image = self.data[index].float()
# obtain the class label of the anchor image
anchor_label = self.dataset.targets[index]
anchor_label = int(anchor_label.item())
# find a label which is different from anchor_label
labels = list(range(0, 10))
labels.remove(anchor_label)
neg_index = torch.randint(0, 9, (1,)).item()
neg_label = labels[neg_index]
# get a random index within the range of indices for the anchor class
random_index = torch.randint(0, len(self.grouped_examples[anchor_label]), (1,)).item()
# get the index of image in actual data using the anchor label and random index
positive_index = self.grouped_examples[anchor_label][random_index]
# choosing a random image using positive_index
positive_image = self.data[positive_index].float()
# get a random index within the range of indices for the negative class
random_index = torch.randint(0, len(self.grouped_examples[neg_label]), (1,)).item()
# get the index of image in actual data using the negative label and random index
negative_index = self.grouped_examples[neg_label][random_index]
# choosing a random image using negative_index
negative_image = self.data[negative_index].float()
return anchor_image, positive_image, negative_image, anchor_label
def pairwise_distance(input1, input2):
dist = input1 - input2
dist = torch.pow(dist, 2)
return dist
def calculate_loss(input1, input2):
output = pairwise_distance(input1, input2)
loss = torch.sum(output, 1)
loss = torch.sqrt(loss)
return loss
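# Note (not in the original file): despite its name, `pairwise_distance` returns the
# elementwise squared differences, and `calculate_loss` reduces them to the per-sample
# Euclidean (L2) distance between the two embedding batches. A hedged sanity check:
#
#   a, b = torch.randn(4, 10), torch.randn(4, 10)
#   assert torch.allclose(calculate_loss(a, b), torch.linalg.norm(a - b, dim=1))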
def run(args, model, device, optimizer, train_loader, test_loader, lr_scheduler):
# using Triplet Margin Loss
criterion = nn.TripletMarginLoss(p=2, margin=2.8)
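# Descriptive note (not in the original file): nn.TripletMarginLoss computes, per sample,
# max(d(anchor, positive) - d(anchor, negative) + margin, 0) with d the p-norm distance
# (p=2 here) and margin=2.8, pulling the anchor towards the positive embedding and
# pushing it away from the negative one.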
# define model training step
def train_step(engine, batch):
model.train()
anchor_image, positive_image, negative_image, anchor_label = batch
anchor_image = anchor_image.to(device)
positive_image, negative_image = positive_image.to(device), negative_image.to(device)
anchor_label = anchor_label.to(device)
optimizer.zero_grad()
anchor_out, positive_out, negative_out = model(anchor_image, positive_image, negative_image)
loss = criterion(anchor_out, positive_out, negative_out)
loss.backward()
optimizer.step()
return loss
# define model testing step
def test_step(engine, batch):
model.eval()
with torch.no_grad():
anchor_image, _, _, anchor_label = batch
anchor_image = anchor_image.to(device)
anchor_label = anchor_label.to(device)
other_image = []
other_label = []
y_true = []
for i in range(anchor_image.shape[0]):
index = torch.randint(0, anchor_image.shape[0], (1,)).item()
img = anchor_image[index]
label = anchor_label[index]
other_image.append(img)
other_label.append(label)
if anchor_label[i] == other_label[i]:
y_true.append(1)
else:
y_true.append(0)
other = torch.stack(other_image)
other_label = torch.tensor(other_label)
other, other_label = other.to(device), other_label.to(device)
anchor_out, other_out, _ = model(anchor_image, other, other)
test_loss = calculate_loss(anchor_out, other_out)
y_pred = torch.where(test_loss < 3, 1, 0)
y_true = torch.tensor(y_true)
return [y_pred, y_true]
# create engines for trainer and evaluator
trainer = Engine(train_step)
evaluator = Engine(test_step)
# attach Running Average Loss metric to trainer and evaluator engines
RunningAverage(output_transform=lambda x: x).attach(trainer, "loss")
Accuracy(output_transform=lambda x: x).attach(evaluator, "accuracy")
# attach progress bar to trainer with loss
pbar1 = ProgressBar()
pbar1.attach(trainer, metric_names=["loss"])
# attach progress bar to evaluator
pbar2 = ProgressBar()
pbar2.attach(evaluator)
# attach LR Scheduler to trainer engine
trainer.add_event_handler(Events.ITERATION_STARTED, lr_scheduler)
# event handler triggers the evaluator every `log_interval` epochs
@trainer.on(Events.EPOCH_COMPLETED(every=args.log_interval))
def test(engine):
state = evaluator.run(test_loader)
print(f'Test Accuracy: {state.metrics["accuracy"]}')
# run the trainer
trainer.run(train_loader, max_epochs=args.epochs)
def main():
# parse command-line arguments and set training defaults
parser = argparse.ArgumentParser(description="PyTorch Siamese network Example")
parser.add_argument(
"--batch-size", type=int, default=256, metavar="N", help="input batch size for training (default: 64)"
)
parser.add_argument(
"--test-batch-size", type=int, default=256, metavar="N", help="input batch size for testing (default: 1000)"
)
parser.add_argument("--epochs", type=int, default=10, metavar="N", help="number of epochs to train (default: 14)")
parser.add_argument("--lr", type=float, default=1.0, metavar="LR", help="learning rate (default: 1.0)")
parser.add_argument(
"--gamma", type=float, default=0.95, metavar="M", help="Learning rate step gamma (default: 0.7)"
)
parser.add_argument("--no-cuda", action="store_true", default=False, help="disables CUDA training")
parser.add_argument("--no-mps", action="store_true", default=False, help="disables macOS GPU training")
parser.add_argument("--dry-run", action="store_true", default=False, help="quickly check a single pass")
parser.add_argument("--seed", type=int, default=1, metavar="S", help="random seed (default: 1)")
parser.add_argument(
"--log-interval",
type=int,
default=1,
metavar="N",
help="how many batches to wait before logging training status",
)
parser.add_argument("--save-model", action="store_true", default=False, help="For Saving the current Model")
parser.add_argument("--num-workers", default=4, help="number of processes generating parallel batches")
args = parser.parse_args()
# set manual seed
manual_seed(args.seed)
# set device, honoring the --no-cuda flag
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# data loading
train_dataset = MatcherDataset("../data", train=True, download=True)
test_dataset = MatcherDataset("../data", train=False)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=args.batch_size, num_workers=args.num_workers)
test_loader = DataLoader(test_dataset, batch_size=args.test_batch_size, num_workers=args.num_workers)
# set model parameters
model = SiameseNetwork().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
scheduler = StepLR(optimizer, step_size=15, gamma=args.gamma)
lr_scheduler = LRScheduler(scheduler)
# call run function
run(args, model, device, optimizer, train_loader, test_loader, lr_scheduler)
if __name__ == "__main__":
main()
|
import os
from pathlib import Path
from torchvision import datasets, models
from torchvision.transforms import Compose, Normalize, Pad, RandomCrop, RandomHorizontalFlip, ToTensor
train_transform = Compose(
[
Pad(4),
RandomCrop(32, fill=128),
RandomHorizontalFlip(),
ToTensor(),
Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
]
)
test_transform = Compose([ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
def get_train_test_datasets(path):
path = Path(path)
if not path.exists():
path.mkdir(parents=True)
download = True
else:
download = len(os.listdir(path)) < 1
train_ds = datasets.CIFAR10(root=path, train=True, download=download, transform=train_transform)
test_ds = datasets.CIFAR10(root=path, train=False, download=False, transform=test_transform)
return train_ds, test_ds
def get_model(name):
if name in models.__dict__:
fn = models.__dict__[name]
else:
raise RuntimeError(f"Unknown model name {name}")
return fn(num_classes=10)
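# Hedged usage sketch (not part of the original file): any torchvision classification
# architecture can be requested by name and is built with 10 output classes, e.g.
#
#   train_ds, test_ds = get_train_test_datasets("/tmp/cifar10")
#   model = get_model("resnet18")  # torchvision ResNet-18 with num_classes=10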
|
from datetime import datetime
from pathlib import Path
from typing import Any, Optional
import fire
import torch
import torch.nn as nn
import torch.optim as optim
import utils
from torch.cuda.amp import autocast, GradScaler
import ignite
import ignite.distributed as idist
from ignite.contrib.engines import common
from ignite.contrib.handlers import PiecewiseLinear
from ignite.engine import Engine, Events
from ignite.handlers import Checkpoint, DiskSaver, global_step_from_engine
from ignite.metrics import Accuracy, Loss
from ignite.utils import manual_seed, setup_logger
def training(local_rank, config):
rank = idist.get_rank()
manual_seed(config["seed"] + rank)
device = idist.device()
logger = setup_logger(name="CIFAR10-Training")
log_basic_info(logger, config)
output_path = config["output_path"]
if rank == 0:
if config["stop_iteration"] is None:
now = datetime.now().strftime("%Y%m%d-%H%M%S")
else:
now = f"stop-on-{config['stop_iteration']}"
folder_name = f"{config['model']}_backend-{idist.backend()}-{idist.get_world_size()}_{now}"
output_path = Path(output_path) / folder_name
if not output_path.exists():
output_path.mkdir(parents=True)
config["output_path"] = output_path.as_posix()
logger.info(f"Output path: {config['output_path']}")
if "cuda" in device.type:
config["cuda device name"] = torch.cuda.get_device_name(local_rank)
if config["with_clearml"]:
from clearml import Task
task = Task.init("CIFAR10-Training", task_name=output_path.stem)
task.connect_configuration(config)
# Log hyper parameters
hyper_params = [
"model",
"batch_size",
"momentum",
"weight_decay",
"num_epochs",
"learning_rate",
"num_warmup_epochs",
]
task.connect({k: config[k] for k in hyper_params})
# Setup dataflow, model, optimizer, criterion
train_loader, test_loader = get_dataflow(config)
config["num_iters_per_epoch"] = len(train_loader)
model, optimizer, criterion, lr_scheduler = initialize(config)
# Create trainer for current task
trainer = create_trainer(model, optimizer, criterion, lr_scheduler, train_loader.sampler, config, logger)
# Let's now setup evaluator engine to perform model's validation and compute metrics
metrics = {
"Accuracy": Accuracy(),
"Loss": Loss(criterion),
}
# We define two evaluators as they won't have exactly the same role:
# - `evaluator` will save the best model based on validation score
evaluator = create_evaluator(model, metrics=metrics, config=config)
train_evaluator = create_evaluator(model, metrics=metrics, config=config)
def run_validation(engine):
epoch = trainer.state.epoch
state = train_evaluator.run(train_loader)
log_metrics(logger, epoch, state.times["COMPLETED"], "Train", state.metrics)
state = evaluator.run(test_loader)
log_metrics(logger, epoch, state.times["COMPLETED"], "Test", state.metrics)
trainer.add_event_handler(Events.EPOCH_COMPLETED(every=config["validate_every"]) | Events.COMPLETED, run_validation)
if rank == 0:
# Setup TensorBoard logging on trainer and evaluators. Logged values are:
# - Training metrics, e.g. running average loss values
# - Learning rate
# - Evaluation train/test metrics
evaluators = {"training": train_evaluator, "test": evaluator}
tb_logger = common.setup_tb_logging(output_path, trainer, optimizer, evaluators=evaluators)
# Store 2 best models by validation accuracy starting from num_epochs / 2:
best_model_handler = Checkpoint(
{"model": model},
get_save_handler(config),
filename_prefix="best",
n_saved=2,
global_step_transform=global_step_from_engine(trainer),
score_name="test_accuracy",
score_function=Checkpoint.get_default_score_fn("Accuracy"),
)
evaluator.add_event_handler(
Events.COMPLETED(lambda *_: trainer.state.epoch > config["num_epochs"] // 2), best_model_handler
)
# In order to check training resuming we can stop training on a given iteration
if config["stop_iteration"] is not None:
@trainer.on(Events.ITERATION_STARTED(once=config["stop_iteration"]))
def _():
logger.info(f"Stop training on {trainer.state.iteration} iteration")
trainer.terminate()
try:
trainer.run(train_loader, max_epochs=config["num_epochs"])
except Exception as e:
logger.exception("")
raise e
if rank == 0:
tb_logger.close()
def run(
seed: int = 543,
data_path: str = "/tmp/cifar10",
output_path: str = "/tmp/output-cifar10/",
model: str = "resnet18",
batch_size: int = 512,
momentum: float = 0.9,
weight_decay: float = 1e-4,
num_workers: int = 12,
num_epochs: int = 24,
learning_rate: float = 0.4,
num_warmup_epochs: int = 4,
validate_every: int = 3,
checkpoint_every: int = 1000,
backend: Optional[str] = None,
resume_from: Optional[str] = None,
log_every_iters: int = 15,
nproc_per_node: Optional[int] = None,
stop_iteration: Optional[int] = None,
with_clearml: bool = False,
with_amp: bool = False,
**spawn_kwargs: Any,
):
"""Main entry to train an model on CIFAR10 dataset.
Args:
seed (int): random state seed to set. Default, 543.
data_path (str): input dataset path. Default, "/tmp/cifar10".
output_path (str): output path. Default, "/tmp/output-cifar10".
model (str): model name (from torchvision) to setup model to train. Default, "resnet18".
batch_size (int): total batch size. Default, 512.
momentum (float): optimizer's momentum. Default, 0.9.
weight_decay (float): weight decay. Default, 1e-4.
num_workers (int): number of workers in the data loader. Default, 12.
num_epochs (int): number of epochs to train the model. Default, 24.
learning_rate (float): peak of piecewise linear learning rate scheduler. Default, 0.4.
num_warmup_epochs (int): number of warm-up epochs before learning rate decay. Default, 4.
validate_every (int): run model's validation every ``validate_every`` epochs. Default, 3.
checkpoint_every (int): store training checkpoint every ``checkpoint_every`` iterations. Default, 1000.
backend (str, optional): backend to use for distributed configuration. Possible values: None, "nccl", "xla-tpu",
"gloo" etc. Default, None.
nproc_per_node (int, optional): optional argument to setup number of processes per node. It is useful,
when main python process is spawning training as child processes.
resume_from (str, optional): path to checkpoint to use to resume the training from. Default, None.
log_every_iters (int): argument to log batch loss every ``log_every_iters`` iterations.
It can be 0 to disable it. Default, 15.
stop_iteration (int, optional): iteration to stop the training. Can be used to check resume from checkpoint.
with_clearml (bool): if True, experiment ClearML logger is setup. Default, False.
with_amp (bool): if True, enables native automatic mixed precision. Default, False.
**spawn_kwargs: Other kwargs to spawn run in child processes: master_addr, master_port, node_rank, nnodes
"""
# ensure num_epochs is strictly greater than num_warmup_epochs
if num_warmup_epochs >= num_epochs:
raise ValueError(
"num_epochs cannot be less than or equal to num_warmup_epochs, please increase num_epochs or decrease "
"num_warmup_epochs"
)
# catch all local parameters
config = locals()
config.update(config["spawn_kwargs"])
del config["spawn_kwargs"]
spawn_kwargs["nproc_per_node"] = nproc_per_node
if backend == "xla-tpu" and with_amp:
raise RuntimeError("The value of with_amp should be False if backend is xla")
with idist.Parallel(backend=backend, **spawn_kwargs) as parallel:
parallel.run(training, config)
def get_dataflow(config):
# - Get train/test datasets
with idist.one_rank_first(local=True):
train_dataset, test_dataset = utils.get_train_test_datasets(config["data_path"])
# Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu
train_loader = idist.auto_dataloader(
train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True
)
test_loader = idist.auto_dataloader(
test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False
)
return train_loader, test_loader
def initialize(config):
model = utils.get_model(config["model"])
# Adapt model for distributed settings if configured
model = idist.auto_model(model)
optimizer = optim.SGD(
model.parameters(),
lr=config["learning_rate"],
momentum=config["momentum"],
weight_decay=config["weight_decay"],
nesterov=True,
)
optimizer = idist.auto_optim(optimizer)
criterion = nn.CrossEntropyLoss().to(idist.device())
le = config["num_iters_per_epoch"]
milestones_values = [
(0, 0.0),
(le * config["num_warmup_epochs"], config["learning_rate"]),
(le * config["num_epochs"], 0.0),
]
lr_scheduler = PiecewiseLinear(optimizer, param_name="lr", milestones_values=milestones_values)
return model, optimizer, criterion, lr_scheduler
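# Descriptive note (not in the original file): the PiecewiseLinear milestones above ramp the
# learning rate linearly from 0 to `learning_rate` over `num_warmup_epochs` epochs, then decay
# it linearly back to 0 at the end of training. As a worked example with assumed numbers:
# with 98 iterations per epoch, learning_rate=0.4 and num_warmup_epochs=4, the lr peaks at 0.4
# on iteration 98 * 4 = 392 and reaches 0 again on iteration 98 * num_epochs.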
def log_metrics(logger, epoch, elapsed, tag, metrics):
metrics_output = "\n".join([f"\t{k}: {v}" for k, v in metrics.items()])
logger.info(f"Epoch[{epoch}] - Evaluation time (seconds): {elapsed:.3f}\n - {tag} metrics:\n {metrics_output}")
def log_basic_info(logger, config):
logger.info(f"Train {config['model']} on CIFAR10")
logger.info(f"- PyTorch version: {torch.__version__}")
logger.info(f"- Ignite version: {ignite.__version__}")
if torch.cuda.is_available():
# explicitly import cudnn as
# torch.backends.cudnn cannot be pickled when horovod spawns processes
from torch.backends import cudnn
logger.info(f"- GPU Device: {torch.cuda.get_device_name(idist.get_local_rank())}")
logger.info(f"- CUDA version: {torch.version.cuda}")
logger.info(f"- CUDNN version: {cudnn.version()}")
logger.info("\n")
logger.info("Configuration:")
for key, value in config.items():
logger.info(f"\t{key}: {value}")
logger.info("\n")
if idist.get_world_size() > 1:
logger.info("\nDistributed setting:")
logger.info(f"\tbackend: {idist.backend()}")
logger.info(f"\tworld size: {idist.get_world_size()}")
logger.info("\n")
def create_trainer(model, optimizer, criterion, lr_scheduler, train_sampler, config, logger):
device = idist.device()
# Setup Ignite trainer:
# - let's define training step
# - add other common handlers:
# - TerminateOnNan,
# - handler to setup learning rate scheduling,
# - ModelCheckpoint
# - `RunningAverage` on `train_step` output
# - Two progress bars on epochs and optionally on iterations
with_amp = config["with_amp"]
scaler = GradScaler(enabled=with_amp)
def train_step(engine, batch):
x, y = batch[0], batch[1]
if x.device != device:
x = x.to(device, non_blocking=True)
y = y.to(device, non_blocking=True)
model.train()
with autocast(enabled=with_amp):
y_pred = model(x)
loss = criterion(y_pred, y)
optimizer.zero_grad()
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
return {
"batch loss": loss.item(),
}
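# Descriptive note on the AMP pattern above (not in the original file): with `with_amp=True`
# the forward pass runs under autocast (mixed fp16/fp32), the loss is scaled before
# backward() to avoid fp16 gradient underflow, `scaler.step` unscales the gradients and
# skips the optimizer step if infs/NaNs are found, and `scaler.update` adapts the scale
# factor for the next iteration. With `with_amp=False` the scaler is a no-op and this
# reduces to a plain fp32 training step.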
trainer = Engine(train_step)
trainer.logger = logger
to_save = {"trainer": trainer, "model": model, "optimizer": optimizer, "lr_scheduler": lr_scheduler}
metric_names = [
"batch loss",
]
common.setup_common_training_handlers(
trainer=trainer,
train_sampler=train_sampler,
to_save=to_save,
save_every_iters=config["checkpoint_every"],
save_handler=get_save_handler(config),
lr_scheduler=lr_scheduler,
output_names=metric_names if config["log_every_iters"] > 0 else None,
with_pbars=False,
clear_cuda_cache=False,
)
resume_from = config["resume_from"]
if resume_from is not None:
checkpoint_fp = Path(resume_from)
assert checkpoint_fp.exists(), f"Checkpoint '{checkpoint_fp.as_posix()}' is not found"
logger.info(f"Resume from a checkpoint: {checkpoint_fp.as_posix()}")
checkpoint = torch.load(checkpoint_fp.as_posix(), map_location="cpu")
Checkpoint.load_objects(to_load=to_save, checkpoint=checkpoint)
return trainer
def create_evaluator(model, metrics, config, tag="val"):
with_amp = config["with_amp"]
device = idist.device()
@torch.no_grad()
def evaluate_step(engine: Engine, batch):
model.eval()
x, y = batch[0], batch[1]
if x.device != device:
x = x.to(device, non_blocking=True)
y = y.to(device, non_blocking=True)
with autocast(enabled=with_amp):
output = model(x)
return output, y
evaluator = Engine(evaluate_step)
for name, metric in metrics.items():
metric.attach(evaluator, name)
return evaluator
def get_save_handler(config):
if config["with_clearml"]:
from ignite.contrib.handlers.clearml_logger import ClearMLSaver
return ClearMLSaver(dirname=config["output_path"])
return DiskSaver(config["output_path"], require_empty=False)
if __name__ == "__main__":
fire.Fire({"run": run})
|
import os
from pathlib import Path
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torchvision import datasets, models
from torchvision.transforms import Compose, Normalize, Pad, RandomCrop, RandomHorizontalFlip, ToTensor
import ignite.distributed as idist
from ignite.contrib.engines import common
from ignite.contrib.handlers import ProgressBar
from ignite.engine import Engine, Events, create_supervised_evaluator
from ignite.metrics import Accuracy
in_colab = "COLAB_TPU_ADDR" in os.environ
with_torchrun = "WORLD_SIZE" in os.environ
train_transform = Compose(
[
Pad(4),
RandomCrop(32, fill=128),
RandomHorizontalFlip(),
ToTensor(),
Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
]
)
test_transform = Compose([ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
def get_train_test_datasets(path):
# - Get train/test datasets
if idist.get_rank() > 0:
# make the non-zero ranks wait here so that rank 0 downloads the dataset first
idist.barrier()
train_ds = datasets.CIFAR10(root=path, train=True, download=True, transform=train_transform)
test_ds = datasets.CIFAR10(root=path, train=False, download=False, transform=test_transform)
if idist.get_rank() == 0:
# rank 0 has finished downloading: release the waiting ranks
idist.barrier()
return train_ds, test_ds
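# Note (not in the original file): this barrier dance is roughly equivalent to wrapping the
# dataset construction in `idist.one_rank_first()`, as done in the full-featured CIFAR10
# example above.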
def get_model(name):
if name in models.__dict__:
fn = models.__dict__[name]
else:
raise RuntimeError(f"Unknown model name {name}")
return fn(num_classes=10)
def get_dataflow(config):
train_dataset, test_dataset = get_train_test_datasets(config.get("data_path", "."))
# Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu
train_loader = idist.auto_dataloader(
train_dataset,
batch_size=config.get("batch_size", 512),
num_workers=config.get("num_workers", 8),
shuffle=True,
drop_last=True,
)
config["num_iters_per_epoch"] = len(train_loader)
test_loader = idist.auto_dataloader(
test_dataset,
batch_size=2 * config.get("batch_size", 512),
num_workers=config.get("num_workers", 8),
shuffle=False,
)
return train_loader, test_loader
def initialize(config):
model = get_model(config["model"])
# Adapt model for distributed settings if configured
model = idist.auto_model(model)
optimizer = optim.SGD(
model.parameters(),
lr=config.get("learning_rate", 0.1),
momentum=config.get("momentum", 0.9),
weight_decay=config.get("weight_decay", 1e-5),
nesterov=True,
)
optimizer = idist.auto_optim(optimizer)
criterion = nn.CrossEntropyLoss().to(idist.device())
le = config["num_iters_per_epoch"]
lr_scheduler = StepLR(optimizer, step_size=le, gamma=0.9)
return model, optimizer, criterion, lr_scheduler
# slide 1 ####################################################################
def create_trainer(model, optimizer, criterion, lr_scheduler, config):
# Define any training logic for iteration update
def train_step(engine, batch):
x, y = batch[0].to(idist.device()), batch[1].to(idist.device())
model.train()
y_pred = model(x)
loss = criterion(y_pred, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
lr_scheduler.step()
return loss.item()
# Define trainer engine
trainer = Engine(train_step)
if idist.get_rank() == 0:
# Add any custom handlers
@trainer.on(Events.ITERATION_COMPLETED(every=200))
def save_checkpoint():
fp = Path(config.get("output_path", "output")) / "checkpoint.pt"
torch.save(model.state_dict(), fp)
# Add progress bar showing batch loss value
ProgressBar().attach(trainer, output_transform=lambda x: {"batch loss": x})
return trainer
# slide 2 ####################################################################
def training(local_rank, config):
# Setup dataflow and model, optimizer, criterion, lr_scheduler
train_loader, val_loader = get_dataflow(config)
model, optimizer, criterion, lr_scheduler = initialize(config)
# Setup model trainer and evaluator
trainer = create_trainer(model, optimizer, criterion, lr_scheduler, config)
evaluator = create_supervised_evaluator(model, metrics={"accuracy": Accuracy()}, device=idist.device())
# Run model evaluation every 3 epochs and show results
@trainer.on(Events.EPOCH_COMPLETED(every=3))
def evaluate_model():
state = evaluator.run(val_loader)
if idist.get_rank() == 0:
print(state.metrics)
# Setup tensorboard experiment tracking
if idist.get_rank() == 0:
tb_logger = common.setup_tb_logging(
config.get("output_path", "output"), trainer, optimizer, evaluators={"validation": evaluator},
)
trainer.run(train_loader, max_epochs=config.get("max_epochs", 3))
if idist.get_rank() == 0:
tb_logger.close()
# slide 3 ####################################################################
# Simply run everything on your infrastructure
# --- Single computation device ---
# $ python main.py
#
if __name__ == "__main__" and not (in_colab or with_torchrun):
backend = None
nproc_per_node = None
config = {
"model": "resnet18",
"dataset": "cifar10",
}
with idist.Parallel(backend=backend, nproc_per_node=nproc_per_node) as parallel:
parallel.run(training, config)
# --- Multiple GPUs ---
# $ torchrun --nproc_per_node=2 main.py
#
if __name__ == "__main__" and with_torchrun:
backend = "nccl" # or "nccl", "gloo", ...
nproc_per_node = None
config = {
"model": "resnet18",
"dataset": "cifar10",
}
with idist.Parallel(backend=backend, nproc_per_node=nproc_per_node) as parallel:
parallel.run(training, config)
# --- Multiple TPUs ---
# In Colab
#
if in_colab:
backend = "xla-tpu"
nproc_per_node = 8
config = {
"model": "resnet18",
"dataset": "cifar10",
}
with idist.Parallel(backend=backend, nproc_per_node=nproc_per_node) as parallel:
parallel.run(training, config)
# Full featured CIFAR10 example:
# https://github.com/pytorch/ignite/tree/master/examples/cifar10
|
import torch
import torchvision
from torch.utils.mobile_optimizer import optimize_for_mobile
model = torchvision.models.mobilenet_v2(pretrained=True)
model.eval()
example = torch.rand(1, 3, 224, 224)
traced_script_module = torch.jit.trace(model, example)
torchscript_model_optimized = optimize_for_mobile(traced_script_module)
torchscript_model_optimized._save_for_lite_interpreter("HelloWorld/HelloWorld/model/model.pt")
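# Hedged sanity check (not part of the original script): before exporting, the traced module
# should reproduce the eager model's output on the example input.
#
#   with torch.no_grad():
#       assert torch.allclose(model(example), traced_script_module(example), atol=1e-5)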
|
from typing import Dict, List, Optional, Tuple
import json
import math
from fairseq.data import Dictionary
import torch
import torchaudio
from torchaudio.pipelines import EMFORMER_RNNT_BASE_LIBRISPEECH
from torchaudio.models import Hypothesis
def get_hypo_tokens(hypo: Hypothesis) -> List[int]:
return hypo[0]
def get_hypo_score(hypo: Hypothesis) -> float:
return hypo[3]
def to_string(input: List[int], tgt_dict: List[str], bos_idx: int = 0, eos_idx: int = 2, separator: str = "",) -> str:
# torchscript dislikes sets
extra_symbols_to_ignore: Dict[int, int] = {}
extra_symbols_to_ignore[eos_idx] = 1
extra_symbols_to_ignore[bos_idx] = 1
# it also dislikes comprehensions with conditionals
filtered_idx: List[int] = []
for idx in input:
if idx not in extra_symbols_to_ignore:
filtered_idx.append(idx)
return separator.join([tgt_dict[idx] for idx in filtered_idx]).replace("\u2581", " ")
def post_process_hypos(
hypos: List[Hypothesis], tgt_dict: List[str],
) -> List[Tuple[str, List[float], List[int]]]:
post_process_remove_list = [
3, # unk
2, # eos
1, # pad
]
hypos_str: List[str] = []
for h in hypos:
filtered_tokens: List[int] = []
for token_index in get_hypo_tokens(h)[1:]:
if token_index not in post_process_remove_list:
filtered_tokens.append(token_index)
string = to_string(filtered_tokens, tgt_dict)
hypos_str.append(string)
hypos_ids = [get_hypo_tokens(h)[1:] for h in hypos]
hypos_score = [[math.exp(get_hypo_score(h))] for h in hypos]
nbest_batch = list(zip(hypos_str, hypos_score, hypos_ids))
return nbest_batch
def _piecewise_linear_log(x):
x[x > math.e] = torch.log(x[x > math.e])
x[x <= math.e] = x[x <= math.e] / math.e
return x
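# Descriptive note (not in the original file): `_piecewise_linear_log` applies log(x) for
# x > e and x / e for x <= e, so the two pieces meet continuously at x = e (both equal 1).
# A hedged worked check:
#
#   x = torch.tensor([math.e / 2, math.e, 2 * math.e])
#   _piecewise_linear_log(x)  # -> tensor([0.5000, 1.0000, 1.6931])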
class ModelWrapper(torch.nn.Module):
def __init__(self, tgt_dict: List[str]):
super().__init__()
self.transform = torchaudio.transforms.MelSpectrogram(sample_rate=16000, n_fft=400, n_mels=80, hop_length=160)
self.decoder = EMFORMER_RNNT_BASE_LIBRISPEECH.get_decoder()
self.tgt_dict = tgt_dict
with open("global_stats.json") as f:
blob = json.loads(f.read())
self.mean = torch.tensor(blob["mean"])
self.invstddev = torch.tensor(blob["invstddev"])
self.decibel = 2 * 20 * math.log10(32767)
self.gain = pow(10, 0.05 * self.decibel)
def forward(
self, input: torch.Tensor, prev_hypo: Optional[Hypothesis], prev_state: Optional[List[List[torch.Tensor]]]
) -> Tuple[str, Hypothesis, Optional[List[List[torch.Tensor]]]]:
spectrogram = self.transform(input).transpose(1, 0)
features = _piecewise_linear_log(spectrogram * self.gain).unsqueeze(0)[:, :-1]
features = (features - self.mean) * self.invstddev
length = torch.tensor([features.shape[1]])
hypotheses, state = self.decoder.infer(features, length, 10, state=prev_state, hypothesis=prev_hypo)
transcript = post_process_hypos(hypotheses[:1], self.tgt_dict)[0][0]
return transcript, hypotheses[0], state
tgt_dict = Dictionary.load("spm_bpe_4096_fairseq.dict")
wrapper = ModelWrapper(tgt_dict.symbols)
wrapper = torch.jit.script(wrapper)
wrapper.save("scripted_wrapper_tuple.pt")
|