# ---- tests for ignite.metrics.gan.InceptionScore ----
import os
import pytest
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics.gan.inception_score import InceptionScore
def calculate_inception_score(p_yx):
p_y = torch.unsqueeze(p_yx.mean(axis=0), 0)
kl_d = torch.kl_div(torch.log(p_y), p_yx)
sum_kl_d = kl_d.sum(axis=1)
avg_kl_d = torch.mean(sum_kl_d)
is_score = torch.exp(avg_kl_d)
return is_score
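# The helper above follows the standard Inception Score definition:
#     IS = exp( E_x [ KL( p(y|x) || p(y) ) ] ),  with p(y) = E_x [ p(y|x) ]
# where p_yx holds p(y|x) for a batch of samples, and torch.kl_div(input, target) computes
# target * (log(target) - input) elementwise, i.e. p(y|x) * (log p(y|x) - log p(y)).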
def test_inception_score():
p_yx = torch.rand(20, 10)
m = InceptionScore(num_features=10, feature_extractor=torch.nn.Identity())
m.update(p_yx)
assert pytest.approx(calculate_inception_score(p_yx)) == m.compute()
p_yx = torch.rand(20, 3, 299, 299)
m = InceptionScore()
m.update(p_yx)
assert isinstance(m.compute(), float)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_device_mismatch_cuda():
p_yx = torch.rand(20, 10).to("cpu")
m = InceptionScore(num_features=10, feature_extractor=torch.nn.Identity().to("cpu"), device="cuda")
m.update(p_yx)
assert pytest.approx(calculate_inception_score(p_yx)) == m.compute()
def test_wrong_inputs():
with pytest.raises(ValueError, match=r"Argument num_features must be greater to zero, got:"):
InceptionScore(num_features=-1, feature_extractor=torch.nn.Identity()).update(torch.rand(2, 0))
with pytest.raises(ValueError, match=r"feature_extractor output must be a tensor of dim 2, got: 1"):
InceptionScore(num_features=1000, feature_extractor=torch.nn.Identity()).update(torch.rand(3))
with pytest.raises(ValueError, match=r"Batch size should be greater than one, got: 0"):
InceptionScore(num_features=1000, feature_extractor=torch.nn.Identity()).update(torch.rand(0, 0))
with pytest.raises(ValueError, match=r"num_features returned by feature_extractor should be 1000, got: 0"):
InceptionScore(num_features=1000, feature_extractor=torch.nn.Identity()).update(torch.rand(2, 0))
with pytest.raises(
NotComputableError, match=r"InceptionScore must have at least one example before it can be computed."
):
InceptionScore(num_features=1000, feature_extractor=torch.nn.Identity()).compute()
with pytest.raises(ValueError, match=r"Argument num_features must be provided, if feature_extractor is specified."):
InceptionScore(feature_extractor=torch.nn.Identity())
def _test_distrib_integration(device):
from ignite.engine import Engine
rank = idist.get_rank()
torch.manual_seed(12)
def _test(metric_device):
n_iters = 60
s = 16
offset = n_iters * s
n_probabilities = 10
y = torch.rand(offset * idist.get_world_size(), n_probabilities)
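# each rank consumes its own contiguous shard of y: rows [rank * offset, (rank + 1) * offset)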
def update(_, i):
return y[i * s + rank * offset : (i + 1) * s + rank * offset, :]
engine = Engine(update)
m = InceptionScore(num_features=n_probabilities, feature_extractor=torch.nn.Identity(), device=metric_device)
m.attach(engine, "InceptionScore")
engine.run(data=list(range(n_iters)), max_epochs=1)
assert "InceptionScore" in engine.state.metrics
assert pytest.approx(calculate_inception_score(y), rel=1e-5) == m.compute()
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
_test(metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_gpu(local_rank, distributed_context_single_node_nccl):
device = torch.device(f"cuda:{local_rank}")
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_cpu(distributed_context_single_node_gloo):
device = torch.device("cpu")
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_cpu(distributed_context_multi_node_gloo):
device = torch.device("cpu")
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gpu(distributed_context_multi_node_nccl):
device = torch.device(f"cuda:{distributed_context_multi_node_nccl['local_rank']}")
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
# ---- tests for ignite.metrics.gan.FID ----
import os
import re
from unittest.mock import patch
import pytest
import pytorch_fid.fid_score as pytorch_fid_score
import scipy
import torch
from numpy import cov
import ignite.distributed as idist
from ignite.metrics.gan.fid import FID, fid_score
@pytest.fixture()
def mock_no_scipy():
with patch.dict("sys.modules", {"scipy": None}):
yield scipy
def test_no_scipy(mock_no_scipy):
with pytest.raises(ModuleNotFoundError, match=r"This module requires scipy to be installed."):
FID()
with pytest.raises(ModuleNotFoundError, match=r"fid_score requires scipy to be installed."):
fid_score(0, 0, 0, 0)
@pytest.fixture()
def mock_no_numpy():
with patch.dict("sys.modules", {"numpy": None}):
yield
def test_no_numpy(mock_no_numpy):
with pytest.raises(ModuleNotFoundError, match=r"This module requires numpy to be installed."):
FID()
with pytest.raises(ModuleNotFoundError, match=r"fid_score requires numpy to be installed."):
fid_score(0, 0, 0, 0)
def test_fid_function():
train_samples, test_samples = torch.rand(10, 10), torch.rand(10, 10)
mu1, sigma1 = train_samples.mean(axis=0), cov(train_samples, rowvar=False)
mu2, sigma2 = test_samples.mean(axis=0), cov(test_samples, rowvar=False)
sigma1 = torch.tensor(sigma1, dtype=torch.float64)
sigma2 = torch.tensor(sigma2, dtype=torch.float64)
assert pytest.approx(fid_score(mu1, mu2, sigma1, sigma2), rel=1e-5) == pytorch_fid_score.calculate_frechet_distance(
mu1, sigma1, mu2, sigma2
)
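# The quantity compared above is the Frechet distance
#     FID = ||mu1 - mu2||^2 + Tr(sigma1 + sigma2 - 2 * (sigma1 @ sigma2)^(1/2)).
# Below is a minimal, purely illustrative sketch of that formula; the helper is not used by the
# tests and only assumes numpy/scipy, which this module already imports. ignite's fid_score and
# pytorch_fid compute the same quantity with extra numerical safeguards.
def _naive_frechet_distance(mu1, sigma1, mu2, sigma2):
    import numpy as np
    from scipy import linalg

    mu1, mu2 = np.asarray(mu1, dtype=np.float64), np.asarray(mu2, dtype=np.float64)
    sigma1, sigma2 = np.asarray(sigma1, dtype=np.float64), np.asarray(sigma2, dtype=np.float64)
    diff = mu1 - mu2
    # matrix square root of the covariance product; can come back complex for near-singular inputs
    covmean, _ = linalg.sqrtm(sigma1 @ sigma2, disp=False)
    return float(diff @ diff + np.trace(sigma1 + sigma2 - 2.0 * covmean.real))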
def test_compute_fid_from_features():
train_samples, test_samples = torch.rand(10, 10), torch.rand(10, 10)
fid_scorer = FID(num_features=10, feature_extractor=torch.nn.Identity())
fid_scorer.update([train_samples[:5], test_samples[:5]])
fid_scorer.update([train_samples[5:], test_samples[5:]])
mu1, sigma1 = train_samples.mean(axis=0), cov(train_samples, rowvar=False)
mu2, sigma2 = test_samples.mean(axis=0), cov(test_samples, rowvar=False)
assert (
pytest.approx(pytorch_fid_score.calculate_frechet_distance(mu1, sigma1, mu2, sigma2), rel=1e-5)
== fid_scorer.compute()
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_device_mismatch_cuda():
train_samples, test_samples = torch.rand(10, 10).to("cpu"), torch.rand(10, 10).to("cpu")
fid_scorer = FID(num_features=10, feature_extractor=torch.nn.Identity().to("cpu"), device="cuda")
fid_scorer.update([train_samples[:5], test_samples[:5]])
fid_scorer.update([train_samples[5:], test_samples[5:]])
mu1, sigma1 = train_samples.mean(axis=0), cov(train_samples, rowvar=False)
mu2, sigma2 = test_samples.mean(axis=0), cov(test_samples, rowvar=False)
assert (
pytest.approx(pytorch_fid_score.calculate_frechet_distance(mu1, sigma1, mu2, sigma2), rel=1e-4)
== fid_scorer.compute()
)
def test_compute_fid_sqrtm():
mu1 = torch.tensor([0, 0])
mu2 = torch.tensor([0, 0])
sigma1 = torch.tensor([[-1, 1], [1, 1]], dtype=torch.float64)
sigma2 = torch.tensor([[1, 0], [0, 1]], dtype=torch.float64)
with pytest.raises(ValueError, match=r"Imaginary component "):
fid_score(mu1, mu2, sigma1, sigma2)
sigma1 = torch.ones((2, 2), dtype=torch.float64) * torch.finfo(torch.float64).max
sigma2 = torch.tensor([[1, 0.5], [0, 0.5]], dtype=torch.float64)
assert torch.isinf(torch.tensor(fid_score(mu1, mu2, sigma1, sigma2)))
def test_wrong_inputs():
with pytest.raises(ValueError, match=r"Argument num_features must be greater to zero"):
FID(num_features=-1, feature_extractor=torch.nn.Identity())
with pytest.raises(ValueError, match=r"feature_extractor output must be a tensor of dim 2, got: 1"):
FID(num_features=1, feature_extractor=torch.nn.Identity()).update(torch.tensor([[], []]))
with pytest.raises(ValueError, match=r"Batch size should be greater than one, got: 0"):
FID(num_features=1, feature_extractor=torch.nn.Identity()).update(torch.rand(2, 0, 0))
with pytest.raises(ValueError, match=r"num_features returned by feature_extractor should be 1, got: 0"):
FID(num_features=1, feature_extractor=torch.nn.Identity()).update(torch.rand(2, 2, 0))
err_str = (
"Number of Training Features and Testing Features should be equal (torch.Size([9, 2]) != torch.Size([5, 2]))"
)
with pytest.raises(ValueError, match=re.escape(err_str)):
FID(num_features=2, feature_extractor=torch.nn.Identity()).update((torch.rand(9, 2), torch.rand(5, 2)))
with pytest.raises(TypeError, match=r"Argument feature_extractor must be of type torch.nn.Module"):
FID(num_features=1, feature_extractor=lambda x: x)
with pytest.raises(ValueError, match=r"Argument num_features must be provided, if feature_extractor is specified."):
FID(feature_extractor=torch.nn.Identity())
def test_statistics():
train_samples, test_samples = torch.rand(10, 10), torch.rand(10, 10)
fid_scorer = FID(num_features=10, feature_extractor=torch.nn.Identity())
fid_scorer.update([train_samples[:5], test_samples[:5]])
fid_scorer.update([train_samples[5:], test_samples[5:]])
mu1, sigma1 = train_samples.mean(axis=0), torch.tensor(cov(train_samples, rowvar=False))
mu2, sigma2 = test_samples.mean(axis=0), torch.tensor(cov(test_samples, rowvar=False))
fid_mu1 = fid_scorer._train_total / fid_scorer._num_examples
fid_sigma1 = fid_scorer._get_covariance(fid_scorer._train_sigma, fid_scorer._train_total)
fid_mu2 = fid_scorer._test_total / fid_scorer._num_examples
fid_sigma2 = fid_scorer._get_covariance(fid_scorer._test_sigma, fid_scorer._test_total)
assert torch.isclose(mu1.double(), fid_mu1).all()
for cov1, cov2 in zip(sigma1, fid_sigma1):
assert torch.isclose(cov1.double(), cov2, rtol=1e-04, atol=1e-04).all()
assert torch.isclose(mu2.double(), fid_mu2).all()
for cov1, cov2 in zip(sigma2, fid_sigma2):
assert torch.isclose(cov1.double(), cov2, rtol=1e-04, atol=1e-04).all()
def _test_distrib_integration(device):
from ignite.engine import Engine
rank = idist.get_rank()
torch.manual_seed(12)
def _test(metric_device):
n_iters = 60
s = 16
offset = n_iters * s
n_features = 10
y_pred = torch.rand(offset * idist.get_world_size(), n_features)
y_true = torch.rand(offset * idist.get_world_size(), n_features)
def update(_, i):
return (
y_pred[i * s + rank * offset : (i + 1) * s + rank * offset, :],
y_true[i * s + rank * offset : (i + 1) * s + rank * offset, :],
)
engine = Engine(update)
m = FID(num_features=n_features, feature_extractor=torch.nn.Identity(), device=metric_device)
m.attach(engine, "fid")
engine.run(data=list(range(n_iters)), max_epochs=1)
assert "fid" in engine.state.metrics
evaluator = pytorch_fid_score.calculate_frechet_distance
mu1, sigma1 = y_pred.mean(axis=0).to("cpu"), cov(y_pred.to("cpu"), rowvar=False)
mu2, sigma2 = y_true.mean(axis=0).to("cpu"), cov(y_true.to("cpu"), rowvar=False)
assert pytest.approx(evaluator(mu1, sigma1, mu2, sigma2), rel=1e-5) == m.compute()
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
_test(metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_gpu(local_rank, distributed_context_single_node_nccl):
device = torch.device(f"cuda:{local_rank}")
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_cpu(distributed_context_single_node_gloo):
device = torch.device("cpu")
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_cpu(distributed_context_multi_node_gloo):
device = torch.device("cpu")
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gpu(distributed_context_multi_node_nccl):
device = torch.device(f"cuda:{distributed_context_multi_node_nccl['local_rank']}")
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
# ---- tests for ignite.metrics.nlp.utils (ngrams, lcs, modified_precision) ----
import pytest
from ignite.metrics.nlp.utils import lcs, modified_precision, ngrams
@pytest.mark.parametrize(
"sequence, n, expected_keys, expected_values",
[
([], 1, [], []),
([0, 1, 2], 1, [(0,), (1,), (2,)], [1, 1, 1]),
([0, 1, 2], 2, [(0, 1), (1, 2)], [1, 1]),
([0, 1, 2], 3, [(0, 1, 2)], [1]),
([0, 0, 0], 1, [(0,)], [3]),
([0, 0, 0], 2, [(0, 0)], [2]),
("abcde", 4, [("a", "b", "c", "d"), ("b", "c", "d", "e")], [1, 1]),
],
)
def test_ngrams(sequence, n, expected_keys, expected_values):
ngrams_counter = ngrams(sequence=sequence, n=n)
assert list(ngrams_counter.values()) == expected_values
assert list(ngrams_counter.keys()) == expected_keys
@pytest.mark.parametrize(
"seq_a, seq_b, expected",
[([], [], 0), ([0, 1, 2], [0, 1, 2], 3), ([0, 1, 2], [0, 3, 2], 2), ("academy", "abracadabra", 4)],
)
def test_lcs(seq_a, seq_b, expected):
assert lcs(seq_a, seq_b) == expected
def test_modified_precision_empty():
for k in range(1, 5):
n, d = modified_precision([[]], [], k)
assert n == 0 and d == 0
n, d = modified_precision([[]], [0], k)
assert n == 0 and d == (k == 1)
n, d = modified_precision([[0]], [], k)
assert n == 0 and d == 0
n, d = modified_precision([[]], list(range(k)), k)
assert n == 0 and d == 1
n, d = modified_precision([list(range(k))], [], k)
assert n == 0 and d == 0
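# Each "expected" tuple below gives (clipped numerator, denominator) for n = 1, 2, 3. For example,
# with candidate [0, 0, 0, 1, 2] and references [[0, 1, 2], [0, 0, 3]], the candidate contains five
# unigrams but the count of token 0 is clipped to its maximum reference count (2), so the modified
# unigram precision is (2 + 1 + 1) / 5 = 4 / 5.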
@pytest.mark.parametrize(
"references, candidate, expected",
[
([[0, 0, 0], [1, 2]], [1, 2, 3, 4], ((2, 4), (1, 3), (0, 2))),
([[0, 1, 2], [0, 0, 3]], [0, 0, 0, 1, 2], ((4, 5), (3, 4), (1, 3))),
([[0, 1, 2], [3, 0, 3]], [3, 0, 0, 1, 2], ((4, 5), (3, 4), (1, 3))),
],
)
def test_modified_precision(references, candidate, expected):
for n, (e_n, e_d) in enumerate(expected, start=1):
n, d = modified_precision(references, candidate, n)
assert n == e_n and d == e_d
# ---- tests for ignite.metrics.nlp.Bleu ----
import os
import warnings
from collections import Counter
import pytest
import torch
from nltk.translate.bleu_score import corpus_bleu, sentence_bleu, SmoothingFunction
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics.nlp import Bleu
from . import CorpusForTest
corpus = CorpusForTest(lower_split=True)
def test_wrong_inputs():
with pytest.raises(ValueError, match=r"ngram order must be greater than zero"):
Bleu(ngram=0)
with pytest.raises(ValueError, match=r"Smooth is not valid"):
Bleu(smooth="fake")
with pytest.raises(ValueError, match=r"nb of candidates should be equal to nb of reference lists"):
Bleu()._corpus_bleu(references=[[0], [0]], candidates=[[0]])
with pytest.raises(NotComputableError):
Bleu().compute()
with pytest.raises(ValueError, match='Average must be either "macro" or "micro"'):
Bleu(average="macros")
parametrize_args = (
"candidates, references",
[
([["a", "a", "a", "b", "c"]], [[["a", "b", "c"], ["a", "a", "d"]]]),
corpus.sample_1,
corpus.sample_2,
corpus.sample_3,
corpus.sample_4,
],
)
def _test(candidates, references, average, smooth="no_smooth", smooth_nltk_fn=None, ngram_range=8):
for i in range(1, ngram_range):
weights = tuple([1 / i] * i)
bleu = Bleu(ngram=i, average=average, smooth=smooth)
if average == "macro":
with warnings.catch_warnings():
warnings.simplefilter("ignore")
reference = sentence_bleu(
references[0], candidates[0], weights=weights, smoothing_function=smooth_nltk_fn
)
assert pytest.approx(reference) == bleu._sentence_bleu(references[0], candidates[0])
elif average == "micro":
with warnings.catch_warnings():
warnings.simplefilter("ignore")
reference = corpus_bleu(references, candidates, weights=weights, smoothing_function=smooth_nltk_fn)
assert pytest.approx(reference) == bleu._corpus_bleu(references, candidates)
bleu.update((candidates, references))
assert pytest.approx(reference) == bleu.compute()
@pytest.mark.parametrize(*parametrize_args)
def test_macro_bleu(candidates, references):
_test(candidates, references, "macro")
@pytest.mark.parametrize(*parametrize_args)
def test_micro_bleu(candidates, references):
_test(candidates, references, "micro")
@pytest.mark.parametrize(*parametrize_args)
def test_macro_bleu_smooth1(candidates, references):
_test(candidates, references, "macro", "smooth1", SmoothingFunction().method1)
@pytest.mark.parametrize(*parametrize_args)
def test_micro_bleu_smooth1(candidates, references):
_test(candidates, references, "micro", "smooth1", SmoothingFunction().method1)
@pytest.mark.parametrize(*parametrize_args)
def test_macro_bleu_nltk_smooth2(candidates, references):
_test(candidates, references, "macro", "nltk_smooth2", SmoothingFunction().method2)
@pytest.mark.parametrize(*parametrize_args)
def test_micro_bleu_nltk_smooth2(candidates, references):
_test(candidates, references, "micro", "nltk_smooth2", SmoothingFunction().method2)
@pytest.mark.parametrize(*parametrize_args)
def test_macro_bleu_smooth2(candidates, references):
_test(candidates, references, "macro", "smooth2", SmoothingFunction().method2, 3)
@pytest.mark.parametrize(*parametrize_args)
def test_micro_bleu_smooth2(candidates, references):
_test(candidates, references, "micro", "smooth2", SmoothingFunction().method2, 3)
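# In the accumulation tests below, "macro" averaging means the mean of per-update sentence-level
# BLEU scores, while "micro" pools the n-gram statistics across all updates and computes a single
# corpus-level BLEU, matching nltk's sentence_bleu and corpus_bleu used as references above.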
def test_accumulation_macro_bleu():
bleu = Bleu(ngram=4, smooth="smooth2")
bleu.update(([corpus.cand_1], [corpus.references_1]))
bleu.update(([corpus.cand_2a], [corpus.references_2]))
bleu.update(([corpus.cand_2b], [corpus.references_2]))
bleu.update(([corpus.cand_3], [corpus.references_2]))
value = bleu._sentence_bleu(corpus.references_1, corpus.cand_1)
value += bleu._sentence_bleu(corpus.references_2, corpus.cand_2a)
value += bleu._sentence_bleu(corpus.references_2, corpus.cand_2b)
value += bleu._sentence_bleu(corpus.references_2, corpus.cand_3)
assert bleu.compute() == value / 4
def test_accumulation_micro_bleu():
bleu = Bleu(ngram=4, smooth="smooth2", average="micro")
bleu.update(([corpus.cand_1], [corpus.references_1]))
bleu.update(([corpus.cand_2a], [corpus.references_2]))
bleu.update(([corpus.cand_2b], [corpus.references_2]))
bleu.update(([corpus.cand_3], [corpus.references_2]))
value = bleu._corpus_bleu(
[corpus.references_1, corpus.references_2, corpus.references_2, corpus.references_2],
[corpus.cand_1, corpus.cand_2a, corpus.cand_2b, corpus.cand_3],
)
assert bleu.compute() == value
def test_bleu_batch_macro():
bleu = Bleu(ngram=4)
# Batch size 3
hypotheses = [corpus.cand_1, corpus.cand_2a, corpus.cand_2b]
refs = [corpus.references_1, corpus.references_2, corpus.references_2]
bleu.update((hypotheses, refs))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
reference_bleu_score = (
sentence_bleu(refs[0], hypotheses[0])
+ sentence_bleu(refs[1], hypotheses[1])
+ sentence_bleu(refs[2], hypotheses[2])
) / 3
assert pytest.approx(bleu.compute()) == reference_bleu_score
value = 0
for _hypotheses, _refs in zip(hypotheses, refs):
value += bleu._sentence_bleu(_refs, _hypotheses)
bleu.update(([_hypotheses], [_refs]))
ref_1 = value / len(refs)
ref_2 = bleu.compute()
assert pytest.approx(ref_1) == reference_bleu_score
assert pytest.approx(ref_2) == reference_bleu_score
def test_bleu_batch_micro():
bleu = Bleu(ngram=4, average="micro")
# Batch size 3
hypotheses = [corpus.cand_1, corpus.cand_2a, corpus.cand_2b]
refs = [corpus.references_1, corpus.references_2, corpus.references_2]
bleu.update((hypotheses, refs))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
reference_bleu_score = corpus_bleu(refs, hypotheses)
assert pytest.approx(bleu.compute()) == reference_bleu_score
assert pytest.approx(bleu._corpus_bleu(refs, hypotheses)) == reference_bleu_score
@pytest.mark.parametrize(
"candidates, references",
[
(corpus.cand_1, corpus.references_1),
(corpus.cand_2a, corpus.references_2),
(corpus.cand_2b, corpus.references_2),
(corpus.cand_1, corpus.references_1),
],
)
def test_n_gram_counter(candidates, references):
bleu = Bleu(ngram=4)
hyp_length, ref_length = bleu._n_gram_counter([references], [candidates], Counter(), Counter())
assert hyp_length == len(candidates)
ref_lens = (len(reference) for reference in references)
closest_ref_len = min(ref_lens, key=lambda ref_len: (abs(ref_len - len(candidates)), ref_len))
assert ref_length == closest_ref_len
def _test_macro_distrib_integration(device):
from ignite.engine import Engine
rank = idist.get_rank()
size = len(corpus.chunks)
data = []
for c in corpus.chunks:
data += idist.get_world_size() * [c]
def update(_, i):
return data[i + size * rank]
def _test(metric_device):
engine = Engine(update)
m = Bleu(ngram=4, smooth="smooth2")
m.attach(engine, "bleu")
engine.run(data=list(range(size)), max_epochs=1)
assert "bleu" in engine.state.metrics
ref_bleu = 0
for candidates, references in data:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ref_bleu += sentence_bleu(
references[0],
candidates[0],
weights=[0.25, 0.25, 0.25, 0.25],
smoothing_function=SmoothingFunction().method2,
)
assert pytest.approx(engine.state.metrics["bleu"]) == ref_bleu / len(data)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_micro_distrib_integration(device):
from ignite.engine import Engine
rank = idist.get_rank()
size = len(corpus.chunks)
data = []
for c in corpus.chunks:
data += idist.get_world_size() * [c]
def update(_, i):
return data[i + size * rank]
def _test(metric_device):
engine = Engine(update)
m = Bleu(ngram=4, smooth="smooth2", average="micro")
m.attach(engine, "bleu")
engine.run(data=list(range(size)), max_epochs=1)
assert "bleu" in engine.state.metrics
ref_bleu = 0
references = []
candidates = []
for _candidates, _references in data:
references.append(_references[0])
candidates.append(_candidates[0])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ref_bleu += corpus_bleu(
references,
candidates,
weights=[0.25, 0.25, 0.25, 0.25],
smoothing_function=SmoothingFunction().method2,
)
assert pytest.approx(engine.state.metrics["bleu"]) == ref_bleu
_test("cpu")
if device.type != "xla":
_test(idist.device())
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_macro_distrib_integration(device)
_test_micro_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_macro_distrib_integration(device)
_test_micro_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_macro_distrib_integration, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_micro_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_macro_distrib_integration(device)
_test_micro_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_macro_distrib_integration(device)
_test_micro_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_macro_distrib_integration(device)
_test_micro_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_macro_distrib_integration(device)
_test_micro_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
# ---- shared NLP test corpus: CorpusForTest ----
__all__ = ["CorpusForTest"]
class CorpusForTest:
def __init__(self, lower_split=False):
def preproc(text):
if lower_split:
return text.lower().split()
else:
return text
# BLEU Paper examples
self.cand_1 = preproc("the the the the the the the")
self.ref_1a = preproc("The cat is on the mat")
self.ref_1b = preproc("There is a cat on the mat")
self.cand_2a = preproc(
"It is a guide to action which ensures that the military always obeys the commands of the party"
)
self.cand_2b = preproc("It is to insure the troops forever hearing the activity guidebook that " "party direct")
self.ref_2a = preproc(
"It is a guide to action that ensures that the military will forever heed " "Party commands"
)
self.ref_2b = preproc(
"It is the guiding principle which guarantees the military forces always being under the command of "
"the Party"
)
self.ref_2c = preproc("It is the practical guide for the army always to heed the directions of the party")
self.cand_3 = preproc("of the")
self.references_1 = [self.ref_1a, self.ref_1b]
self.references_2 = [self.ref_2a, self.ref_2b, self.ref_2c]
self.sample_1 = ([self.cand_1], [self.references_1])
self.sample_2 = ([self.cand_3], [self.references_2])
self.sample_3 = ([self.cand_2a], [self.references_2])
self.sample_4 = ([self.cand_2b], [self.references_2])
self.sample_5 = ([self.cand_2a, self.cand_2b], [self.references_2, self.references_2])
self.references_3 = [self.ref_2a, self.ref_2b]
self.references_4 = [self.ref_2b, self.ref_2c]
self.references_5 = [self.ref_2a, self.ref_2c]
self.chunks = [
([self.cand_1], [self.references_1]),
([self.cand_2a], [self.references_2]),
([self.cand_2b], [self.references_2]),
([self.cand_1], [[self.ref_1a]]),
([self.cand_2a], [self.references_3]),
([self.cand_2b], [self.references_3]),
([self.cand_1], [[self.ref_1b]]),
([self.cand_2a], [self.references_4]),
([self.cand_2b], [self.references_4]),
([self.cand_1], [self.references_1]),
([self.cand_2a], [self.references_5]),
([self.cand_2b], [self.references_5]),
([self.cand_1], [[self.ref_1a]]),
([self.cand_2a], [[self.ref_2a]]),
([self.cand_2b], [[self.ref_2c]]),
]
# ---- tests for ignite.metrics.nlp.Rouge ----
import os
import nltk
import pytest
import rouge as pyrouge
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics.nlp import Rouge
from ignite.metrics.nlp.rouge import compute_ngram_scores, RougeL, RougeN
from . import CorpusForTest
nltk.download("punkt")
corpus = CorpusForTest()
@pytest.mark.parametrize(
"candidate, reference, n, expected_precision, expected_recall",
[
([], [], 1, 0, 0),
("abc", "ab", 1, 2 / 3, 2 / 2),
("abc", "ab", 2, 1 / 2, 1 / 1),
("abc", "ab", 3, 0, 0),
("ab", "abc", 1, 2 / 2, 2 / 3),
("ab", "cde", 1, 0 / 2, 0 / 3),
("aab", "aace", 1, 2 / 3, 2 / 4),
("aa", "aaa", 1, 2 / 2, 2 / 3),
("aaa", "aa", 1, 2 / 3, 2 / 2),
],
)
def test_compute_ngram_scores(candidate, reference, n, expected_precision, expected_recall):
scores = compute_ngram_scores(candidate, reference, n=n)
assert pytest.approx(scores.precision()) == expected_precision
assert pytest.approx(scores.recall()) == expected_recall
def test_wrong_inputs():
with pytest.raises(ValueError, match=r"ngram order must be greater than zero"):
RougeN(ngram=0)
with pytest.raises(ValueError, match=r"alpha must be in interval \[0, 1\]"):
RougeN(alpha=-1)
with pytest.raises(ValueError, match=r"alpha must be in interval \[0, 1\]"):
RougeN(alpha=2)
with pytest.raises(ValueError, match=r"multiref : valid values are \['best', 'average'\] "):
RougeN(multiref="")
with pytest.raises(ValueError, match=r"variant must be 'L' or integer greater to zero"):
Rouge(variants=["error"])
with pytest.raises(NotComputableError):
RougeL().compute()
with pytest.raises(ValueError):
Rouge(multiref="unknown")
@pytest.mark.parametrize(
"ngram, candidate, reference, expected",
[
(1, [1, 2, 3], [1, 2], (2 / 3, 2 / 2)),
(2, [1, 2, 3], [1, 2], (1 / 2, 1 / 1)),
(1, "abcdef", "zbdfz", (3 / 6, 3 / 5)),
(2, "abcdef", "zbdfz", (0, 0)),
],
)
def test_rouge_n_alpha(ngram, candidate, reference, expected):
for alpha in [0, 1, 0.3, 0.5, 0.8]:
rouge = RougeN(ngram=ngram, alpha=alpha)
rouge.update(([candidate], [[reference]]))
results = rouge.compute()
assert results[f"Rouge-{ngram}-P"] == expected[0]
assert results[f"Rouge-{ngram}-R"] == expected[1]
try:
F = expected[0] * expected[1] / ((1 - alpha) * expected[0] + alpha * expected[1])
except ZeroDivisionError:
F = 0
assert results[f"Rouge-{ngram}-F"] == F
@pytest.mark.parametrize(
"candidates, references", [corpus.sample_1, corpus.sample_2, corpus.sample_3, corpus.sample_4, corpus.sample_5]
)
def test_rouge_metrics(candidates, references):
for multiref in ["average", "best"]:
# PERL 1.5.5 reference
apply_avg = multiref == "average"
apply_best = multiref == "best"
evaluator = pyrouge.Rouge(
metrics=["rouge-n", "rouge-l"],
max_n=4,
apply_avg=apply_avg,
apply_best=apply_best,
alpha=0.5,
stemming=False,
ensure_compatibility=False,
)
scores = evaluator.get_scores(candidates, references)
lower_split_references = [
[ref.lower().split() for ref in refs_per_candidate] for refs_per_candidate in references
]
lower_split_candidates = [candidate.lower().split() for candidate in candidates]
m = Rouge(variants=[1, 2, 4, "L"], multiref=multiref, alpha=0.5)
m.update((lower_split_candidates, lower_split_references))
results = m.compute()
for key in ["1", "2", "4", "L"]:
assert pytest.approx(results[f"Rouge-{key}-R"], abs=1e-4) == scores[f"rouge-{key.lower()}"]["r"]
assert pytest.approx(results[f"Rouge-{key}-P"], abs=1e-4) == scores[f"rouge-{key.lower()}"]["p"]
assert pytest.approx(results[f"Rouge-{key}-F"], abs=1e-4) == scores[f"rouge-{key.lower()}"]["f"]
def _test_distrib_integration(device):
from ignite.engine import Engine
rank = idist.get_rank()
size = len(corpus.chunks)
data = []
for c in corpus.chunks:
data += idist.get_world_size() * [c]
def update(_, i):
candidate, references = data[i + size * rank]
lower_split_references = [reference.lower().split() for reference in references[0]]
lower_split_candidate = candidate[0].lower().split()
return [lower_split_candidate], [lower_split_references]
def _test(metric_device):
engine = Engine(update)
m = Rouge(variants=[1, 2, "L"], alpha=0.5, device=metric_device)
m.attach(engine, "rouge")
engine.run(data=list(range(size)), max_epochs=1)
assert "rouge" in engine.state.metrics
evaluator = pyrouge.Rouge(
metrics=["rouge-n", "rouge-l"],
max_n=4,
apply_avg=True,
apply_best=False,
alpha=0.5,
stemming=False,
ensure_compatibility=False,
)
rouge_1_f, rouge_2_f, rouge_l_f = (0, 0, 0)
for candidate, references in data:
scores = evaluator.get_scores(candidate, references)
rouge_1_f += scores["rouge-1"]["f"]
rouge_2_f += scores["rouge-2"]["f"]
rouge_l_f += scores["rouge-l"]["f"]
assert pytest.approx(engine.state.metrics["Rouge-1-F"], abs=1e-4) == rouge_1_f / len(data)
assert pytest.approx(engine.state.metrics["Rouge-2-F"], abs=1e-4) == rouge_2_f / len(data)
assert pytest.approx(engine.state.metrics["Rouge-L-F"], abs=1e-4) == rouge_l_f / len(data)
_test("cpu")
if device.type != "xla":
_test(idist.device())
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
# ---- check_idist_parallel script: smoke check for idist.Parallel ----
import argparse
import torch
import ignite.distributed as idist
def training(local_rank, config, **kwargs):
import time
time.sleep(idist.get_rank() * 0.1)
print(idist.get_rank(), ": run with config:", config, "- kwargs:", kwargs, f"- backend={idist.backend()}")
t = torch.tensor([idist.get_rank()], device=idist.device())
t = idist.all_reduce(t)
t = t.item()
ws = idist.get_world_size()
assert t == ws * (ws - 1) / 2, f"{t} vs {ws}"
assert local_rank == idist.get_local_rank()
# Test init method:
if idist.model_name() == "native-dist":
from ignite.distributed.utils import _model
true_init_method = config.get("true_init_method", None)
assert true_init_method is not None, true_init_method
assert _model._init_method == true_init_method
if __name__ == "__main__":
"""
Usage:
- No distributed configuration:
```
python tests/ignite/distributed/check_idist_parallel.py
```
- Launch 4 procs using gloo backend with `torchrun`:
```
torchrun --nproc_per_node=4 tests/ignite/distributed/check_idist_parallel.py --backend=gloo
```
- Launch 2 procs in 2 nodes using gloo backend with `torchrun` or `torch.distributed.launch`:
```
bash -c "torchrun --nnodes=2 --node_rank=0 \
--master_addr=localhost --master_port=3344 --nproc_per_node=2 \
tests/ignite/distributed/check_idist_parallel.py --backend=gloo &" \
&& bash -c "torchrun --nnodes=2 --node_rank=1 \
--master_addr=localhost --master_port=3344 --nproc_per_node=2 \
tests/ignite/distributed/check_idist_parallel.py --backend=gloo &"
```
- Spawn 4 procs in single node using gloo backend:
```
python tests/ignite/distributed/check_idist_parallel.py --backend=gloo --nproc_per_node=4
```
- Spawn 2 procs in 2 nodes using gloo backend:
```
bash -c "python tests/ignite/distributed/check_idist_parallel.py --backend=gloo \
--nproc_per_node=2 --nnodes=2 --node_rank=0 --master_addr=localhost --master_port=3344 &" \
&& bash -c "python tests/ignite/distributed/check_idist_parallel.py --backend=gloo \
--nproc_per_node=2 --nnodes=2 --node_rank=1 --master_addr=localhost --master_port=3344 &"
```
- Spawn 8 procs in single node using xla-tpu backend:
```
python tests/ignite/distributed/check_idist_parallel.py --backend=xla-tpu --nproc_per_node=8
```
"""
parser = argparse.ArgumentParser("Check idist.Parallel")
parser.add_argument("--backend", type=str, default=None)
parser.add_argument("--nproc_per_node", type=int, default=None)
parser.add_argument("--nnodes", type=int, default=None)
parser.add_argument("--node_rank", type=int, default=None)
parser.add_argument("--master_addr", type=str, default=None)
parser.add_argument("--master_port", type=str, default=None)
parser.add_argument("--init_method", type=str, default=None)
args = parser.parse_args()
config = {
"model": "resnet18",
"lr": 0.01,
}
if args.backend in ["gloo", "nccl"]:
config["true_init_method"] = args.init_method if args.init_method is not None else "env://"
dist_config = dict(
nproc_per_node=args.nproc_per_node,
nnodes=args.nnodes,
node_rank=args.node_rank,
master_addr=args.master_addr,
master_port=args.master_port,
)
if args.init_method is not None:
dist_config["init_method"] = args.init_method
with idist.Parallel(backend=args.backend, **dist_config) as parallel:
parallel.run(training, config, a=1, b=2)
# ---- tests for ignite.distributed.auto (auto_dataloader, auto_model, auto_optim) ----
import os
import pytest
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import _InfiniteConstantSampler
from torch.utils.data.dataset import Dataset, IterableDataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import BatchSampler, RandomSampler, Sampler, SequentialSampler, WeightedRandomSampler
import ignite.distributed as idist
from ignite.distributed.auto import auto_dataloader, auto_model, auto_optim, DistributedProxySampler
class DummyDS(Dataset):
def __init__(self, length=10):
self.length = length
def __len__(self):
return self.length
def __getitem__(self, index):
return index
class DummyIterableDataset(IterableDataset):
def __init__(self, start, end):
super().__init__()
self.start = start
self.end = end
def __iter__(self):
return iter(range(self.start, self.end))
def __len__(self):
return self.end - self.start
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("WORLD_SIZE" not in os.environ, reason="Skip if WORLD_SIZE not in env vars")
def test_auto_dataloader_warning(distributed_context_single_node_gloo):
with pytest.warns(UserWarning, match=r"Found batch_sampler in provided kwargs"):
auto_dataloader(
DummyDS(), batch_sampler=BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=False)
)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("WORLD_SIZE" not in os.environ, reason="Skip if WORLD_SIZE not in env vars")
def test_auto_dataloader_warning_distributed_sampler(distributed_context_single_node_gloo):
dataset = DummyDS()
rank = idist.get_rank()
world_size = idist.get_world_size()
auto_dataloader(dataset, sampler=DistributedSampler(dataset, num_replicas=world_size, rank=rank))
if world_size > 1:
wrong_rank = (rank + 1) % world_size
expected_warning = f"Found distributed sampler with rank={wrong_rank}, but process rank is {rank}"
with pytest.warns(UserWarning, match=expected_warning):
auto_dataloader(dataset, sampler=DistributedSampler(dataset, num_replicas=world_size, rank=wrong_rank))
expected_warning = f"Found distributed sampler with num_replicas={world_size + 1}, but world size is {world_size}"
with pytest.warns(UserWarning, match=expected_warning):
auto_dataloader(dataset, sampler=DistributedSampler(dataset, num_replicas=world_size + 1, rank=rank))
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_auto_dataloader_warning_tpu():
with pytest.warns(UserWarning, match=r"Found incompatible options: xla support and pin_memory"):
auto_dataloader(DummyDS(), pin_memory=True)
def _test_auto_dataloader(ws, nproc, batch_size, num_workers=1, sampler_name=None, dl_type=DataLoader):
def _test(data):
if sampler_name is None:
sampler = None
elif sampler_name == "WeightedRandomSampler":
sampler = WeightedRandomSampler(weights=torch.ones(100), num_samples=100)
elif sampler_name == "DistributedSampler":
sampler = DistributedSampler(data, num_replicas=ws, rank=idist.get_rank())
else:
raise RuntimeError(f"Unknown sampler name: {sampler_name}")
# Test auto_dataloader
assert idist.get_world_size() == ws, f"{idist.get_world_size()} vs {ws}"
shuffle = sampler is None if not isinstance(data, IterableDataset) else False
dataloader = auto_dataloader(
data, batch_size=batch_size, num_workers=num_workers, sampler=sampler, shuffle=shuffle
)
assert isinstance(dataloader, dl_type)
if hasattr(dataloader, "_loader"):
dataloader = dataloader._loader
if ws < batch_size:
assert dataloader.batch_size == batch_size // ws
else:
assert dataloader.batch_size == batch_size
if ws <= num_workers:
assert dataloader.num_workers == (num_workers + nproc - 1) // nproc
else:
assert dataloader.num_workers == num_workers
if isinstance(data, IterableDataset):
sampler_type = _InfiniteConstantSampler
elif ws > 1:
if sampler is None or isinstance(sampler, DistributedSampler):
sampler_type = DistributedSampler
else:
sampler_type = DistributedProxySampler
else:
sampler_type = RandomSampler if sampler is None else type(sampler)
assert isinstance(dataloader.sampler, sampler_type)
if isinstance(dataloader, DataLoader):
assert dataloader.pin_memory == ("cuda" in idist.device().type)
data = torch.rand(100, 3, 12, 12)
_test(data)
if sampler_name is None:
data = DummyIterableDataset(0, 100)
_test(data)
def _test_auto_model(model, ws, device, sync_bn=False, **kwargs):
model = auto_model(model, sync_bn=sync_bn, **kwargs)
bnd = idist.backend()
if ws > 1 and torch.device(device).type in ("cuda", "cpu"):
if idist.has_native_dist_support and bnd in ("nccl", "gloo"):
assert isinstance(model, nn.parallel.DistributedDataParallel)
if sync_bn:
assert any([isinstance(m, nn.SyncBatchNorm) for m in model.modules()])
if "find_unused_parameters" in kwargs:
assert model.find_unused_parameters == kwargs["find_unused_parameters"]
elif idist.has_hvd_support and bnd in ("horovod",):
assert isinstance(model, nn.Module)
elif device != "cpu" and torch.cuda.is_available() and torch.cuda.device_count() > 1:
assert isinstance(model, nn.parallel.DataParallel)
else:
assert isinstance(model, nn.Module)
assert all(
[p.device.type == torch.device(device).type for p in model.parameters()]
), f"{[p.device.type for p in model.parameters()]} vs {torch.device(device).type}"
def _test_auto_model_optimizer(ws, device):
# Test auto_model
model = nn.Linear(10, 10)
_test_auto_model(model, ws, device)
model = nn.Sequential(nn.Linear(20, 100), nn.BatchNorm1d(100))
_test_auto_model(model, ws, device, sync_bn="cuda" in torch.device(device).type)
if ws > 1:
_test_auto_model(model, ws, device, find_unused_parameters=True)
_test_auto_model(model, ws, device, find_unused_parameters=False)
# Test auto_optim
bnd = idist.backend()
optimizer = optim.SGD(model.parameters(), lr=0.01)
optimizer = auto_optim(optimizer)
if idist.has_xla_support and "xla" in device:
assert isinstance(optimizer, optim.SGD) and hasattr(optimizer, "wrapped_optimizer")
elif idist.has_hvd_support and bnd in ("horovod",):
assert isinstance(optimizer, optim.SGD) and hasattr(optimizer, "_allreduce_grad_async")
else:
assert isinstance(optimizer, optim.SGD) and not hasattr(optimizer, "wrapped_optimizer")
if idist.has_hvd_support and bnd in ("horovod",):
backward_passes_per_step = 2
optimizer = optim.SGD(model.parameters(), lr=0.01)
optimizer = auto_optim(optimizer, backward_passes_per_step=backward_passes_per_step)
assert isinstance(optimizer, optim.SGD) and hasattr(optimizer, "backward_passes_per_step")
assert optimizer.backward_passes_per_step == backward_passes_per_step
def test_auto_methods_no_dist():
_test_auto_dataloader(1, 1, batch_size=1)
_test_auto_dataloader(1, 1, batch_size=10, num_workers=2)
_test_auto_dataloader(1, 1, batch_size=10, sampler_name="WeightedRandomSampler")
_test_auto_dataloader(1, 1, batch_size=10, sampler_name="DistributedSampler")
_test_auto_model_optimizer(1, "cuda" if torch.cuda.is_available() else "cpu")
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_auto_methods_gloo(distributed_context_single_node_gloo):
ws = distributed_context_single_node_gloo["world_size"]
_test_auto_dataloader(ws=ws, nproc=ws, batch_size=1)
_test_auto_dataloader(ws=ws, nproc=ws, batch_size=10, num_workers=2)
_test_auto_dataloader(ws=ws, nproc=ws, batch_size=10, sampler_name="WeightedRandomSampler")
_test_auto_dataloader(ws=ws, nproc=ws, batch_size=10, sampler_name="DistributedSampler")
device = idist.device()
_test_auto_model_optimizer(ws, device)
if ws > 1 and device.type == "cpu":
# Pytorch <= 1.9.0 => AssertionError
# Pytorch > 1.9 => ValueError
# https://github.com/pytorch/pytorch/blob/master/torch/nn/parallel/distributed.py#L1498
with pytest.raises(
(AssertionError, ValueError), match=r"SyncBatchNorm layers only work with (GPU|CUDA) modules"
):
model = nn.Sequential(nn.Linear(20, 100), nn.BatchNorm1d(100))
auto_model(model, sync_bn=True)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_auto_methods_nccl(distributed_context_single_node_nccl):
ws = distributed_context_single_node_nccl["world_size"]
_test_auto_dataloader(ws=ws, nproc=ws, batch_size=1)
_test_auto_dataloader(ws=ws, nproc=ws, batch_size=10, num_workers=10)
_test_auto_dataloader(ws=ws, nproc=ws, batch_size=1, sampler_name="WeightedRandomSampler")
_test_auto_dataloader(ws=ws, nproc=ws, batch_size=1, sampler_name="DistributedSampler")
device = idist.device()
_test_auto_model_optimizer(ws, device)
if ws > 1:
with pytest.raises(ValueError, match=r"Argument kwargs should not contain 'device_ids'"):
auto_model(nn.Linear(1, 1), device_ids=[0])
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_auto_methods_hvd(gloo_hvd_executor):
device = "cpu" if not torch.cuda.is_available() else "cuda"
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_auto_dataloader, args=(np, np, 1), np=np, do_init=True)
gloo_hvd_executor(_test_auto_dataloader, args=(np, np, 10, 10), np=np, do_init=True)
gloo_hvd_executor(_test_auto_dataloader, args=(np, np, 1, 1, "WeightedRandomSampler"), np=np, do_init=True)
gloo_hvd_executor(_test_auto_dataloader, args=(np, np, 1, 1, "DistributedSampler"), np=np, do_init=True)
gloo_hvd_executor(_test_auto_model_optimizer, args=(np, device), np=np, do_init=True)
def _test_auto_methods_xla(index, ws):
dl_type = DataLoader
if ws > 1:
from ignite.distributed.auto import _MpDeviceLoader
dl_type = _MpDeviceLoader
try:
from torch_xla.distributed.parallel_loader import MpDeviceLoader
dl_type = MpDeviceLoader
except ImportError:
pass
_test_auto_dataloader(ws=ws, nproc=ws, batch_size=1, dl_type=dl_type)
_test_auto_dataloader(ws=ws, nproc=ws, batch_size=10, num_workers=2, dl_type=dl_type)
_test_auto_dataloader(ws=ws, nproc=ws, batch_size=1, sampler_name="WeightedRandomSampler", dl_type=dl_type)
_test_auto_dataloader(ws=ws, nproc=ws, batch_size=1, sampler_name="DistributedSampler", dl_type=dl_type)
device = "xla"
_test_auto_model_optimizer(ws, device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_auto_methods_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_auto_methods_xla, args=(n,), nprocs=n)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_auto_methods_xla():
_test_auto_methods_xla(index=0, ws=1)
def test_dist_proxy_sampler():
weights = torch.ones(100)
weights[:50] += 1
num_samples = 200
sampler = WeightedRandomSampler(weights, num_samples)
num_replicas = 8
dist_samplers = [DistributedProxySampler(sampler, num_replicas=num_replicas, rank=i) for i in range(num_replicas)]
for seed in range(100):
torch.manual_seed(seed)
true_indices = list(sampler)
indices_per_rank = []
for s in dist_samplers:
s.set_epoch(seed)
indices_per_rank += list(s)
set_indices_per_rank = set(indices_per_rank)
set_true_indices = set(true_indices)
assert (
set_indices_per_rank == set_true_indices
), f"{set_true_indices - set_indices_per_rank} | {set_indices_per_rank - set_true_indices}"
with pytest.raises(TypeError, match=r"Argument sampler should be instance of torch Sampler"):
DistributedProxySampler(None)
with pytest.raises(TypeError, match=r"Argument sampler should have length"):
DistributedProxySampler(Sampler([1]))
with pytest.raises(TypeError, match=r"Argument sampler must not be a distributed sampler already"):
DistributedProxySampler(DistributedSampler(sampler, num_replicas=num_replicas, rank=0))
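# Illustrative only (not a test): how the auto_* helpers exercised above are typically combined
# inside a function launched through idist.Parallel (cf. the check_idist_parallel script earlier).
# Everything referenced here is defined or imported in this module.
def _example_auto_usage(lr=0.01):
    dataset = DummyDS(100)
    # when running distributed, auto_dataloader injects a distributed(-proxy) sampler
    # and scales down batch_size / num_workers per process
    train_loader = auto_dataloader(dataset, batch_size=32, num_workers=2, shuffle=True)
    # auto_model moves the model to idist.device() and wraps it (DDP, DataParallel or horovod)
    model = auto_model(nn.Linear(1, 1))
    # auto_optim wraps the optimizer only for backends that need it (XLA, horovod)
    optimizer = auto_optim(optim.SGD(model.parameters(), lr=lr))
    return train_loader, model, optimizer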
# ---- tests for idist.Parallel and the distributed launchers ----
import os
import subprocess
import sys
from pathlib import Path
import pytest
import torch
from packaging.version import Version
import ignite.distributed as idist
from ignite.distributed.utils import has_hvd_support, has_native_dist_support, has_xla_support
def test_parallel_wrong_inputs():
with pytest.raises(ValueError, match=r"Unknown backend 'abc'. Available backends:"):
idist.Parallel(backend="abc")
with pytest.raises(ValueError, match=r"If backend is None, argument 'nnodes' should be also None"):
idist.Parallel(nnodes=2)
with pytest.raises(ValueError, match=r"Argument nproc_per_node should positive"):
idist.Parallel(backend="gloo", nproc_per_node=-1)
with pytest.raises(ValueError, match=r"Argument nnodes should positive"):
idist.Parallel(backend="gloo", nproc_per_node=1, nnodes=-1)
with pytest.raises(ValueError, match=r"If number of nodes larger than one"):
idist.Parallel(backend="gloo", nproc_per_node=1, nnodes=2)
with pytest.raises(ValueError, match=r"Argument node_rank should be between 0 and"):
idist.Parallel(backend="gloo", nproc_per_node=1, nnodes=2, node_rank=2)
with pytest.raises(ValueError, match=r"If number of nodes larger than one, arguments master_addr and master_port"):
idist.Parallel(backend="gloo", nproc_per_node=1, nnodes=2, node_rank=1)
@pytest.fixture()
def exec_filepath():
fp = Path(__file__).parent / "check_idist_parallel.py"
assert fp.exists()
yield fp.as_posix()
def execute(cmd, env=None):
import ignite
env = dict(os.environ) if env is None else env
env["PYTHONPATH"] = f"{os.path.dirname(ignite.__path__[0])}"
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
# communicate() reads each stream exactly once and avoids the pipe-buffer deadlock of wait()
out, err = process.communicate()
if process.returncode != 0:
print(str(out) + str(err))
raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd, stderr=err)
return str(out) + str(err)
def test_check_idist_parallel_no_dist(exec_filepath):
cmd = [sys.executable, "-u", exec_filepath]
out = execute(cmd)
assert "backend=None" in out
assert "in 1 processes" in out
assert "End of run" in out
def _test_check_idist_parallel_torch_launch(init_method, fp, backend, nprocs):
# torchrun --nproc_per_node=nprocs tests/ignite/distributed/check_idist_parallel.py --backend=backend
cmd = []
if Version(torch.__version__) >= Version("1.10.0"):
cmd += ["torchrun"]
else:
cmd += [
sys.executable,
"-m",
"torch.distributed.launch",
"--use_env",
]
cmd += [
f"--nproc_per_node={nprocs}",
fp,
f"--backend={backend}",
]
if init_method is not None:
cmd.append(f"--init_method={init_method}")
out = execute(cmd)
assert f"backend={backend}" in out
assert f"in {nprocs} processes" in out
assert "End of run" in out
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip because test uses torch launch")
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:29500", "FILE"])
@pytest.mark.parametrize(
"backend",
["gloo", pytest.param("nccl", marks=pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU"))],
)
def test_check_idist_parallel_torch_launch_n_procs_native(init_method, dirname, exec_filepath, backend):
if init_method == "FILE":
init_method = f"file://{dirname}/shared"
np = torch.cuda.device_count() if torch.cuda.is_available() else 4
_test_check_idist_parallel_torch_launch(init_method, exec_filepath, backend, np)
def _test_check_idist_parallel_hvdrun(fp, backend, nprocs):
# horovodrun -np=nprocs python tests/ignite/distributed/check_idist_parallel.py --backend=backend
cmd = [
"horovodrun",
"-np",
f"{nprocs}",
sys.executable,
fp,
f"--backend={backend}",
]
out = execute(cmd)
assert f"backend={backend}" in out
assert f"in {nprocs} processes" in out
assert "End of run" in out
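# The assembled command corresponds to, e.g. for a hypothetical 4-process run:
#   horovodrun -np 4 python tests/ignite/distributed/check_idist_parallel.py --backend=horovod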
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip because test uses horovodrun")
def test_check_idist_parallel_hvdrun_launch_n_procs(exec_filepath):
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
_test_check_idist_parallel_hvdrun(exec_filepath, "horovod", np)
def _test_check_idist_parallel_spawn(fp, backend, nprocs):
# python tests/ignite/distributed/check_idist_parallel.py --backend=backend --nproc_per_node=nprocs
cmd = [sys.executable, fp, f"--backend={backend}", f"--nproc_per_node={nprocs}"]
out = execute(cmd)
assert f"backend={backend}" in out
assert "Spawn function" in out
assert f"in {nprocs} processes" in out
if "xla" not in backend:
assert "End of run" in out
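# In this variant check_idist_parallel.py spawns its own workers via idist.Parallel,
# e.g. for a hypothetical 4-process gloo run:
#   python tests/ignite/distributed/check_idist_parallel.py --backend=gloo --nproc_per_node=4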
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.parametrize(
"backend",
["gloo", pytest.param("nccl", marks=pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU"))],
)
def test_check_idist_parallel_spawn_n_procs_native(exec_filepath, backend):
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
_test_check_idist_parallel_spawn(exec_filepath, backend, np)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_smoke_test_check_idist_parallel_spawn_multinode_n_procs_gloo(exec_filepath):
# Just a smoke test from check_idist_parallel.py for an emulated multi-node configuration
cmd1 = "export CUDA_VISIBLE_DEVICES= && "
cmd1 += f'bash -c "{sys.executable} {exec_filepath} --backend=gloo --nproc_per_node=2 '
cmd1 += '--nnodes=2 --node_rank=0 --master_addr=localhost --master_port=3344 &"'
os.system(cmd1)
cmd2 = [
sys.executable,
exec_filepath,
"--backend=gloo",
"--nproc_per_node=2",
"--nnodes=2",
"--node_rank=1",
"--master_addr=localhost",
"--master_port=3344",
]
env = dict(os.environ)
env["CUDA_VISIBLE_DEVICES"] = ""
out = execute(cmd2, env=env)
assert "backend=gloo" in out
assert "nproc_per_node: 2" in out
assert "nnodes: 2" in out
assert "master_addr: localhost" in out
assert "master_port: 3344" in out
assert "End of run" in out
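# Note: the smoke test above emulates two nodes on one machine: node 0 is launched in the
# background via os.system and node 1 in the foreground via execute(); both point at the
# same master_addr/master_port so that the gloo rendezvous can complete.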
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_check_idist_parallel_spawn_n_procs_xla(exec_filepath):
n = int(os.environ["NUM_TPU_WORKERS"])
if n > 1:
_test_check_idist_parallel_spawn(exec_filepath, "xla-tpu", n)
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_check_idist_parallel_spawn_n_procs_hvd(exec_filepath):
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
_test_check_idist_parallel_spawn(exec_filepath, "horovod", np)
def _test_func(index, ws, device, backend, true_init_method):
assert 0 <= index < ws
assert index == idist.get_local_rank()
assert ws == idist.get_world_size()
assert torch.device(device).type == idist.device().type
assert backend == idist.backend()
if idist.model_name() == "native-dist":
from ignite.distributed.utils import _model
assert _model._init_method == true_init_method
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.parametrize("init_method", ["env://", "tcp://0.0.0.0:29500", "FILE"])
@pytest.mark.parametrize(
"backend",
["gloo", pytest.param("nccl", marks=pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU"))],
)
def test_idist_parallel_spawn_n_procs_native(init_method, backend, dirname):
if init_method == "FILE":
init_method = f"file://{dirname}/shared"
nproc_per_node = torch.cuda.device_count() if torch.cuda.is_available() else 4
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
with idist.Parallel(backend=backend, nproc_per_node=nproc_per_node, init_method=init_method) as parallel:
parallel.run(_test_func, ws=nproc_per_node, device=device, backend=backend, true_init_method=init_method)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" not in os.environ, reason="Skip if not launched as multiproc")
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.parametrize("init_method", ["env://", "tcp://0.0.0.0:29500", "FILE"])
@pytest.mark.parametrize(
"backend",
["gloo", pytest.param("nccl", marks=pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU"))],
)
def test_idist_parallel_n_procs_native(init_method, backend, get_fixed_dirname, local_rank, world_size):
if init_method == "FILE":
init_method = f"file://{get_fixed_dirname('idist_parallel_n_procs_native')}/shared"
os.environ["RANK"] = str(local_rank)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
with idist.Parallel(backend=backend, init_method=init_method) as parallel:
parallel.run(_test_func, ws=world_size, device=device, backend=backend, true_init_method=init_method)
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_idist_parallel_no_dist():
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
with idist.Parallel(backend=None) as parallel:
parallel.run(_test_func, ws=1, device=device, backend=None, true_init_method=None)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_parallel_spawn_params_xla():
res = idist.Parallel._setup_spawn_params(
nproc_per_node=8, nnodes=None, node_rank=None, master_addr=None, master_port=None, start_method="fork"
)
assert "nproc_per_node" in res and res["nproc_per_node"] == 8
assert "start_method" in res and res["start_method"] == "fork"
with idist.Parallel(backend="xla-tpu", nproc_per_node=8, start_method="fork") as parallel:
assert parallel.backend == "xla-tpu"
res = parallel._spawn_params
assert "nproc_per_node" in res and res["nproc_per_node"] == 8
assert "start_method" in res and res["start_method"] == "fork"
|
import os
import pytest
import torch
import ignite.distributed as idist
from ignite.distributed.utils import has_hvd_support
from tests.ignite.distributed.utils import (
_test_distrib__get_max_length,
_test_distrib_all_gather,
_test_distrib_all_gather_group,
_test_distrib_all_reduce,
_test_distrib_all_reduce_group,
_test_distrib_barrier,
_test_distrib_broadcast,
_test_distrib_config,
_test_distrib_new_group,
_test_distrib_one_rank_only,
_test_distrib_one_rank_only_with_engine,
_test_sync,
)
@pytest.mark.skipif(has_hvd_support, reason="Skip if has Horovod package")
def test_hvd_distrib_spawn_no_hvd_support():
with pytest.raises(ValueError, match=r"Backend should be one of"):
idist.spawn("horovod", _test_distrib_config, args=("horovod", 1, "cpu"), nproc_per_node=1)
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
def test_hvd_distrib_single_node_single_device():
import horovod.torch as hvd
idist.initialize("horovod")
device = "cpu" if torch.cuda.device_count() < 1 else "cuda"
local_rank = hvd.local_rank()
world_size = hvd.size()
rank = hvd.rank()
_test_distrib_config(local_rank, "horovod", world_size, device, rank)
idist.finalize()
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(torch.cuda.device_count() > 0, reason="Skip if has GPU")
def test_hvd_distrib_single_node_spawn():
world_size = 4
idist.spawn("horovod", _test_distrib_config, args=("horovod", world_size, "cpu"), nproc_per_node=world_size)
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_hvd_distrib_multi_node_spawn_raise_error():
world_size = 4
with pytest.raises(RuntimeError, match=r"For multi-node configuration, please set 'hosts' argument instead"):
idist.spawn(
"horovod", _test_distrib_config, args=("horovod", world_size, "cpu"), nproc_per_node=world_size, nnodes=2
)
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_hvd_distrib_single_node_spawn_cuda():
world_size = torch.cuda.device_count()
idist.spawn("horovod", _test_distrib_config, args=("horovod", world_size, "cuda"), nproc_per_node=world_size)
def _test_sync_as_hvd():
import horovod.torch as hvd
from ignite.distributed.comp_models.horovod import _HorovodDistModel
hvd.init()
lrank = hvd.local_rank()
if torch.cuda.is_available():
torch.cuda.set_device(lrank)
_test_sync(_HorovodDistModel)
hvd.shutdown()
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif(os.getenv("HOROVOD_RANK", -1) == -1, reason="Skip as controller is not Gloo")
def test_sync_as_hvd():
_test_sync_as_hvd()
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_sync_as_hvd_inside_gloo_executor(gloo_hvd_executor):
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_sync_as_hvd, (), np=np)
def _test_idist_methods_in_hvd_context(backend, device):
# We explicitly set _model as _SerialModel
# then call idist.* methods and check that they give correct values
import horovod.torch as hvd
from ignite.distributed.utils import _SerialModel, _set_model
hvd.init()
_set_model(_SerialModel())
ws = hvd.size()
rank = hvd.rank()
local_rank = hvd.local_rank()
if torch.cuda.is_available():
torch.cuda.set_device(local_rank)
_test_distrib_config(local_rank, backend=backend, ws=ws, true_device=device, rank=rank)
hvd.shutdown()
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_idist_methods_in_hvd_context(gloo_hvd_executor):
device = "cpu" if not torch.cuda.is_available() else "cuda"
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_idist_methods_in_hvd_context, ("horovod", device), np=np)
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_idist_all_reduce_hvd(gloo_hvd_executor):
device = "cpu" if not torch.cuda.is_available() else "cuda"
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_all_reduce, (device,), np=np, do_init=True)
gloo_hvd_executor(_test_distrib_all_reduce_group, (device,), np=np, do_init=True)
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_idist__model_methods_hvd(gloo_hvd_executor):
device = "cpu" if not torch.cuda.is_available() else "cuda"
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib__get_max_length, (device,), np=np, do_init=True)
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_idist_all_gather_hvd(gloo_hvd_executor):
device = "cpu" if not torch.cuda.is_available() else "cuda"
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_all_gather, (device,), np=np, do_init=True)
gloo_hvd_executor(_test_distrib_all_gather_group, (device,), np=np, do_init=True)
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_idist_broadcast_hvd(gloo_hvd_executor):
device = "cpu" if not torch.cuda.is_available() else "cuda"
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_broadcast, (device,), np=np, do_init=True)
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_idist_barrier_hvd(gloo_hvd_executor):
device = "cpu" if not torch.cuda.is_available() else "cuda"
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_barrier, (device,), np=np, do_init=True)
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_idist_new_group_hvd(gloo_hvd_executor):
device = "cpu" if not torch.cuda.is_available() else "cuda"
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_new_group, (device,), np=np, do_init=True)
def _test_idist_methods_overhead(ok_factor, sync_model):
import time
import horovod.torch as hvd
if sync_model:
idist.sync()
from ignite.distributed.comp_models.horovod import _HorovodDistModel
from ignite.distributed.utils import _model
assert isinstance(_model, _HorovodDistModel)
n = 100000
m = 5
t2 = 0.0
t1 = 0.0
for _ in range(m):
start = time.time()
for _ in range(n):
_ = hvd.size()
_ = hvd.rank()
elapsed = time.time() - start
t2 += elapsed / n / m
start = time.time()
for _ in range(n):
_ = idist.get_world_size()
_ = idist.get_rank()
elapsed = time.time() - start
t1 += elapsed / n / m
overhead_factor = t1 / t2
assert overhead_factor < ok_factor, f"{overhead_factor} vs {ok_factor} | {t2} vs {t1}"
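# The overhead factor above compares idist.get_world_size()/idist.get_rank() (t1) against
# raw hvd.size()/hvd.rank() calls (t2); once idist.sync() has installed the Horovod model,
# the idist wrappers dispatch directly to it, hence the tighter ok_factor used with
# sync_model=True in the test below.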
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_idist_methods_overhead_hvd(gloo_hvd_executor):
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
ok_factor = 6.0
sync_model = False
gloo_hvd_executor(_test_idist_methods_overhead, (ok_factor, sync_model), np=np, do_init=True)
ok_factor = 2.5
sync_model = True
gloo_hvd_executor(_test_idist_methods_overhead, (ok_factor, sync_model), np=np, do_init=True)
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_idist_one_rank_only(gloo_hvd_executor):
device = "cpu" if not torch.cuda.is_available() else "cuda"
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_one_rank_only, (device,), np=np, do_init=True)
gloo_hvd_executor(_test_distrib_one_rank_only_with_engine, (device,), np=np, do_init=True)
|
import torch
import ignite.distributed as idist
from tests.ignite.distributed.utils import (
_sanity_check,
_test_distrib__get_max_length,
_test_distrib_all_gather,
_test_distrib_all_reduce,
_test_distrib_barrier,
_test_distrib_broadcast,
_test_distrib_new_group,
_test_sync,
)
def test_no_distrib(capsys):
assert idist.backend() is None
if torch.cuda.is_available():
assert idist.device().type == "cuda"
else:
assert idist.device().type == "cpu"
assert idist.get_rank() == 0
assert idist.get_world_size() == 1
assert idist.get_local_rank() == 0
assert idist.model_name() == "serial"
from ignite.distributed.utils import _model, _SerialModel
_sanity_check()
assert isinstance(_model, _SerialModel)
idist.show_config()
captured = capsys.readouterr()
out = captured.err.split("\r")
out = list(map(lambda x: x.strip(), out))
out = list(filter(None, out))
assert "ignite.distributed.utils INFO: distributed configuration: serial" in out[-1]
assert "ignite.distributed.utils INFO: backend: None" in out[-1]
if torch.cuda.is_available():
assert "ignite.distributed.utils INFO: device: cuda" in out[-1]
else:
assert "ignite.distributed.utils INFO: device: cpu" in out[-1]
assert "ignite.distributed.utils INFO: rank: 0" in out[-1]
assert "ignite.distributed.utils INFO: local rank: 0" in out[-1]
assert "ignite.distributed.utils INFO: world size: 1" in out[-1]
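# idist.show_config() logs the current distributed configuration (here: serial, i.e.
# backend, device, rank, local rank, world size, ...) through the ignite.distributed.utils
# logger; the string checks above look for those lines in the captured stderr.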
def test_sync_no_dist():
from ignite.distributed.comp_models import _SerialModel
_test_sync(_SerialModel)
def test_idist_methods_no_dist():
assert idist.get_world_size() < 2
assert idist.backend() is None, f"{idist.backend()}"
def test_idist__model_methods_no_dist():
_test_distrib__get_max_length("cpu")
if torch.cuda.device_count() > 1:
_test_distrib__get_max_length("cuda")
def test_idist_collective_ops_no_dist():
_test_distrib_all_reduce("cpu")
_test_distrib_all_gather("cpu")
_test_distrib_barrier("cpu")
_test_distrib_broadcast("cpu")
_test_distrib_new_group("cpu")
if torch.cuda.device_count() > 1:
_test_distrib_all_reduce("cuda")
_test_distrib_all_gather("cuda")
_test_distrib_barrier("cuda")
_test_distrib_broadcast("cuda")
_test_distrib_new_group("cuda")
|
import pytest
import torch
import torch.distributed as dist
import ignite.distributed as idist
from ignite.distributed.utils import sync
from ignite.engine import Engine, Events
def _sanity_check():
from ignite.distributed.utils import _model
assert _model.get_world_size() == _model.get_nnodes() * _model.get_nproc_per_node()
assert _model.get_local_rank() < _model.get_nproc_per_node()
assert _model.get_rank() < _model.get_world_size()
assert _model.get_node_rank() < _model.get_nnodes()
def _test_distrib_config(local_rank, backend, ws, true_device, rank=None, true_init_method=None):
assert idist.backend() == backend, f"{idist.backend()} vs {backend}"
this_device = idist.device()
assert isinstance(this_device, torch.device)
if backend in ("nccl", "gloo", "horovod") and "cuda" in this_device.type:
assert this_device.type == torch.device(true_device).type, f"{this_device} vs {true_device}"
elif backend in ("gloo", "horovod"):
assert this_device.type == torch.device(true_device).type
elif backend == "xla-tpu":
assert true_device in this_device.type
if rank is None:
if idist.model_name() == "native-dist":
rank = dist.get_rank()
if rank is not None:
assert idist.get_rank() == rank
assert idist.get_world_size() == ws
assert idist.get_local_rank() == local_rank
assert idist.model_name() in ("native-dist", "xla-dist", "horovod-dist")
_sanity_check()
if idist.model_name() == "native-dist":
from ignite.distributed.utils import _model
if true_init_method is not None:
assert _model._init_method == true_init_method
def _test_sync(cls):
from ignite.distributed.utils import _SerialModel, _set_model
_set_model(_SerialModel())
sync()
from ignite.distributed.utils import _model
assert isinstance(_model, cls), f"{type(_model)} vs {cls}"
def _test_distrib__get_max_length(device):
ws = idist.get_world_size()
x = "_test_distrib__get_max_length" * (idist.get_rank() + 2)
from ignite.distributed.utils import _model
res = _model._get_max_length(x, device)
assert res == len("_test_distrib__get_max_length" * (ws + 1))
def _test_distrib_all_reduce(device):
res = idist.all_reduce(10)
assert res == 10 * idist.get_world_size()
t = torch.tensor(10, device=device)
res = idist.all_reduce(t)
assert res.item() == 10 * idist.get_world_size()
rank = idist.get_rank()
t = torch.tensor(rank * 2.0 + 1.0, device=device)
res = idist.all_reduce(t)
assert res.item() == sum([i * 2.0 + 1.0 for i in range(idist.get_world_size())])
t = torch.tensor(rank * 2.0 + 1.0, device=device)
res = idist.all_reduce(t, "MIN").item()
true_val = min([i * 2 + 1 for i in range(idist.get_world_size())])
assert res == true_val, f"{res} vs {true_val}"
t = torch.tensor(rank * 2.0 + 1.0, device=device)
res = idist.all_reduce(t, "MAX").item()
true_val = max([i * 2.0 + 1.0 for i in range(idist.get_world_size())])
assert res == true_val, f"{res} vs {true_val}"
t = torch.ones(4, 4, device=device) * (rank * 2.0 + 1.0)
res = idist.all_reduce(t, "MAX")
true_val = torch.ones(4, 4, device=device) * ((idist.get_world_size() - 1) * 2.0 + 1.0)
assert res.equal(true_val), f"{res} vs {true_val}"
t = torch.tensor(rank * 2.0 + 1.0, device=device)
res = idist.all_reduce(t, "PRODUCT").item()
true_val = 1
for v in [i * 2.0 + 1.0 for i in range(idist.get_world_size())]:
true_val *= v
assert res == true_val, f"{res} vs {true_val}"
if idist.get_world_size() > 1:
with pytest.raises(TypeError, match=r"Unhandled input type"):
idist.all_reduce("abc")
with pytest.raises(ValueError, match=r"Unsupported reduction operation"):
idist.all_reduce(10, op="ABC")
t = torch.tensor([0, 1, 2])
res = idist.all_reduce(t)
assert res.device == t.device, f"{res.device} vs {t.device}"
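# Summary of what the helper above exercises: idist.all_reduce() accepts Python numbers
# and tensors, defaults to a SUM reduction and also supports the "MIN", "MAX" and
# "PRODUCT" ops; unsupported input types and unknown ops raise TypeError / ValueError.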
def _test_distrib_all_reduce_group(device):
if idist.get_world_size() > 1 and idist.backend() is not None:
ranks = [0, 1]
rank = idist.get_rank()
t = torch.tensor([rank], device=device)
bnd = idist.backend()
group = idist.new_group(ranks)
if bnd in ("horovod",):
with pytest.raises(NotImplementedError, match=r"all_reduce with group for horovod is not implemented"):
res = idist.all_reduce(t, group=group)
else:
res = idist.all_reduce(t, group=group)
assert res == torch.tensor([sum(ranks)], device=device)
t = torch.tensor([rank], device=device)
if bnd in ("horovod",):
with pytest.raises(NotImplementedError, match=r"all_reduce with group for horovod is not implemented"):
res = idist.all_reduce(t, group=ranks)
else:
res = idist.all_reduce(t, group=ranks)
assert res == torch.tensor([sum(ranks)], device=device)
ranks = "abc"
if bnd in ("nccl", "gloo", "mpi"):
with pytest.raises(ValueError, match=r"Argument group should be list of int or ProcessGroup"):
res = idist.all_reduce(t, group="abc")
elif bnd in ("xla-tpu",):
with pytest.raises(ValueError, match=r"Argument group should be list of int"):
res = idist.all_reduce(t, group="abc")
elif bnd in ("horovod",):
with pytest.raises(NotImplementedError, match=r"all_reduce with group for horovod is not implemented"):
res = idist.all_reduce(t, group="abc")
def _test_distrib_all_gather(device):
rank = idist.get_rank()
ws = idist.get_world_size()
res = torch.tensor(idist.all_gather(10), device=device)
true_res = torch.tensor([10] * ws, device=device)
assert (res == true_res).all()
t = torch.tensor(rank, device=device)
res = idist.all_gather(t)
true_res = torch.tensor([i for i in range(ws)], device=device)
assert (res == true_res).all()
x = "test-test"
if rank == 0:
x = "abc"
res = idist.all_gather(x)
true_res = ["abc"] + ["test-test"] * (ws - 1)
assert res == true_res
base_x = "tests/ignite/distributed/utils/test_native.py" * 2000
x = base_x
if rank == 0:
x = "abc"
res = idist.all_gather(x)
true_res = ["abc"] + [base_x] * (ws - 1)
assert res == true_res
t = torch.arange(100, device=device).reshape(4, 25) * (rank + 1)
in_dtype = t.dtype
res = idist.all_gather(t)
assert res.shape == (ws * 4, 25)
assert res.dtype == in_dtype
true_res = torch.zeros(ws * 4, 25, device=device)
for i in range(ws):
true_res[i * 4 : (i + 1) * 4, ...] = torch.arange(100, device=device).reshape(4, 25) * (i + 1)
assert (res == true_res).all()
if ws > 1 and idist.backend() != "xla-tpu":
t = {
"a": [rank + 1, rank + 2, torch.tensor(rank + 3, device=device)],
"b": torch.tensor([[rank + 1, rank + 2, rank + 3]], device=device),
"c": {"abcd": rank, "cdfg": torch.tensor(rank, dtype=torch.uint8, device=device)},
}
res = idist.all_gather(t)
assert isinstance(res, list) and len(res) == ws
for i, obj in enumerate(res):
assert isinstance(obj, dict)
assert list(obj.keys()) == ["a", "b", "c"], obj
expected_device = (
device if torch.device(device).type == "cpu" else torch.device(f"{torch.device(device).type}:{i}")
)
expected = {
"a": [i + 1, i + 2, torch.tensor(i + 3, device=expected_device)],
"b": torch.tensor([[i + 1, i + 2, i + 3]], device=expected_device),
"c": {"abcd": i, "cdfg": torch.tensor(i, dtype=torch.uint8, device=expected_device)},
}
assert obj["a"] == expected["a"]
assert (obj["b"] == expected["b"]).all()
assert obj["c"] == expected["c"]
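# As exercised above, idist.all_gather() handles numbers, strings and tensors (returning
# a tensor concatenated along dim 0) and, for non-XLA backends with world size > 1,
# picklable objects such as nested dicts, returning a list with one object per rank.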
def _test_distrib_all_gather_group(device):
if idist.get_world_size() > 1:
ranks = list(range(idist.get_world_size() - 1, 0, -1)) # [0, 1, 2, 3] -> [3, 2, 1]
rank = idist.get_rank()
bnd = idist.backend()
t = torch.tensor([rank], device=device)
group = idist.new_group(ranks)
if bnd in ("horovod",):
with pytest.raises(NotImplementedError, match=r"all_gather with group for horovod is not implemented"):
res = idist.all_gather(t, group=group)
else:
res = idist.all_gather(t, group=group)
if rank in ranks:
assert torch.equal(res, torch.tensor(ranks, device=device))
else:
assert res == t
t = torch.tensor([rank], device=device)
if bnd in ("horovod",):
with pytest.raises(NotImplementedError, match=r"all_gather with group for horovod is not implemented"):
res = idist.all_gather(t, group=ranks)
else:
res = idist.all_gather(t, group=ranks)
if rank in ranks:
assert torch.equal(res, torch.tensor(ranks, device=device))
else:
assert res == t
t = {
"a": [rank + 1, rank + 2, torch.tensor(rank + 3, device=device)],
"b": torch.tensor([[rank + 1, rank + 2, rank + 3]], device=device),
"c": {"abcd": rank, "cdfg": torch.tensor(rank, dtype=torch.uint8, device=device)},
}
if bnd in ("xla-tpu",):
with pytest.raises(NotImplementedError, match=r"all_gather on object is not implemented for xla"):
res = idist.all_gather(t, group=ranks)
elif bnd in ("horovod",):
with pytest.raises(NotImplementedError, match=r"all_gather with group for horovod is not implemented"):
res = idist.all_gather(t, group=ranks)
else:
res = idist.all_gather(t, group=ranks)
if rank in ranks:
assert isinstance(res, list) and len(res) == len(ranks)
for i, obj in zip(ranks, res):
assert isinstance(obj, dict)
assert list(obj.keys()) == ["a", "b", "c"], obj
expected_device = (
device
if torch.device(device).type == "cpu"
else torch.device(f"{torch.device(device).type}:{i}")
)
expected = {
"a": [i + 1, i + 2, torch.tensor(i + 3, device=expected_device)],
"b": torch.tensor([[i + 1, i + 2, i + 3]], device=expected_device),
"c": {"abcd": i, "cdfg": torch.tensor(i, dtype=torch.uint8, device=expected_device)},
}
assert obj["a"] == expected["a"], (obj, expected)
assert (obj["b"] == expected["b"]).all(), (obj, expected)
assert obj["c"] == expected["c"], (obj, expected)
else:
assert res == t
if bnd in ("nccl", "gloo", "mpi"):
with pytest.raises(ValueError, match=r"Argument group should be list of int or ProcessGroup"):
res = idist.all_gather(t, group="abc")
elif bnd in ("xla-tpu",):
with pytest.raises(ValueError, match=r"Argument group should be list of int"):
res = idist.all_gather(t, group="abc")
elif bnd in ("horovod",):
with pytest.raises(NotImplementedError, match=r"all_gather with group for horovod is not implemented"):
res = idist.all_gather(t, group="abc")
def _test_distrib_broadcast(device):
rank = idist.get_rank()
ws = idist.get_world_size()
def _test(data_src, data_others, safe_mode):
for src in range(ws):
data = data_src if rank == src else data_others
res = idist.broadcast(data, src=src, safe_mode=safe_mode)
if isinstance(res, torch.Tensor):
assert (res == data_src).all(), f"{res} vs {data_src}"
assert data_src.dtype == res.dtype
else:
assert res == data_src, f"{res} vs {data_src}"
_test(10, 0, safe_mode=False)
_test(10, None, safe_mode=True)
t = torch.tensor([1.2345, 2.3456], dtype=torch.float, device=device)
_test(t, torch.empty_like(t), safe_mode=False)
_test(t, None, safe_mode=True)
_test(t, "abc", safe_mode=True)
_test("test-abcdefg", "", safe_mode=False)
_test("test-abcdefg", None, safe_mode=True)
_test("test-abcdefg", 1.2, safe_mode=True)
s = "tests/ignite/distributed/utils/test_horovod.py::test_idist_broadcast_hvd" * 200
_test(s, "", safe_mode=False)
_test(s, None, safe_mode=True)
_test(s, 123.0, safe_mode=True)
t = torch.arange(100, device=device).reshape(4, 25) * 2
_test(t, torch.empty_like(t), safe_mode=False)
_test(t, None, safe_mode=True)
_test(t, "None", safe_mode=True)
t = torch.tensor(12)
_test(t, torch.empty_like(t), safe_mode=False)
_test(t, None, safe_mode=True)
_test(t, 123.4, safe_mode=True)
if idist.get_world_size() > 1:
with pytest.raises(TypeError, match=r"Unhandled input type"):
idist.broadcast([0, 1, 2], src=0)
if idist.get_world_size() > 1:
msg = "Source data can not be None" if rank == 0 else "Argument safe_mode should be True"
with pytest.raises(ValueError, match=msg):
idist.broadcast(None, src=0)
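# The safe_mode flag exercised above lets non-source ranks pass None or a placeholder of a
# different type; without safe_mode the other ranks provide a compatible placeholder (as
# done with torch.empty_like(t), "" and 0 above), and broadcasting None from the source
# rank raises a ValueError.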
def _test_distrib_barrier(device):
t = torch.tensor([idist.get_rank()], device=device, dtype=torch.float)
true_res = sum([i for i in range(idist.get_world_size())])
if idist.get_rank() == 0:
t += 10.0
idist.barrier()
tt = idist.all_reduce(t)
assert tt.item() == true_res + 10.0
def _test_distrib_new_group(device):
if idist.get_world_size() > 1 and idist.backend() is not None:
bnd = idist.backend()
ranks = [0, 1]
if idist.has_native_dist_support and bnd in ("nccl", "gloo", "mpi"):
g1 = idist.new_group(ranks)
g2 = dist.new_group(ranks)
rank = idist.get_rank()
if rank in ranks:
assert g1.rank() == g2.rank()
elif idist.has_xla_support and bnd in ("xla-tpu",):
assert idist.new_group(ranks) == [ranks]
elif idist.has_hvd_support and bnd in ("horovod",):
from horovod.common.process_sets import ProcessSet
g1 = idist.new_group(ranks)
g2 = ProcessSet(ranks)
rank = idist.get_rank()
if rank in ranks:
assert g1.ranks == g2.ranks
elif idist.backend() is None:
ranks = [0, 1]
assert idist.new_group(ranks) == ranks
with pytest.raises(ValueError, match="Argument ranks should be list of int"):
ranks = ["a", "b", "c"]
idist.new_group(ranks)
with pytest.raises(ValueError, match="Argument ranks should be list of int"):
ranks = 1
idist.new_group(ranks)
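# As checked above, idist.new_group(ranks) returns a backend-specific handle: a
# torch.distributed process group for native backends, [ranks] for xla-tpu, a Horovod
# ProcessSet for horovod, and the plain ranks list when no backend is initialized;
# anything that is not a list of ints raises a ValueError.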
def _test_distrib_one_rank_only(device):
def _test(barrier):
# last rank
rank = idist.get_world_size() - 1
value = torch.tensor(0).to(device)
@idist.one_rank_only(rank=rank, with_barrier=barrier)
def initialize():
value.add_(torch.tensor(100).to(device))
initialize()
value_list = idist.all_gather(tensor=value)
for r in range(idist.get_world_size()):
if r == rank:
assert value_list[r].item() == 100
else:
assert value_list[r].item() == 0
_test(barrier=True)
_test(barrier=False)
def _test_distrib_one_rank_only_with_engine(device):
def _test(barrier):
engine = Engine(lambda e, b: b)
batch_sum = torch.tensor(0).to(device)
@engine.on(Events.ITERATION_COMPLETED)
@idist.one_rank_only(with_barrier=barrier) # ie rank == 0
def _(_):
batch_sum.data += torch.tensor(engine.state.batch).to(device)
engine.run([1, 2, 3], max_epochs=2)
value_list = idist.all_gather(tensor=batch_sum)
for r in range(idist.get_world_size()):
if r == 0:
assert value_list[r].item() == 12
else:
assert value_list[r].item() == 0
_test(barrier=True)
_test(barrier=False)
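# Minimal usage sketch of the decorator tested above (illustrative only):
#
#   @idist.one_rank_only(rank=0, with_barrier=True)
#   def log_once():
#       ...  # body runs on rank 0 only; with_barrier=True synchronizes all ranks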
|
import os
import pytest
import ignite.distributed as idist
from ignite.distributed.utils import has_xla_support
from tests.ignite.distributed.utils import (
_test_distrib_all_gather,
_test_distrib_all_gather_group,
_test_distrib_all_reduce,
_test_distrib_all_reduce_group,
_test_distrib_barrier,
_test_distrib_broadcast,
_test_distrib_config,
_test_distrib_new_group,
_test_distrib_one_rank_only,
_test_distrib_one_rank_only_with_engine,
_test_sync,
)
@pytest.mark.skipif(has_xla_support, reason="Skip if has PyTorch XLA package")
def test_xla_distrib_spawn_no_xla_support():
with pytest.raises(ValueError, match=r"Backend should be one of"):
idist.spawn("xla-tpu", _test_distrib_config, args=("xla-tpu", 1, "xla"), nproc_per_node=1)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_xla_distrib_single_node_no_spawn():
idist.initialize("xla-tpu")
_test_distrib_config(local_rank=0, backend="xla-tpu", ws=1, true_device="xla")
idist.finalize()
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_xla_distrib_single_node_spawn_one_proc():
try:
idist.spawn("xla-tpu", _test_distrib_config, args=("xla-tpu", 1, "xla"), nproc_per_node=1)
except SystemExit:
pass
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_xla_distrib_single_node_spawn_n_procs():
n = int(os.environ["NUM_TPU_WORKERS"])
try:
idist.spawn("xla-tpu", _test_distrib_config, args=("xla-tpu", n, "xla"), nproc_per_node=n)
except SystemExit:
pass
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_sync_as_xla():
from ignite.distributed.comp_models.xla import _XlaDistModel
_test_sync(_XlaDistModel)
def _test_sync_as_xla_in_child_proc(index):
from ignite.distributed.comp_models.xla import _XlaDistModel
_test_sync(_XlaDistModel)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_sync_as_xla_in_child_proc(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_sync_as_xla_in_child_proc, args=(), nprocs=n)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_methods_in_xla_context():
# We explicitly set _model as _SerialModel
# then call idist.* methods and check that they give correct values
from ignite.distributed.utils import _SerialModel, _set_model
_set_model(_SerialModel())
_test_distrib_config(local_rank=0, backend="xla-tpu", ws=1, true_device="xla", rank=0)
def _test_idist_methods_in_xla_context_in_child_proc(index):
# We explicitly set _model as _SerialModel
# then call idist.* methods and check that they give correct values
from ignite.distributed.utils import _SerialModel, _set_model
_set_model(_SerialModel())
import torch_xla.core.xla_model as xm
_test_distrib_config(
local_rank=index, backend="xla-tpu", ws=xm.xrt_world_size(), true_device="xla", rank=xm.get_ordinal()
)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_methods_in_xla_context_in_child_proc(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_idist_methods_in_xla_context_in_child_proc, args=(), nprocs=n)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_all_reduce_xla():
device = idist.device()
_test_distrib_all_reduce(device)
_test_distrib_all_reduce_group(device)
def _test_idist_all_reduce_xla_in_child_proc(index):
device = idist.device()
_test_distrib_all_reduce(device)
_test_distrib_all_reduce_group(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_all_reduce_xla_in_child_proc(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_idist_all_reduce_xla_in_child_proc, args=(), nprocs=n)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_new_group_xla():
device = idist.device()
_test_distrib_new_group(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_all_gather_xla():
device = idist.device()
_test_distrib_all_gather(device)
_test_distrib_all_gather_group(device)
def _test_idist_all_gather_xla_in_child_proc(index):
device = idist.device()
_test_distrib_all_gather(device)
_test_distrib_all_gather_group(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_all_gather_xla_in_child_proc(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_idist_all_gather_xla_in_child_proc, args=(), nprocs=n)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_broadcast_xla():
device = idist.device()
_test_distrib_broadcast(device)
def _test_idist_broadcast_xla_in_child_proc(index):
device = idist.device()
_test_distrib_broadcast(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_broadcast_xla_in_child_proc(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_idist_broadcast_xla_in_child_proc, args=(), nprocs=n)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_barrier_xla():
device = idist.device()
_test_distrib_barrier(device)
def _test_idist_barrier_xla_in_child_proc(index):
device = idist.device()
_test_distrib_barrier(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_barrier_xla_in_child_proc(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_idist_barrier_xla_in_child_proc, args=(), nprocs=n)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_one_rank_only_xla():
device = idist.device()
_test_distrib_one_rank_only(device=device)
_test_distrib_one_rank_only_with_engine(device=device)
def _test_idist_one_rank_only_xla_nprocs(index):
device = idist.device()
_test_distrib_one_rank_only(device=device)
_test_distrib_one_rank_only_with_engine(device=device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_one_rank_only_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_idist_one_rank_only_xla_nprocs, args=(), nprocs=n)
|
import os
import pytest
import torch
import torch.distributed as dist
from packaging.version import Version
import ignite.distributed as idist
from ignite.distributed.utils import has_native_dist_support
from tests.ignite.distributed.utils import (
_test_distrib__get_max_length,
_test_distrib_all_gather,
_test_distrib_all_gather_group,
_test_distrib_all_reduce,
_test_distrib_all_reduce_group,
_test_distrib_barrier,
_test_distrib_broadcast,
_test_distrib_config,
_test_distrib_new_group,
_test_distrib_one_rank_only,
_test_distrib_one_rank_only_with_engine,
_test_sync,
)
def _test_native_distrib_single_node_launch_tool(backend, device, local_rank, world_size, init_method=None, **kwargs):
import os
rank = local_rank
os.environ["RANK"] = f"{rank}"
idist.initialize(backend, init_method=init_method, **kwargs)
_test_distrib_config(local_rank, backend, world_size, device, rank, true_init_method=init_method)
idist.finalize()
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:22334", "FILE"])
def test_native_distrib_single_node_launch_tool_gloo(init_method, get_fixed_dirname, local_rank, world_size):
from datetime import timedelta
timeout = timedelta(seconds=20)
if init_method == "FILE":
init_method = f"file://{get_fixed_dirname('native_distrib_single_node_launch_tool_gloo')}/shared"
device = torch.device(f"cuda:{local_rank}" if torch.cuda.is_available() else "cpu")
_test_native_distrib_single_node_launch_tool(
"gloo", device, local_rank, world_size, timeout=timeout, init_method=init_method
)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:22334", "FILE"])
def test_native_distrib_single_node_launch_tool_nccl(init_method, get_fixed_dirname, local_rank, world_size):
if init_method == "FILE":
init_method = f"file://{get_fixed_dirname('native_distrib_single_node_launch_tool_nccl')}/shared"
device = torch.device(f"cuda:{local_rank}")
_test_native_distrib_single_node_launch_tool("nccl", device, local_rank, world_size, init_method=init_method)
def _test_native_distrib_single_node_spawn(init_method, backend, device, **kwargs):
world_size = 4 if torch.device(device).type == "cpu" else torch.cuda.device_count()
idist.spawn(
backend,
_test_distrib_config,
args=(backend, world_size, device),
nproc_per_node=world_size,
init_method=init_method,
**kwargs,
)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:22334", "FILE"])
def test_native_distrib_single_node_spawn_gloo(init_method, dirname):
from datetime import timedelta
timeout = timedelta(seconds=20)
if init_method == "FILE":
init_method = f"file://{dirname}/shared"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
_test_native_distrib_single_node_spawn(init_method, "gloo", device, timeout=timeout)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:22334", "FILE"])
def test_native_distrib_single_node_spawn_nccl(init_method, dirname):
if init_method == "FILE":
init_method = f"file://{dirname}/shared"
device = torch.device("cuda")
_test_native_distrib_single_node_spawn(init_method, "nccl", device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_sync_as_native_gloo(distributed_context_single_node_gloo):
from ignite.distributed.comp_models.native import _NativeDistModel
_test_sync(_NativeDistModel)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_sync_as_native_nccl(distributed_context_single_node_nccl):
from ignite.distributed.comp_models.native import _NativeDistModel
_test_sync(_NativeDistModel)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_new_group_native_nccl(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_new_group(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_new_group_native_gloo(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_new_group(device)
def _test_idist_methods_in_native_context(backend, device, local_rank):
# We explicitly set _model as _SerialModel
# then call idist.* methods and check that they give correct values
from ignite.distributed.utils import _SerialModel, _set_model
_set_model(_SerialModel())
ws = dist.get_world_size()
rank = dist.get_rank()
_test_distrib_config(local_rank, backend=backend, ws=ws, true_device=device, rank=rank)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_methods_in_native_gloo_context(distributed_context_single_node_gloo):
local_rank = distributed_context_single_node_gloo["local_rank"]
device = torch.device(f"cuda:{local_rank}" if torch.cuda.is_available() else "cpu")
_test_idist_methods_in_native_context("gloo", device, local_rank)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_methods_in_native_nccl_context(distributed_context_single_node_nccl):
local_rank = distributed_context_single_node_nccl["local_rank"]
device = torch.device(f"cuda:{local_rank}")
_test_idist_methods_in_native_context("nccl", device, local_rank)
def _test_idist_methods_in_native_context_set_local_rank(backend, device, local_rank):
# We explicitly set _model as _SerialModel
# then call idist.* methods and check that they give correct values
from ignite.distributed.utils import _SerialModel, _set_model
_set_model(_SerialModel())
lrank = int(os.environ["LOCAL_RANK"])
del os.environ["LOCAL_RANK"]
ws = dist.get_world_size()
rank = dist.get_rank()
idist.set_local_rank(local_rank)
_test_distrib_config(local_rank=local_rank, backend=backend, ws=ws, true_device=device, rank=rank)
os.environ["LOCAL_RANK"] = str(lrank)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_methods_in_native_gloo_context_set_local_rank(distributed_context_single_node_gloo):
local_rank = distributed_context_single_node_gloo["local_rank"]
device = idist.device()
_test_idist_methods_in_native_context_set_local_rank("gloo", device, local_rank)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_methods_in_native_nccl_context_set_local_rank(distributed_context_single_node_nccl):
local_rank = distributed_context_single_node_nccl["local_rank"]
device = idist.device()
_test_idist_methods_in_native_context_set_local_rank("nccl", device, local_rank)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist__model_methods_nccl(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib__get_max_length(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist__model_methods_gloo(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib__get_max_length(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_all_reduce_nccl(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_all_reduce(device)
_test_distrib_all_reduce_group(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_all_reduce_gloo(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_all_reduce(device)
_test_distrib_all_reduce_group(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
@pytest.mark.skipif(Version(torch.__version__) < Version("1.7.0"), reason="dist.all_gather_object is not implemented")
def test_idist_all_gather_nccl(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_all_gather(device)
_test_distrib_all_gather_group(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(Version(torch.__version__) < Version("1.7.0"), reason="dist.all_gather_object is not implemented")
def test_idist_all_gather_gloo(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_all_gather(device)
_test_distrib_all_gather_group(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_broadcast_nccl(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_broadcast(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_broadcast_gloo(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_broadcast(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_barrier_nccl(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_barrier(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_barrier_gloo(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_barrier(device)
def _test_idist_methods_overhead(ok_factor):
import time
n = 100000
m = 5
t2 = 0.0
t1 = 0.0
for _ in range(m):
start = time.time()
for _ in range(n):
_ = dist.get_world_size()
_ = dist.get_rank()
elapsed = time.time() - start
t2 += elapsed / n / m
start = time.time()
for _ in range(n):
_ = idist.get_world_size()
_ = idist.get_rank()
elapsed = time.time() - start
t1 += elapsed / n / m
overhead_factor = t1 / t2
assert overhead_factor < ok_factor, f"{overhead_factor} vs {ok_factor} | {t2} vs {t1}"
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(
not torch.cuda.is_available(), reason="Do not want to run this test on GitHub or Travis, but only on CircleCI"
)
def test_idist_methods_overhead_gloo(distributed_context_single_node_gloo):
_test_idist_methods_overhead(2.5)
idist.sync()
from ignite.distributed.comp_models.native import _NativeDistModel
from ignite.distributed.utils import _model
assert isinstance(_model, _NativeDistModel)
_test_idist_methods_overhead(1.7)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_methods_overhead_nccl(distributed_context_single_node_nccl):
_test_idist_methods_overhead(2.5)
idist.sync()
from ignite.distributed.comp_models.native import _NativeDistModel
from ignite.distributed.utils import _model
assert isinstance(_model, _NativeDistModel)
_test_idist_methods_overhead(1.7)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_one_rank_only_gloo(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_one_rank_only(device=device)
_test_distrib_one_rank_only_with_engine(device=device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_one_rank_only_nccl(local_rank, distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_one_rank_only(device=device)
_test_distrib_one_rank_only_with_engine(device=device)
@pytest.mark.distributed
@pytest.mark.parametrize("rank", range(int(os.environ.get("WORLD_SIZE", 1))))
@pytest.mark.parametrize("local", [True, False])
def test_one_rank_first(distributed, get_rank_zero_dirname, rank, local):
def get_ds(file_path):
rank = idist.get_local_rank() if local else idist.get_rank()
if not file_path.exists():
with open(file_path, "w") as f:
f.write("read")
return f"{rank} not read"
else:
return f"{rank} read"
folder = get_rank_zero_dirname()
file_path = folder / "res.txt"
with idist.one_rank_first(rank, local=local):
x = get_ds(file_path)
output = idist.all_gather(x)
if local:
expected = [
f"{x} not read" if x == rank else f"{x} read" for x in range(idist.get_nproc_per_node())
] * idist.get_nnodes()
else:
expected = [f"{x} not read" if x == rank else f"{x} read" for x in range(idist.get_world_size())]
print("expected:", expected, idist.get_nnodes())
assert set(expected) == set(output)
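# idist.one_rank_first(rank, local=...) (exercised above) is a context manager that lets
# the given rank enter the block before the remaining ranks, which is why only that rank
# finds the marker file missing and creates it.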
@pytest.mark.distributed
def test_one_rank_first_asserts():
rank = 100
with pytest.raises(
ValueError, match=f"rank should be between 0 and {idist.get_world_size() - 1}, but given {rank}"
):
with idist.one_rank_first(rank):
pass
|
import pytest
import torch
from ignite.distributed.comp_models import has_hvd_support
if not has_hvd_support:
pytest.skip("Skip if no Horovod package", allow_module_level=True)
else:
import horovod.torch as hvd
from ignite.distributed.comp_models.horovod import _HorovodDistModel
@pytest.mark.distributed
def test__hvd_dist_model():
with pytest.raises(ValueError, match=r"Backend should be one of"):
_HorovodDistModel.create_from_backend("abc")
def _assert_model(model, true_conf):
if "cuda" in true_conf["device"]:
assert model.device() == torch.device(f"{true_conf['device']}:{true_conf['local_rank']}")
else:
assert model.device() == torch.device(true_conf["device"])
assert model.get_local_rank() == true_conf["local_rank"]
assert model.get_rank() == true_conf["rank"]
assert model.get_world_size() == true_conf["world_size"]
assert model.get_node_rank() == true_conf["node_index"]
assert model.get_nnodes() == true_conf["nnodes"]
assert model.get_nproc_per_node() == true_conf["nproc_per_node"]
def _test__hvd_dist_model_create_from_backend_no_dist(backend, true_device):
model = _HorovodDistModel.create_from_backend(backend=backend)
assert hvd.rank() > -1
_assert_model(
model,
{
"device": true_device,
"local_rank": 0,
"rank": 0,
"world_size": 1,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": 1,
},
)
model.finalize()
def _test__hvd_dist_model_create_from_backend_dist(backend, true_device):
model = _HorovodDistModel.create_from_backend(backend=backend)
assert hvd.rank() > -1
with pytest.raises(RuntimeError, match=r"Can not re-initialize Horovod if it is already initialized"):
_HorovodDistModel.create_from_backend(backend=backend)
_assert_model(
model,
{
"device": true_device,
"local_rank": hvd.local_rank(),
"rank": hvd.rank(),
"world_size": hvd.size(),
"node_index": 0,
"nnodes": 1,
"nproc_per_node": hvd.local_size(),
},
)
model.finalize()
def _test__hvd_dist_model_create_from_context_no_dist(true_backend, true_device):
with pytest.raises(ValueError, match=r"Horovod has not been initialized"):
hvd.rank()
assert _HorovodDistModel.create_from_context() is None
hvd.init()
true_conf = {
"device": true_device,
"local_rank": 0,
"rank": 0,
"world_size": 1,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": 1,
}
model = _HorovodDistModel.create_from_context()
assert model.backend() == true_backend
_assert_model(model, true_conf)
hvd.shutdown()
def _test__hvd_dist_model_create_from_context_dist(true_backend, true_device):
assert _HorovodDistModel.create_from_context() is None
hvd.init()
lrank = hvd.local_rank()
if torch.cuda.is_available():
torch.cuda.set_device(lrank)
true_conf = {
"device": true_device,
"local_rank": lrank,
"rank": hvd.rank(),
"world_size": hvd.size(),
"node_index": 0,
"nnodes": 1,
"nproc_per_node": hvd.local_size(),
}
model = _HorovodDistModel.create_from_context()
assert model.backend() == true_backend
_assert_model(model, true_conf)
hvd.shutdown()
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() > 0, reason="Skip if has GPU")
def test__hvd_dist_model_create_no_dist(gloo_hvd_executor):
gloo_hvd_executor(_test__hvd_dist_model_create_from_backend_no_dist, ("horovod", "cpu"), np=1)
gloo_hvd_executor(_test__hvd_dist_model_create_from_context_no_dist, ("horovod", "cpu"), np=1)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test__hvd_dist_model_create_no_dist_cuda(gloo_hvd_executor):
gloo_hvd_executor(_test__hvd_dist_model_create_from_backend_no_dist, ("horovod", "cuda"), np=1)
gloo_hvd_executor(_test__hvd_dist_model_create_from_context_no_dist, ("horovod", "cuda"), np=1)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() > 0, reason="Skip if has GPU")
def test__hvd_dist_model_create_dist_1(gloo_hvd_executor):
gloo_hvd_executor(_test__hvd_dist_model_create_from_backend_dist, ("horovod", "cpu"), np=4)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() > 0, reason="Skip if has GPU")
def test__hvd_dist_model_create_dist_2(gloo_hvd_executor):
gloo_hvd_executor(_test__hvd_dist_model_create_from_context_dist, ("horovod", "cpu"), np=4)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test__hvd_dist_model_create_dist_cuda_1(gloo_hvd_executor):
gloo_hvd_executor(_test__hvd_dist_model_create_from_backend_dist, ("horovod", "cuda"), np=torch.cuda.device_count())
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test__hvd_dist_model_create_dist_cuda_2(gloo_hvd_executor):
gloo_hvd_executor(_test__hvd_dist_model_create_from_context_dist, ("horovod", "cuda"), np=torch.cuda.device_count())
def _test__hvd_dist_model_warning_index_less_localrank():
assert torch.cuda.is_available()
assert _HorovodDistModel.create_from_context() is None
hvd.init()
# We deliberately incorrectly set cuda device to 0
torch.cuda.set_device(0)
model = _HorovodDistModel.create_from_context()
assert isinstance(model, _HorovodDistModel), f"{type(model)} vs _HorovodDistModel"
if hvd.local_rank() == 1:
with pytest.warns(UserWarning, match=r"Current device index is less than current local rank."):
model.device()
hvd.shutdown()
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Skip if less than 2 GPUs")
def test__hvd_dist_model_warning_index_less_localrank(gloo_hvd_executor):
gloo_hvd_executor(_test__hvd_dist_model_warning_index_less_localrank, (), np=torch.cuda.device_count())
def _test_dist_spawn_fn(local_rank, backend, world_size, device):
from ignite.distributed.utils import _model
assert hvd.rank() > -1
assert isinstance(_model, _HorovodDistModel), f"{type(_model)} vs _HorovodDistModel"
assert _model.get_local_rank() == local_rank
assert _model.get_world_size() == world_size
assert _model.backend() == backend
if "cuda" in device:
assert _model.device() == torch.device(f"{device}:{local_rank}")
else:
assert _model.device() == torch.device(device)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() > 0, reason="Skip if has GPU")
def test__hvd_dist_model_spawn():
num_workers_per_machine = 4
_HorovodDistModel.spawn(
_test_dist_spawn_fn,
args=("horovod", num_workers_per_machine, "cpu"),
kwargs_dict={},
nproc_per_node=num_workers_per_machine,
use_gloo=True,
)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test__hvd_dist_model_spawn_cuda():
num_workers_per_machine = torch.cuda.device_count()
_HorovodDistModel.spawn(
_test_dist_spawn_fn,
args=("horovod", num_workers_per_machine, "cuda"),
kwargs_dict={},
nproc_per_node=num_workers_per_machine,
use_gloo=True,
)
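# Hedged usage sketch, not collected by pytest: the tests above drive _HorovodDistModel
# directly; in user code the same backend is normally reached through the public
# ignite.distributed.Parallel context manager. `training_fn(local_rank)` is a
# hypothetical user-provided function, and Horovod built with gloo support is assumed.
def _example_parallel_horovod(training_fn, nproc_per_node=4):
    import ignite.distributed as idist

    with idist.Parallel(backend="horovod", nproc_per_node=nproc_per_node) as parallel:
        parallel.run(training_fn)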
|
import os
import pytest
import torch
from ignite.distributed.comp_models import has_xla_support
if not has_xla_support:
pytest.skip("Skip if no XLA support", allow_module_level=True)
else:
from ignite.distributed.comp_models.xla import _XlaDistModel
@pytest.mark.tpu
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test__xla_model():
available_backends = _XlaDistModel.available_backends
assert "xla-tpu" in available_backends
with pytest.raises(ValueError, match=r"Backend should be one of"):
_XlaDistModel.create_from_backend("abc")
def _test_xla_spawn_fn(local_rank, world_size, device):
from ignite.distributed.utils import _model
assert isinstance(_model, _XlaDistModel), f"{type(_model)} vs _XlaDistModel"
assert _model.get_local_rank() == local_rank
assert _model.get_world_size() == world_size
d = _model.device()
assert isinstance(d, torch.device) and d.type == device
assert _model.get_rank() == local_rank
assert _model.get_nproc_per_node() == world_size
assert _model.get_node_rank() == 0
assert _model.get_nnodes() == 1
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test__xla_dist_model_spawn_one_proc():
try:
_XlaDistModel.spawn(_test_xla_spawn_fn, args=(1, "xla"), kwargs_dict={}, nproc_per_node=1)
except SystemExit:
pass
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test__xla_dist_model_spawn_n_procs():
n = int(os.environ["NUM_TPU_WORKERS"])
try:
_XlaDistModel.spawn(_test_xla_spawn_fn, args=(n, "xla"), kwargs_dict={}, nproc_per_node=n)
except SystemExit:
pass
def _assert_model(model, true_conf):
assert model.device() == true_conf["device"]
assert model.get_local_rank() == true_conf["local_rank"]
assert model.get_rank() == true_conf["rank"]
assert model.get_world_size() == true_conf["world_size"]
assert model.get_node_rank() == true_conf["node_index"]
assert model.get_nnodes() == true_conf["nnodes"]
assert model.get_nproc_per_node() == true_conf["nproc_per_node"]
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test__xla_dist_model_create_from_backend():
# without spawn
model = _XlaDistModel.create_from_backend("xla-tpu")
import torch_xla.core.xla_model as xm
_assert_model(
model,
{
"device": xm.xla_device(),
"local_rank": 0,
"rank": 0,
"world_size": 1,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": 1,
},
)
model.finalize()
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test__xla_dist_model_create_from_context():
# without spawn
model = _XlaDistModel.create_from_context()
assert model.backend() == "xla-tpu"
import torch_xla.core.xla_model as xm
_assert_model(
model,
{
"device": xm.xla_device(),
"local_rank": 0,
"rank": 0,
"world_size": 1,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": 1,
},
)
def _test__xla_dist_model_create_from_context_in_child_proc(index):
model = _XlaDistModel.create_from_context()
assert model.backend() == "xla-tpu"
import torch_xla.core.xla_model as xm
_assert_model(
model,
{
"device": xm.xla_device(),
"local_rank": index,
"rank": xm.get_ordinal(),
"world_size": xm.xrt_world_size(),
"node_index": 0,
"nnodes": 1,
"nproc_per_node": xm.xrt_world_size(),
},
)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test__xla_dist_model_create_from_context_in_child_proc(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test__xla_dist_model_create_from_context_in_child_proc, args=(), nprocs=n)
def main_fold(fold):
import time
import torch.nn as nn
import torch.optim as optim
import torch_xla.core.xla_model as xm
from ignite.engine import Engine
device = xm.xla_device(fold)
comp_model = _XlaDistModel.create_from_context()
assert comp_model.device() == device
model = nn.Linear(100, 10)
model.to(device) # Move model before creating optimizer
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
def training_step(engine, _):
data = torch.rand(4, 100, device=device)
model.train()
data = data.to(device)
optimizer.zero_grad()
output = model(data)
loss = output.sum()
loss.backward()
xm.optimizer_step(optimizer, barrier=True)
return loss.item()
trainer = Engine(training_step)
    # This can cause a crash if the tensor is created on a device other than `device`
tensor = torch.tensor([fold + 1.0], dtype=torch.float).to(comp_model.device())
xm.all_reduce("max", [tensor])
time.sleep(0.01 * fold)
trainer.run([0] * 100, max_epochs=2)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test__xla_dist_model_run_parallel_n_threads_without_sync():
# tests issue : https://github.com/pytorch/ignite/issues/1096
import torch_xla.core.xla_model as xm
from joblib import delayed, Parallel
devices = xm.get_xla_supported_devices()
folds = 1
d = 0
if len(devices) > 5:
folds = 5
d = 1
Parallel(n_jobs=folds, backend="threading")(delayed(main_fold)(i + d) for i in range(folds))
|
import pytest
import torch
from ignite.distributed.comp_models.base import _SerialModel, ComputationModel
def test_serial_model():
_SerialModel.create_from_backend()
model = _SerialModel.create_from_context()
assert model.get_local_rank() == 0
assert model.get_rank() == 0
assert model.get_world_size() == 1
assert model.get_nproc_per_node() == 1
assert model.get_nnodes() == 1
assert model.get_node_rank() == 0
if torch.cuda.is_available():
assert model.device().type == "cuda"
else:
assert model.device().type == "cpu"
assert model.backend() is None
model.finalize()
with pytest.raises(NotImplementedError, match=r"Serial computation model does not implement spawn method"):
model.spawn()
model.all_reduce(1)
model.all_gather(1)
model.broadcast(1)
assert model._do_all_reduce(torch.tensor(1)) == torch.tensor(1)
assert model._do_all_gather(torch.tensor(1)) == torch.tensor(1)
assert model._do_broadcast(torch.tensor(1), src=0) == torch.tensor(1)
model.barrier()
def test__encode_str__decode_str():
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
s = "test-abcedfg"
encoded_s = ComputationModel._encode_str(s, device, 1024)
assert isinstance(encoded_s, torch.Tensor) and encoded_s.shape == (1, 1025)
decoded_s = ComputationModel._decode_str(encoded_s)
assert isinstance(decoded_s, list) and len(decoded_s) == 1
assert decoded_s[0] == s
def test__encode_input_data():
encoded_msg = ComputationModel._encode_input_data(None, is_src=True)
assert encoded_msg == [-1] * 512
encoded_msg = ComputationModel._encode_input_data(12.0, is_src=True)
assert encoded_msg == [1] + [-1] * 511
encoded_msg = ComputationModel._encode_input_data("abc", is_src=True)
assert encoded_msg == [2] + [-1] * 511
t = torch.rand(2, 512, 32, 32, 64)
encoded_msg = ComputationModel._encode_input_data(t, is_src=True)
dtype_str = str(t.dtype)
true_msg = [0, 5, 2, 512, 32, 32, 64, len(dtype_str), *list(bytearray(dtype_str, "utf-8"))]
assert encoded_msg == true_msg + [-1] * (512 - len(true_msg))
t = torch.randint(-1235, 1233, size=(2, 512, 32, 32, 64))
encoded_msg = ComputationModel._encode_input_data(t, is_src=True)
dtype_str = str(t.dtype)
true_msg = [0, 5, 2, 512, 32, 32, 64, len(dtype_str), *list(bytearray(dtype_str, "utf-8"))]
assert encoded_msg == true_msg + [-1] * (512 - len(true_msg))
t = torch.tensor(12)
encoded_msg = ComputationModel._encode_input_data(t, is_src=True)
dtype_str = str(t.dtype)
true_msg = [0, 0, len(dtype_str), *list(bytearray(dtype_str, "utf-8"))]
assert encoded_msg == true_msg + [-1] * (512 - len(true_msg))
for t in [None, "abc", torch.rand(2, 512, 32, 32, 64), 12.34, object()]:
encoded_msg = ComputationModel._encode_input_data(t, is_src=False)
assert encoded_msg == [-1] * 512
def test__decode_as_placeholder():
device = torch.device("cpu")
encoded_msg = [-1] * 512
encoded_msg[0] = 1
res = ComputationModel._decode_as_placeholder(encoded_msg, device)
assert isinstance(res, float) and res == 0.0
encoded_msg = [-1] * 512
encoded_msg[0] = 2
res = ComputationModel._decode_as_placeholder(encoded_msg, device)
assert isinstance(res, str) and res == ""
encoded_msg = [-1] * 512
encoded_msg[0] = 0
encoded_msg[1 : 1 + 7] = [6, 2, 3, 4, 5, 6, 7]
dtype_str = "torch.int64"
payload = [len(dtype_str), *list(bytearray(dtype_str, "utf-8"))]
encoded_msg[1 + 7 : 1 + 7 + len(payload)] = payload
res = ComputationModel._decode_as_placeholder(encoded_msg, device)
assert isinstance(res, torch.Tensor) and res.dtype == torch.int64 and res.shape == (2, 3, 4, 5, 6, 7)
encoded_msg = [-1] * 512
with pytest.raises(RuntimeError, match="Internal error: unhandled dtype"):
ComputationModel._decode_as_placeholder(encoded_msg, device)
t = torch.rand(2, 512, 32, 32, 64)
encoded_msg = ComputationModel._encode_input_data(t, True)
res = ComputationModel._decode_as_placeholder(encoded_msg, device)
assert isinstance(res, torch.Tensor) and res.dtype == t.dtype and res.shape == t.shape
t = torch.tensor(12)
encoded_msg = ComputationModel._encode_input_data(t, True)
res = ComputationModel._decode_as_placeholder(encoded_msg, device)
assert isinstance(res, torch.Tensor) and res.dtype == t.dtype and res.shape == t.shape
def test__setup_placeholder():
device = torch.device("cpu")
from ignite.distributed.utils import _model
for t in [torch.rand(2, 3, 4), "abc", 123.45]:
data = _model._setup_placeholder(t, device, True)
assert isinstance(data, type(t))
if isinstance(data, torch.Tensor):
assert (data == t).all()
else:
assert data == t
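# Hedged reference sketch, not used by the tests: the assertions in
# test__encode_input_data above imply a fixed 512-slot integer header for the
# is_src=True case (None -> all -1, float -> [1, -1, ...], str -> [2, -1, ...],
# tensor -> [0, ndim, *shape, len(dtype_str), *utf-8 bytes of dtype_str, -1, ...]).
# This standalone re-implementation only mirrors that observed layout for illustration;
# the authoritative code path is ComputationModel._encode_input_data.
def _example_encode_header(data):
    import torch

    msg = [-1] * 512
    if isinstance(data, torch.Tensor):
        dtype_str = str(data.dtype)
        payload = [0, data.ndim, *data.shape, len(dtype_str), *bytearray(dtype_str, "utf-8")]
        msg[: len(payload)] = payload
    elif isinstance(data, float):
        msg[0] = 1
    elif isinstance(data, str):
        msg[0] = 2
    return msg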
|
import os
import pytest
import torch
import torch.distributed as dist
from ignite.distributed.comp_models import has_native_dist_support
if not has_native_dist_support:
pytest.skip("Skip if no native dist support", allow_module_level=True)
else:
from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env
# tests from https://github.com/LLNL/py-hostlist/blob/master/hostlist/unittest_hostlist.py
@pytest.mark.parametrize(
"hostlist, expected",
[
("localhost", "localhost"),
("compute!:b24_[1-2].r", "compute!:b24_1.r,compute!:b24_2.r"),
("quartz[4-8]", "quartz4,quartz5,quartz6,quartz7,quartz8"),
("c1001a-[11,17]", "c1001a-11,c1001a-17"),
("c1001a-s[11,17]", "c1001a-s11,c1001a-s17"),
("c1009a-s17,c1010a-s11", "c1009a-s17,c1010a-s11"),
(
"gpu-compute-on-demand-dy-g4dnxlarge-[1-4]",
"gpu-compute-on-demand-dy-g4dnxlarge-1,"
"gpu-compute-on-demand-dy-g4dnxlarge-2,"
"gpu-compute-on-demand-dy-g4dnxlarge-3,"
"gpu-compute-on-demand-dy-g4dnxlarge-4",
),
(
"node[18-19,1-16,21-22]",
"node1,node2,node3,node4,node5,"
"node6,node7,node8,node9,node10,"
"node11,node12,node13,node14,node15,"
"node16,node18,node19,node21,node22",
),
(
"node[4-8,12,16-20,22,24-26]",
"node4,node5,node6,node7,node8,"
"node12,node16,node17,node18,"
"node19,node20,node22,node24,"
"node25,node26",
),
("machine2-[02-4]vm1", "machine2-02vm1,machine2-03vm1,machine2-04vm1"),
(
"machine2-[02-3]vm1, machine4-[0003-5].vml2",
"machine2-02vm1,machine2-03vm1,machine4-0003.vml2,machine4-0004.vml2,machine4-0005.vml2",
),
("machine2-[009-11]vm1", "machine2-009vm1,machine2-010vm1,machine2-011vm1"),
("node[1,2,3]", "node1,node2,node3"),
(
"compute-b24-[1-3,5-9], compute-b25-[1,4,8],compute-b25-[2-9,13]",
"compute-b24-1,compute-b24-2,compute-b24-3,compute-b24-5,compute-b24-6,"
"compute-b24-7,compute-b24-8,compute-b24-9,compute-b25-1,compute-b25-4,"
"compute-b25-8,compute-b25-2,compute-b25-3,compute-b25-4,compute-b25-5,"
"compute-b25-6,compute-b25-7,compute-b25-8,compute-b25-9,compute-b25-13",
),
],
)
def test_expand_hostlist(hostlist, expected):
assert _expand_hostlist(hostlist) == expected.split(",")
def test_expand_hostlist_invalid():
with pytest.raises(ValueError, match=r"hostlist invalid"):
_expand_hostlist("invalid[]")
@pytest.mark.distributed
def test__native_dist_model():
available_backends = _NativeDistModel.available_backends
if dist.is_nccl_available():
assert "nccl" in available_backends
else:
assert "nccl" not in available_backends
if dist.is_gloo_available():
assert "gloo" in available_backends
else:
assert "gloo" not in available_backends
if dist.is_mpi_available():
assert "mpi" in available_backends
else:
assert "mpi" not in available_backends
with pytest.raises(ValueError, match=r"Backend should be one of"):
_NativeDistModel.create_from_backend("abc")
@pytest.mark.distributed
@pytest.mark.skipif(not dist.is_nccl_available(), reason="Skip if nccl not available")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test__native_nccl_but_no_gpu(mock_gpu_is_not_available):
with pytest.raises(RuntimeError, match=r"Nccl backend is required but no cuda capable devices"):
_NativeDistModel(backend="nccl")
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test__native_dist_model_create_from_backend_bad_config():
import os
from datetime import timedelta
os.environ["RANK"] = "1"
with pytest.raises(RuntimeError, match=r"PyTorch distributed configuration should define env variables"):
_NativeDistModel.create_from_backend(backend="gloo", timeout=timedelta(seconds=10))
del os.environ["RANK"]
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test__native_dist_model_create_from_backend_bad_slurm_config():
import os
from datetime import timedelta
os.environ["SLURM_JOB_ID"] = "1"
with pytest.raises(RuntimeError, match=r"SLURM distributed configuration is missing"):
_NativeDistModel.create_from_backend(backend="gloo", timeout=timedelta(seconds=10))
with pytest.raises(ValueError, match=r"Arguments rank and world_size should not be specified with SLURM"):
_NativeDistModel.create_from_backend(
backend="gloo", timeout=timedelta(seconds=10), rank=1, init_method="", world_size=1
)
os.environ["SLURM_PROCID"] = "0"
os.environ["SLURM_LOCALID"] = "0"
os.environ["SLURM_NTASKS"] = "1"
os.environ["SLURM_JOB_NODELIST"] = "localhost"
os.environ["SLURM_JOB_NUM_NODES"] = "1"
os.environ["RANK"] = "1"
with pytest.warns(UserWarning, match=r"We detected the following env variables"):
model = _NativeDistModel.create_from_backend(backend="gloo", timeout=timedelta(seconds=10))
model.finalize()
del os.environ["SLURM_JOB_ID"]
del os.environ["SLURM_PROCID"]
del os.environ["SLURM_LOCALID"]
del os.environ["SLURM_NTASKS"]
del os.environ["SLURM_JOB_NODELIST"]
del os.environ["SLURM_JOB_NUM_NODES"]
del os.environ["RANK"]
def _assert_model(model, true_conf):
assert model.device() == torch.device(true_conf["device"])
assert model.get_local_rank() == true_conf["local_rank"]
assert model.get_rank() == true_conf["rank"]
assert model.get_world_size() == true_conf["world_size"]
assert model.get_node_rank() == true_conf["node_index"]
assert model.get_nnodes() == true_conf["nnodes"]
assert model.get_nproc_per_node() == true_conf["nproc_per_node"]
def _test__native_dist_model_create_from_backend_no_dist(backend, true_device):
from datetime import timedelta
model = _NativeDistModel.create_from_backend(backend=backend, timeout=timedelta(seconds=20))
assert dist.is_available() and dist.is_initialized()
assert dist.get_backend() == backend
_assert_model(
model,
{
"device": true_device,
"local_rank": 0,
"rank": 0,
"world_size": 1,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": 1,
},
)
model.finalize()
def _test__native_dist_model_create_from_backend_dist(init_method, local_rank, rank, world_size, backend, true_device):
import os
from datetime import timedelta
timeout = timedelta(seconds=20)
os.environ["RANK"] = f"{rank}"
assert "MASTER_ADDR" not in os.environ
assert "MASTER_PORT" not in os.environ
model = _NativeDistModel.create_from_backend(backend=backend, timeout=timeout, init_method=init_method)
assert dist.is_available() and dist.is_initialized()
assert dist.get_backend() == backend
with pytest.raises(RuntimeError, match=r"Can not create new distributed process group if default one is"):
_NativeDistModel.create_from_backend(backend=backend, timeout=timeout)
_assert_model(
model,
{
"device": true_device,
"local_rank": local_rank,
"rank": rank,
"world_size": world_size,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": world_size,
},
)
if init_method is None:
assert model._init_method == "env://"
else:
assert model._init_method == init_method
model.finalize()
del os.environ["RANK"]
assert "MASTER_ADDR" not in os.environ
assert "MASTER_PORT" not in os.environ
assert "RANK" not in os.environ
def _test__native_dist_model_create_from_backend_slurm(local_rank, rank, world_size, backend, true_device):
import os
from datetime import timedelta
timeout = timedelta(seconds=20)
assert "MASTER_ADDR" not in os.environ
assert "MASTER_PORT" not in os.environ
del os.environ["WORLD_SIZE"]
del os.environ["LOCAL_RANK"]
os.environ["SLURM_JOB_ID"] = "15000"
os.environ["SLURM_PROCID"] = str(rank)
os.environ["SLURM_LOCALID"] = str(local_rank)
os.environ["SLURM_NTASKS"] = str(world_size)
os.environ["SLURM_JOB_NODELIST"] = "localhost"
os.environ["SLURM_JOB_NUM_NODES"] = "1"
model = _NativeDistModel.create_from_backend(backend=backend, timeout=timeout)
assert dist.is_available() and dist.is_initialized()
assert dist.get_backend() == backend
with pytest.raises(RuntimeError, match=r"Can not create new distributed process group if default one is"):
_NativeDistModel.create_from_backend(backend=backend, timeout=timeout)
_assert_model(
model,
{
"device": true_device,
"local_rank": local_rank,
"rank": rank,
"world_size": world_size,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": world_size,
},
)
model.finalize()
del os.environ["SLURM_JOB_ID"]
del os.environ["SLURM_PROCID"]
del os.environ["SLURM_LOCALID"]
del os.environ["SLURM_NTASKS"]
del os.environ["SLURM_JOB_NODELIST"]
del os.environ["SLURM_JOB_NUM_NODES"]
assert "MASTER_ADDR" not in os.environ
assert "MASTER_PORT" not in os.environ
assert "RANK" not in os.environ
os.environ["WORLD_SIZE"] = str(world_size)
os.environ["LOCAL_RANK"] = str(local_rank)
def _test__native_dist_model_create_from_context_no_local_rank():
if "LOCAL_RANK" in os.environ:
del os.environ["LOCAL_RANK"]
from ignite.distributed.comp_models.base import ComputationModel
if ComputationModel._ext_local_rank is not None:
ComputationModel._ext_local_rank = None
with pytest.warns(UserWarning, match=r"Local rank information for native distributed setting will be initialized"):
_NativeDistModel.create_from_context()
def _test__native_dist_model_create_from_context_env_local_rank(true_conf):
import os
remove_lrank = False
if "LOCAL_RANK" not in os.environ:
os.environ["LOCAL_RANK"] = str(true_conf["local_rank"])
remove_lrank = True
model = _NativeDistModel.create_from_context()
_assert_model(model, true_conf)
if remove_lrank:
del os.environ["LOCAL_RANK"]
def _test__native_dist_model_create_from_context_set_local_rank(true_conf):
from ignite.distributed.comp_models.base import ComputationModel
lrank = None
if "LOCAL_RANK" in os.environ:
lrank = os.environ["LOCAL_RANK"]
del os.environ["LOCAL_RANK"]
ComputationModel._ext_local_rank = true_conf["local_rank"]
model = _NativeDistModel.create_from_context()
_assert_model(model, true_conf)
ComputationModel._ext_local_rank = None
if lrank is not None:
os.environ["LOCAL_RANK"] = lrank
def _test__native_dist_model_create_from_context_no_dist(true_backend, true_device):
assert _NativeDistModel.create_from_context() is None
dist.init_process_group(true_backend, "tcp://0.0.0.0:2222", world_size=1, rank=0)
dist.barrier()
_test__native_dist_model_create_from_context_no_local_rank()
true_conf = {
"device": true_device,
"local_rank": 0,
"rank": 0,
"world_size": 1,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": 1,
}
_test__native_dist_model_create_from_context_env_local_rank(true_conf)
_test__native_dist_model_create_from_context_set_local_rank(true_conf)
dist.destroy_process_group()
def _test__native_dist_model_create_from_context_dist(local_rank, rank, world_size, true_backend, true_device):
assert _NativeDistModel.create_from_context() is None
dist.init_process_group(true_backend, "tcp://0.0.0.0:2222", world_size=world_size, rank=rank)
dist.barrier()
if torch.cuda.is_available():
torch.cuda.set_device(local_rank)
true_conf = {
"device": true_device,
"local_rank": local_rank,
"rank": rank,
"world_size": world_size,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": world_size,
}
_test__native_dist_model_create_from_context_env_local_rank(true_conf)
_test__native_dist_model_create_from_context_set_local_rank(true_conf)
dist.destroy_process_group()
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Should be no-dist config")
def test__native_dist_model_create_no_dist_gloo(clean_env):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
_test__native_dist_model_create_from_backend_no_dist("gloo", device)
_test__native_dist_model_create_from_context_no_dist("gloo", device)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Should be no-dist config")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test__native_dist_model_create_no_dist_nccl(clean_env):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
_test__native_dist_model_create_from_backend_no_dist("nccl", device)
_test__native_dist_model_create_from_context_no_dist("nccl", device)
@pytest.mark.distributed
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:22334", "FILE"])
def test__native_dist_model_create_dist_gloo_1(init_method, get_fixed_dirname, local_rank, world_size):
if init_method == "FILE":
init_method = f"file://{get_fixed_dirname('native_dist_model_create_dist_gloo_1')}/shared"
device = torch.device(f"cuda:{local_rank}" if torch.cuda.is_available() else "cpu")
_test__native_dist_model_create_from_backend_dist(init_method, local_rank, local_rank, world_size, "gloo", device)
if init_method is None:
_test__native_dist_model_create_from_backend_slurm(local_rank, local_rank, world_size, "gloo", device)
@pytest.mark.distributed
def test__native_dist_model_create_dist_gloo_2(local_rank, world_size):
device = torch.device(f"cuda:{local_rank}" if torch.cuda.is_available() else "cpu")
_test__native_dist_model_create_from_context_dist(local_rank, local_rank, world_size, "gloo", device)
_test__native_dist_model_create_from_backend_slurm(local_rank, local_rank, world_size, "gloo", device)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:22334", "FILE"])
def test__native_dist_model_create_dist_nccl_1(init_method, get_fixed_dirname, local_rank, world_size):
if init_method == "FILE":
init_method = f"file://{get_fixed_dirname('native_dist_model_create_dist_nccl_1')}/shared"
_test__native_dist_model_create_from_backend_dist(
init_method, local_rank, local_rank, world_size, "nccl", f"cuda:{local_rank}"
)
if init_method is None:
_test__native_dist_model_create_from_backend_slurm(
local_rank, local_rank, world_size, "nccl", f"cuda:{local_rank}"
)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test__native_dist_model_create_dist_nccl_2(local_rank, world_size):
_test__native_dist_model_create_from_context_dist(local_rank, local_rank, world_size, "nccl", f"cuda:{local_rank}")
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Skip if less than 2 GPUs")
def test__native_dist_model_warning_index_less_localrank(local_rank, world_size):
assert _NativeDistModel.create_from_context() is None
dist.init_process_group("nccl", "tcp://0.0.0.0:2222", world_size=world_size, rank=local_rank)
dist.barrier()
# We deliberately incorrectly set cuda device to 0
torch.cuda.set_device(0)
model = _NativeDistModel.create_from_context()
assert isinstance(model, _NativeDistModel), f"{type(model)} vs _NativeDistModel"
if local_rank == 1:
with pytest.warns(UserWarning, match=r"Current device index is less than current local rank."):
model.device()
dist.destroy_process_group()
def _test_dist_spawn_fn(local_rank, backend, world_size, device, **kwargs):
from ignite.distributed.utils import _model
assert dist.is_available() and dist.is_initialized()
assert dist.get_backend() == backend
assert isinstance(_model, _NativeDistModel), f"{type(_model)} vs _NativeDistModel"
assert _model.get_local_rank() == local_rank
assert _model.get_world_size() == world_size
assert _model.device().type == torch.device(device).type
if "master_addr" in kwargs:
assert os.environ["MASTER_ADDR"] == kwargs["master_addr"]
if "master_port" in kwargs:
assert os.environ["MASTER_PORT"] == str(kwargs["master_port"])
def _test__native_dist_model_spawn(backend, num_workers_per_machine, device, init_method=None, **spawn_kwargs):
kwargs_dict = {}
for key in ["master_addr", "master_port"]:
if key in spawn_kwargs:
kwargs_dict[key] = spawn_kwargs[key]
_NativeDistModel.spawn(
_test_dist_spawn_fn,
args=(backend, num_workers_per_machine, device),
kwargs_dict=kwargs_dict,
backend=backend,
nproc_per_node=num_workers_per_machine,
init_method=init_method,
**spawn_kwargs,
)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.parametrize("init_method", [None, "CUSTOM_ADDR_PORT", "env://", "tcp://0.0.0.0:22334", "FILE"])
def test__native_dist_model_spawn_gloo(init_method, dirname):
spawn_kwargs = {}
if init_method == "FILE":
init_method = f"file://{dirname}/shared"
elif init_method == "CUSTOM_ADDR_PORT":
init_method = None
spawn_kwargs["master_addr"] = "0.0.0.0"
spawn_kwargs["master_port"] = 2345
nproc = torch.cuda.device_count() if torch.cuda.is_available() else 4
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
_test__native_dist_model_spawn(
"gloo", num_workers_per_machine=nproc, device=device, init_method=init_method, **spawn_kwargs
)
if device.type == "cpu":
spawn_kwargs["start_method"] = "fork"
_test__native_dist_model_spawn(
"gloo", num_workers_per_machine=nproc, device=device, init_method=init_method, **spawn_kwargs
)
if init_method not in [None, "env://"]:
with pytest.raises(ValueError, match=r"master_addr should be None if init_method is provided"):
_test__native_dist_model_spawn(
"gloo", num_workers_per_machine=nproc, device=device, init_method=init_method, master_addr="abc"
)
with pytest.raises(ValueError, match=r"master_port should be None if init_method is provided"):
_test__native_dist_model_spawn(
"gloo", num_workers_per_machine=nproc, device=device, init_method=init_method, master_port=123
)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
@pytest.mark.parametrize("init_method", [None, "CUSTOM_ADDR_PORT", "tcp://0.0.0.0:22334", "FILE"])
def test__native_dist_model_spawn_nccl(init_method, dirname):
spawn_kwargs = {}
if init_method == "FILE":
init_method = f"file://{dirname}/shared"
elif init_method == "CUSTOM_ADDR_PORT":
init_method = None
spawn_kwargs["master_addr"] = "0.0.0.0"
spawn_kwargs["master_port"] = 2345
nproc = torch.cuda.device_count()
_test__native_dist_model_spawn(
"nccl", num_workers_per_machine=nproc, device="cuda", init_method=init_method, **spawn_kwargs
)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test__native_dist_model_init_method_is_none(world_size):
with pytest.raises(ValueError, match=r"Arguments rank and world_size should be None"):
_NativeDistModel.create_from_backend(backend="gloo", world_size=world_size)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test__native_dist_model_init_method_is_not_none(world_size, local_rank, get_fixed_dirname):
init_method = f"file://{get_fixed_dirname('native_dist_model_init_method_is_not_none')}/shared"
with pytest.raises(ValueError, match=r"Both rank and world_size should be provided"):
_NativeDistModel.create_from_backend(backend="gloo", world_size=world_size, init_method=init_method)
with pytest.raises(ValueError, match=r"Both rank and world_size should be provided"):
_NativeDistModel.create_from_backend(backend="gloo", rank=local_rank, init_method=init_method)
@pytest.mark.parametrize(
"environ, expected",
[
# fmt: off
# usual SLURM env
(
{
"SLURM_PROCID": "1", "SLURM_LOCALID": "1", "SLURM_NTASKS": "2", "SLURM_JOB_NUM_NODES": "1",
"SLURM_JOB_NODELIST": "c1", "SLURM_JOB_ID": "12345",
},
[1, 1, 2, "c1", 17345]
),
# usual SLURM env mnode
(
{
"SLURM_PROCID": "5", "SLURM_LOCALID": "1", "SLURM_NTASKS": "8", "SLURM_JOB_NUM_NODES": "2",
"SLURM_JOB_NODELIST": "c1, c2", "SLURM_JOB_ID": "12345",
},
[5, 1, 8, "c1", 17345]
),
# usual SLURM env 1 node, 1 task + torch.distributed.launch
(
{
"SLURM_PROCID": "0", "SLURM_LOCALID": "0", "SLURM_NTASKS": "1", "SLURM_JOB_NUM_NODES": "1",
"SLURM_JOB_NODELIST": "c1", "SLURM_JOB_ID": "12345",
"MASTER_ADDR": "127.0.0.1", "MASTER_PORT": "2233", "RANK": "2", "LOCAL_RANK": "2", "WORLD_SIZE": "8",
},
[2, 2, 8, "127.0.0.1", 2233]
),
# usual SLURM env + enroot's pytorch hook
(
{
"SLURM_PROCID": "3", "SLURM_LOCALID": "3", "SLURM_NTASKS": "4", "SLURM_JOB_NUM_NODES": "1",
"SLURM_JOB_NODELIST": "c1", "SLURM_JOB_ID": "12345",
"MASTER_ADDR": "c1", "MASTER_PORT": "12233", "RANK": "3", "LOCAL_RANK": "3", "WORLD_SIZE": "4",
},
[3, 3, 4, "c1", 12233]
),
# usual SLURM env mnode + enroot's pytorch hook
(
{
"SLURM_PROCID": "3", "SLURM_LOCALID": "1", "SLURM_NTASKS": "4", "SLURM_JOB_NUM_NODES": "2",
"SLURM_JOB_NODELIST": "c1, c2", "SLURM_JOB_ID": "12345",
"MASTER_ADDR": "c1", "MASTER_PORT": "12233", "RANK": "3", "LOCAL_RANK": "1", "WORLD_SIZE": "4"
},
[3, 1, 4, "c1", 12233]
),
# fmt: on
],
)
def test__setup_ddp_vars_from_slurm_env(environ, expected):
ddp_keys = ["RANK", "LOCAL_RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT"]
ddp_vars = _setup_ddp_vars_from_slurm_env(environ)
for key, value in zip(ddp_keys, expected):
assert key in ddp_vars
assert ddp_vars[key] == value
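# Hedged summary, not executed: the parametrized cases above pin down the mapping done by
# _setup_ddp_vars_from_slurm_env. When no torch.distributed variables are set it derives
#   RANK        <- SLURM_PROCID
#   LOCAL_RANK  <- SLURM_LOCALID
#   WORLD_SIZE  <- SLURM_NTASKS
#   MASTER_ADDR <- first hostname expanded from SLURM_JOB_NODELIST
#   MASTER_PORT <- a port computed from SLURM_JOB_ID (job "12345" maps to 17345 above)
# and when MASTER_ADDR/MASTER_PORT/RANK/LOCAL_RANK/WORLD_SIZE already exist (torchrun or
# the enroot PyTorch hook), those pre-existing values win, as in the last three cases.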
def test__setup_ddp_vars_from_slurm_env_bad_configs():
with pytest.raises(
RuntimeError, match=r"Environment variable defined for PyTorch Distributed context is inconsistent"
):
environ = {
"SLURM_PROCID": "3",
"SLURM_LOCALID": "1",
"SLURM_NTASKS": "4",
"SLURM_JOB_NUM_NODES": "2",
"SLURM_JOB_NODELIST": "c1, c2",
"SLURM_JOB_ID": "12345",
"MASTER_ADDR": "another-addr",
"MASTER_PORT": "12233",
"RANK": "1",
"LOCAL_RANK": "1",
"WORLD_SIZE": "2",
}
_setup_ddp_vars_from_slurm_env(environ)
with pytest.raises(
RuntimeError, match=r"Environment variable defined for PyTorch Distributed context is inconsistent"
):
environ = {
"SLURM_PROCID": "1",
"SLURM_LOCALID": "1",
"SLURM_NTASKS": "4",
"SLURM_JOB_NUM_NODES": "1",
"SLURM_JOB_NODELIST": "c1",
"SLURM_JOB_ID": "12345",
"MASTER_ADDR": "another-addr",
"MASTER_PORT": "12233",
"RANK": "1",
"LOCAL_RANK": "1",
"WORLD_SIZE": "2",
}
_setup_ddp_vars_from_slurm_env(environ)
with pytest.warns(UserWarning, match=r"We detected the following env variables"):
environ = {
"SLURM_PROCID": "3",
"SLURM_LOCALID": "1",
"SLURM_NTASKS": "4",
"SLURM_JOB_NUM_NODES": "2",
"SLURM_JOB_NODELIST": "c1, c2",
"SLURM_JOB_ID": "12345",
"RANK": "1",
"LOCAL_RANK": "1",
"WORLD_SIZE": "2",
}
_setup_ddp_vars_from_slurm_env(environ)
with pytest.raises(RuntimeError, match=r"No hostname detected in SLURM_JOB_NODELIST by ignite"):
environ = {
"SLURM_PROCID": "1",
"SLURM_LOCALID": "1",
"SLURM_NTASKS": "4",
"SLURM_JOB_NUM_NODES": "1",
"SLURM_JOB_NODELIST": "[]",
"SLURM_JOB_ID": "12345",
}
_setup_ddp_vars_from_slurm_env(environ)
|
import random
from pathlib import Path
import pytest
@pytest.fixture
def no_site_packages(request):
import sys
modules = {}
for k in sys.modules:
if request.param in k:
modules[k] = sys.modules[k]
for k in modules:
del sys.modules[k]
prev_path = list(sys.path)
sys.path = [p for p in sys.path if "site-packages" not in p]
yield "no_site_packages"
sys.path = prev_path
for k in modules:
sys.modules[k] = modules[k]
@pytest.fixture()
def visdom_offline_logfile(dirname):
log_file = dirname / "logs.visdom"
yield log_file
vd_hostname = None
vd_port = None
vd_server_process = None
@pytest.fixture()
def visdom_server():
# Start Visdom server once and stop it with visdom_server_stop
global vd_hostname, vd_port, vd_server_process
if vd_server_process is None:
import subprocess
import time
from visdom import Visdom
from visdom.server.build import download_scripts
(Path.home() / ".visdom").mkdir(exist_ok=True)
download_scripts()
vd_hostname = "localhost"
vd_port = random.randint(8089, 8887)
try:
vis = Visdom(server=vd_hostname, port=vd_port, raise_exceptions=True)
except ConnectionError:
pass
vd_server_process = subprocess.Popen(
["python", "-m", "visdom.server", "--hostname", vd_hostname, "-port", str(vd_port)]
)
time.sleep(5)
vis = Visdom(server=vd_hostname, port=vd_port)
assert vis.check_connection()
vis.close()
yield (vd_hostname, vd_port)
@pytest.fixture()
def visdom_server_stop():
yield None
import time
vd_server_process.kill()
time.sleep(2)
|
# coding: utf-8
|
from unittest.mock import Mock, patch
import pytest
import torch
from ignite.contrib.metrics import GpuInfo
from ignite.engine import Engine, State
def test_no_pynvml_package():
with patch.dict("sys.modules", {"pynvml.smi": None}):
with pytest.raises(ModuleNotFoundError, match="This contrib module requires pynvml to be installed."):
GpuInfo()
@pytest.mark.skipif(torch.cuda.is_available(), reason="Skip if GPU")
def test_no_gpu():
with pytest.raises(RuntimeError, match="This contrib module requires available GPU"):
GpuInfo()
def _test_gpu_info(device="cpu"):
gpu_info = GpuInfo()
# increase code cov
gpu_info.reset()
gpu_info.update(None)
t = torch.rand(4, 10, 100, 100).to(device)
data = gpu_info.compute()
assert len(data) > 0
assert "fb_memory_usage" in data[0]
mem_report = data[0]["fb_memory_usage"]
assert "used" in mem_report and "total" in mem_report
assert mem_report["total"] > 0.0
assert mem_report["used"] > t.shape[0] * t.shape[1] * t.shape[2] * t.shape[3] / 1024.0 / 1024.0
assert "utilization" in data[0]
util_report = data[0]["utilization"]
assert "gpu_util" in util_report
# with Engine
engine = Engine(lambda engine, batch: 0.0)
engine.state = State(metrics={})
gpu_info.completed(engine, name="gpu")
assert "gpu:0 mem(%)" in engine.state.metrics
assert isinstance(engine.state.metrics["gpu:0 mem(%)"], int)
assert int(mem_report["used"] * 100.0 / mem_report["total"]) == engine.state.metrics["gpu:0 mem(%)"]
if util_report["gpu_util"] != "N/A":
assert "gpu:0 util(%)" in engine.state.metrics
assert isinstance(engine.state.metrics["gpu:0 util(%)"], int)
assert int(util_report["gpu_util"]) == engine.state.metrics["gpu:0 util(%)"]
else:
assert "gpu:0 util(%)" not in engine.state.metrics
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_gpu_info_on_cuda():
_test_gpu_info(device="cuda")
query_resp = None
@pytest.fixture
def mock_pynvml_module():
with patch.dict(
"sys.modules",
{
"pynvml": Mock(name="pynvml"),
"pynvml.smi": Mock(name="pynvml.smi"),
"pynvml.smi.nvidia_smi": Mock(name="pynvml.smi.nvidia_smi"),
},
):
import pynvml
from pynvml.smi import nvidia_smi
def query(*args, **kwargs):
return query_resp
def getInstance():
nvsmi = Mock()
nvsmi.DeviceQuery = Mock(side_effect=query)
return nvsmi
nvidia_smi.getInstance = Mock(side_effect=getInstance)
yield pynvml
@pytest.fixture
def mock_gpu_is_available():
with patch("ignite.contrib.metrics.gpu_info.torch.cuda") as mock_cuda:
mock_cuda.is_available.return_value = True
yield mock_cuda
@pytest.mark.skipif(torch.cuda.is_available(), reason="No need to mock if has GPU")
def test_gpu_info_mock(mock_pynvml_module, mock_gpu_is_available):
global query_resp
query_resp = {"gpu": [{"fb_memory_usage": {"used": 100.0, "total": 11000.0}, "utilization": {"gpu_util": 50.0}}]}
assert torch.cuda.is_available()
_test_gpu_info()
# Tests https://github.com/pytorch/ignite/issues/1040
query_resp = {"gpu": [{"fb_memory_usage": {"used": 100.0, "total": 11000.0}, "utilization": {"gpu_util": "N/A"}}]}
_test_gpu_info()
def _test_with_custom_query(resp, warn_msg, check_compute=False):
from pynvml.smi import nvidia_smi
def query(*args, **kwargs):
return resp
def getInstance():
nvsmi = Mock()
nvsmi.DeviceQuery = Mock(side_effect=query)
return nvsmi
nvidia_smi.getInstance = Mock(side_effect=getInstance)
gpu_info = GpuInfo()
if check_compute:
with pytest.warns(UserWarning, match=warn_msg):
gpu_info.compute()
# with Engine
engine = Engine(lambda engine, batch: 0.0)
engine.state = State(metrics={})
with pytest.warns(UserWarning, match=warn_msg):
gpu_info.completed(engine, name="gpu info")
# No GPU info
_test_with_custom_query(resp={}, warn_msg=r"No GPU information available", check_compute=True)
# No GPU memory info
_test_with_custom_query(resp={"gpu": [{"utilization": {}}]}, warn_msg=r"No GPU memory usage information available")
# No GPU utilization info
_test_with_custom_query(
resp={"gpu": [{"fb_memory_usage": {}}]}, warn_msg=r"No GPU utilization information available"
)
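# Hedged usage sketch, not collected by pytest: outside the mocked setup above, GpuInfo is
# typically attached to a training engine so that "gpu:0 mem(%)" (and, when reported,
# "gpu:0 util(%)") show up in engine.state.metrics. `trainer` is assumed to be an existing
# ignite Engine, with a CUDA-capable GPU and pynvml available.
def _example_attach_gpu_info(trainer):
    from ignite.contrib.metrics import GpuInfo

    GpuInfo().attach(trainer, name="gpu")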
|
import os
from unittest.mock import patch
import pytest
import sklearn
import torch
from sklearn.metrics import average_precision_score
import ignite.distributed as idist
from ignite.contrib.metrics import AveragePrecision
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
torch.manual_seed(12)
@pytest.fixture()
def mock_no_sklearn():
with patch.dict("sys.modules", {"sklearn.metrics": None}):
yield sklearn
def test_no_sklearn(mock_no_sklearn):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires scikit-learn to be installed."):
AveragePrecision()
def test_no_update():
ap = AveragePrecision()
with pytest.raises(
NotComputableError, match=r"EpochMetric must have at least one example before it can be computed"
):
ap.compute()
def test_input_types():
ap = AveragePrecision()
ap.reset()
output1 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
ap.update(output1)
with pytest.raises(ValueError, match=r"Incoherent types between input y_pred and stored predictions"):
ap.update((torch.randint(0, 5, size=(4, 3)), torch.randint(0, 2, size=(4, 3))))
with pytest.raises(ValueError, match=r"Incoherent types between input y and stored targets"):
ap.update((torch.rand(4, 3), torch.randint(0, 2, size=(4, 3)).to(torch.int32)))
with pytest.raises(ValueError, match=r"Incoherent types between input y_pred and stored predictions"):
ap.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10, 5)).long()))
def test_check_shape():
ap = AveragePrecision()
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
ap._check_shape((torch.tensor(0), torch.tensor(0)))
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
ap._check_shape((torch.rand(4, 3, 1), torch.rand(4, 3)))
with pytest.raises(ValueError, match=r"Targets should be of shape"):
ap._check_shape((torch.rand(4, 3), torch.rand(4, 3, 1)))
def test_binary_and_multilabel_inputs():
ap = AveragePrecision()
def _test(y_pred, y, batch_size):
ap.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
ap.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
ap.update((y_pred, y))
np_y = y.numpy()
np_y_pred = y_pred.numpy()
res = ap.compute()
assert isinstance(res, float)
assert average_precision_score(np_y, np_y_pred) == pytest.approx(res)
def get_test_cases():
test_cases = [
# Binary input data of shape (N,) or (N, 1)
(torch.randint(0, 2, size=(50,)).long(), torch.randint(0, 2, size=(50,)).long(), 1),
(torch.randint(0, 2, size=(50, 1)).long(), torch.randint(0, 2, size=(50, 1)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50,)).long(), torch.randint(0, 2, size=(50,)).long(), 16),
(torch.randint(0, 2, size=(50, 1)).long(), torch.randint(0, 2, size=(50, 1)).long(), 16),
# Binary input data of shape (N, L)
(torch.randint(0, 2, size=(50, 4)).long(), torch.randint(0, 2, size=(50, 4)).long(), 1),
(torch.randint(0, 2, size=(50, 7)).long(), torch.randint(0, 2, size=(50, 7)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50, 4)).long(), torch.randint(0, 2, size=(50, 4)).long(), 16),
(torch.randint(0, 2, size=(50, 7)).long(), torch.randint(0, 2, size=(50, 7)).long(), 16),
]
return test_cases
for _ in range(5):
        # check multiple random inputs as random exact occurrences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def test_integration_binary_and_multilabel_inputs():
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
ap_metric = AveragePrecision()
ap_metric.attach(engine, "ap")
np_y = y.numpy()
np_y_pred = y_pred.numpy()
np_ap = average_precision_score(np_y, np_y_pred)
data = list(range(y_pred.shape[0] // batch_size))
ap = engine.run(data, max_epochs=1).metrics["ap"]
assert isinstance(ap, float)
assert np_ap == pytest.approx(ap)
def get_test_cases():
test_cases = [
# Binary input data of shape (N,) or (N, 1)
(torch.randint(0, 2, size=(100,)).long(), torch.randint(0, 2, size=(100,)).long(), 10),
(torch.randint(0, 2, size=(100, 1)).long(), torch.randint(0, 2, size=(100, 1)).long(), 10),
# Binary input data of shape (N, L)
(torch.randint(0, 2, size=(100, 3)).long(), torch.randint(0, 2, size=(100, 3)).long(), 10),
(torch.randint(0, 2, size=(100, 4)).long(), torch.randint(0, 2, size=(100, 4)).long(), 10),
]
return test_cases
for _ in range(5):
        # check multiple random inputs as random exact occurrences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def _test_distrib_binary_and_multilabel_inputs(device):
rank = idist.get_rank()
torch.manual_seed(12)
def _test(y_pred, y, batch_size, metric_device):
metric_device = torch.device(metric_device)
ap = AveragePrecision(device=metric_device)
torch.manual_seed(10 + rank)
ap.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
ap.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
ap.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y = y.cpu().numpy()
np_y_pred = y_pred.cpu().numpy()
res = ap.compute()
assert isinstance(res, float)
assert average_precision_score(np_y, np_y_pred) == pytest.approx(res)
def get_test_cases():
test_cases = [
# Binary input data of shape (N,) or (N, 1)
(torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10,)).long(), 1),
(torch.randint(0, 2, size=(10, 1)).long(), torch.randint(0, 2, size=(10, 1)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50,)).long(), torch.randint(0, 2, size=(50,)).long(), 16),
(torch.randint(0, 2, size=(50, 1)).long(), torch.randint(0, 2, size=(50, 1)).long(), 16),
# Binary input data of shape (N, L)
(torch.randint(0, 2, size=(10, 4)).long(), torch.randint(0, 2, size=(10, 4)).long(), 1),
(torch.randint(0, 2, size=(10, 7)).long(), torch.randint(0, 2, size=(10, 7)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50, 4)).long(), torch.randint(0, 2, size=(50, 4)).long(), 16),
(torch.randint(0, 2, size=(50, 7)).long(), torch.randint(0, 2, size=(50, 7)).long(), 16),
]
return test_cases
for _ in range(3):
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
y_pred = y_pred.to(device)
y = y.to(device)
_test(y_pred, y, batch_size, "cpu")
if device.type != "xla":
_test(y_pred, y, batch_size, idist.device())
def _test_distrib_integration_binary_input(device):
rank = idist.get_rank()
n_iters = 80
batch_size = 16
n_classes = 2
def _test(y_preds, y_true, n_epochs, metric_device, update_fn):
metric_device = torch.device(metric_device)
engine = Engine(update_fn)
ap = AveragePrecision(device=metric_device)
ap.attach(engine, "ap")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_true = idist.all_gather(y_true)
y_preds = idist.all_gather(y_preds)
assert "ap" in engine.state.metrics
res = engine.state.metrics["ap"]
true_res = average_precision_score(y_true.cpu().numpy(), y_preds.cpu().numpy())
assert pytest.approx(res) == true_res
def get_tests(is_N):
torch.manual_seed(12 + rank)
if is_N:
y_true = torch.randint(0, n_classes, size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(n_iters * batch_size).to(device)
def update_fn(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
else:
y_true = torch.randint(0, n_classes, size=(n_iters * batch_size, 10)).to(device)
y_preds = torch.randint(0, n_classes, size=(n_iters * batch_size, 10)).to(device)
def update_fn(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size, :],
y_true[i * batch_size : (i + 1) * batch_size, :],
)
return y_preds, y_true, update_fn
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for _ in range(2):
# Binary input data of shape (N,)
y_preds, y_true, update_fn = get_tests(is_N=True)
_test(y_preds, y_true, n_epochs=1, metric_device=metric_device, update_fn=update_fn)
_test(y_preds, y_true, n_epochs=2, metric_device=metric_device, update_fn=update_fn)
# Binary input data of shape (N, L)
y_preds, y_true, update_fn = get_tests(is_N=False)
_test(y_preds, y_true, n_epochs=1, metric_device=metric_device, update_fn=update_fn)
_test(y_preds, y_true, n_epochs=2, metric_device=metric_device, update_fn=update_fn)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_binary_and_multilabel_inputs(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_binary_and_multilabel_inputs(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_binary_and_multilabel_inputs, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration_binary_input, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_binary_and_multilabel_inputs(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_binary_and_multilabel_inputs(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_binary_and_multilabel_inputs(device)
_test_distrib_integration_binary_input(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_binary_and_multilabel_inputs(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
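# Hedged usage sketch, not collected by pytest: mirroring the integration tests above, the
# metric is usually attached to an evaluation engine whose output is a (y_pred, y) pair;
# `evaluator` is assumed to be such an existing ignite Engine.
def _example_attach_average_precision(evaluator):
    from ignite.contrib.metrics import AveragePrecision

    AveragePrecision(output_transform=lambda output: (output[0], output[1])).attach(evaluator, "ap")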
|
from unittest.mock import patch
import numpy as np
import pytest
import sklearn
import torch
from sklearn.metrics import roc_curve
from ignite import distributed as idist
from ignite.contrib.metrics.roc_auc import RocCurve
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
from ignite.metrics.epoch_metric import EpochMetricWarning
def test_wrong_setup():
def compute_fn(y_preds, y_targets):
return 0.0
with pytest.raises(NotComputableError, match="RocCurve must have at least one example before it can be computed"):
metric = RocCurve(compute_fn)
metric.compute()
@pytest.fixture()
def mock_no_sklearn():
with patch.dict("sys.modules", {"sklearn.metrics": None}):
yield sklearn
def test_no_sklearn(mock_no_sklearn):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires scikit-learn to be installed"):
RocCurve()
def test_roc_curve():
size = 100
np_y_pred = np.random.rand(size, 1)
np_y = np.zeros((size,))
np_y[size // 2 :] = 1
sk_fpr, sk_tpr, sk_thresholds = roc_curve(np_y, np_y_pred)
roc_curve_metric = RocCurve()
y_pred = torch.from_numpy(np_y_pred)
y = torch.from_numpy(np_y)
roc_curve_metric.update((y_pred, y))
fpr, tpr, thresholds = roc_curve_metric.compute()
assert np.array_equal(fpr, sk_fpr)
assert np.array_equal(tpr, sk_tpr)
# assert thresholds almost equal, due to numpy->torch->numpy conversion
np.testing.assert_array_almost_equal(thresholds, sk_thresholds)
def test_integration_roc_curve_with_output_transform():
np.random.seed(1)
size = 100
np_y_pred = np.random.rand(size, 1)
np_y = np.zeros((size,))
np_y[size // 2 :] = 1
np.random.shuffle(np_y)
sk_fpr, sk_tpr, sk_thresholds = roc_curve(np_y, np_y_pred)
batch_size = 10
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return idx, torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
roc_curve_metric = RocCurve(output_transform=lambda x: (x[1], x[2]))
roc_curve_metric.attach(engine, "roc_curve")
data = list(range(size // batch_size))
fpr, tpr, thresholds = engine.run(data, max_epochs=1).metrics["roc_curve"]
assert np.array_equal(fpr, sk_fpr)
assert np.array_equal(tpr, sk_tpr)
# assert thresholds almost equal, due to numpy->torch->numpy conversion
np.testing.assert_array_almost_equal(thresholds, sk_thresholds)
def test_integration_roc_curve_with_activated_output_transform():
np.random.seed(1)
size = 100
np_y_pred = np.random.rand(size, 1)
np_y_pred_sigmoid = torch.sigmoid(torch.from_numpy(np_y_pred)).numpy()
np_y = np.zeros((size,))
np_y[size // 2 :] = 1
np.random.shuffle(np_y)
sk_fpr, sk_tpr, sk_thresholds = roc_curve(np_y, np_y_pred_sigmoid)
batch_size = 10
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return idx, torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
roc_curve_metric = RocCurve(output_transform=lambda x: (torch.sigmoid(x[1]), x[2]))
roc_curve_metric.attach(engine, "roc_curve")
data = list(range(size // batch_size))
fpr, tpr, thresholds = engine.run(data, max_epochs=1).metrics["roc_curve"]
assert np.array_equal(fpr, sk_fpr)
assert np.array_equal(tpr, sk_tpr)
# assert thresholds almost equal, due to numpy->torch->numpy conversion
np.testing.assert_array_almost_equal(thresholds, sk_thresholds)
def test_check_compute_fn():
y_pred = torch.zeros((8, 13))
y_pred[:, 1] = 1
y_true = torch.zeros_like(y_pred)
output = (y_pred, y_true)
em = RocCurve(check_compute_fn=True)
em.reset()
with pytest.warns(EpochMetricWarning, match=r"Probably, there can be a problem with `compute_fn`"):
em.update(output)
em = RocCurve(check_compute_fn=False)
em.update(output)
def test_distrib_integration(distributed):
rank = idist.get_rank()
torch.manual_seed(41 + rank)
n_batches, batch_size = 5, 10
y = torch.randint(0, 2, size=(n_batches * batch_size,))
y_pred = torch.rand((n_batches * batch_size,))
def update(engine, i):
return (
y_pred[i * batch_size : (i + 1) * batch_size],
y[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
device = torch.device("cpu") if idist.device().type == "xla" else idist.device()
metric = RocCurve(device=device)
metric.attach(engine, "roc_curve")
data = list(range(n_batches))
engine.run(data=data, max_epochs=1)
fpr, tpr, thresholds = engine.state.metrics["roc_curve"]
assert isinstance(fpr, torch.Tensor) and fpr.device == device
assert isinstance(tpr, torch.Tensor) and tpr.device == device
assert isinstance(thresholds, torch.Tensor) and thresholds.device == device
y = idist.all_gather(y)
y_pred = idist.all_gather(y_pred)
sk_fpr, sk_tpr, sk_thresholds = roc_curve(y.cpu().numpy(), y_pred.cpu().numpy())
np.testing.assert_array_almost_equal(fpr.cpu().numpy(), sk_fpr)
np.testing.assert_array_almost_equal(tpr.cpu().numpy(), sk_tpr)
np.testing.assert_array_almost_equal(thresholds.cpu().numpy(), sk_thresholds)
|
import os
from unittest.mock import patch
import pytest
import sklearn
import torch
from sklearn.metrics import roc_auc_score
import ignite.distributed as idist
from ignite.contrib.metrics import ROC_AUC
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
from ignite.metrics.epoch_metric import EpochMetricWarning
torch.manual_seed(12)
@pytest.fixture()
def mock_no_sklearn():
with patch.dict("sys.modules", {"sklearn.metrics": None}):
yield sklearn
def test_no_sklearn(mock_no_sklearn):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires scikit-learn to be installed."):
ROC_AUC()
def test_no_update():
roc_auc = ROC_AUC()
with pytest.raises(
NotComputableError, match=r"EpochMetric must have at least one example before it can be computed"
):
roc_auc.compute()
def test_input_types():
roc_auc = ROC_AUC()
roc_auc.reset()
output1 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
roc_auc.update(output1)
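    # the first update fixes the stored dtypes; the updates below deliberately use mismatched dtypes
    # and must be rejected with the "Incoherent types" errors checked here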
with pytest.raises(ValueError, match=r"Incoherent types between input y_pred and stored predictions"):
roc_auc.update((torch.randint(0, 5, size=(4, 3)), torch.randint(0, 2, size=(4, 3))))
with pytest.raises(ValueError, match=r"Incoherent types between input y and stored targets"):
roc_auc.update((torch.rand(4, 3), torch.randint(0, 2, size=(4, 3)).to(torch.int32)))
with pytest.raises(ValueError, match=r"Incoherent types between input y_pred and stored predictions"):
roc_auc.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10, 5)).long()))
def test_check_shape():
roc_auc = ROC_AUC()
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
roc_auc._check_shape((torch.tensor(0), torch.tensor(0)))
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
roc_auc._check_shape((torch.rand(4, 3, 1), torch.rand(4, 3)))
with pytest.raises(ValueError, match=r"Targets should be of shape"):
roc_auc._check_shape((torch.rand(4, 3), torch.rand(4, 3, 1)))
def test_binary_and_multilabel_inputs():
roc_auc = ROC_AUC()
def _test(y_pred, y, batch_size):
roc_auc.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
roc_auc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
roc_auc.update((y_pred, y))
np_y = y.numpy()
np_y_pred = y_pred.numpy()
res = roc_auc.compute()
assert isinstance(res, float)
assert roc_auc_score(np_y, np_y_pred) == pytest.approx(res)
def get_test_cases():
test_cases = [
# Binary input data of shape (N,) or (N, 1)
(torch.randint(0, 2, size=(50,)).long(), torch.randint(0, 2, size=(50,)).long(), 1),
(torch.randint(0, 2, size=(50, 1)).long(), torch.randint(0, 2, size=(50, 1)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50,)).long(), torch.randint(0, 2, size=(50,)).long(), 16),
(torch.randint(0, 2, size=(50, 1)).long(), torch.randint(0, 2, size=(50, 1)).long(), 16),
# Binary input data of shape (N, L)
(torch.randint(0, 2, size=(50, 4)).long(), torch.randint(0, 2, size=(50, 4)).long(), 1),
(torch.randint(0, 2, size=(50, 7)).long(), torch.randint(0, 2, size=(50, 7)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50, 4)).long(), torch.randint(0, 2, size=(50, 4)).long(), 16),
(torch.randint(0, 2, size=(50, 7)).long(), torch.randint(0, 2, size=(50, 7)).long(), 16),
]
return test_cases
for _ in range(5):
test_cases = get_test_cases()
        # check multiple random inputs, since exact random coincidences are rare
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def test_check_compute_fn():
y_pred = torch.zeros((8, 13))
y_pred[:, 1] = 1
y_true = torch.zeros_like(y_pred)
output = (y_pred, y_true)
em = ROC_AUC(check_compute_fn=True)
em.reset()
with pytest.warns(EpochMetricWarning, match=r"Probably, there can be a problem with `compute_fn`"):
em.update(output)
em = ROC_AUC(check_compute_fn=False)
em.update(output)
def test_integration_binary_and_multilabel_inputs():
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
roc_auc_metric = ROC_AUC()
roc_auc_metric.attach(engine, "roc_auc")
np_y = y.numpy()
np_y_pred = y_pred.numpy()
np_roc_auc = roc_auc_score(np_y, np_y_pred)
data = list(range(y_pred.shape[0] // batch_size))
roc_auc = engine.run(data, max_epochs=1).metrics["roc_auc"]
assert isinstance(roc_auc, float)
assert np_roc_auc == pytest.approx(roc_auc)
def get_test_cases():
test_cases = [
# Binary input data of shape (N,) or (N, 1)
(torch.randint(0, 2, size=(100,)).long(), torch.randint(0, 2, size=(100,)).long(), 10),
(torch.randint(0, 2, size=(100, 1)).long(), torch.randint(0, 2, size=(100, 1)).long(), 10),
# Binary input data of shape (N, L)
(torch.randint(0, 2, size=(100, 3)).long(), torch.randint(0, 2, size=(100, 3)).long(), 10),
(torch.randint(0, 2, size=(100, 4)).long(), torch.randint(0, 2, size=(100, 4)).long(), 10),
]
return test_cases
for _ in range(5):
        # check multiple random inputs, since exact random coincidences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def _test_distrib_binary_and_multilabel_inputs(device):
rank = idist.get_rank()
def _test(y_pred, y, batch_size, metric_device):
metric_device = torch.device(metric_device)
roc_auc = ROC_AUC(device=metric_device)
roc_auc.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
roc_auc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
roc_auc.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y = y.cpu().numpy()
np_y_pred = y_pred.cpu().numpy()
res = roc_auc.compute()
assert isinstance(res, float)
assert roc_auc_score(np_y, np_y_pred) == pytest.approx(res)
def get_test_cases():
test_cases = [
# Binary input data of shape (N,) or (N, 1)
(torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10,)).long(), 1),
(torch.randint(0, 2, size=(10, 1)).long(), torch.randint(0, 2, size=(10, 1)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50,)).long(), torch.randint(0, 2, size=(50,)).long(), 16),
(torch.randint(0, 2, size=(50, 1)).long(), torch.randint(0, 2, size=(50, 1)).long(), 16),
# Binary input data of shape (N, L)
(torch.randint(0, 2, size=(10, 4)).long(), torch.randint(0, 2, size=(10, 4)).long(), 1),
(torch.randint(0, 2, size=(10, 7)).long(), torch.randint(0, 2, size=(10, 7)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50, 4)).long(), torch.randint(0, 2, size=(50, 4)).long(), 16),
(torch.randint(0, 2, size=(50, 7)).long(), torch.randint(0, 2, size=(50, 7)).long(), 16),
]
return test_cases
for i in range(5):
torch.manual_seed(12 + rank + i)
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size, "cpu")
if device.type != "xla":
_test(y_pred, y, batch_size, idist.device())
def _test_distrib_integration_binary_input(device):
rank = idist.get_rank()
n_iters = 80
batch_size = 16
n_classes = 2
def _test(y_preds, y_true, n_epochs, metric_device, update_fn):
metric_device = torch.device(metric_device)
engine = Engine(update_fn)
roc_auc = ROC_AUC(device=metric_device)
roc_auc.attach(engine, "roc_auc")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "roc_auc" in engine.state.metrics
res = engine.state.metrics["roc_auc"]
true_res = roc_auc_score(y_true.cpu().numpy(), y_preds.cpu().numpy())
assert pytest.approx(res) == true_res
def get_tests(is_N):
if is_N:
y_true = torch.randint(0, n_classes, size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(n_iters * batch_size).to(device)
def update_fn(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
else:
y_true = torch.randint(0, n_classes, size=(n_iters * batch_size, 10)).to(device)
y_preds = torch.rand(n_iters * batch_size, 10).to(device)
def update_fn(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
return y_preds, y_true, update_fn
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(12 + rank + i)
# Binary input data of shape (N,)
y_preds, y_true, update_fn = get_tests(is_N=True)
_test(y_preds, y_true, n_epochs=1, metric_device=metric_device, update_fn=update_fn)
_test(y_preds, y_true, n_epochs=2, metric_device=metric_device, update_fn=update_fn)
# Binary input data of shape (N, L)
y_preds, y_true, update_fn = get_tests(is_N=False)
_test(y_preds, y_true, n_epochs=1, metric_device=metric_device, update_fn=update_fn)
_test(y_preds, y_true, n_epochs=2, metric_device=metric_device, update_fn=update_fn)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_binary_and_multilabel_inputs(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_binary_and_multilabel_inputs(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_binary_and_multilabel_inputs, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration_binary_input, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_binary_and_multilabel_inputs(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_binary_and_multilabel_inputs(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_binary_and_multilabel_inputs(device)
_test_distrib_integration_binary_input(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_binary_and_multilabel_inputs(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
from unittest.mock import patch
import pytest
import sklearn
import torch
from sklearn.metrics import cohen_kappa_score
import ignite.distributed as idist
from ignite.contrib.metrics import CohenKappa
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
torch.manual_seed(12)
@pytest.fixture()
def mock_no_sklearn():
with patch.dict("sys.modules", {"sklearn.metrics": None}):
yield sklearn
def test_no_sklearn(mock_no_sklearn):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires scikit-learn to be installed."):
CohenKappa()
def test_no_update():
ck = CohenKappa()
with pytest.raises(
NotComputableError, match=r"EpochMetric must have at least one example before it can be computed"
):
ck.compute()
def test_input_types():
ck = CohenKappa()
ck.reset()
output1 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
ck.update(output1)
with pytest.raises(ValueError, match=r"Incoherent types between input y_pred and stored predictions"):
ck.update((torch.randint(0, 5, size=(4, 3)), torch.randint(0, 2, size=(4, 3))))
with pytest.raises(ValueError, match=r"Incoherent types between input y and stored targets"):
ck.update((torch.rand(4, 3), torch.randint(0, 2, size=(4, 3)).to(torch.int32)))
with pytest.raises(ValueError, match=r"Incoherent types between input y_pred and stored predictions"):
ck.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10, 5)).long()))
def test_check_shape():
ck = CohenKappa()
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
ck._check_shape((torch.tensor(0), torch.tensor(0)))
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
ck._check_shape((torch.rand(4, 3, 1), torch.rand(4, 3)))
with pytest.raises(ValueError, match=r"Targets should be of shape"):
ck._check_shape((torch.rand(4, 3), torch.rand(4, 3, 1)))
def test_cohen_kappa_wrong_weights_type():
with pytest.raises(ValueError, match=r"Kappa Weighting type must be"):
ck = CohenKappa(weights=7)
with pytest.raises(ValueError, match=r"Kappa Weighting type must be"):
ck = CohenKappa(weights="dd")
@pytest.mark.parametrize("weights", [None, "linear", "quadratic"])
def test_binary_input(weights):
ck = CohenKappa(weights)
def _test(y_pred, y, batch_size):
ck.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
ck.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
ck.update((y_pred, y))
np_y = y.numpy()
np_y_pred = y_pred.numpy()
res = ck.compute()
assert isinstance(res, float)
assert cohen_kappa_score(np_y, np_y_pred, weights=weights) == pytest.approx(res)
def get_test_cases():
test_cases = [
# Binary input data of shape (N,) or (N, 1)
(torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10,)).long(), 1),
(torch.randint(0, 2, size=(10, 1)).long(), torch.randint(0, 2, size=(10, 1)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50,)).long(), torch.randint(0, 2, size=(50,)).long(), 16),
(torch.randint(0, 2, size=(50, 1)).long(), torch.randint(0, 2, size=(50, 1)).long(), 16),
]
return test_cases
for _ in range(5):
        # check multiple random inputs, since exact random coincidences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def test_multilabel_inputs():
ck = CohenKappa()
with pytest.raises(ValueError, match=r"multilabel-indicator is not supported"):
ck.reset()
ck.update((torch.randint(0, 2, size=(10, 4)).long(), torch.randint(0, 2, size=(10, 4)).long()))
ck.compute()
with pytest.raises(ValueError, match=r"multilabel-indicator is not supported"):
ck.reset()
ck.update((torch.randint(0, 2, size=(10, 6)).long(), torch.randint(0, 2, size=(10, 6)).long()))
ck.compute()
with pytest.raises(ValueError, match=r"multilabel-indicator is not supported"):
ck.reset()
ck.update((torch.randint(0, 2, size=(10, 8)).long(), torch.randint(0, 2, size=(10, 8)).long()))
ck.compute()
@pytest.mark.parametrize("weights", [None, "linear", "quadratic"])
def test_integration_binary_input(weights):
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
ck_metric = CohenKappa(weights=weights)
ck_metric.attach(engine, "ck")
np_y = y.numpy()
np_y_pred = y_pred.numpy()
np_ck = cohen_kappa_score(np_y, np_y_pred, weights=weights)
data = list(range(y_pred.shape[0] // batch_size))
ck = engine.run(data, max_epochs=1).metrics["ck"]
assert isinstance(ck, float)
assert np_ck == pytest.approx(ck)
def get_test_cases():
test_cases = [
# Binary input data of shape (N,) or (N, 1)
(torch.randint(0, 2, size=(50,)).long(), torch.randint(0, 2, size=(50,)).long(), 10),
(torch.randint(0, 2, size=(50, 1)).long(), torch.randint(0, 2, size=(50, 1)).long(), 10),
]
return test_cases
for _ in range(5):
        # check multiple random inputs, since exact random coincidences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def _test_distrib_binary_input(device):
rank = idist.get_rank()
def _test(y_pred, y, batch_size, metric_device):
metric_device = torch.device(metric_device)
ck = CohenKappa(device=metric_device)
ck.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
ck.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
ck.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y = y.cpu().numpy()
np_y_pred = y_pred.cpu().numpy()
res = ck.compute()
assert isinstance(res, float)
assert cohen_kappa_score(np_y, np_y_pred) == pytest.approx(res)
def get_test_cases():
test_cases = [
# Binary input data of shape (N,) or (N, 1)
(torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10,)).long(), 1),
(torch.randint(0, 2, size=(10, 1)).long(), torch.randint(0, 2, size=(10, 1)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50,)).long(), torch.randint(0, 2, size=(50,)).long(), 16),
(torch.randint(0, 2, size=(50, 1)).long(), torch.randint(0, 2, size=(50, 1)).long(), 16),
]
return test_cases
for i in range(3):
torch.manual_seed(10 + rank + i)
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size, "cpu")
if device.type != "xla":
_test(y_pred, y, batch_size, idist.device())
def _test_distrib_integration_binary_input(device):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
torch.manual_seed(12 + rank)
# Binary input data of shape (N,) or (N, 1)
y_true = torch.randint(0, 2, size=(n_iters * batch_size,)).to(device)
y_preds = torch.randint(0, 2, size=(n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
ck = CohenKappa(device=metric_device)
ck.attach(engine, "ck")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_true = idist.all_gather(y_true)
y_preds = idist.all_gather(y_preds)
assert "ck" in engine.state.metrics
res = engine.state.metrics["ck"]
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
true_res = cohen_kappa_score(y_true.cpu().numpy(), y_preds.cpu().numpy())
assert pytest.approx(res) == true_res
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for _ in range(2):
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_binary_input(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_binary_input(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_binary_input, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration_binary_input, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_binary_input(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_binary_input(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_binary_input(device)
_test_distrib_integration_binary_input(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_binary_input(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
from typing import Tuple
from unittest.mock import patch
import numpy as np
import pytest
import sklearn
import torch
from sklearn.metrics import precision_recall_curve
import ignite.distributed as idist
from ignite.contrib.metrics.precision_recall_curve import PrecisionRecallCurve
from ignite.engine import Engine
from ignite.metrics.epoch_metric import EpochMetricWarning
@pytest.fixture()
def mock_no_sklearn():
with patch.dict("sys.modules", {"sklearn.metrics": None}):
yield sklearn
def test_no_sklearn(mock_no_sklearn):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires scikit-learn to be installed."):
y = torch.tensor([1, 1])
pr_curve = PrecisionRecallCurve()
pr_curve.update((y, y))
pr_curve.compute()
def test_precision_recall_curve():
size = 100
np_y_pred = np.random.rand(size, 1)
np_y = np.zeros((size,))
np_y[size // 2 :] = 1
sk_precision, sk_recall, sk_thresholds = precision_recall_curve(np_y, np_y_pred)
precision_recall_curve_metric = PrecisionRecallCurve()
y_pred = torch.from_numpy(np_y_pred)
y = torch.from_numpy(np_y)
precision_recall_curve_metric.update((y_pred, y))
precision, recall, thresholds = precision_recall_curve_metric.compute()
precision = precision.numpy()
recall = recall.numpy()
thresholds = thresholds.numpy()
assert pytest.approx(precision) == sk_precision
assert pytest.approx(recall) == sk_recall
# assert thresholds almost equal, due to numpy->torch->numpy conversion
np.testing.assert_array_almost_equal(thresholds, sk_thresholds)
def test_integration_precision_recall_curve_with_output_transform():
np.random.seed(1)
size = 100
np_y_pred = np.random.rand(size, 1)
np_y = np.zeros((size,))
np_y[size // 2 :] = 1
np.random.shuffle(np_y)
sk_precision, sk_recall, sk_thresholds = precision_recall_curve(np_y, np_y_pred)
batch_size = 10
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return idx, torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
precision_recall_curve_metric = PrecisionRecallCurve(output_transform=lambda x: (x[1], x[2]))
precision_recall_curve_metric.attach(engine, "precision_recall_curve")
data = list(range(size // batch_size))
precision, recall, thresholds = engine.run(data, max_epochs=1).metrics["precision_recall_curve"]
precision = precision.numpy()
recall = recall.numpy()
thresholds = thresholds.numpy()
assert pytest.approx(precision) == sk_precision
assert pytest.approx(recall) == sk_recall
# assert thresholds almost equal, due to numpy->torch->numpy conversion
np.testing.assert_array_almost_equal(thresholds, sk_thresholds)
def test_integration_precision_recall_curve_with_activated_output_transform():
np.random.seed(1)
size = 100
np_y_pred = np.random.rand(size, 1)
np_y_pred_sigmoid = torch.sigmoid(torch.from_numpy(np_y_pred)).numpy()
np_y = np.zeros((size,))
np_y[size // 2 :] = 1
np.random.shuffle(np_y)
sk_precision, sk_recall, sk_thresholds = precision_recall_curve(np_y, np_y_pred_sigmoid)
batch_size = 10
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return idx, torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
precision_recall_curve_metric = PrecisionRecallCurve(output_transform=lambda x: (torch.sigmoid(x[1]), x[2]))
precision_recall_curve_metric.attach(engine, "precision_recall_curve")
data = list(range(size // batch_size))
precision, recall, thresholds = engine.run(data, max_epochs=1).metrics["precision_recall_curve"]
precision = precision.cpu().numpy()
recall = recall.cpu().numpy()
thresholds = thresholds.cpu().numpy()
assert pytest.approx(precision) == sk_precision
assert pytest.approx(recall) == sk_recall
# assert thresholds almost equal, due to numpy->torch->numpy conversion
np.testing.assert_array_almost_equal(thresholds, sk_thresholds)
def test_check_compute_fn():
y_pred = torch.zeros((8, 13))
y_pred[:, 1] = 1
y_true = torch.zeros_like(y_pred)
output = (y_pred, y_true)
em = PrecisionRecallCurve(check_compute_fn=True)
em.reset()
with pytest.warns(EpochMetricWarning, match=r"Probably, there can be a problem with `compute_fn`"):
em.update(output)
em = PrecisionRecallCurve(check_compute_fn=False)
em.update(output)
def _test_distrib_compute(device):
rank = idist.get_rank()
def _test(y_pred, y, batch_size, metric_device):
metric_device = torch.device(metric_device)
prc = PrecisionRecallCurve(device=metric_device)
prc.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
prc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
prc.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y = y.cpu().numpy()
np_y_pred = y_pred.cpu().numpy()
res = prc.compute()
assert isinstance(res, Tuple)
assert precision_recall_curve(np_y, np_y_pred)[0] == pytest.approx(res[0].cpu().numpy())
assert precision_recall_curve(np_y, np_y_pred)[1] == pytest.approx(res[1].cpu().numpy())
assert precision_recall_curve(np_y, np_y_pred)[2] == pytest.approx(res[2].cpu().numpy())
def get_test_cases():
test_cases = [
# Binary input data of shape (N,) or (N, 1)
(torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10,)), 1),
(torch.randint(0, 2, size=(10, 1)), torch.randint(0, 2, size=(10, 1)), 1),
# updated batches
(torch.randint(0, 2, size=(50,)), torch.randint(0, 2, size=(50,)), 16),
(torch.randint(0, 2, size=(50, 1)), torch.randint(0, 2, size=(50, 1)), 16),
]
return test_cases
for i in range(3):
torch.manual_seed(12 + rank + i)
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
y_pred = y_pred.to(device)
y = y.to(device)
_test(y_pred, y, batch_size, "cpu")
if device.type != "xla":
_test(y_pred, y, batch_size, idist.device())
def _test_distrib_integration(device):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 151
torch.manual_seed(12 + rank)
y_true = torch.randint(0, 2, (n_iters * batch_size,)).to(device)
y_preds = torch.randint(0, 2, (n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
prc = PrecisionRecallCurve(device=metric_device)
prc.attach(engine, "prc")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_true = idist.all_gather(y_true)
y_preds = idist.all_gather(y_preds)
assert "prc" in engine.state.metrics
precision, recall, thresholds = engine.state.metrics["prc"]
np_y_true = y_true.cpu().numpy().ravel()
np_y_preds = y_preds.cpu().numpy().ravel()
sk_precision, sk_recall, sk_thresholds = precision_recall_curve(np_y_true, np_y_preds)
assert precision.shape == sk_precision.shape
assert recall.shape == sk_recall.shape
assert thresholds.shape == sk_thresholds.shape
assert pytest.approx(precision.cpu().numpy()) == sk_precision
assert pytest.approx(recall.cpu().numpy()) == sk_recall
assert pytest.approx(thresholds.cpu().numpy()) == sk_thresholds
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for _ in range(2):
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
from sklearn.metrics import DistanceMetric
import ignite.distributed as idist
from ignite.contrib.metrics.regression import ManhattanDistance
from ignite.engine import Engine
def test_wrong_input_shapes():
m = ManhattanDistance()
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4)))
def test_manhattan_distance():
a = np.random.randn(4)
b = np.random.randn(4)
c = np.random.randn(4)
d = np.random.randn(4)
ground_truth = np.random.randn(4)
m = ManhattanDistance()
manhattan = DistanceMetric.get_metric("manhattan")
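    # reference: Manhattan (L1) distance is sum(|y - y_pred|); accumulating it over updates is equivalent
    # to sklearn's pairwise manhattan distance on the concatenated prediction/ground-truth vectors below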
m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))
np_sum = np.abs(ground_truth - a).sum()
assert m.compute() == pytest.approx(np_sum)
assert manhattan.pairwise([a, ground_truth])[0][1] == pytest.approx(np_sum)
m.update((torch.from_numpy(b), torch.from_numpy(ground_truth)))
np_sum += np.abs(ground_truth - b).sum()
assert m.compute() == pytest.approx(np_sum)
v1 = np.hstack([a, b])
v2 = np.hstack([ground_truth, ground_truth])
assert manhattan.pairwise([v1, v2])[0][1] == pytest.approx(np_sum)
m.update((torch.from_numpy(c), torch.from_numpy(ground_truth)))
np_sum += np.abs(ground_truth - c).sum()
assert m.compute() == pytest.approx(np_sum)
v1 = np.hstack([v1, c])
v2 = np.hstack([v2, ground_truth])
assert manhattan.pairwise([v1, v2])[0][1] == pytest.approx(np_sum)
m.update((torch.from_numpy(d), torch.from_numpy(ground_truth)))
np_sum += np.abs(ground_truth - d).sum()
assert m.compute() == pytest.approx(np_sum)
v1 = np.hstack([v1, d])
v2 = np.hstack([v2, ground_truth])
assert manhattan.pairwise([v1, v2])[0][1] == pytest.approx(np_sum)
def test_integration():
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = ManhattanDistance()
m.attach(engine, "md")
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
manhattan = DistanceMetric.get_metric("manhattan")
data = list(range(y_pred.shape[0] // batch_size))
md = engine.run(data, max_epochs=1).metrics["md"]
assert manhattan.pairwise([np_y_pred, np_y])[0][1] == pytest.approx(md)
def get_test_cases():
test_cases = [
(torch.rand(size=(100,)), torch.rand(size=(100,)), 10),
(torch.rand(size=(100, 1)), torch.rand(size=(100, 1)), 20),
]
return test_cases
for _ in range(5):
        # check multiple random inputs, since exact random coincidences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def test_error_is_not_nan():
m = ManhattanDistance()
m.update((torch.zeros(4), torch.zeros(4)))
assert not (torch.isnan(m._sum_of_errors).any() or torch.isinf(m._sum_of_errors).any()), m._sum_of_errors
def _test_distrib_compute(device):
rank = idist.get_rank()
manhattan = DistanceMetric.get_metric("manhattan")
def _test(metric_device):
metric_device = torch.device(metric_device)
m = ManhattanDistance(device=metric_device)
y_pred = torch.randint(0, 10, size=(10,), device=device).float()
y = torch.randint(0, 10, size=(10,), device=device).float()
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = y_pred.cpu().numpy()
np_y = y.cpu().numpy()
res = m.compute()
assert manhattan.pairwise([np_y_pred, np_y])[0][1] == pytest.approx(res)
for i in range(3):
torch.manual_seed(10 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device):
rank = idist.get_rank()
manhattan = DistanceMetric.get_metric("manhattan")
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
y_true = torch.rand(size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(size=(n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
m = ManhattanDistance(device=metric_device)
m.attach(engine, "md")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "md" in engine.state.metrics
res = engine.state.metrics["md"]
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
np_y_true = y_true.cpu().numpy()
np_y_preds = y_preds.cpu().numpy()
assert pytest.approx(res) == manhattan.pairwise([np_y_preds, np_y_true])[0][1]
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MedianAbsolutePercentageError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = MedianAbsolutePercentageError()
with pytest.raises(
NotComputableError, match=r"EpochMetric must have at least one example before it can be computed"
):
m.compute()
def test_wrong_input_shapes():
m = MedianAbsolutePercentageError()
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
m.update((torch.rand(4, 1, 2), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Targets should be of shape"):
m.update((torch.rand(4, 1), torch.rand(4, 1, 2)))
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
m.update((torch.rand(4, 1, 2), torch.rand(4)))
with pytest.raises(ValueError, match=r"Targets should be of shape"):
m.update((torch.rand(4), torch.rand(4, 1, 2)))
def test_median_absolute_percentage_error():
    # See https://github.com/torch/torch7/pull/182
    # For an even number of elements, torch.median returns the lower of the two middle values,
    # while np.median returns their average, so the dataset size is kept odd for these tests.
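    # Illustration (hypothetical values): torch.median(torch.tensor([1., 2., 3., 4.])) -> tensor(2.),
    # whereas np.median([1., 2., 3., 4.]) -> 2.5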
size = 51
np_y_pred = np.random.rand(size)
np_y = np.random.rand(size)
np_median_absolute_percentage_error = 100.0 * np.median(np.abs(np_y - np_y_pred) / np.abs(np_y))
m = MedianAbsolutePercentageError()
y_pred = torch.from_numpy(np_y_pred)
y = torch.from_numpy(np_y)
m.reset()
m.update((y_pred, y))
assert np_median_absolute_percentage_error == pytest.approx(m.compute())
def test_median_absolute_percentage_error_2():
np.random.seed(1)
size = 105
np_y_pred = np.random.rand(size, 1)
np_y = np.random.rand(size, 1)
np.random.shuffle(np_y)
np_median_absolute_percentage_error = 100.0 * np.median(np.abs(np_y - np_y_pred) / np.abs(np_y))
m = MedianAbsolutePercentageError()
y_pred = torch.from_numpy(np_y_pred)
y = torch.from_numpy(np_y)
m.reset()
batch_size = 16
n_iters = size // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
m.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
assert np_median_absolute_percentage_error == pytest.approx(m.compute())
def test_integration_median_absolute_percentage_error():
np.random.seed(1)
size = 105
np_y_pred = np.random.rand(size, 1)
np_y = np.random.rand(size, 1)
np.random.shuffle(np_y)
np_median_absolute_percentage_error = 100.0 * np.median(np.abs(np_y - np_y_pred) / np.abs(np_y))
batch_size = 15
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = MedianAbsolutePercentageError()
m.attach(engine, "median_absolute_percentage_error")
data = list(range(size // batch_size))
median_absolute_percentage_error = engine.run(data, max_epochs=1).metrics["median_absolute_percentage_error"]
assert np_median_absolute_percentage_error == pytest.approx(median_absolute_percentage_error)
def _test_distrib_compute(device):
def _test(metric_device):
metric_device = torch.device(metric_device)
m = MedianAbsolutePercentageError(device=metric_device)
size = 105
y_pred = torch.randint(1, 10, size=(size, 1), dtype=torch.double, device=device)
y = torch.randint(1, 10, size=(size, 1), dtype=torch.double, device=device)
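        # sampling targets and predictions from [1, 10) keeps |y| strictly positive, so the
        # percentage-error denominator below never hits zero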
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = y_pred.cpu().numpy().ravel()
np_y = y.cpu().numpy().ravel()
res = m.compute()
e = np.abs(np_y - np_y_pred) / np.abs(np_y)
np_res = 100.0 * np.median(e)
assert pytest.approx(res) == np_res
rank = idist.get_rank()
for i in range(3):
torch.manual_seed(10 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device):
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
size = 105
y_true = torch.rand(size=(n_iters * size,)).to(device)
y_preds = torch.rand(size=(n_iters * size,)).to(device)
def update(engine, i):
return (
y_preds[i * size : (i + 1) * size],
y_true[i * size : (i + 1) * size],
)
engine = Engine(update)
m = MedianAbsolutePercentageError(device=metric_device)
m.attach(engine, "mape")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "mape" in engine.state.metrics
res = engine.state.metrics["mape"]
np_y_true = y_true.cpu().numpy().ravel()
np_y_preds = y_preds.cpu().numpy().ravel()
e = np.abs(np_y_true - np_y_preds) / np.abs(np_y_true)
np_res = 100.0 * np.median(e)
assert pytest.approx(res) == np_res
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
rank = idist.get_rank()
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
from sklearn.metrics import DistanceMetric
import ignite.distributed as idist
from ignite.contrib.metrics.regression import CanberraMetric
from ignite.engine import Engine
def test_wrong_input_shapes():
m = CanberraMetric()
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4)))
def test_compute():
a = np.random.randn(4)
b = np.random.randn(4)
c = np.random.randn(4)
d = np.random.randn(4)
ground_truth = np.random.randn(4)
m = CanberraMetric()
canberra = DistanceMetric.get_metric("canberra")
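    # reference: Canberra distance is sum(|y - y_pred| / (|y| + |y_pred|)), accumulated over updates and
    # checked against sklearn's pairwise canberra distance on the concatenated vectors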
m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))
np_sum = (np.abs(ground_truth - a) / (np.abs(a) + np.abs(ground_truth))).sum()
assert m.compute() == pytest.approx(np_sum)
assert canberra.pairwise([a, ground_truth])[0][1] == pytest.approx(np_sum)
m.update((torch.from_numpy(b), torch.from_numpy(ground_truth)))
np_sum += ((np.abs(ground_truth - b)) / (np.abs(b) + np.abs(ground_truth))).sum()
assert m.compute() == pytest.approx(np_sum)
v1 = np.hstack([a, b])
v2 = np.hstack([ground_truth, ground_truth])
assert canberra.pairwise([v1, v2])[0][1] == pytest.approx(np_sum)
m.update((torch.from_numpy(c), torch.from_numpy(ground_truth)))
np_sum += ((np.abs(ground_truth - c)) / (np.abs(c) + np.abs(ground_truth))).sum()
assert m.compute() == pytest.approx(np_sum)
v1 = np.hstack([v1, c])
v2 = np.hstack([v2, ground_truth])
assert canberra.pairwise([v1, v2])[0][1] == pytest.approx(np_sum)
m.update((torch.from_numpy(d), torch.from_numpy(ground_truth)))
np_sum += (np.abs(ground_truth - d) / (np.abs(d) + np.abs(ground_truth))).sum()
assert m.compute() == pytest.approx(np_sum)
v1 = np.hstack([v1, d])
v2 = np.hstack([v2, ground_truth])
assert canberra.pairwise([v1, v2])[0][1] == pytest.approx(np_sum)
def test_integration():
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = CanberraMetric()
m.attach(engine, "cm")
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
canberra = DistanceMetric.get_metric("canberra")
data = list(range(y_pred.shape[0] // batch_size))
cm = engine.run(data, max_epochs=1).metrics["cm"]
assert canberra.pairwise([np_y_pred, np_y])[0][1] == pytest.approx(cm)
def get_test_cases():
test_cases = [
(torch.rand(size=(100,)), torch.rand(size=(100,)), 10),
(torch.rand(size=(100, 1)), torch.rand(size=(100, 1)), 20),
]
return test_cases
for _ in range(5):
        # check multiple random inputs, since exact random coincidences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def test_error_is_not_nan():
m = CanberraMetric()
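    # with all-zero inputs each Canberra term is 0/0; the metric is expected to keep the accumulated sum finite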
m.update((torch.zeros(4), torch.zeros(4)))
assert not (torch.isnan(m._sum_of_errors).any() or torch.isinf(m._sum_of_errors).any()), m._sum_of_errors
def _test_distrib_compute(device):
rank = idist.get_rank()
canberra = DistanceMetric.get_metric("canberra")
def _test(metric_device):
metric_device = torch.device(metric_device)
m = CanberraMetric(device=metric_device)
y_pred = torch.randint(0, 10, size=(10,), device=device).float()
y = torch.randint(0, 10, size=(10,), device=device).float()
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = y_pred.cpu().numpy()
np_y = y.cpu().numpy()
res = m.compute()
assert canberra.pairwise([np_y_pred, np_y])[0][1] == pytest.approx(res)
for i in range(3):
torch.manual_seed(10 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device):
rank = idist.get_rank()
canberra = DistanceMetric.get_metric("canberra")
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
y_true = torch.rand(size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(size=(n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
m = CanberraMetric(device=metric_device)
m.attach(engine, "cm")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "cm" in engine.state.metrics
res = engine.state.metrics["cm"]
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
np_y_true = y_true.cpu().numpy()
np_y_preds = y_preds.cpu().numpy()
assert pytest.approx(res) == canberra.pairwise([np_y_preds, np_y_true])[0][1]
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import WaveHedgesDistance
from ignite.engine import Engine
def test_wrong_input_shapes():
m = WaveHedgesDistance()
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4)))
def test_compute():
a = np.random.randn(4)
b = np.random.randn(4)
c = np.random.randn(4)
d = np.random.randn(4)
ground_truth = np.random.randn(4)
m = WaveHedgesDistance()
m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))
np_sum = (np.abs(ground_truth - a) / np.maximum.reduce([a, ground_truth])).sum()
assert m.compute() == pytest.approx(np_sum)
m.update((torch.from_numpy(b), torch.from_numpy(ground_truth)))
np_sum += (np.abs(ground_truth - b) / np.maximum.reduce([b, ground_truth])).sum()
assert m.compute() == pytest.approx(np_sum)
m.update((torch.from_numpy(c), torch.from_numpy(ground_truth)))
np_sum += (np.abs(ground_truth - c) / np.maximum.reduce([c, ground_truth])).sum()
assert m.compute() == pytest.approx(np_sum)
m.update((torch.from_numpy(d), torch.from_numpy(ground_truth)))
np_sum += (np.abs(ground_truth - d) / np.maximum.reduce([d, ground_truth])).sum()
assert m.compute() == pytest.approx(np_sum)
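# Illustrative sketch (not ignite API, helper name is hypothetical): test_compute above
# accumulates the Wave Hedges distance sum_i |y_i - y_pred_i| / max(y_pred_i, y_i).
# A minimal NumPy restatement of a single update, assuming same-shape 1-D arrays with
# positive values (the distributed tests below add a tiny epsilon to the denominator):
def _wave_hedges_reference_sketch(y_pred, y):
    # per-element absolute difference scaled by the element-wise maximum, then summed
    return (np.abs(y - y_pred) / np.maximum(y_pred, y)).sum()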
def test_integration():
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = WaveHedgesDistance()
m.attach(engine, "whd")
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
data = list(range(y_pred.shape[0] // batch_size))
whd = engine.run(data, max_epochs=1).metrics["whd"]
np_sum = (np.abs(np_y - np_y_pred) / np.maximum.reduce([np_y_pred, np_y])).sum()
assert np_sum == pytest.approx(whd)
def get_test_cases():
test_cases = [
(torch.rand(size=(100,)), torch.rand(size=(100,)), 10),
(torch.rand(size=(100, 1)), torch.rand(size=(100, 1)), 20),
]
return test_cases
for _ in range(5):
# check multiple random inputs, as exact matches between random tensors are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def _test_distrib_compute(device):
rank = idist.get_rank()
def _test(metric_device):
metric_device = torch.device(metric_device)
m = WaveHedgesDistance(device=metric_device)
y_pred = torch.randint(0, 10, size=(10,), device=device).float()
y = torch.randint(0, 10, size=(10,), device=device).float()
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = y_pred.cpu().numpy()
np_y = y.cpu().numpy()
res = m.compute()
np_sum = (np.abs(np_y - np_y_pred) / (np.maximum.reduce([np_y_pred, np_y]) + 1e-30)).sum()
assert np_sum == pytest.approx(res)
for i in range(3):
torch.manual_seed(10 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
y_true = torch.rand(size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(size=(n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
m = WaveHedgesDistance(device=metric_device)
m.attach(engine, "whm")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "whm" in engine.state.metrics
res = engine.state.metrics["whm"]
np_y_true = y_true.cpu().numpy()
np_y_preds = y_preds.cpu().numpy()
np_sum = (np.abs(np_y_true - np_y_preds) / (np.maximum.reduce([np_y_preds, np_y_true]) + 1e-30)).sum()
assert pytest.approx(res) == np_sum
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import GeometricMeanAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = GeometricMeanAbsoluteError()
with pytest.raises(
NotComputableError, match=r"GeometricMeanAbsoluteError must have at least one example before it can be computed"
):
m.compute()
def test_wrong_input_shapes():
m = GeometricMeanAbsoluteError()
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4)))
def test_compute():
a = np.random.randn(4)
b = np.random.randn(4)
c = np.random.randn(4)
d = np.random.randn(4)
ground_truth = np.random.randn(4)
np_prod = 1.0
m = GeometricMeanAbsoluteError()
m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))
errors = np.abs(ground_truth - a)
np_prod = np.multiply.reduce(errors) * np_prod
np_len = len(a)
np_ans = np.power(np_prod, 1.0 / np_len)
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(b), torch.from_numpy(ground_truth)))
errors = np.abs(ground_truth - b)
np_prod = np.multiply.reduce(errors) * np_prod
np_len += len(b)
np_ans = np.power(np_prod, 1.0 / np_len)
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(c), torch.from_numpy(ground_truth)))
errors = np.abs(ground_truth - c)
np_prod = np.multiply.reduce(errors) * np_prod
np_len += len(c)
np_ans = np.power(np_prod, 1.0 / np_len)
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(d), torch.from_numpy(ground_truth)))
errors = np.abs(ground_truth - d)
np_prod = np.multiply.reduce(errors) * np_prod
np_len += len(d)
np_ans = np.power(np_prod, 1.0 / np_len)
assert m.compute() == pytest.approx(np_ans)
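# Illustrative sketch (not ignite API, helper name is hypothetical): the running
# expectation above is the geometric mean absolute error, (prod_i |y_i - y_pred_i|)^(1/n).
# The integration tests below compute the same quantity as exp(mean(log|y - y_pred|)),
# which avoids underflow of the raw product. A minimal NumPy restatement, assuming no
# error term is exactly zero so the logarithm stays finite:
def _gmae_reference_sketch(y_pred, y):
    errors = np.abs(y - y_pred)
    return np.exp(np.log(errors).mean())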
def test_integration():
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = GeometricMeanAbsoluteError()
m.attach(engine, "gmae")
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
data = list(range(y_pred.shape[0] // batch_size))
gmae = engine.run(data, max_epochs=1).metrics["gmae"]
sum_errors = (np.log(np.abs(np_y - np_y_pred))).sum()
np_len = len(y_pred)
np_ans = np.exp(sum_errors / np_len)
assert np_ans == pytest.approx(gmae)
def get_test_cases():
test_cases = [
(torch.rand(size=(100,)), torch.rand(size=(100,)), 10),
(torch.rand(size=(100, 1)), torch.rand(size=(100, 1)), 20),
]
return test_cases
for i in range(5):
# check multiple random inputs, as exact matches between random tensors are rare
torch.manual_seed(12 + i)
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def _test_distrib_compute(device):
rank = idist.get_rank()
def _test(metric_device):
metric_device = torch.device(metric_device)
m = GeometricMeanAbsoluteError(device=metric_device)
torch.manual_seed(10 + rank)
y_pred = torch.randint(0, 10, size=(10,), device=device).float()
y = torch.randint(0, 10, size=(10,), device=device).float()
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = y_pred.cpu().numpy()
np_y = y.cpu().numpy()
res = m.compute()
sum_errors = (np.log(np.abs(np_y - np_y_pred))).sum()
np_len = len(y_pred)
np_ans = np.exp(sum_errors / np_len)
assert np_ans == pytest.approx(res)
for _ in range(3):
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
y_true = torch.rand(size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(size=(n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
m = GeometricMeanAbsoluteError(device=metric_device)
m.attach(engine, "gmae")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "gmae" in engine.state.metrics
res = engine.state.metrics["gmae"]
np_y_true = y_true.cpu().numpy()
np_y_preds = y_preds.cpu().numpy()
sum_errors = (np.log(np.abs(np_y_true - np_y_preds))).sum()
np_len = len(y_preds)
np_ans = np.exp(sum_errors / np_len)
assert pytest.approx(res) == np_ans
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(11 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MeanNormalizedBias
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = MeanNormalizedBias()
with pytest.raises(
NotComputableError, match=r"MeanNormalizedBias must have at least one example before it can be computed"
):
m.compute()
def test_zero_gt():
a = np.random.randn(4)
ground_truth = np.zeros(4)
m = MeanNormalizedBias()
with pytest.raises(NotComputableError, match=r"The ground truth has 0."):
m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))
def test_wrong_input_shapes():
m = MeanNormalizedBias()
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4)))
def test_mean_error():
a = np.random.randn(4)
b = np.random.randn(4)
c = np.random.randn(4)
d = np.random.randn(4)
ground_truth = np.random.randn(4)
m = MeanNormalizedBias()
m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))
np_sum = ((ground_truth - a) / ground_truth).sum()
np_len = len(a)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(b), torch.from_numpy(ground_truth)))
np_sum += ((ground_truth - b) / ground_truth).sum()
np_len += len(b)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(c), torch.from_numpy(ground_truth)))
np_sum += ((ground_truth - c) / ground_truth).sum()
np_len += len(c)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(d), torch.from_numpy(ground_truth)))
np_sum += ((ground_truth - d) / ground_truth).sum()
np_len += len(d)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
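# Illustrative sketch (not ignite API, helper name is hypothetical): test_mean_error
# above tracks the mean normalized bias, mean_i (y_i - y_pred_i) / y_i, which is
# undefined for a ground truth containing zeros (see test_zero_gt). A minimal NumPy
# restatement, assuming a strictly non-zero ground truth:
def _mnb_reference_sketch(y_pred, y):
    # signed relative error averaged over all accumulated samples
    return ((y - y_pred) / y).mean()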
def test_integration():
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = MeanNormalizedBias()
m.attach(engine, "mnb")
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
data = list(range(y_pred.shape[0] // batch_size))
mnb = engine.run(data, max_epochs=1).metrics["mnb"]
np_sum = ((np_y - np_y_pred) / np_y).sum()
np_len = len(np_y_pred)
np_ans = np_sum / np_len
assert np_ans == pytest.approx(mnb)
def get_test_cases():
test_cases = [
(torch.rand(size=(100,)), torch.rand(size=(100,)), 10),
(torch.rand(size=(100, 1)), torch.rand(size=(100, 1)), 20),
]
return test_cases
for _ in range(5):
# check multiple random inputs, as exact matches between random tensors are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def _test_distrib_compute(device):
rank = idist.get_rank()
def _test(metric_device):
metric_device = torch.device(metric_device)
m = MeanNormalizedBias(device=metric_device)
y_pred = torch.randint(1, 11, size=(10,), device=device).float()
y = torch.randint(1, 11, size=(10,), device=device).float()
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = y_pred.cpu().numpy()
np_y = y.cpu().numpy()
res = m.compute()
np_sum = ((np_y - np_y_pred) / np_y).sum()
np_len = len(np_y_pred)
np_ans = np_sum / np_len
assert np_ans == pytest.approx(res)
for i in range(3):
torch.manual_seed(10 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
y_true = torch.rand(size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(size=(n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
m = MeanNormalizedBias(device=metric_device)
m.attach(engine, "mnb")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "mnb" in engine.state.metrics
res = engine.state.metrics["mnb"]
np_y_true = y_true.cpu().numpy()
np_y_preds = y_preds.cpu().numpy()
np_sum = ((np_y_true - np_y_preds) / np_y_true).sum()
np_len = len(np_y_preds)
np_ans = np_sum / np_len
assert pytest.approx(res) == np_ans
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import GeometricMeanRelativeAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = GeometricMeanRelativeAbsoluteError()
with pytest.raises(
NotComputableError,
match=r"GeometricMeanRelativeAbsoluteError must have at least one example before it can be computed",
):
m.compute()
def test_wrong_input_shapes():
m = GeometricMeanRelativeAbsoluteError()
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4)))
def test_compute():
size = 51
np_y_pred = np.random.rand(size)
np_y = np.random.rand(size)
np_gmrae = np.exp(np.log(np.abs(np_y - np_y_pred) / np.abs(np_y - np_y.mean())).mean())
m = GeometricMeanRelativeAbsoluteError()
y_pred = torch.from_numpy(np_y_pred)
y = torch.from_numpy(np_y)
m.reset()
m.update((y_pred, y))
assert np_gmrae == pytest.approx(m.compute())
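# Illustrative sketch (not ignite API, helper name is hypothetical): the reference above
# is the geometric mean relative absolute error,
# exp(mean(log(|y_i - y_pred_i| / |y_i - mean(y)|))), i.e. the absolute error measured
# relative to a mean-predictor baseline. A minimal NumPy restatement, assuming y is not
# constant so the baseline denominator is non-zero:
def _gmrae_reference_sketch(y_pred, y):
    relative_errors = np.abs(y - y_pred) / np.abs(y - y.mean())
    return np.exp(np.log(relative_errors).mean())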
def test_integration():
y_pred = torch.rand(size=(100,))
y = torch.rand(size=(100,))
batch_size = 10
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = GeometricMeanRelativeAbsoluteError()
m.attach(engine, "gmrae")
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
data = list(range(y_pred.shape[0] // batch_size))
gmrae = engine.run(data, max_epochs=1).metrics["gmrae"]
sum_errors = np.log(np.abs(np_y - np_y_pred) / np.abs(np_y - np_y.mean())).sum()
np_len = len(y_pred)
np_ans = np.exp(sum_errors / np_len)
assert np_ans == pytest.approx(gmrae)
def _test_distrib_compute(device):
rank = idist.get_rank()
def _test(metric_device):
metric_device = torch.device(metric_device)
m = GeometricMeanRelativeAbsoluteError(device=metric_device)
y_pred = torch.rand(size=(100,), device=device)
y = torch.rand(size=(100,), device=device)
m.update((y_pred, y))
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y = y.cpu().numpy()
np_y_pred = y_pred.cpu().numpy()
np_gmrae = np.exp(np.log(np.abs(np_y - np_y_pred) / np.abs(np_y - np_y.mean())).mean())
assert m.compute() == pytest.approx(np_gmrae, rel=1e-4)
for i in range(3):
torch.manual_seed(12 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device):
rank = idist.get_rank()
torch.manual_seed(12)
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
y_true = torch.rand(size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(size=(n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
gmrae = GeometricMeanRelativeAbsoluteError(device=metric_device)
gmrae.attach(engine, "gmrae")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "gmrae" in engine.state.metrics
res = engine.state.metrics["gmrae"]
np_y = y_true.cpu().numpy()
np_y_pred = y_preds.cpu().numpy()
np_gmrae = np.exp(np.log(np.abs(np_y - np_y_pred) / np.abs(np_y - np_y.mean())).mean())
assert pytest.approx(res, rel=1e-4) == np_gmrae
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import FractionalAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = FractionalAbsoluteError()
with pytest.raises(
NotComputableError, match=r"FractionalAbsoluteError must have at least one example before it can be computed"
):
m.compute()
def test_wrong_input_shapes():
m = FractionalAbsoluteError()
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4)))
def test_compute():
a = np.random.randn(4)
b = np.random.randn(4)
c = np.random.randn(4)
d = np.random.randn(4)
ground_truth = np.random.randn(4)
m = FractionalAbsoluteError()
m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))
np_sum = (2 * np.abs((a - ground_truth)) / (np.abs(a) + np.abs(ground_truth))).sum()
np_len = len(a)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(b), torch.from_numpy(ground_truth)))
np_sum += (2 * np.abs((b - ground_truth)) / (np.abs(b) + np.abs(ground_truth))).sum()
np_len += len(b)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(c), torch.from_numpy(ground_truth)))
np_sum += (2 * np.abs((c - ground_truth)) / (np.abs(c) + np.abs(ground_truth))).sum()
np_len += len(c)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(d), torch.from_numpy(ground_truth)))
np_sum += (2 * np.abs((d - ground_truth)) / (np.abs(d) + np.abs(ground_truth))).sum()
np_len += len(d)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
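# Illustrative sketch (not ignite API, helper name is hypothetical): test_compute above
# checks the fractional absolute error, mean_i 2 * |y_pred_i - y_i| / (|y_pred_i| + |y_i|).
# A minimal NumPy restatement, assuming no position where both inputs are zero:
def _fractional_abs_error_reference_sketch(y_pred, y):
    # symmetric relative error per element, averaged over all accumulated samples
    return (2 * np.abs(y_pred - y) / (np.abs(y_pred) + np.abs(y))).mean()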
def test_integration():
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = FractionalAbsoluteError()
m.attach(engine, "fab")
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
data = list(range(y_pred.shape[0] // batch_size))
fab = engine.run(data, max_epochs=1).metrics["fab"]
np_sum = (2 * np.abs((np_y_pred - np_y)) / (np.abs(np_y_pred) + np.abs(np_y))).sum()
np_len = len(y_pred)
np_ans = np_sum / np_len
assert np_ans == pytest.approx(fab)
def get_test_cases():
test_cases = [
(torch.rand(size=(100,)), torch.rand(size=(100,)), 10),
(torch.rand(size=(100, 1)), torch.rand(size=(100, 1)), 20),
]
return test_cases
for _ in range(5):
# check multiple random inputs, as exact matches between random tensors are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def _test_distrib_compute(device):
rank = idist.get_rank()
def _test(metric_device):
metric_device = torch.device(metric_device)
m = FractionalAbsoluteError(device=metric_device)
y_pred = torch.rand(size=(100,), device=device)
y = torch.rand(size=(100,), device=device)
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y = y.cpu().numpy()
np_y_pred = y_pred.cpu().numpy()
np_sum = (2 * np.abs((np_y_pred - np_y)) / (np.abs(np_y_pred) + np.abs(np_y))).sum()
np_len = len(np_y_pred)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
for i in range(3):
torch.manual_seed(10 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
y_true = torch.rand(size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(size=(n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
fae = FractionalAbsoluteError(device=metric_device)
fae.attach(engine, "fae")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "fae" in engine.state.metrics
res = engine.state.metrics["fae"]
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
np_y = y_true.cpu().numpy()
np_y_pred = y_preds.cpu().numpy()
np_sum = (2 * np.abs((np_y_pred - np_y)) / (np.abs(np_y_pred) + np.abs(np_y))).sum()
np_len = len(np_y_pred)
np_ans = np_sum / np_len
assert pytest.approx(res) == np_ans
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MedianAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = MedianAbsoluteError()
with pytest.raises(
NotComputableError, match=r"EpochMetric must have at least one example before it can be computed"
):
m.compute()
def test_wrong_input_shapes():
m = MedianAbsoluteError()
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
m.update((torch.rand(4, 1, 2), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Targets should be of shape"):
m.update((torch.rand(4, 1), torch.rand(4, 1, 2)))
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
m.update((torch.rand(4, 1, 2), torch.rand(4)))
with pytest.raises(ValueError, match=r"Targets should be of shape"):
m.update((torch.rand(4), torch.rand(4, 1, 2)))
def test_median_absolute_error():
# See https://github.com/torch/torch7/pull/182
# For even number of elements, PyTorch returns middle element
# NumPy returns average of middle elements
# Size of dataset will be odd for these tests
size = 51
np_y_pred = np.random.rand(size)
np_y = np.random.rand(size)
np_median_absolute_error = np.median(np.abs(np_y - np_y_pred))
m = MedianAbsoluteError()
y_pred = torch.from_numpy(np_y_pred)
y = torch.from_numpy(np_y)
m.reset()
m.update((y_pred, y))
assert np_median_absolute_error == pytest.approx(m.compute())
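# Illustrative note (interpretation, not ignite documentation): this metric accumulates
# all (y_pred, y) pairs and takes the median at compute() time, which is why
# test_zero_sample above matches the generic "EpochMetric" error message. A minimal
# NumPy restatement of the reference, with a hypothetical helper name:
def _median_abs_error_reference_sketch(y_pred, y):
    # median of the element-wise absolute errors over all accumulated samples
    return np.median(np.abs(y - y_pred))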
def test_median_absolute_error_2():
np.random.seed(1)
size = 105
np_y_pred = np.random.rand(size, 1)
np_y = np.random.rand(size, 1)
np.random.shuffle(np_y)
np_median_absolute_error = np.median(np.abs(np_y - np_y_pred))
m = MedianAbsoluteError()
y_pred = torch.from_numpy(np_y_pred)
y = torch.from_numpy(np_y)
m.reset()
batch_size = 16
n_iters = size // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
m.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
assert np_median_absolute_error == pytest.approx(m.compute())
def test_integration_median_absolute_error():
np.random.seed(1)
size = 105
np_y_pred = np.random.rand(size, 1)
np_y = np.random.rand(size, 1)
np.random.shuffle(np_y)
np_median_absolute_error = np.median(np.abs(np_y - np_y_pred))
batch_size = 15
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = MedianAbsoluteError()
m.attach(engine, "median_absolute_error")
data = list(range(size // batch_size))
median_absolute_error = engine.run(data, max_epochs=1).metrics["median_absolute_error"]
assert np_median_absolute_error == pytest.approx(median_absolute_error)
def _test_distrib_compute(device):
def _test(metric_device):
metric_device = torch.device(metric_device)
m = MedianAbsoluteError(device=metric_device)
size = 105
y_pred = torch.randint(1, 10, size=(size, 1), dtype=torch.double, device=device)
y = torch.randint(1, 10, size=(size, 1), dtype=torch.double, device=device)
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = y_pred.cpu().numpy().ravel()
np_y = y.cpu().numpy().ravel()
res = m.compute()
e = np.abs(np_y - np_y_pred)
np_res = np.median(e)
assert pytest.approx(res) == np_res
rank = idist.get_rank()
for i in range(3):
torch.manual_seed(10 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device):
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 105
y_true = torch.rand(size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(size=(n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
m = MedianAbsoluteError(device=metric_device)
m.attach(engine, "mae")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "mae" in engine.state.metrics
res = engine.state.metrics["mae"]
np_y_true = y_true.cpu().numpy().ravel()
np_y_preds = y_preds.cpu().numpy().ravel()
e = np.abs(np_y_true - np_y_preds)
np_res = np.median(e)
assert pytest.approx(res) == np_res
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
rank = idist.get_rank()
for i in range(2):
torch.manual_seed(10 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MedianRelativeAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = MedianRelativeAbsoluteError()
with pytest.raises(
NotComputableError, match=r"EpochMetric must have at least one example before it can be computed"
):
m.compute()
def test_wrong_input_shapes():
m = MedianRelativeAbsoluteError()
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
m.update((torch.rand(4, 1, 2), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Targets should be of shape"):
m.update((torch.rand(4, 1), torch.rand(4, 1, 2)))
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
m.update((torch.rand(4, 1, 2), torch.rand(4)))
with pytest.raises(ValueError, match=r"Targets should be of shape"):
m.update((torch.rand(4), torch.rand(4, 1, 2)))
def test_median_relative_absolute_error():
# See https://github.com/torch/torch7/pull/182
# For even number of elements, PyTorch returns middle element
# NumPy returns average of middle elements
# Size of dataset will be odd for these tests
size = 51
np_y_pred = np.random.rand(size)
np_y = np.random.rand(size)
np_median_absolute_relative_error = np.median(np.abs(np_y - np_y_pred) / np.abs(np_y - np_y.mean()))
m = MedianRelativeAbsoluteError()
y_pred = torch.from_numpy(np_y_pred)
y = torch.from_numpy(np_y)
m.reset()
m.update((y_pred, y))
assert np_median_absolute_relative_error == pytest.approx(m.compute())
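# Illustrative sketch (not ignite API, helper name is hypothetical): the reference above
# is the median of |y_i - y_pred_i| / |y_i - mean(y)|, i.e. the median absolute error
# relative to a mean-predictor baseline. A minimal NumPy restatement, assuming y is not
# constant so the denominator is non-zero:
def _median_rel_abs_error_reference_sketch(y_pred, y):
    return np.median(np.abs(y - y_pred) / np.abs(y - y.mean()))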
def test_median_relative_absolute_error_2():
np.random.seed(1)
size = 105
np_y_pred = np.random.rand(size, 1)
np_y = np.random.rand(size, 1)
np.random.shuffle(np_y)
np_median_absolute_relative_error = np.median(np.abs(np_y - np_y_pred) / np.abs(np_y - np_y.mean()))
m = MedianRelativeAbsoluteError()
y_pred = torch.from_numpy(np_y_pred)
y = torch.from_numpy(np_y)
m.reset()
batch_size = 16
n_iters = size // batch_size + 1
for i in range(n_iters + 1):
idx = i * batch_size
m.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
assert np_median_absolute_relative_error == pytest.approx(m.compute())
def test_integration_median_relative_absolute_error_with_output_transform():
np.random.seed(1)
size = 105
np_y_pred = np.random.rand(size, 1)
np_y = np.random.rand(size, 1)
np.random.shuffle(np_y)
np_median_absolute_relative_error = np.median(np.abs(np_y - np_y_pred) / np.abs(np_y - np_y.mean()))
batch_size = 15
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = MedianRelativeAbsoluteError()
m.attach(engine, "median_absolute_relative_error")
data = list(range(size // batch_size))
median_absolute_relative_error = engine.run(data, max_epochs=1).metrics["median_absolute_relative_error"]
assert np_median_absolute_relative_error == pytest.approx(median_absolute_relative_error)
def _test_distrib_compute(device):
def _test(metric_device):
metric_device = torch.device(metric_device)
m = MedianRelativeAbsoluteError(device=metric_device)
torch.manual_seed(10 + rank)
size = 151
y_pred = torch.randint(1, 10, size=(size, 1), dtype=torch.double, device=device)
y = torch.randint(1, 10, size=(size, 1), dtype=torch.double, device=device)
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = y_pred.cpu().numpy().ravel()
np_y = y.cpu().numpy().ravel()
res = m.compute()
e = np.abs(np_y - np_y_pred) / np.abs(np_y - np_y.mean())
np_res = np.median(e)
assert pytest.approx(res) == np_res
rank = idist.get_rank()
for _ in range(3):
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device):
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
size = 151
y_true = torch.rand(size=(size,)).to(device)
y_preds = torch.rand(size=(size,)).to(device)
def update(engine, i):
return (
y_preds[i * size : (i + 1) * size],
y_true[i * size : (i + 1) * size],
)
engine = Engine(update)
m = MedianRelativeAbsoluteError(device=metric_device)
m.attach(engine, "mare")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_true = idist.all_gather(y_true)
y_preds = idist.all_gather(y_preds)
assert "mare" in engine.state.metrics
res = engine.state.metrics["mare"]
np_y_true = y_true.cpu().numpy().ravel()
np_y_preds = y_preds.cpu().numpy().ravel()
e = np.abs(np_y_true - np_y_preds) / np.abs(np_y_true - np_y_true.mean())
np_res = np.median(e)
assert pytest.approx(res) == np_res
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
rank = idist.get_rank()
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
from typing import Optional
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression._base import _BaseRegression, _torch_median
def test_base_regression_shapes():
class L1(_BaseRegression):
def reset(self):
self._sum_of_errors = 0.0
def _update(self, output):
y_pred, y = output
errors = torch.abs(y.view_as(y_pred) - y_pred)
self._sum_of_errors += torch.sum(errors).item()
def compute(self):
return self._sum_of_errors
m = L1()
with pytest.raises(ValueError, match=r"Input y_pred should have shape \(N,\) or \(N, 1\)"):
y = torch.rand([1, 1, 1])
m.update((y, y))
with pytest.raises(ValueError, match=r"Input y should have shape \(N,\) or \(N, 1\)"):
y = torch.rand([1, 1, 1])
m.update((torch.rand(1, 1), y))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(2), torch.rand(2, 1)))
with pytest.raises(TypeError, match=r"Input y_pred dtype should be float"):
y = torch.tensor([1, 1])
m.update((y, y))
with pytest.raises(TypeError, match=r"Input y dtype should be float"):
y = torch.tensor([1, 1])
m.update((y.float(), y))
@pytest.mark.parametrize("size", [100, 101, (30, 3), (31, 3)])
def test_torch_median_numpy(size, device: Optional[str] = None):
data = torch.rand(size).to(device)
assert _torch_median(data) == np.median(data.cpu().numpy())
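# Illustrative note (interpretation, not ignite documentation): the parametrized sizes mix
# even and odd lengths because torch.median returns the lower middle element of an
# even-length input while np.median averages the two middle values, e.g.
#   torch.median(torch.tensor([1.0, 2.0, 3.0, 4.0]))  -> tensor(2.)
#   np.median(np.array([1.0, 2.0, 3.0, 4.0]))         -> 2.5
# _torch_median is expected to follow the NumPy convention, which is what
# test_torch_median_numpy asserts across CPU, GPU and XLA devices.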
@pytest.mark.tpu
@pytest.mark.parametrize("size", [100, 101, (30, 3), (31, 3)])
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_on_even_size_xla(size):
device = "xla"
test_torch_median_numpy(size, device=device)
@pytest.mark.parametrize("size", [100, 101, (30, 3), (31, 3)])
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_on_even_size_gpu(size):
test_torch_median_numpy(size, device="cuda")
@pytest.mark.parametrize("size", [100, 101, (30, 3), (31, 3)])
def test_create_even_size_cpu(size):
test_torch_median_numpy(size, device="cpu")
|
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import FractionalBias
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = FractionalBias()
with pytest.raises(
NotComputableError, match=r"FractionalBias must have at least one example before it can be computed"
):
m.compute()
def test_wrong_input_shapes():
m = FractionalBias()
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4)))
def test_fractional_bias():
a = np.random.randn(4)
b = np.random.randn(4)
c = np.random.randn(4)
d = np.random.randn(4)
ground_truth = np.random.randn(4)
m = FractionalBias()
m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))
np_sum = (2 * (ground_truth - a) / (a + ground_truth)).sum()
np_len = len(a)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(b), torch.from_numpy(ground_truth)))
np_sum += (2 * (ground_truth - b) / (b + ground_truth)).sum()
np_len += len(b)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(c), torch.from_numpy(ground_truth)))
np_sum += (2 * (ground_truth - c) / (c + ground_truth)).sum()
np_len += len(c)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(d), torch.from_numpy(ground_truth)))
np_sum += (2 * (ground_truth - d) / (d + ground_truth)).sum()
np_len += len(d)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
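# Hedged reference helper (illustrative only, not part of ignite): the fractional bias of a
# batch computed in one shot, mirroring the incremental sums maintained above:
#     FB = (1 / n) * sum(2 * (y_true - y_pred) / (y_pred + y_true))
def _reference_fractional_bias(np_y_pred, np_y_true):
    return (2 * (np_y_true - np_y_pred) / (np_y_pred + np_y_true)).sum() / len(np_y_pred)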
def test_integration():
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = FractionalBias()
m.attach(engine, "fb")
np_y = y.double().numpy().ravel()
np_y_pred = y_pred.double().numpy().ravel()
data = list(range(y_pred.shape[0] // batch_size))
fb = engine.run(data, max_epochs=1).metrics["fb"]
np_sum = (2 * (np_y - np_y_pred) / (np_y_pred + np_y)).sum()
np_len = len(y_pred)
np_ans = np_sum / np_len
assert np_ans == pytest.approx(fb)
def get_test_cases():
test_cases = [
(torch.rand(size=(100,)), torch.rand(size=(100,)), 10),
(torch.rand(size=(100, 1)), torch.rand(size=(100, 1)), 20),
]
return test_cases
for _ in range(5):
# check several random inputs, since exact random occurrences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def test_error_is_not_nan():
m = FractionalBias()
m.update((torch.zeros(4), torch.zeros(4)))
assert not (torch.isnan(m._sum_of_errors).any() or torch.isinf(m._sum_of_errors).any()), m._sum_of_errors
def _test_distrib_compute(device, tol=1e-5):
rank = idist.get_rank()
def _test(metric_device):
metric_device = torch.device(metric_device)
m = FractionalBias(device=metric_device)
y_pred = torch.randint(0, 10, size=(10,), device=device).float()
y = torch.randint(0, 10, size=(10,), device=device).float()
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = y_pred.cpu().numpy()
np_y = y.cpu().numpy()
res = m.compute()
np_sum = (2 * (np_y - np_y_pred) / (np_y_pred + np_y + 1e-30)).sum()
np_len = len(y_pred)
np_ans = np_sum / np_len
assert np_ans == pytest.approx(res, rel=tol)
for i in range(3):
torch.manual_seed(10 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device, tol=1e-5):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
y_true = torch.rand(size=(n_iters * batch_size,), dtype=torch.double).to(device)
y_preds = torch.rand(size=(n_iters * batch_size,), dtype=torch.double).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
m = FractionalBias(device=metric_device)
m.attach(engine, "fb")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "fb" in engine.state.metrics
res = engine.state.metrics["fb"]
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
np_y_true = y_true.cpu().numpy()
np_y_preds = y_preds.cpu().numpy()
np_sum = (2 * (np_y_true - np_y_preds) / (np_y_preds + np_y_true + 1e-30)).sum()
np_len = len(y_preds)
np_ans = np_sum / np_len
assert pytest.approx(res, rel=tol) == np_ans
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device, tol=1e-4)
_test_distrib_integration(device, tol=1e-4)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device, tol=1e-4)
_test_distrib_integration(device, tol=1e-4)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MeanError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = MeanError()
with pytest.raises(NotComputableError, match=r"MeanError must have at least one example before it can be computed"):
m.compute()
def test_wrong_input_shapes():
m = MeanError()
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4)))
def test_mean_error():
a = np.random.randn(4)
b = np.random.randn(4)
c = np.random.randn(4)
d = np.random.randn(4)
ground_truth = np.random.randn(4)
m = MeanError()
m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))
np_sum = (ground_truth - a).sum()
np_len = len(a)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(b), torch.from_numpy(ground_truth)))
np_sum += (ground_truth - b).sum()
np_len += len(b)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(c), torch.from_numpy(ground_truth)))
np_sum += (ground_truth - c).sum()
np_len += len(c)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(d), torch.from_numpy(ground_truth)))
np_sum += (ground_truth - d).sum()
np_len += len(d)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
def test_integration():
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = MeanError()
m.attach(engine, "me")
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
data = list(range(y_pred.shape[0] // batch_size))
me = engine.run(data, max_epochs=1).metrics["me"]
np_sum = (np_y - np_y_pred).sum()
np_len = len(np_y_pred)
np_ans = np_sum / np_len
assert np_ans == pytest.approx(me, rel=1e-4)
def get_test_cases():
test_cases = [
(torch.rand(size=(50,)), torch.rand(size=(50,)), 1),
(torch.rand(size=(50, 1)), torch.rand(size=(50, 1)), 10),
]
return test_cases
for _ in range(5):
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def _test_distrib_compute(device):
rank = idist.get_rank()
def _test(metric_device):
metric_device = torch.device(metric_device)
m = MeanError(device=metric_device)
y_pred = torch.rand(size=(100,), device=device)
y = torch.rand(size=(100,), device=device)
m.update((y_pred, y))
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y = y.cpu().numpy()
np_y_pred = y_pred.cpu().numpy()
np_sum = (np_y - np_y_pred).sum()
np_len = len(np_y_pred)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
for i in range(3):
torch.manual_seed(10 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device, tol=1e-5):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
y_true = torch.rand(size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(size=(n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
me = MeanError(device=metric_device)
me.attach(engine, "me")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "me" in engine.state.metrics
res = engine.state.metrics["me"]
np_y = y_true.cpu().numpy()
np_y_pred = y_preds.cpu().numpy()
np_sum = (np_y - np_y_pred).sum()
np_len = len(np_y_pred)
np_ans = np_sum / np_len
assert pytest.approx(res, rel=tol) == np_ans
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
from pytest import approx, raises
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MeanAbsoluteRelativeError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_wrong_input_shapes():
m = MeanAbsoluteRelativeError()
with raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4)))
def test_mean_absolute_relative_error():
a = torch.rand(4)
b = torch.rand(4)
c = torch.rand(4)
d = torch.rand(4)
ground_truth = torch.rand(4)
m = MeanAbsoluteRelativeError()
m.update((a, ground_truth))
abs_error_a = torch.sum(torch.abs(ground_truth - a) / torch.abs(ground_truth))
num_samples_a = a.size()[0]
sum_error = abs_error_a
sum_samples = num_samples_a
MARE_a = sum_error / sum_samples
assert m.compute() == approx(MARE_a.item())
m.update((b, ground_truth))
abs_error_b = torch.sum(torch.abs(ground_truth - b) / torch.abs(ground_truth))
num_samples_b = b.size()[0]
sum_error += abs_error_b
sum_samples += num_samples_b
MARE_b = sum_error / sum_samples
assert m.compute() == approx(MARE_b.item())
m.update((c, ground_truth))
abs_error_c = torch.sum(torch.abs(ground_truth - c) / torch.abs(ground_truth))
num_samples_c = c.size()[0]
sum_error += abs_error_c
sum_samples += num_samples_c
MARE_c = sum_error / sum_samples
assert m.compute() == approx(MARE_c.item())
m.update((d, ground_truth))
abs_error_d = torch.sum(torch.abs(ground_truth - d) / torch.abs(ground_truth))
num_samples_d = d.size()[0]
sum_error += abs_error_d
sum_samples += num_samples_d
MARE_d = sum_error / sum_samples
assert m.compute() == approx(MARE_d.item())
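# Hedged reference helper (illustrative only, not part of ignite): the mean absolute relative
# error written as a single expression, matching the running sums kept above:
#     MARE = (1 / n) * sum(|y_true - y_pred| / |y_true|)
def _reference_mare(y_pred, y_true):
    return (torch.abs(y_true - y_pred) / torch.abs(y_true)).sum().item() / y_pred.shape[0]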
def test_zero_div():
a = torch.tensor([2.0, -1.0, -1.0, 2.0])
ground_truth = torch.tensor([0.0, 0.5, 0.2, 1.0])
m = MeanAbsoluteRelativeError()
with raises(NotComputableError, match=r"The ground truth has 0"):
m.update((a, ground_truth))
def test_zero_sample():
m = MeanAbsoluteRelativeError()
with raises(NotComputableError, match=r"MeanAbsoluteRelativeError must have at least one sample"):
m.compute()
def test_integration():
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = MeanAbsoluteRelativeError()
m.attach(engine, "mare")
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
data = list(range(y_pred.shape[0] // batch_size))
mare = engine.run(data, max_epochs=1).metrics["mare"]
abs_error = np.sum(abs(np_y - np_y_pred) / abs(np_y))
num_samples = len(y_pred)
res = abs_error / num_samples
assert res == approx(mare)
def get_test_cases():
test_cases = [
(torch.rand(size=(100,)), torch.rand(size=(100,)), 10),
(torch.rand(size=(100, 1)), torch.rand(size=(100, 1)), 20),
]
return test_cases
for _ in range(5):
# check several random inputs, since exact random occurrences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def _test_distrib_compute(device):
rank = idist.get_rank()
def _test(metric_device):
metric_device = torch.device(metric_device)
m = MeanAbsoluteRelativeError(device=metric_device)
y_pred = torch.randint(1, 11, size=(10,), device=device).float()
y = torch.randint(1, 11, size=(10,), device=device).float()
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = y_pred.cpu().numpy()
np_y = y.cpu().numpy()
res = m.compute()
abs_error = np.sum(abs(np_y - np_y_pred) / abs(np_y))
num_samples = len(y_pred)
np_res = abs_error / num_samples
assert np_res == approx(res)
for i in range(3):
torch.manual_seed(10 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
y_true = torch.rand(size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(size=(n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
m = MeanAbsoluteRelativeError(device=metric_device)
m.attach(engine, "mare")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "mare" in engine.state.metrics
mare = engine.state.metrics["mare"]
np_y_true = y_true.cpu().numpy()
np_y_preds = y_preds.cpu().numpy()
abs_error = np.sum(abs(np_y_true - np_y_preds) / abs(np_y_true))
num_samples = len(y_preds)
np_res = abs_error / num_samples
assert approx(mare) == np_res
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MaximumAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = MaximumAbsoluteError()
with pytest.raises(
NotComputableError, match=r"MaximumAbsoluteError must have at least one example before it can be computed"
):
m.compute()
def test_wrong_input_shapes():
m = MaximumAbsoluteError()
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4)))
def test_maximum_absolute_error():
a = np.random.randn(4)
b = np.random.randn(4)
c = np.random.randn(4)
d = np.random.randn(4)
ground_truth = np.random.randn(4)
m = MaximumAbsoluteError()
np_ans = -1
m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))
np_max = np.max(np.abs((a - ground_truth)))
np_ans = np_max if np_max > np_ans else np_ans
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(b), torch.from_numpy(ground_truth)))
np_max = np.max(np.abs((b - ground_truth)))
np_ans = np_max if np_max > np_ans else np_ans
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(c), torch.from_numpy(ground_truth)))
np_max = np.max(np.abs((c - ground_truth)))
np_ans = np_max if np_max > np_ans else np_ans
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(d), torch.from_numpy(ground_truth)))
np_max = np.max(np.abs((d - ground_truth)))
np_ans = np_max if np_max > np_ans else np_ans
assert m.compute() == pytest.approx(np_ans)
def test_integration():
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = MaximumAbsoluteError()
m.attach(engine, "mae")
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
data = list(range(y_pred.shape[0] // batch_size))
mae = engine.run(data, max_epochs=1).metrics["mae"]
np_max = np.max(np.abs((np_y_pred - np_y)))
assert np_max == pytest.approx(mae)
def get_test_cases():
test_cases = [
(torch.rand(size=(100,)), torch.rand(size=(100,)), 10),
(torch.rand(size=(100, 1)), torch.rand(size=(100, 1)), 20),
]
return test_cases
for _ in range(5):
# check several random inputs, since exact random occurrences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def _test_distrib_compute(device):
rank = idist.get_rank()
def _test(metric_device):
metric_device = torch.device(metric_device)
m = MaximumAbsoluteError(device=metric_device)
y_pred = torch.randint(0, 10, size=(10,), device=device).float()
y = torch.randint(0, 10, size=(10,), device=device).float()
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = y_pred.cpu().numpy()
np_y = y.cpu().numpy()
res = m.compute()
np_max = np.max(np.abs((np_y_pred - np_y)))
assert np_max == pytest.approx(res)
for i in range(3):
torch.manual_seed(10 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
y_true = torch.rand(size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(size=(n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
m = MaximumAbsoluteError(device=metric_device)
m.attach(engine, "mae")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "mae" in engine.state.metrics
res = engine.state.metrics["mae"]
np_y_true = y_true.cpu().numpy()
np_y_preds = y_preds.cpu().numpy()
np_max = np.max(np.abs((np_y_preds - np_y_true)))
assert pytest.approx(res) == np_max
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
from sklearn.metrics import r2_score
import ignite.distributed as idist
from ignite.contrib.metrics.regression import R2Score
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = R2Score()
with pytest.raises(NotComputableError, match=r"R2Score must have at least one example before it can be computed"):
m.compute()
def test_wrong_input_shapes():
m = R2Score()
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4)))
def test_r2_score():
size = 51
np_y_pred = np.random.rand(size)
np_y = np.random.rand(size)
m = R2Score()
y_pred = torch.from_numpy(np_y_pred)
y = torch.from_numpy(np_y)
m.reset()
m.update((y_pred, y))
assert r2_score(np_y, np_y_pred) == pytest.approx(m.compute())
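# For reference (standard definition, not ignite-specific): sklearn's r2_score computes
#     R^2 = 1 - sum((y - y_pred) ** 2) / sum((y - y.mean()) ** 2)
# so an equivalent numpy check would be, e.g.:
#     np_ref = 1 - ((np_y - np_y_pred) ** 2).sum() / ((np_y - np_y.mean()) ** 2).sum()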
def test_r2_score_2():
np.random.seed(1)
size = 105
np_y_pred = np.random.rand(size, 1)
np_y = np.random.rand(size, 1)
np.random.shuffle(np_y)
m = R2Score()
y_pred = torch.from_numpy(np_y_pred)
y = torch.from_numpy(np_y)
m.reset()
batch_size = 16
n_iters = size // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
m.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
assert r2_score(np_y, np_y_pred) == pytest.approx(m.compute())
def test_integration_r2_score():
np.random.seed(1)
size = 105
np_y_pred = np.random.rand(size, 1)
np_y = np.random.rand(size, 1)
np.random.shuffle(np_y)
batch_size = 15
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = R2Score()
m.attach(engine, "r2_score")
data = list(range(size // batch_size))
r_squared = engine.run(data, max_epochs=1).metrics["r2_score"]
assert r2_score(np_y, np_y_pred) == pytest.approx(r_squared)
def _test_distrib_compute(device, tol=1e-6):
rank = idist.get_rank()
def _test(metric_device):
metric_device = torch.device(metric_device)
m = R2Score(device=metric_device)
y_pred = torch.randint(0, 10, size=(10,), device=device).float()
y = torch.randint(0, 10, size=(10,), device=device).float()
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = y_pred.cpu().numpy()
np_y = y.cpu().numpy()
res = m.compute()
assert r2_score(np_y, np_y_pred) == pytest.approx(res, abs=tol)
for i in range(3):
torch.manual_seed(10 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
y_true = torch.randint(0, 10, size=(n_iters * batch_size,)).to(device).float()
y_preds = torch.randint(0, 10, size=(n_iters * batch_size,)).to(device).float()
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
r2 = R2Score(device=metric_device)
r2.attach(engine, "r2")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "r2" in engine.state.metrics
res = engine.state.metrics["r2"]
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
true_res = r2_score(y_true.cpu().numpy(), y_preds.cpu().numpy())
assert pytest.approx(res) == true_res
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device, tol=1e-3)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device, tol=1e-3)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import sys
from unittest.mock import call, MagicMock
import pytest
import torch
import torch.nn as nn
from torch.utils.data.distributed import DistributedSampler
import ignite.contrib.handlers as handlers
import ignite.distributed as idist
from ignite.contrib.engines.common import (
_setup_logging,
add_early_stopping_by_val_score,
gen_save_best_models_by_val_score,
save_best_model_by_val_score,
setup_any_logging,
setup_clearml_logging,
setup_common_training_handlers,
setup_mlflow_logging,
setup_neptune_logging,
setup_plx_logging,
setup_tb_logging,
setup_trains_logging,
setup_visdom_logging,
setup_wandb_logging,
)
from ignite.engine import Engine, Events
from ignite.handlers import DiskSaver, TerminateOnNan
class DummyModel(nn.Module):
def __init__(self):
super(DummyModel, self).__init__()
self.net = nn.Linear(1, 1)
def forward(self, x):
return self.net(x)
def _test_setup_common_training_handlers(
dirname,
device,
rank=0,
local_rank=0,
distributed=False,
lr_scheduler=None,
save_handler=None,
output_transform=lambda loss: loss,
):
lr = 0.01
step_size = 100
gamma = 0.5
num_iters = 100
num_epochs = 10
model = DummyModel().to(device)
if distributed and "cuda" in torch.device(device).type:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank)
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
if lr_scheduler is None:
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
elif isinstance(lr_scheduler, str) and lr_scheduler == "ignite|LRScheduler":
from ignite.contrib.handlers import LRScheduler
lr_scheduler = LRScheduler(torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma))
elif isinstance(lr_scheduler, str) and lr_scheduler == "ignite":
from ignite.contrib.handlers import PiecewiseLinear
milestones_values = [(0, 0.0), (step_size, lr), (num_iters * (num_epochs - 1), 0.0)]
lr_scheduler = PiecewiseLinear(optimizer, param_name="lr", milestones_values=milestones_values)
else:
raise ValueError(f"Unknown lr_scheduler: {lr_scheduler}")
def update_fn(engine, batch):
optimizer.zero_grad()
x = torch.tensor([batch], requires_grad=True, device=device)
y_pred = model(x)
loss = y_pred.mean()
loss.backward()
optimizer.step()
return output_transform(loss)
train_sampler = None
if distributed and idist.get_world_size() > 1:
train_sampler = MagicMock(spec=DistributedSampler)
train_sampler.set_epoch = MagicMock()
trainer = Engine(update_fn)
setup_common_training_handlers(
trainer,
train_sampler=train_sampler,
to_save={"model": model, "optimizer": optimizer},
save_every_iters=75,
output_path=dirname,
save_handler=save_handler,
lr_scheduler=lr_scheduler,
with_gpu_stats=False,
output_names=["batch_loss"],
with_pbars=True,
with_pbar_on_iters=True,
log_every_iters=50,
)
data = [i * 0.1 for i in range(num_iters)]
trainer.run(data, max_epochs=num_epochs)
# check handlers
handlers = trainer._event_handlers[Events.ITERATION_COMPLETED]
for cls in [
TerminateOnNan,
]:
assert any([isinstance(h[0], cls) for h in handlers]), f"{handlers}"
assert "batch_loss" in trainer.state.metrics
# Check saved checkpoint
if rank == 0:
if save_handler is not None:
dirname = save_handler.dirname
checkpoints = list(os.listdir(dirname))
assert len(checkpoints) == 1
for v in [
"training_checkpoint",
]:
assert any([v in c for c in checkpoints])
# Check LR scheduling
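# With the default StepLR(step_size=100, gamma=0.5) stepped once per iteration, the lr is
# halved every step_size iterations, so after num_iters * num_epochs iterations it should
# not exceed lr * gamma ** ((num_iters * num_epochs - 1) // step_size), hence the bound below.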
assert optimizer.param_groups[0]["lr"] <= lr * gamma ** (
(num_iters * num_epochs - 1) // step_size
), f"{optimizer.param_groups[0]['lr']} vs {lr * gamma ** ((num_iters * num_epochs - 1) // step_size)}"
def test_asserts_setup_common_training_handlers():
trainer = Engine(lambda e, b: None)
with pytest.raises(
ValueError,
match=r"If to_save argument is provided then output_path or save_handler arguments should be also defined",
):
setup_common_training_handlers(trainer, to_save={})
with pytest.raises(ValueError, match=r"Arguments output_path and save_handler are mutually exclusive"):
setup_common_training_handlers(trainer, to_save={}, output_path="abc", save_handler=lambda c, f, m: None)
with pytest.warns(UserWarning, match=r"Argument train_sampler is a distributed sampler"):
train_sampler = MagicMock(spec=DistributedSampler)
setup_common_training_handlers(trainer, train_sampler=train_sampler)
if not torch.cuda.is_available():
with pytest.raises(RuntimeError, match=r"This contrib module requires available GPU"):
setup_common_training_handlers(trainer, with_gpu_stats=True)
with pytest.raises(TypeError, match=r"Unhandled type of update_function's output."):
trainer = Engine(lambda e, b: None)
setup_common_training_handlers(
trainer,
output_names=["loss"],
with_pbar_on_iters=False,
with_pbars=False,
with_gpu_stats=False,
stop_on_nan=False,
clear_cuda_cache=False,
)
trainer.run([1])
def test_no_warning_with_train_sampler(recwarn):
from torch.utils.data import RandomSampler
trainer = Engine(lambda e, b: None)
train_sampler = RandomSampler([0, 1, 2])
setup_common_training_handlers(trainer, train_sampler=train_sampler)
assert len(recwarn) == 0, recwarn.pop()
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("WORLD_SIZE" not in os.environ, reason="Should have more than 1 worker")
def test_assert_setup_common_training_handlers_wrong_train_sampler(distributed_context_single_node_gloo):
trainer = Engine(lambda e, b: None)
from torch.utils.data.sampler import RandomSampler
with pytest.raises(TypeError, match=r"Train sampler should be torch DistributedSampler"):
train_sampler = RandomSampler([0, 1, 2, 3])
setup_common_training_handlers(trainer, train_sampler)
def test_setup_common_training_handlers(dirname, capsys):
_test_setup_common_training_handlers(dirname, device="cpu")
# Check epoch-wise pbar
captured = capsys.readouterr()
out = captured.err.split("\r")
out = list(map(lambda x: x.strip(), out))
out = list(filter(None, out))
assert "Epoch" in out[-1] or "Epoch" in out[-2], f"{out[-2]}, {out[-1]}"
_test_setup_common_training_handlers(dirname, device="cpu", output_transform=lambda loss: [loss])
# Check epoch-wise pbar
captured = capsys.readouterr()
out = captured.err.split("\r")
out = list(map(lambda x: x.strip(), out))
out = list(filter(None, out))
assert "Epoch" in out[-1] or "Epoch" in out[-2], f"{out[-2]}, {out[-1]}"
_test_setup_common_training_handlers(dirname, device="cpu", output_transform=lambda loss: {"batch_loss": loss})
# Check epoch-wise pbar
captured = capsys.readouterr()
out = captured.err.split("\r")
out = list(map(lambda x: x.strip(), out))
out = list(filter(None, out))
assert "Epoch" in out[-1] or "Epoch" in out[-2], f"{out[-2]}, {out[-1]}"
def test_setup_common_training_handlers_using_save_handler(dirname, capsys):
save_handler = DiskSaver(dirname=dirname, require_empty=False)
_test_setup_common_training_handlers(dirname=None, device="cpu", save_handler=save_handler)
# Check epoch-wise pbar
captured = capsys.readouterr()
out = captured.err.split("\r")
out = list(map(lambda x: x.strip(), out))
out = list(filter(None, out))
assert "Epoch" in out[-1] or "Epoch" in out[-2], f"{out[-2]}, {out[-1]}"
def test_save_best_model_by_val_score(dirname):
acc_scores = [0.1, 0.2, 0.3, 0.4, 0.3, 0.5, 0.6, 0.61, 0.7, 0.5]
def setup_trainer():
trainer = Engine(lambda e, b: None)
evaluator = Engine(lambda e, b: None)
model = DummyModel()
@trainer.on(Events.EPOCH_COMPLETED)
def validate(engine):
evaluator.run([0, 1])
@evaluator.on(Events.EPOCH_COMPLETED)
def set_eval_metric(engine):
acc = acc_scores[trainer.state.epoch - 1]
engine.state.metrics = {"acc": acc, "loss": 1 - acc}
return trainer, evaluator, model
trainer, evaluator, model = setup_trainer()
save_best_model_by_val_score(dirname, evaluator, model, metric_name="acc", n_saved=2, trainer=trainer)
trainer.run([0, 1], max_epochs=len(acc_scores))
assert set(os.listdir(dirname)) == {"best_model_8_val_acc=0.6100.pt", "best_model_9_val_acc=0.7000.pt"}
for fname in os.listdir(dirname):
os.unlink(f"{dirname}/{fname}")
trainer, evaluator, model = setup_trainer()
save_best_model_by_val_score(
dirname, evaluator, model, metric_name="loss", n_saved=2, trainer=trainer, score_sign=-1.0
)
trainer.run([0, 1], max_epochs=len(acc_scores))
assert set(os.listdir(dirname)) == {"best_model_8_val_loss=-0.3900.pt", "best_model_9_val_loss=-0.3000.pt"}
def test_gen_save_best_models_by_val_score():
acc_scores = [0.1, 0.2, 0.3, 0.4, 0.3, 0.5, 0.6, 0.61, 0.7, 0.5]
loss_scores = [0.9, 0.8, 0.7, 0.6, 0.7, 0.5, 0.4, 0.39, 0.3, 0.5]
def setup_trainer():
trainer = Engine(lambda e, b: None)
evaluator = Engine(lambda e, b: None)
model = DummyModel()
@trainer.on(Events.EPOCH_COMPLETED)
def validate(engine):
evaluator.run([0, 1])
@evaluator.on(Events.EPOCH_COMPLETED)
def set_eval_metric(engine):
acc = acc_scores[trainer.state.epoch - 1]
loss = loss_scores[trainer.state.epoch - 1]
engine.state.metrics = {"acc": acc, "loss": loss}
return trainer, evaluator, model
trainer, evaluator, model = setup_trainer()
save_handler = MagicMock()
gen_save_best_models_by_val_score(
save_handler, evaluator, {"a": model, "b": model}, metric_name="acc", n_saved=2, trainer=trainer
)
trainer.run([0, 1], max_epochs=len(acc_scores))
assert save_handler.call_count == len(acc_scores) - 2 # 2 score values (0.3 and 0.5) are not the best
obj_to_save = {"a": model.state_dict(), "b": model.state_dict()}
save_handler.assert_has_calls(
[
call(
obj_to_save,
f"best_checkpoint_{e}_val_acc={p:.4f}.pt",
dict([("basename", "best_checkpoint"), ("score_name", "val_acc"), ("priority", p)]),
)
for e, p in zip([1, 2, 3, 4, 6, 7, 8, 9], [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.61, 0.7])
],
any_order=True,
)
trainer, evaluator, model = setup_trainer()
save_handler = MagicMock()
gen_save_best_models_by_val_score(
save_handler,
evaluator,
{"a": model, "b": model},
metric_name="loss",
n_saved=2,
trainer=trainer,
score_sign=-1.0,
)
trainer.run([0, 1], max_epochs=len(acc_scores))
assert save_handler.call_count == len(acc_scores) - 2 # 2 score values (-0.7 and -0.5) are not the best
obj_to_save = {"a": model.state_dict(), "b": model.state_dict()}
save_handler.assert_has_calls(
[
call(
obj_to_save,
f"best_checkpoint_{e}_val_loss={p:.4f}.pt",
dict([("basename", "best_checkpoint"), ("score_name", "val_loss"), ("priority", p)]),
)
for e, p in zip([1, 2, 3, 4, 6, 7, 8, 9], [-0.9, -0.8, -0.7, -0.6, -0.5, -0.4, -0.39, -0.3])
],
any_order=True,
)
def test_add_early_stopping_by_val_score():
acc_scores = [0.1, 0.2, 0.3, 0.4, 0.3, 0.3, 0.2, 0.1, 0.1, 0.0]
def setup_trainer():
trainer = Engine(lambda e, b: None)
evaluator = Engine(lambda e, b: None)
@trainer.on(Events.EPOCH_COMPLETED)
def validate(engine):
evaluator.run([0, 1])
@evaluator.on(Events.EPOCH_COMPLETED)
def set_eval_metric(engine):
acc = acc_scores[trainer.state.epoch - 1]
engine.state.metrics = {"acc": acc, "loss": 1 - acc}
return trainer, evaluator
trainer, evaluator = setup_trainer()
add_early_stopping_by_val_score(patience=3, evaluator=evaluator, trainer=trainer, metric_name="acc")
state = trainer.run([0, 1], max_epochs=len(acc_scores))
assert state.epoch == 7
trainer, evaluator = setup_trainer()
add_early_stopping_by_val_score(
patience=3, evaluator=evaluator, trainer=trainer, metric_name="loss", score_sign=-1.0
)
state = trainer.run([0, 1], max_epochs=len(acc_scores))
assert state.epoch == 7
def test_deprecated_setup_any_logging():
with pytest.raises(DeprecationWarning, match=r"deprecated since version 0.4.0"):
setup_any_logging(None, None, None, None, None, None)
def test__setup_logging_wrong_args():
with pytest.raises(TypeError, match=r"Argument optimizers should be either a single optimizer or"):
_setup_logging(MagicMock(), MagicMock(), "abc", MagicMock(), 1)
with pytest.raises(TypeError, match=r"Argument evaluators should be either a single engine or"):
_setup_logging(MagicMock(), MagicMock(), MagicMock(spec=torch.optim.SGD), "abc", 1)
def _test_setup_logging(
setup_logging_fn,
kwargs_dict,
output_handler_cls,
opt_params_handler_cls,
with_eval=True,
with_optim=True,
as_class=False,
log_every_iters=1,
):
trainer = Engine(lambda e, b: b)
evaluators = None
optimizers = None
if with_eval:
evaluator = Engine(lambda e, b: None)
acc_scores = [0.1, 0.2, 0.3, 0.4, 0.3, 0.3, 0.2, 0.1, 0.1, 0.0]
@trainer.on(Events.EPOCH_COMPLETED)
def validate(engine):
evaluator.run([0, 1])
@evaluator.on(Events.EPOCH_COMPLETED)
def set_eval_metric(engine):
engine.state.metrics = {"acc": acc_scores[trainer.state.epoch - 1]}
evaluators = {"validation": evaluator}
if as_class:
evaluators = evaluators["validation"]
if with_optim:
t = torch.tensor([0])
optimizers = {"optimizer": torch.optim.SGD([t], lr=0.01)}
if as_class:
optimizers = optimizers["optimizer"]
kwargs_dict["trainer"] = trainer
kwargs_dict["optimizers"] = optimizers
kwargs_dict["evaluators"] = evaluators
kwargs_dict["log_every_iters"] = log_every_iters
x_logger = setup_logging_fn(**kwargs_dict)
handlers = trainer._event_handlers[Events.ITERATION_COMPLETED]
for cls in [
output_handler_cls,
]:
assert any([isinstance(h[0], cls) for h in handlers]), f"{handlers}"
if with_optim:
handlers = trainer._event_handlers[Events.ITERATION_STARTED]
for cls in [
opt_params_handler_cls,
]:
assert any([isinstance(h[0], cls) for h in handlers]), f"{handlers}"
if with_eval:
handlers = evaluator._event_handlers[Events.COMPLETED]
for cls in [
output_handler_cls,
]:
assert any([isinstance(h[0], cls) for h in handlers]), f"{handlers}"
data = [0, 1, 2]
trainer.run(data, max_epochs=10)
if "output_path" in kwargs_dict:
tb_files = list(os.listdir(kwargs_dict["output_path"]))
assert len(tb_files) == 1
for v in [
"events",
]:
assert any([v in c for c in tb_files]), f"{tb_files}"
return x_logger
def test_setup_tb_logging(dirname):
tb_logger = _test_setup_logging(
setup_logging_fn=setup_tb_logging,
kwargs_dict={"output_path": dirname / "t1"},
output_handler_cls=handlers.tensorboard_logger.OutputHandler,
opt_params_handler_cls=handlers.tensorboard_logger.OptimizerParamsHandler,
with_eval=False,
with_optim=False,
)
tb_logger.close()
tb_logger = _test_setup_logging(
setup_logging_fn=setup_tb_logging,
kwargs_dict={"output_path": dirname / "t2"},
output_handler_cls=handlers.tensorboard_logger.OutputHandler,
opt_params_handler_cls=handlers.tensorboard_logger.OptimizerParamsHandler,
with_eval=True,
with_optim=True,
)
tb_logger.close()
tb_logger = _test_setup_logging(
setup_logging_fn=setup_tb_logging,
kwargs_dict={"output_path": dirname / "t3"},
output_handler_cls=handlers.tensorboard_logger.OutputHandler,
opt_params_handler_cls=handlers.tensorboard_logger.OptimizerParamsHandler,
with_eval=True,
with_optim=True,
as_class=True,
log_every_iters=None,
)
tb_logger.close()
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_setup_visdom_logging(visdom_offline_logfile):
vis_logger = _test_setup_logging(
setup_logging_fn=setup_visdom_logging,
kwargs_dict={"offline": True, "log_to_filename": visdom_offline_logfile},
output_handler_cls=handlers.visdom_logger.OutputHandler,
opt_params_handler_cls=handlers.visdom_logger.OptimizerParamsHandler,
with_eval=False,
with_optim=False,
)
vis_logger.close()
vis_logger = _test_setup_logging(
setup_logging_fn=setup_visdom_logging,
kwargs_dict={"offline": True, "log_to_filename": visdom_offline_logfile},
output_handler_cls=handlers.visdom_logger.OutputHandler,
opt_params_handler_cls=handlers.visdom_logger.OptimizerParamsHandler,
with_eval=True,
with_optim=True,
)
vis_logger.close()
def test_setup_plx_logging():
os.environ["POLYAXON_NO_OP"] = "1"
_test_setup_logging(
setup_logging_fn=setup_plx_logging,
kwargs_dict={},
output_handler_cls=handlers.polyaxon_logger.OutputHandler,
opt_params_handler_cls=handlers.polyaxon_logger.OptimizerParamsHandler,
with_eval=False,
with_optim=False,
)
_test_setup_logging(
setup_logging_fn=setup_plx_logging,
kwargs_dict={},
output_handler_cls=handlers.polyaxon_logger.OutputHandler,
opt_params_handler_cls=handlers.polyaxon_logger.OptimizerParamsHandler,
with_eval=True,
with_optim=True,
)
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_setup_mlflow_logging(dirname):
mlf_logger = _test_setup_logging(
setup_logging_fn=setup_mlflow_logging,
kwargs_dict={"tracking_uri": str(dirname / "p1")},
output_handler_cls=handlers.mlflow_logger.OutputHandler,
opt_params_handler_cls=handlers.mlflow_logger.OptimizerParamsHandler,
with_eval=False,
with_optim=False,
)
mlf_logger.close()
mlf_logger = _test_setup_logging(
setup_logging_fn=setup_mlflow_logging,
kwargs_dict={"tracking_uri": str(dirname / "p2")},
output_handler_cls=handlers.mlflow_logger.OutputHandler,
opt_params_handler_cls=handlers.mlflow_logger.OptimizerParamsHandler,
with_eval=True,
with_optim=True,
)
mlf_logger.close()
def test_setup_wandb_logging(dirname):
from unittest.mock import patch
with patch("ignite.contrib.engines.common.WandBLogger") as _:
setup_wandb_logging(MagicMock())
def test_setup_clearml_logging():
handlers.clearml_logger.ClearMLLogger.set_bypass_mode(True)
with pytest.warns(UserWarning, match=r"running in bypass mode"):
clearml_logger = _test_setup_logging(
setup_logging_fn=setup_clearml_logging,
kwargs_dict={},
output_handler_cls=handlers.clearml_logger.OutputHandler,
opt_params_handler_cls=handlers.clearml_logger.OptimizerParamsHandler,
with_eval=False,
with_optim=False,
)
clearml_logger.close()
clearml_logger = _test_setup_logging(
setup_logging_fn=setup_clearml_logging,
kwargs_dict={},
output_handler_cls=handlers.clearml_logger.OutputHandler,
opt_params_handler_cls=handlers.clearml_logger.OptimizerParamsHandler,
with_eval=True,
with_optim=True,
)
clearml_logger.close()
clearml_logger = _test_setup_logging(
setup_logging_fn=setup_trains_logging,
kwargs_dict={},
output_handler_cls=handlers.clearml_logger.OutputHandler,
opt_params_handler_cls=handlers.clearml_logger.OptimizerParamsHandler,
with_eval=True,
with_optim=True,
)
clearml_logger.close()
with pytest.warns(UserWarning, match="setup_trains_logging was renamed to setup_clearml_logging"):
clearml_logger = _test_setup_logging(
setup_logging_fn=setup_trains_logging,
kwargs_dict={},
output_handler_cls=handlers.clearml_logger.OutputHandler,
opt_params_handler_cls=handlers.clearml_logger.OptimizerParamsHandler,
with_eval=True,
with_optim=True,
)
clearml_logger.close()
def test_setup_neptune_logging(dirname):
npt_logger = _test_setup_logging(
setup_logging_fn=setup_neptune_logging,
kwargs_dict={"mode": "offline"},
output_handler_cls=handlers.neptune_logger.OutputHandler,
opt_params_handler_cls=handlers.neptune_logger.OptimizerParamsHandler,
with_eval=False,
with_optim=False,
)
npt_logger.close()
npt_logger = _test_setup_logging(
setup_logging_fn=setup_neptune_logging,
kwargs_dict={"mode": "offline"},
output_handler_cls=handlers.neptune_logger.OutputHandler,
opt_params_handler_cls=handlers.neptune_logger.OptimizerParamsHandler,
with_eval=True,
with_optim=True,
)
npt_logger.close()
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(dirname, distributed_context_single_node_nccl):
local_rank = distributed_context_single_node_nccl["local_rank"]
device = idist.device()
_test_setup_common_training_handlers(dirname, device, rank=local_rank, local_rank=local_rank, distributed=True)
test_add_early_stopping_by_val_score()
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(dirname, distributed_context_single_node_gloo):
device = idist.device()
local_rank = distributed_context_single_node_gloo["local_rank"]
_test_setup_common_training_handlers(dirname, device, rank=local_rank, local_rank=local_rank, distributed=True)
_test_setup_common_training_handlers(
dirname, device, rank=local_rank, local_rank=local_rank, distributed=True, lr_scheduler="ignite|LRScheduler"
)
_test_setup_common_training_handlers(
dirname, device, rank=local_rank, local_rank=local_rank, distributed=True, lr_scheduler="ignite"
)
test_add_early_stopping_by_val_score()
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(dirname, distributed_context_multi_node_gloo):
device = idist.device()
rank = distributed_context_multi_node_gloo["rank"]
_test_setup_common_training_handlers(dirname, device, rank=rank)
test_add_early_stopping_by_val_score()
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(dirname, distributed_context_multi_node_nccl):
local_rank = distributed_context_multi_node_nccl["local_rank"]
rank = distributed_context_multi_node_nccl["rank"]
device = idist.device()
_test_setup_common_training_handlers(dirname, device, rank=rank, local_rank=local_rank, distributed=True)
test_add_early_stopping_by_val_score()
|
# coding: utf-8
import unittest.mock as mock
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from ignite.contrib.engines import create_supervised_tbptt_trainer, Tbptt_Events
from ignite.contrib.engines.tbptt import _detach_hidden
def test_detach_hidden_RNN():
    # Create hidden state (a single tensor for nn.RNN)
X = torch.ones(2, 3, 4)
model = nn.RNN(4, 1)
_, hidden = model(X)
# Function to test
hidden_ = _detach_hidden(hidden)
assert hidden_.grad_fn is None # properly detached
assert (hidden == hidden_).all().item() == 1 # Equal values
def test_detach_hidden_LSTM():
    # Create hidden state (a tuple of tensors for nn.LSTM)
X = torch.ones(2, 3, 4)
model = nn.LSTM(4, 1)
_, hidden = model(X)
# Function to test
hidden_ = _detach_hidden(hidden)
for h, h_ in zip(hidden, hidden_):
assert h_.grad_fn is None # properly detached
assert (h == h_).all().item() == 1 # Equal values
def test_detach_hidden_raise():
with pytest.raises(TypeError):
_detach_hidden(0)
@mock.patch("ignite.contrib.engines.tbptt._detach_hidden")
def test_create_supervised_tbptt_trainer_callcounts(mock_detach_hidden):
# Mocking objects
model = mock.MagicMock()
# Necessary to unpack output
model.return_value = (1, 1)
optimizer = mock.MagicMock()
loss = mock.MagicMock()
trainer = create_supervised_tbptt_trainer(model, optimizer, loss, tbtt_step=2)
    # Add two mock handlers to the trainer to monitor that TBPTT events are
    # fired correctly
handle_started = mock.MagicMock()
trainer.add_event_handler(Tbptt_Events.TIME_ITERATION_STARTED, handle_started)
handle_completed = mock.MagicMock()
trainer.add_event_handler(Tbptt_Events.TIME_ITERATION_COMPLETED, handle_completed)
# Fake data
X = torch.ones(6, 2, 1)
y = torch.ones(6, 2, 1)
data = [(X, y)]
# Running trainer
trainer.run(data)
# Verifications
assert handle_started.call_count == 3
assert handle_completed.call_count == 3
assert mock_detach_hidden.call_count == 2
assert model.call_count == 3
assert loss.call_count == 3
assert optimizer.zero_grad.call_count == 3
assert optimizer.step.call_count == 3
n_args_tuple = tuple(len(args) for args, kwargs in model.call_args_list)
assert n_args_tuple == (1, 2, 2)
def _test_create_supervised_tbptt_trainer(device):
# Defining dummy recurrent model with zero weights
model = nn.RNN(1, 1, bias=False)
model.to(device) # Move model before creating optimizer
for p in model.parameters():
p.data.zero_()
    # Register a mock forward hook to monitor forward calls
forward_mock = mock.MagicMock()
forward_mock.return_value = None
model.register_forward_hook(forward_mock)
    # Defining optimizer and trainer
optimizer = optim.SGD(model.parameters(), 1)
trainer = create_supervised_tbptt_trainer(model, optimizer, F.mse_loss, tbtt_step=2, device=device)
# Fake data
X = torch.ones(6, 2, 1)
y = torch.ones(6, 2, 1)
data = [(X, y)]
# Running trainer
trainer.run(data)
    # If TBPTT were not used (i.e. a single gradient update), the hidden-to-hidden
    # weight would stay zero; a non-zero value shows that several updates happened
assert not model.weight_hh_l0.item() == pytest.approx(0)
    # Checking forward calls
assert forward_mock.call_count == 3
for i in range(3):
inputs = forward_mock.call_args_list[i][0][1]
if i == 0:
assert len(inputs) == 1
else:
assert len(inputs) == 2
x, h = inputs
assert h.is_leaf
def test_create_supervised_tbptt_trainer_with_cpu():
_test_create_supervised_tbptt_trainer("cpu")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_tbptt_trainer_on_cuda():
_test_create_supervised_tbptt_trainer("cuda")
|
from unittest.mock import Mock
import pytest
import torch
@pytest.fixture()
def norm_mock():
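    # Wrap a simple tensor-norm reduction in a Mock so tests can assert how many
    # times the reduction was called.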
def norm(x: torch.Tensor):
return x.norm()
norm_mock = Mock(side_effect=norm, spec=norm)
norm_mock.configure_mock(**{"__name__": "norm"})
norm_mock.reset_mock()
return norm_mock
@pytest.fixture()
def dummy_model_factory():
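    # Factory producing a small two-layer model: fc1 zero-initialized, fc2 filled
    # with ones, with optional gradients, a frozen fc1 and a registered buffer.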
class DummyModel(torch.nn.Module):
def __init__(self):
super(DummyModel, self).__init__()
self.fc1 = torch.nn.Linear(10, 10)
self.fc2 = torch.nn.Linear(12, 12)
self.fc1.weight.data.zero_()
self.fc1.bias.data.zero_()
self.fc2.weight.data.fill_(1.0)
self.fc2.bias.data.fill_(1.0)
def get_dummy_model(with_grads=True, with_frozen_layer=False, with_buffer=False):
model = DummyModel()
if with_grads:
model.fc2.weight.grad = torch.zeros_like(model.fc2.weight)
model.fc2.bias.grad = torch.zeros_like(model.fc2.bias)
if not with_frozen_layer:
model.fc1.weight.grad = torch.zeros_like(model.fc1.weight)
model.fc1.bias.grad = torch.zeros_like(model.fc1.bias)
if with_frozen_layer:
for param in model.fc1.parameters():
param.requires_grad = False
if with_buffer:
model.register_buffer("buffer1", torch.ones(1))
return model
return get_dummy_model
|
import sys
from unittest.mock import call, MagicMock
import pytest
import torch
from ignite.contrib.handlers.mlflow_logger import (
global_step_from_engine,
MLflowLogger,
OptimizerParamsHandler,
OutputHandler,
)
from ignite.engine import Engine, Events, State
def test_output_handler_with_wrong_logger_type():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(TypeError, match="Handler 'OutputHandler' works only with MLflowLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_output_handler_output_transform():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock(spec=MLflowLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.output = 12345
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with({"tag output": 12345}, step=123)
wrapper = OutputHandler("another_tag", output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=MLflowLogger)
mock_logger.log_metrics = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with({"another_tag loss": 12345}, step=123)
def test_output_handler_metric_names():
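    # Scalar metrics are logged as-is, a tensor vector is expanded into indexed
    # keys ("tag a 0", ...), and non-numeric values trigger a warning and are skipped.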
wrapper = OutputHandler("tag", metric_names=["a", "b", "c"])
mock_logger = MagicMock(spec=MLflowLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45, "c": torch.tensor(10.0)})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_called_once_with({"tag a": 12.23, "tag b": 23.45, "tag c": 10.0}, step=5)
wrapper = OutputHandler("tag", metric_names=["a"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": torch.tensor([0.0, 1.0, 2.0, 3.0])})
mock_engine.state.iteration = 5
mock_logger = MagicMock(spec=MLflowLogger)
mock_logger.log_metrics = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_has_calls(
[call({"tag a 0": 0.0, "tag a 1": 1.0, "tag a 2": 2.0, "tag a 3": 3.0}, step=5)], any_order=True
)
wrapper = OutputHandler("tag", metric_names=["a", "c"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 55.56, "c": "Some text"})
mock_engine.state.iteration = 7
mock_logger = MagicMock(spec=MLflowLogger)
mock_logger.log_metrics = MagicMock()
with pytest.warns(UserWarning):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_has_calls([call({"tag a": 55.56}, step=7)], any_order=True)
def test_output_handler_both():
wrapper = OutputHandler("tag", metric_names=["a", "b"], output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=MLflowLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_called_once_with({"tag a": 12.23, "tag b": 23.45, "tag loss": 12345}, step=5)
def test_output_handler_with_wrong_global_step_transform_output():
def global_step_transform(*args, **kwargs):
return "a"
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=MLflowLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
with pytest.raises(TypeError, match="global_step must be int"):
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
def test_output_handler_with_global_step_transform():
def global_step_transform(*args, **kwargs):
return 10
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=MLflowLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.log_metrics.assert_called_once_with({"tag loss": 12345}, step=10)
def test_output_handler_with_global_step_from_engine():
mock_another_engine = MagicMock()
mock_another_engine.state = State()
mock_another_engine.state.epoch = 10
mock_another_engine.state.output = 12.345
wrapper = OutputHandler(
"tag",
output_transform=lambda x: {"loss": x},
global_step_transform=global_step_from_engine(mock_another_engine),
)
mock_logger = MagicMock(spec=MLflowLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 1
mock_engine.state.output = 0.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_has_calls(
[call({"tag loss": mock_engine.state.output}, step=mock_another_engine.state.epoch)]
)
mock_another_engine.state.epoch = 11
mock_engine.state.output = 1.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.log_metrics.call_count == 2
mock_logger.log_metrics.assert_has_calls(
[call({"tag loss": mock_engine.state.output}, step=mock_another_engine.state.epoch)]
)
def test_output_handler_state_attrs():
wrapper = OutputHandler("tag", state_attributes=["alpha", "beta", "gamma"])
mock_logger = MagicMock(spec=MLflowLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 5
mock_engine.state.alpha = 3.899
mock_engine.state.beta = torch.tensor(12.21)
mock_engine.state.gamma = torch.tensor([21.0, 6.0])
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with(
{"tag alpha": 3.899, "tag beta": torch.tensor(12.21).item(), "tag gamma 0": 21.0, "tag gamma 1": 6.0}, step=5
)
def test_optimizer_params_handler_wrong_setup():
with pytest.raises(TypeError):
OptimizerParamsHandler(optimizer=None)
optimizer = MagicMock(spec=torch.optim.Optimizer)
handler = OptimizerParamsHandler(optimizer=optimizer)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(TypeError, match="Handler OptimizerParamsHandler works only with MLflowLogger"):
handler(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_optimizer_params():
optimizer = torch.optim.SGD([torch.tensor(0.0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
mock_logger = MagicMock(spec=MLflowLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with({"lr group_0": 0.01}, step=123)
wrapper = OptimizerParamsHandler(optimizer, param_name="lr", tag="generator")
mock_logger = MagicMock(spec=MLflowLogger)
mock_logger.log_metrics = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with({"generator lr group_0": 0.01}, step=123)
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_integration(dirname):
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
trainer = Engine(update_fn)
mlflow_logger = MLflowLogger(tracking_uri=str(dirname / "mlruns"))
true_values = []
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
v = global_step * 0.1
true_values.append(v)
logger.log_metrics({"test_value": v}, step=global_step)
mlflow_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
import mlflow
active_run = mlflow.active_run()
trainer.run(data, max_epochs=n_epochs)
mlflow_logger.close()
from mlflow.tracking import MlflowClient
client = MlflowClient(tracking_uri=str(dirname / "mlruns"))
stored_values = client.get_metric_history(active_run.info.run_id, "test_value")
for t, s in zip(true_values, stored_values):
assert pytest.approx(t) == s.value
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_integration_as_context_manager(dirname):
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
true_values = []
with MLflowLogger(str(dirname / "mlruns")) as mlflow_logger:
trainer = Engine(update_fn)
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
v = global_step * 0.1
true_values.append(v)
logger.log_metrics({"test_value": v}, step=global_step)
mlflow_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
import mlflow
active_run = mlflow.active_run()
trainer.run(data, max_epochs=n_epochs)
from mlflow.tracking import MlflowClient
client = MlflowClient(tracking_uri=str(dirname / "mlruns"))
stored_values = client.get_metric_history(active_run.info.run_id, "test_value")
for t, s in zip(true_values, stored_values):
assert pytest.approx(t) == s.value
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_mlflow_bad_metric_name_handling(dirname):
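    # MLflow does not accept metric names with characters such as ':' or '%';
    # the handler should warn about the invalid name and still log the valid one.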
import mlflow
true_values = [123.0, 23.4, 333.4]
with MLflowLogger(str(dirname / "mlruns")) as mlflow_logger:
active_run = mlflow.active_run()
handler = OutputHandler(tag="training", metric_names="all")
engine = Engine(lambda e, b: None)
engine.state = State(metrics={"metric:0 in %": 123.0, "metric 0": 1000.0})
with pytest.warns(UserWarning, match=r"MLflowLogger output_handler encountered an invalid metric name"):
engine.state.epoch = 1
handler(engine, mlflow_logger, event_name=Events.EPOCH_COMPLETED)
for _, v in enumerate(true_values):
engine.state.epoch += 1
engine.state.metrics["metric 0"] = v
handler(engine, mlflow_logger, event_name=Events.EPOCH_COMPLETED)
from mlflow.tracking import MlflowClient
client = MlflowClient(tracking_uri=str(dirname / "mlruns"))
stored_values = client.get_metric_history(active_run.info.run_id, "training metric 0")
for t, s in zip([1000.0] + true_values, stored_values):
assert t == s.value
@pytest.mark.parametrize("no_site_packages", ["mlflow"], indirect=True)
def test_no_mlflow_client(no_site_packages):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires mlflow to be installed."):
MLflowLogger()
|
from typing import Any, Union
from unittest.mock import call, MagicMock
import pytest
import torch
from ignite.contrib.handlers.base_logger import (
BaseLogger,
BaseOptimizerParamsHandler,
BaseOutputHandler,
BaseWeightsHandler,
BaseWeightsScalarHandler,
)
from ignite.engine import Engine, Events, EventsList, State
from tests.ignite.contrib.handlers import MockFP16DeepSpeedZeroOptimizer
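# Minimal concrete subclasses of the abstract base handlers and logger, used to
# exercise the shared BaseLogger logic without any real logging backend.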
class DummyOutputHandler(BaseOutputHandler):
def __call__(self, *args, **kwargs):
pass
class DummyOptParamsHandler(BaseOptimizerParamsHandler):
def __call__(self, engine, logger, event_name, **kwargs):
tag_prefix = f"{self.tag}/" if self.tag else ""
params = {
f"{tag_prefix}{self.param_name}/group_{i}": float(param_group[self.param_name])
for i, param_group in enumerate(self.optimizer.param_groups)
}
return params
class DummyLogger(BaseLogger):
def _create_output_handler(self, *args, **kwargs):
return DummyOutputHandler(*args, **kwargs)
def _create_opt_params_handler(self, *args, **kwargs):
return DummyOptParamsHandler(*args, **kwargs)
class DummyWeightsHandler(BaseWeightsHandler):
def __call__(self, engine: Engine, logger: Any, event_name: Union[str, Events]) -> None:
pass
class DummyWeightsScalarHandler(BaseWeightsScalarHandler):
def __call__(self, engine: Engine, logger: Any, event_name: Union[str, Events]) -> None:
pass
def test_base_output_handler_wrong_setup():
with pytest.raises(TypeError, match="metric_names should be either a list or equal 'all'"):
DummyOutputHandler("tag", metric_names="abc", output_transform=None)
with pytest.raises(TypeError, match="output_transform should be a function"):
DummyOutputHandler("tag", metric_names=None, output_transform="abc")
with pytest.raises(ValueError, match="Either metric_names, output_transform or state_attributes should be defined"):
DummyOutputHandler("tag", None, None)
with pytest.raises(TypeError, match="global_step_transform should be a function"):
DummyOutputHandler("tag", metric_names=["loss"], global_step_transform="abc")
with pytest.raises(TypeError, match=r"Argument optimizer should be torch.optim.Optimizer"):
DummyOptParamsHandler({}, "lr")
def test_base_output_handler_setup_output_metrics():
engine = Engine(lambda engine, batch: None)
true_metrics = {"a": 0, "b": 1}
engine.state = State(metrics=true_metrics)
engine.state.output = 12345
# Only metric_names
handler = DummyOutputHandler("tag", metric_names=["a", "b"], output_transform=None)
metrics = handler._setup_output_metrics_state_attrs(engine=engine, key_tuple=False)
assert metrics == {"tag/a": 0, "tag/b": 1}
# Only metric_names with a warning
handler = DummyOutputHandler("tag", metric_names=["a", "c"], output_transform=None)
with pytest.warns(UserWarning):
metrics = handler._setup_output_metrics_state_attrs(engine=engine, key_tuple=False)
assert metrics == {"tag/a": 0}
# Only output as "output"
handler = DummyOutputHandler("tag", metric_names=None, output_transform=lambda x: x)
metrics = handler._setup_output_metrics_state_attrs(engine=engine, key_tuple=False)
assert metrics == {"tag/output": engine.state.output}
# Only output as "loss"
handler = DummyOutputHandler("tag", metric_names=None, output_transform=lambda x: {"loss": x})
metrics = handler._setup_output_metrics_state_attrs(engine=engine, key_tuple=False)
assert metrics == {"tag/loss": engine.state.output}
# Metrics and output
handler = DummyOutputHandler("tag", metric_names=["a", "b"], output_transform=lambda x: {"loss": x})
metrics = handler._setup_output_metrics_state_attrs(engine=engine, key_tuple=False)
assert metrics == {"tag/a": 0, "tag/b": 1, "tag/loss": engine.state.output}
# All metrics
handler = DummyOutputHandler("tag", metric_names="all", output_transform=None)
metrics = handler._setup_output_metrics_state_attrs(engine=engine, key_tuple=False)
assert metrics == {"tag/a": 0, "tag/b": 1}
def test_base_output_handler_setup_output_state_attrs():
engine = Engine(lambda engine, batch: None)
true_metrics = {"a": 0, "b": 1}
engine.state = State(metrics=true_metrics)
engine.state.alpha = 3.899
engine.state.beta = torch.tensor(5.499)
engine.state.gamma = torch.tensor([2106.0, 6.0])
engine.state.output = 12345
# Only State Attributes
handler = DummyOutputHandler(
tag="tag", metric_names=None, output_transform=None, state_attributes=["alpha", "beta", "gamma"]
)
state_attrs = handler._setup_output_metrics_state_attrs(engine=engine, key_tuple=False)
assert state_attrs == {
"tag/alpha": 3.899,
"tag/beta": torch.tensor(5.499),
"tag/gamma/0": 2106.0,
"tag/gamma/1": 6.0,
}
# Metrics and Attributes
handler = DummyOutputHandler(
tag="tag", metric_names=["a", "b"], output_transform=None, state_attributes=["alpha", "beta", "gamma"]
)
state_attrs = handler._setup_output_metrics_state_attrs(engine=engine, key_tuple=False)
assert state_attrs == {
"tag/a": 0,
"tag/b": 1,
"tag/alpha": 3.899,
"tag/beta": torch.tensor(5.499),
"tag/gamma/0": 2106.0,
"tag/gamma/1": 6.0,
}
# Metrics, Attributes and output
handler = DummyOutputHandler(
tag="tag",
metric_names="all",
output_transform=lambda x: {"loss": x},
state_attributes=["alpha", "beta", "gamma"],
)
state_attrs = handler._setup_output_metrics_state_attrs(engine=engine, key_tuple=False)
assert state_attrs == {
"tag/a": 0,
"tag/b": 1,
"tag/alpha": 3.899,
"tag/beta": torch.tensor(5.499),
"tag/gamma/0": 2106.0,
"tag/gamma/1": 6.0,
"tag/loss": engine.state.output,
}
def test_opt_params_handler_on_non_torch_optimizers():
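    # The handler should read param_groups through the DeepSpeed-style wrapper,
    # which exposes them as a property rather than being a torch.optim.Optimizer.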
tensor = torch.zeros([1], requires_grad=True)
base_optimizer = torch.optim.SGD([tensor], lr=0.1234)
optimizer = MockFP16DeepSpeedZeroOptimizer(base_optimizer)
handler = DummyOptParamsHandler(optimizer=optimizer, param_name="lr")
res = handler(engine=None, logger=None, event_name=None)
assert isinstance(res, dict)
assert "lr/group_0" in res and res["lr/group_0"] == 0.1234
@pytest.mark.parametrize(
"event, n_calls, kwargs",
[
(Events.ITERATION_STARTED, 50 * 5, {"a": 0}),
(Events.ITERATION_COMPLETED, 50 * 5, {}),
(Events.EPOCH_STARTED, 5, {}),
(Events.EPOCH_COMPLETED, 5, {}),
(Events.STARTED, 1, {}),
(Events.COMPLETED, 1, {}),
(Events.ITERATION_STARTED(every=10), 50 // 10 * 5, {}),
(Events.STARTED | Events.COMPLETED, 2, {}),
],
)
def test_attach(event, n_calls, kwargs):
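    # 5 epochs over 50 batches: the parametrization above encodes how many times
    # each event (and therefore the attached log handler) is expected to fire.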
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
trainer = Engine(update_fn)
logger = DummyLogger()
mock_log_handler = MagicMock()
logger.attach(trainer, log_handler=mock_log_handler, event_name=event, **kwargs)
trainer.run(data, max_epochs=n_epochs)
if isinstance(event, EventsList):
events = [e for e in event]
else:
events = [event]
if len(kwargs) > 0:
calls = [call(trainer, logger, e, **kwargs) for e in events]
else:
calls = [call(trainer, logger, e) for e in events]
mock_log_handler.assert_has_calls(calls)
assert mock_log_handler.call_count == n_calls
def test_attach_wrong_event_name():
trainer = Engine(lambda b, e: None)
logger = DummyLogger()
mock_log_handler = MagicMock()
with pytest.raises(RuntimeError, match="Unknown event name"):
logger.attach(trainer, log_handler=mock_log_handler, event_name="unknown")
events_list = EventsList()
events_list._events = ["unknown"]
with pytest.raises(RuntimeError, match="Unknown event name"):
logger.attach(trainer, log_handler=mock_log_handler, event_name=events_list)
def test_attach_on_custom_event():
n_epochs = 10
data = list(range(150))
def _test(event, n_calls, cpe):
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
trainer = Engine(update_fn)
cpe.attach(trainer)
logger = DummyLogger()
mock_log_handler = MagicMock()
logger.attach(trainer, log_handler=mock_log_handler, event_name=event)
trainer.run(data, max_epochs=n_epochs)
mock_log_handler.assert_called_with(trainer, logger, event)
assert mock_log_handler.call_count == n_calls
@pytest.mark.parametrize(
"event, n_calls",
[
(Events.ITERATION_STARTED, 50 * 5),
(Events.ITERATION_COMPLETED, 50 * 5),
(Events.EPOCH_STARTED, 5),
(Events.EPOCH_COMPLETED, 5),
(Events.STARTED, 1),
(Events.COMPLETED, 1),
(Events.ITERATION_STARTED(every=10), 50 // 10 * 5),
],
)
def test_as_context_manager(event, n_calls):
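    # Using the logger as a context manager should call close() (and thus the
    # underlying writer.close()) exactly once when the block exits.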
n_epochs = 5
data = list(range(50))
class _DummyLogger(DummyLogger):
def __init__(self, writer):
self.writer = writer
def close(self):
self.writer.close()
global close_counter
close_counter = 0
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
writer = MagicMock()
writer.close = MagicMock()
with _DummyLogger(writer) as logger:
assert isinstance(logger, _DummyLogger)
trainer = Engine(update_fn)
mock_log_handler = MagicMock()
logger.attach(trainer, log_handler=mock_log_handler, event_name=event)
trainer.run(data, max_epochs=n_epochs)
mock_log_handler.assert_called_with(trainer, logger, event)
assert mock_log_handler.call_count == n_calls
writer.close.assert_called_once_with()
def test_base_weights_handler_wrong_setup():
with pytest.raises(TypeError, match="Argument model should be of type torch.nn.Module"):
DummyWeightsHandler(None)
def test_base_weights_scalar_handler_wrong_setup():
model = MagicMock(spec=torch.nn.Module)
with pytest.raises(TypeError, match="Argument reduction should be callable"):
DummyWeightsScalarHandler(model, reduction=123)
with pytest.raises(TypeError, match="Output of the reduction function should be a scalar"):
DummyWeightsScalarHandler(model, reduction=lambda x: x)
|
import math
import os
from collections import defaultdict
from unittest.mock import ANY, call, MagicMock, patch
import clearml
import pytest
import torch
from clearml.binding.frameworks import WeightsFileHandler
from clearml.model import Framework
import ignite.distributed as idist
from ignite.contrib.handlers.clearml_logger import (
ClearMLLogger,
ClearMLSaver,
global_step_from_engine,
GradsHistHandler,
GradsScalarHandler,
OptimizerParamsHandler,
OutputHandler,
WeightsHistHandler,
WeightsScalarHandler,
)
from ignite.engine import Engine, Events, State
from ignite.handlers import Checkpoint
def test_no_clearml():
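    # Simulate clearml (and the legacy trains package) being absent by patching
    # sys.modules and check that the handlers raise a clear ModuleNotFoundError.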
with patch.dict("sys.modules", {"clearml": None, "trains": None}):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires clearml to be installed."):
ClearMLSaver()
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires clearml to be installed."):
ClearMLLogger()
with patch.dict("sys.modules", {"clearml.binding.frameworks.tensorflow_bind": None}):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires clearml to be installed."):
ClearMLLogger()
with patch.dict("sys.modules", {"clearml.binding.frameworks": None, "trains.binding.frameworks": None}):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires clearml to be installed."):
ClearMLSaver.__call__(None, {}, "")
def test_optimizer_params_handler_wrong_setup():
with pytest.raises(TypeError):
OptimizerParamsHandler(optimizer=None)
optimizer = MagicMock(spec=torch.optim.Optimizer)
handler = OptimizerParamsHandler(optimizer=optimizer)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler OptimizerParamsHandler works only with ClearMLLogger"):
handler(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_optimizer_params():
optimizer = torch.optim.SGD([torch.tensor(0.0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.clearml_logger.report_scalar.assert_called_once_with(iteration=123, series="0", title="lr", value=0.01)
wrapper = OptimizerParamsHandler(optimizer, param_name="lr", tag="generator")
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.clearml_logger.report_scalar.assert_called_once_with(
iteration=123, series="0", title="generator/lr", value=0.01
)
def test_output_handler_with_wrong_logger_type():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler OutputHandler works only with ClearMLLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_output_handler_output_transform(dirname):
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.output = 12345
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.clearml_logger.report_scalar.assert_called_once_with(
iteration=123, series="output", title="tag", value=12345
)
wrapper = OutputHandler("another_tag", output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.clearml_logger.report_scalar.assert_called_once_with(
iteration=123, series="loss", title="another_tag", value=12345
)
def test_output_handler_metric_names(dirname):
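    # Exercise scalar, string (warned and skipped), vector and 0-d tensor metrics;
    # each numeric entry becomes its own report_scalar call.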
wrapper = OutputHandler("tag", metric_names=["a", "b"])
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.clearml_logger.report_scalar.call_count == 2
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[
call(title="tag", series="a", iteration=5, value=12.23),
call(title="tag", series="b", iteration=5, value=23.45),
],
any_order=True,
)
wrapper = OutputHandler("tag", metric_names=["a", "c"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 55.56, "c": "Some text"})
mock_engine.state.iteration = 7
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
with pytest.warns(UserWarning, match=r"Logger output_handler can not log metrics value type"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.clearml_logger.report_scalar.call_count == 1
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[call(title="tag", series="a", iteration=7, value=55.56)], any_order=True
)
# all metrics
wrapper = OutputHandler("tag", metric_names="all")
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.clearml_logger.report_scalar.call_count == 2
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[
call(title="tag", series="a", iteration=5, value=12.23),
call(title="tag", series="b", iteration=5, value=23.45),
],
any_order=True,
)
# log a torch vector
wrapper = OutputHandler("tag", metric_names="all")
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
vector = torch.tensor([0.1, 0.2, 0.1, 0.2, 0.33])
mock_engine.state = State(metrics={"vector": vector})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.clearml_logger.report_scalar.call_count == 5
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[call(title="tag/vector", series=str(i), iteration=5, value=vector[i].item()) for i in range(5)],
any_order=True,
)
# log a torch tensor (ndimension = 0)
wrapper = OutputHandler("tag", metric_names="all")
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": torch.tensor(12.23), "b": torch.tensor(23.45), "c": torch.tensor(5.01)})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.clearml_logger.report_scalar.call_count == 3
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[
call(title="tag", series="a", iteration=5, value=torch.tensor(12.23).item()),
call(title="tag", series="b", iteration=5, value=torch.tensor(23.45).item()),
call(title="tag", series="c", iteration=5, value=torch.tensor(5.01).item()),
],
any_order=True,
)
def test_output_handler_both(dirname):
wrapper = OutputHandler("tag", metric_names=["a", "b"], output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.clearml_logger.report_scalar.call_count == 3
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[
call(title="tag", series="a", iteration=5, value=12.23),
call(title="tag", series="b", iteration=5, value=23.45),
call(title="tag", series="loss", iteration=5, value=12345),
],
any_order=True,
)
def test_output_handler_with_wrong_global_step_transform_output():
def global_step_transform(*args, **kwargs):
return "a"
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
with pytest.raises(TypeError, match="global_step must be int"):
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
def test_output_handler_with_global_step_from_engine():
mock_another_engine = MagicMock()
mock_another_engine.state = State()
mock_another_engine.state.epoch = 10
mock_another_engine.state.output = 12.345
wrapper = OutputHandler(
"tag",
output_transform=lambda x: {"loss": x},
global_step_transform=global_step_from_engine(mock_another_engine),
)
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 1
mock_engine.state.output = 0.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.clearml_logger.report_scalar.call_count == 1
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[call(title="tag", series="loss", iteration=mock_another_engine.state.epoch, value=mock_engine.state.output)]
)
mock_another_engine.state.epoch = 11
mock_engine.state.output = 1.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.clearml_logger.report_scalar.call_count == 2
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[call(title="tag", series="loss", iteration=mock_another_engine.state.epoch, value=mock_engine.state.output)]
)
def test_output_handler_state_attrs():
wrapper = OutputHandler("tag", state_attributes=["alpha", "beta", "gamma"])
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 5
mock_engine.state.alpha = 3.899
mock_engine.state.beta = torch.tensor(12.0)
mock_engine.state.gamma = torch.tensor([21.0, 6.0])
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.clearml_logger.report_scalar.call_count == 4
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[
call(title="tag", series="alpha", iteration=5, value=3.899),
call(title="tag", series="beta", iteration=5, value=12.0),
call(title="tag/gamma", series="0", iteration=5, value=21.0),
call(title="tag/gamma", series="1", iteration=5, value=6.0),
],
any_order=True,
)
def test_output_handler_with_global_step_transform():
def global_step_transform(*args, **kwargs):
return 10
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.clearml_logger.report_scalar.call_count == 1
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[call(title="tag", series="loss", iteration=10, value=12345)]
)
def test_weights_scalar_handler_wrong_setup():
model = MagicMock(spec=torch.nn.Module)
wrapper = WeightsScalarHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler WeightsScalarHandler works only with ClearMLLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_weights_scalar_handler(dummy_model_factory):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = WeightsScalarHandler(model, tag=tag)
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = f"{tag}/" if tag else ""
assert mock_logger.clearml_logger.report_scalar.call_count == 4
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[
call(title=tag_prefix + "weights_norm/fc1", series="weight", iteration=5, value=0.0),
call(title=tag_prefix + "weights_norm/fc1", series="bias", iteration=5, value=0.0),
call(title=tag_prefix + "weights_norm/fc2", series="weight", iteration=5, value=12.0),
call(title=tag_prefix + "weights_norm/fc2", series="bias", iteration=5, value=math.sqrt(12.0)),
],
any_order=True,
)
_test()
_test(tag="tag")
def test_weights_scalar_handler_whitelist(dummy_model_factory):
model = dummy_model_factory()
wrapper = WeightsScalarHandler(model, whitelist=["fc2.weight"])
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.clearml_logger.report_scalar.assert_called_once_with(
title="weights_norm/fc2", value=ANY, series="weight", iteration=mock_engine.state.epoch
)
mock_logger.clearml_logger.report_scalar.reset_mock()
wrapper = WeightsScalarHandler(model, tag="model", whitelist=["fc1"])
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[
call(title="model/weights_norm/fc1", value=ANY, series="weight", iteration=mock_engine.state.epoch),
call(title="model/weights_norm/fc1", value=ANY, series="bias", iteration=mock_engine.state.epoch),
],
any_order=True,
)
assert mock_logger.clearml_logger.report_scalar.call_count == 2
mock_logger.clearml_logger.report_scalar.reset_mock()
def weight_selector(n, _):
return "bias" in n
wrapper = WeightsScalarHandler(model, tag="model", whitelist=weight_selector)
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[
call(title="model/weights_norm/fc1", value=ANY, series="bias", iteration=mock_engine.state.epoch),
call(title="model/weights_norm/fc2", value=ANY, series="bias", iteration=mock_engine.state.epoch),
],
any_order=True,
)
assert mock_logger.clearml_logger.report_scalar.call_count == 2
def test_weights_hist_handler_wrong_setup():
model = MagicMock(spec=torch.nn.Module)
wrapper = WeightsHistHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'WeightsHistHandler' works only with ClearMLLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_weights_hist_handler(dummy_model_factory):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = WeightsHistHandler(model, tag=tag)
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.grad_helper = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = f"{tag}/" if tag else ""
assert mock_logger.grad_helper.add_histogram.call_count == 4
mock_logger.grad_helper.add_histogram.assert_has_calls(
[
call(title=tag_prefix + "weights_fc1", hist_data=ANY, series="weight", step=5),
call(title=tag_prefix + "weights_fc1", hist_data=ANY, series="bias", step=5),
call(title=tag_prefix + "weights_fc2", hist_data=ANY, series="weight", step=5),
call(title=tag_prefix + "weights_fc2", hist_data=ANY, series="bias", step=5),
],
any_order=True,
)
_test()
_test(tag="tag")
def test_weights_hist_handler_whitelist(dummy_model_factory):
model = dummy_model_factory()
wrapper = WeightsHistHandler(model, whitelist=["fc2.weight"])
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.grad_helper = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.grad_helper.add_histogram.assert_called_once_with(
title="weights_fc2", hist_data=ANY, series="weight", step=5
)
mock_logger.grad_helper.add_histogram.reset_mock()
wrapper = WeightsHistHandler(model, tag="model", whitelist=["fc1"])
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.grad_helper.add_histogram.assert_has_calls(
[
call(title="model/weights_fc1", hist_data=ANY, series="weight", step=5),
call(title="model/weights_fc1", hist_data=ANY, series="bias", step=5),
],
any_order=True,
)
assert mock_logger.grad_helper.add_histogram.call_count == 2
mock_logger.grad_helper.add_histogram.reset_mock()
def weight_selector(n, _):
return "bias" in n
wrapper = WeightsHistHandler(model, tag="model", whitelist=weight_selector)
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.grad_helper.add_histogram.assert_has_calls(
[
call(title="model/weights_fc1", hist_data=ANY, series="bias", step=5),
call(title="model/weights_fc2", hist_data=ANY, series="bias", step=5),
],
any_order=True,
)
assert mock_logger.grad_helper.add_histogram.call_count == 2
def test_grads_scalar_handler_wrong_setup():
model = MagicMock(spec=torch.nn.Module)
wrapper = GradsScalarHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler GradsScalarHandler works only with ClearMLLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_grads_scalar_handler(dummy_model_factory, norm_mock):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = GradsScalarHandler(model, reduction=norm_mock, tag=tag)
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
norm_mock.reset_mock()
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = f"{tag}/" if tag else ""
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[
call(
title=tag_prefix + "grads_norm/fc1", value=ANY, series="weight", iteration=mock_engine.state.epoch
),
call(title=tag_prefix + "grads_norm/fc1", value=ANY, series="bias", iteration=mock_engine.state.epoch),
call(
title=tag_prefix + "grads_norm/fc2", value=ANY, series="weight", iteration=mock_engine.state.epoch
),
call(title=tag_prefix + "grads_norm/fc2", value=ANY, series="bias", iteration=mock_engine.state.epoch),
],
any_order=True,
)
assert mock_logger.clearml_logger.report_scalar.call_count == 4
assert norm_mock.call_count == 4
_test()
_test(tag="tag")
def test_grads_scalar_handler_whitelist(dummy_model_factory):
model = dummy_model_factory()
wrapper = GradsScalarHandler(model, whitelist=["fc2.weight"])
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.clearml_logger.report_scalar.assert_called_once_with(
title="grads_norm/fc2", value=ANY, series="weight", iteration=mock_engine.state.epoch
)
mock_logger.clearml_logger.report_scalar.reset_mock()
wrapper = GradsScalarHandler(model, tag="model", whitelist=["fc1"])
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[
call(title="model/grads_norm/fc1", value=ANY, series="weight", iteration=mock_engine.state.epoch),
call(title="model/grads_norm/fc1", value=ANY, series="bias", iteration=mock_engine.state.epoch),
],
any_order=True,
)
assert mock_logger.clearml_logger.report_scalar.call_count == 2
mock_logger.clearml_logger.report_scalar.reset_mock()
def weight_selector(n, _):
return "bias" in n
wrapper = GradsScalarHandler(model, tag="model", whitelist=weight_selector)
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.clearml_logger.report_scalar.assert_has_calls(
[
call(title="model/grads_norm/fc1", value=ANY, series="bias", iteration=mock_engine.state.epoch),
call(title="model/grads_norm/fc2", value=ANY, series="bias", iteration=mock_engine.state.epoch),
],
any_order=True,
)
assert mock_logger.clearml_logger.report_scalar.call_count == 2
def test_grads_hist_handler_wrong_setup():
model = MagicMock(spec=torch.nn.Module)
wrapper = GradsHistHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'GradsHistHandler' works only with ClearMLLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_grads_hist_handler(dummy_model_factory):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = GradsHistHandler(model, tag=tag)
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.grad_helper = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = f"{tag}/" if tag else ""
assert mock_logger.grad_helper.add_histogram.call_count == 4
mock_logger.grad_helper.add_histogram.assert_has_calls(
[
call(title=tag_prefix + "grads_fc1", hist_data=ANY, series="weight", step=5),
call(title=tag_prefix + "grads_fc1", hist_data=ANY, series="bias", step=5),
call(title=tag_prefix + "grads_fc2", hist_data=ANY, series="weight", step=5),
call(title=tag_prefix + "grads_fc2", hist_data=ANY, series="bias", step=5),
],
any_order=True,
)
_test()
_test(tag="tag")
def test_grads_hist_handler_whitelist(dummy_model_factory):
model = dummy_model_factory()
wrapper = GradsHistHandler(model, whitelist=["fc2.weight"])
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.grad_helper = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.grad_helper.add_histogram.assert_called_once_with(
title="grads_fc2", hist_data=ANY, series="weight", step=5
)
mock_logger.grad_helper.reset_mock()
wrapper = GradsHistHandler(model, tag="model", whitelist=["fc1"])
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.grad_helper.add_histogram.assert_has_calls(
[
call(title="model/grads_fc1", hist_data=ANY, series="weight", step=5),
call(title="model/grads_fc1", hist_data=ANY, series="bias", step=5),
],
any_order=True,
)
assert mock_logger.grad_helper.add_histogram.call_count == 2
mock_logger.grad_helper.reset_mock()
def weight_selector(n, _):
return "bias" in n
wrapper = GradsHistHandler(model, tag="model", whitelist=weight_selector)
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.grad_helper.add_histogram.assert_has_calls(
[
call(title="model/grads_fc1", hist_data=ANY, series="bias", step=5),
call(title="model/grads_fc2", hist_data=ANY, series="bias", step=5),
],
any_order=True,
)
assert mock_logger.grad_helper.add_histogram.call_count == 2
def test_integration(dirname):
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
trainer = Engine(update_fn)
with pytest.warns(UserWarning, match="ClearMLSaver: running in bypass mode"):
ClearMLLogger.set_bypass_mode(True)
logger = ClearMLLogger(output_uri=dirname)
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
test_value = 0.3 # example
logger.clearml_logger.report_scalar(title="", series="", value=test_value, iteration=global_step)
logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
logger.close()
def test_integration_as_context_manager(dirname):
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
with pytest.warns(UserWarning, match="ClearMLSaver: running in bypass mode"):
ClearMLLogger.set_bypass_mode(True)
with ClearMLLogger(output_uri=dirname) as clearml_logger:
trainer = Engine(update_fn)
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
test_value = 0.3 # example
logger.clearml_logger.report_scalar(title="", series="", value=test_value, iteration=global_step)
clearml_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
def test_clearml_logger_getattr_method(dirname):
with pytest.warns(UserWarning, match="ClearMLSaver: running in bypass mode"):
ClearMLLogger.set_bypass_mode(True)
logger = ClearMLLogger(output_uri=dirname)
# Create a mock clearml.Logger() object
mock_logger = MagicMock()
logger.clearml_logger = mock_logger
    # A method resolved via __getattr__ should forward to the corresponding method of the mocked clearml.Logger.
logger.report_single_value("accuracy", 0.72)
mock_logger.report_single_value.assert_called_once_with("accuracy", 0.72)
    # A classmethod resolved via __getattr__ should forward to the corresponding classmethod of the mocked logger's class.
logger.current_logger()
mock_logger.current_logger.assert_called_once()
logger.close()
def test_clearml_logger_get_task_bypass(dirname):
with pytest.warns(UserWarning, match="ClearMLSaver: running in bypass mode"):
ClearMLLogger.set_bypass_mode(True)
with ClearMLLogger(output_uri=dirname) as clearml_logger:
task = clearml_logger.get_task()
assert isinstance(task, clearml.Task)
assert task == clearml.Task.current_task()
task.close()
def test_clearml_disk_saver_integration():
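    # In atomic mode the saver is expected to go through WeightsFileHandler.create_output_model;
    # otherwise a single checkpoint file should end up in the saver's temporary dirname.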
model = torch.nn.Module()
to_save_serializable = {"model": model}
with pytest.warns(UserWarning, match="ClearMLSaver created a temporary checkpoints directory"):
mock_logger = MagicMock(spec=ClearMLLogger)
clearml.Task.current_task = MagicMock(spec=clearml.Task)
clearml_saver = ClearMLSaver(mock_logger)
clearml.binding.frameworks.WeightsFileHandler.create_output_model = MagicMock()
checkpoint = Checkpoint(to_save=to_save_serializable, save_handler=clearml_saver, n_saved=1)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
checkpoint(trainer)
trainer.state.iteration = 1
checkpoint(trainer)
if clearml_saver._atomic:
assert clearml.binding.frameworks.WeightsFileHandler.create_output_model.call_count == 2
else:
saved_files = list(os.listdir(clearml_saver.dirname))
assert len(saved_files) == 1
assert saved_files[0] == "model_1.pt"
def test_clearml_disk_saver_integration_no_logger():
model = torch.nn.Module()
to_save_serializable = {"model": model}
with pytest.warns(UserWarning, match="ClearMLSaver created a temporary checkpoints directory"):
clearml.Task.current_task = MagicMock(spec=clearml.Task)
clearml.binding.frameworks.WeightsFileHandler.create_output_model = MagicMock()
clearml_saver = ClearMLSaver()
checkpoint = Checkpoint(to_save=to_save_serializable, save_handler=clearml_saver, n_saved=1)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
checkpoint(trainer)
trainer.state.iteration = 1
checkpoint(trainer)
if clearml_saver._atomic:
assert clearml.binding.frameworks.WeightsFileHandler.create_output_model.call_count == 2
else:
saved_files = list(os.listdir(clearml_saver.dirname))
assert len(saved_files) == 1
assert saved_files[0] == "model_1.pt"
def test_clearml_saver_callbacks():
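    # Drive ClearMLSaver's pre/post upload callbacks directly with a mocked
    # WeightsFileHandler.ModelInfo and check the rewritten filename, id and comment.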
mock_task = MagicMock(spec=clearml.Task)
mock_task.name = "check-task"
mock_model = MagicMock(spec=clearml.OutputModel)
model_info = WeightsFileHandler.ModelInfo(
model=mock_model,
upload_filename="test.pt",
local_model_path="",
local_model_id="",
framework=Framework.pytorch,
task=mock_task,
)
mock_model_info = MagicMock(spec_set=model_info)
# Simulate 4 calls to save model and 2 to remove (n_saved=2)
filenames = [
"best_model_5_val_acc=0.123.pt",
"best_model_6_val_acc=0.234.pt",
"best_model_7_val_acc=0.356.pt",
"best_model_8_val_acc=0.456.pt",
]
metadata_list = [
{"basename": "best_model", "score_name": "val_acc", "priority": 0.123},
{"basename": "best_model", "score_name": "val_acc", "priority": 0.234},
{"basename": "best_model", "score_name": "val_acc", "priority": 0.345},
{"basename": "best_model", "score_name": "val_acc", "priority": 0.456},
]
dirname = "/tmp/test"
_checkpoint_slots = defaultdict(list)
n_saved = 2
for i, (filename, metadata) in enumerate(zip(filenames, metadata_list)):
mock_model_info.upload_filename = filename
if i >= n_saved:
            # Remove: n_saved exceeded, free the slot held by the checkpoint being replaced
filename_to_remove = filenames[i % n_saved]
for slots in _checkpoint_slots.values():
try:
slots[slots.index(filename_to_remove)] = None
except ValueError:
pass
else:
i = i % n_saved
break
basename = metadata["basename"]
checkpoint_key = (dirname, basename)
context = ClearMLSaver._CallbacksContext(
callback_type=WeightsFileHandler.CallbackType,
slots=_checkpoint_slots[checkpoint_key],
checkpoint_key=str(checkpoint_key),
filename=filename,
basename=basename,
metadata=metadata,
)
output_model_info = context.pre_callback(str(WeightsFileHandler.CallbackType.save), mock_model_info)
assert (
hasattr(output_model_info, "upload_filename") and f"{basename}_{i}.pt" in output_model_info.upload_filename
)
assert hasattr(output_model_info, "local_model_id") and str(checkpoint_key) in output_model_info.local_model_id
output_model_info = context.post_callback(str(WeightsFileHandler.CallbackType.save), mock_model_info)
assert hasattr(output_model_info, "model") and hasattr(output_model_info.model, "name")
assert hasattr(output_model_info, "model") and hasattr(output_model_info.model, "comment")
assert isinstance(output_model_info.model.name, str) and filename in output_model_info.model.name
assert (
isinstance(output_model_info.model.comment, str)
and metadata["basename"] in output_model_info.model.comment
and metadata["score_name"] in output_model_info.model.comment
)
class DummyModel(torch.nn.Module):
def __init__(self):
super(DummyModel, self).__init__()
self.net = torch.nn.Linear(2, 2)
def forward(self, x):
return self.net(x)
def _test_save_model_optimizer_lr_scheduler_with_state_dict(device, on_zero_rank=False):
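    # Train a tiny model for a few epochs, checkpoint model/optimizer/lr_scheduler
    # through ClearMLSaver and verify the reloaded state dicts match the live objects.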
if idist.get_rank() == 0:
clearml.Task.current_task = MagicMock(spec=clearml.Task)
clearml.binding.frameworks.WeightsFileHandler.create_output_model = MagicMock()
torch.manual_seed(23)
model = DummyModel().to(device)
optim = torch.optim.SGD(model.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=0.5)
def update_fn(engine, batch):
x = torch.rand((4, 2)).to(device)
optim.zero_grad()
y = model(x)
# Below code raises: RuntimeError: torch_xla/csrc/tensor_impl.cpp:144 : XLA tensors do not have storage
# Probably related to https://github.com/pytorch/xla/issues/2576
# loss = y.pow(2.0).sum()
loss = y.sum()
loss.backward()
if idist.has_xla_support:
import torch_xla.core.xla_model as xm
xm.optimizer_step(optim, barrier=True)
else:
optim.step()
lr_scheduler.step()
engine = Engine(update_fn)
to_save = {"model": model, "optimizer": optim, "lr_scheduler": lr_scheduler}
with pytest.warns(UserWarning, match=r"ClearMLSaver created a temporary checkpoints directory"):
clearml_saver = ClearMLSaver()
if (not on_zero_rank) or (on_zero_rank and idist.get_rank() == 0):
checkpoint = Checkpoint(to_save=to_save, save_handler=clearml_saver, n_saved=1)
engine.add_event_handler(Events.EPOCH_COMPLETED, checkpoint)
engine.run([0], max_epochs=4)
idist.barrier()
saved_objects = sorted(os.listdir(clearml_saver.dirname))
# saved object is ['PREFIX_checkpoint_3.pt', ]
saved_checkpoint = clearml_saver.dirname / saved_objects[0]
if idist.has_xla_support:
device = "cpu"
loaded_obj = torch.load(saved_checkpoint, map_location=device)
for f in ["model", "optimizer", "lr_scheduler"]:
assert f in loaded_obj
loaded_model_state_dict = loaded_obj["model"]
loaded_optimizer_state_dict = loaded_obj["optimizer"]
loaded_lr_scheduler_state_dict = loaded_obj["lr_scheduler"]
assert isinstance(loaded_model_state_dict, dict)
assert isinstance(loaded_optimizer_state_dict, dict)
assert isinstance(loaded_lr_scheduler_state_dict, dict)
# Explicitly move the model to CPU before comparing state dicts
model_state_dict = model.cpu().state_dict()
for key in model_state_dict.keys():
assert key in loaded_model_state_dict
model_value = model_state_dict[key]
loaded_model_value = loaded_model_state_dict[key]
assert (model_value.cpu().numpy() == loaded_model_value.cpu().numpy()).all()
optim_state_dict = optim.state_dict()
for key in optim_state_dict.keys():
assert key in loaded_optimizer_state_dict
optim_value = optim_state_dict[key]
loaded_optim_value = loaded_optimizer_state_dict[key]
if idist.get_rank() == 0:
assert optim_value == loaded_optim_value
lr_scheduler_state_dict = lr_scheduler.state_dict()
for key in lr_scheduler_state_dict.keys():
assert key in loaded_lr_scheduler_state_dict
lr_scheduler_value = lr_scheduler_state_dict[key]
loaded_lr_scheduler_value = loaded_lr_scheduler_state_dict[key]
assert lr_scheduler_value == loaded_lr_scheduler_value
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_save_model_optimizer_lr_scheduler_with_state_dict(device)
_test_save_model_optimizer_lr_scheduler_with_state_dict(device, on_zero_rank=True)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_save_model_optimizer_lr_scheduler_with_state_dict(device)
_test_save_model_optimizer_lr_scheduler_with_state_dict(device, on_zero_rank=True)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Not on TPU device")
def test_distrib_single_device_xla():
device = idist.device()
assert "xla" in device.type
_test_save_model_optimizer_lr_scheduler_with_state_dict(device)
def _test_save_model_optimizer_lr_scheduler_with_state_dict_xla_nprocs(index):
device = idist.device()
_test_save_model_optimizer_lr_scheduler_with_state_dict(device)
import time
# hack to make sure all processes are properly synced:
time.sleep(1)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Not on TPU device")
def test_distrib_single_device_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_save_model_optimizer_lr_scheduler_with_state_dict_xla_nprocs, args=(), nprocs=n)
|
class MockFP16DeepSpeedZeroOptimizer:
def __init__(self, optimizer):
self.optimizer = optimizer
def step(self, closure=None):
self.optimizer.step()
def _get_param_groups(self):
return self.optimizer.param_groups
def _set_param_groups(self, value):
self.optimizer.param_groups = value
param_groups = property(_get_param_groups, _set_param_groups)
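# Illustrative usage of the mock above (not part of the test suite): it forwards
# ``step`` and exposes ``param_groups`` as a property, mimicking DeepSpeed's FP16
# ZeRO optimizer wrapper so handlers that read ``optimizer.param_groups`` can be
# exercised, e.g.:
#   model = torch.nn.Linear(2, 2)
#   wrapped = MockFP16DeepSpeedZeroOptimizer(torch.optim.SGD(model.parameters(), lr=0.1))
#   wrapped.param_groups[0]["lr"] = 0.01
#   wrapped.step()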
|
import math
import warnings
from unittest.mock import MagicMock
import pytest
import torch
from ignite.contrib.handlers.neptune_logger import (
global_step_from_engine,
GradsScalarHandler,
NeptuneLogger,
NeptuneSaver,
OptimizerParamsHandler,
OutputHandler,
WeightsScalarHandler,
)
from ignite.engine import Engine, Events, State
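# The helper below reads a logged series back through Neptune's ``fetch_values`` and
# checks it contains exactly one entry; float values are compared with an absolute
# tolerance of 0.01 to absorb logging round-trip imprecision.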
def assert_logger_called_once_with(logger, key, value):
result = logger[key].fetch_values()
assert len(result.value) == 1
if isinstance(result.value[0], float):
assert math.isclose(result.value[0], value, abs_tol=0.01)
else:
assert result.value[0] == value
def test_optimizer_params_handler_wrong_setup():
with pytest.raises(TypeError):
OptimizerParamsHandler(optimizer=None)
optimizer = MagicMock(spec=torch.optim.Optimizer)
handler = OptimizerParamsHandler(optimizer=optimizer)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(TypeError, match="Handler OptimizerParamsHandler works only with NeptuneLogger"):
handler(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_optimizer_params():
optimizer = torch.optim.SGD([torch.tensor(0.0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 123
wrapper(mock_engine, logger, Events.ITERATION_STARTED)
assert_logger_called_once_with(logger, "lr/group_0", 0.01)
logger.stop()
wrapper = OptimizerParamsHandler(optimizer, param_name="lr", tag="generator")
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
wrapper(mock_engine, logger, Events.ITERATION_STARTED)
assert_logger_called_once_with(logger, "generator/lr/group_0", 0.01)
logger.stop()
def test_output_handler_with_wrong_logger_type():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(TypeError, match="Handler OutputHandler works only with NeptuneLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_output_handler_output_transform():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.output = 12345
mock_engine.state.iteration = 123
wrapper(mock_engine, logger, Events.ITERATION_STARTED)
assert_logger_called_once_with(logger, "tag/output", 12345)
logger.stop()
wrapper = OutputHandler("another_tag", output_transform=lambda x: {"loss": x})
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
wrapper(mock_engine, logger, Events.ITERATION_STARTED)
assert_logger_called_once_with(logger, "another_tag/loss", 12345)
logger.stop()
def test_output_handler_metric_names():
wrapper = OutputHandler("tag", metric_names=["a", "b"])
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.iteration = 5
wrapper(mock_engine, logger, Events.ITERATION_STARTED)
assert_logger_called_once_with(logger, "tag/a", 12.23)
assert_logger_called_once_with(logger, "tag/b", 23.45)
logger.stop()
wrapper = OutputHandler("tag", metric_names=["a"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": torch.tensor([0.0, 1.0, 2.0, 3.0])})
mock_engine.state.iteration = 5
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
wrapper(mock_engine, logger, Events.ITERATION_STARTED)
for key, val in [("tag/a/0", 0.0), ("tag/a/1", 1.0), ("tag/a/2", 2.0), ("tag/a/3", 3.0)]:
assert_logger_called_once_with(logger, key, val)
logger.stop()
wrapper = OutputHandler("tag", metric_names=["a", "c"])
mock_engine = MagicMock()
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine.state = State(metrics={"a": 55.56, "c": "Some text"})
mock_engine.state.iteration = 7
with pytest.warns(UserWarning):
wrapper(mock_engine, logger, Events.ITERATION_STARTED)
assert_logger_called_once_with(logger, "tag/a", 55.56)
logger.stop()
# all metrics
wrapper = OutputHandler("tag", metric_names="all")
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.iteration = 5
wrapper(mock_engine, logger, Events.ITERATION_STARTED)
assert_logger_called_once_with(logger, "tag/a", 12.23)
assert_logger_called_once_with(logger, "tag/b", 23.45)
logger.stop()
# log a torch tensor (ndimension = 0)
wrapper = OutputHandler("tag", metric_names="all")
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": torch.tensor(12.23), "b": torch.tensor(23.45)})
mock_engine.state.iteration = 5
wrapper(mock_engine, logger, Events.ITERATION_STARTED)
assert_logger_called_once_with(logger, "tag/a", 12.23)
assert_logger_called_once_with(logger, "tag/b", 23.45)
logger.stop()
def test_output_handler_both():
wrapper = OutputHandler("tag", metric_names=["a", "b"], output_transform=lambda x: {"loss": x})
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, logger, Events.EPOCH_STARTED)
assert_logger_called_once_with(logger, "tag/a", 12.23)
assert_logger_called_once_with(logger, "tag/b", 23.45)
assert_logger_called_once_with(logger, "tag/loss", 12345)
logger.stop()
def test_output_handler_with_wrong_global_step_transform_output():
def global_step_transform(*args, **kwargs):
return "a"
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
with pytest.raises(TypeError, match="global_step must be int"):
wrapper(mock_engine, logger, Events.EPOCH_STARTED)
logger.stop()
def test_output_handler_with_global_step_from_engine():
mock_another_engine = MagicMock()
mock_another_engine.state = State()
mock_another_engine.state.epoch = 10
mock_another_engine.state.output = 12.345
wrapper = OutputHandler(
"tag",
output_transform=lambda x: {"loss": x},
global_step_transform=global_step_from_engine(mock_another_engine),
)
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 1
mock_engine.state.output = 0.123
wrapper(mock_engine, logger, Events.EPOCH_STARTED)
assert_logger_called_once_with(logger, "tag/loss", mock_engine.state.output)
mock_another_engine.state.epoch = 11
mock_engine.state.output = 1.123
wrapper(mock_engine, logger, Events.EPOCH_STARTED)
result = logger["tag/loss"].fetch_values()
assert len(result.value) == 2
assert result.value[1] == mock_engine.state.output
logger.stop()
def test_output_handler_with_global_step_transform():
def global_step_transform(*args, **kwargs):
return 10
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, logger, Events.EPOCH_STARTED)
assert_logger_called_once_with(logger, "tag/loss", 12345)
logger.stop()
def test_output_handler_state_attrs():
wrapper = OutputHandler("tag", state_attributes=["alpha", "beta", "gamma"])
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 5
mock_engine.state.alpha = 3.899
mock_engine.state.beta = torch.tensor(12.23)
mock_engine.state.gamma = torch.tensor([21.0, 6.0])
wrapper(mock_engine, logger, Events.ITERATION_STARTED)
assert_logger_called_once_with(logger, "tag/alpha", 3.899)
assert_logger_called_once_with(logger, "tag/beta", 12.23)
assert_logger_called_once_with(logger, "tag/gamma/0", 21.0)
assert_logger_called_once_with(logger, "tag/gamma/1", 6.0)
logger.stop()
def test_weights_scalar_handler_wrong_setup():
with pytest.raises(TypeError, match="Argument model should be of type torch.nn.Module"):
WeightsScalarHandler(None)
model = MagicMock(spec=torch.nn.Module)
with pytest.raises(TypeError, match="Argument reduction should be callable"):
WeightsScalarHandler(model, reduction=123)
with pytest.raises(TypeError, match="Output of the reduction function should be a scalar"):
WeightsScalarHandler(model, reduction=lambda x: x)
wrapper = WeightsScalarHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(TypeError, match="Handler WeightsScalarHandler works only with NeptuneLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_weights_scalar_handler(dummy_model_factory):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = WeightsScalarHandler(model, tag=tag)
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, logger, Events.EPOCH_STARTED)
tag_prefix = f"{tag}/" if tag else ""
assert_logger_called_once_with(logger, tag_prefix + "weights_norm/fc1/weight", 0.0)
assert_logger_called_once_with(logger, tag_prefix + "weights_norm/fc1/bias", 0.0)
assert_logger_called_once_with(logger, tag_prefix + "weights_norm/fc2/weight", 12.0)
assert_logger_called_once_with(logger, tag_prefix + "weights_norm/fc2/bias", math.sqrt(12.0))
logger.stop()
_test()
_test(tag="tag")
def test_weights_scalar_handler_frozen_layers(dummy_model_factory):
model = dummy_model_factory(with_grads=True, with_frozen_layer=True)
wrapper = WeightsScalarHandler(model)
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, logger, Events.EPOCH_STARTED)
assert_logger_called_once_with(logger, "weights_norm/fc2/weight", 12.0)
assert_logger_called_once_with(logger, "weights_norm/fc2/bias", math.sqrt(12.0))
assert not logger.exists("weights_norm/fc1/weight")
assert not logger.exists("weights_norm/fc1/bias")
logger.stop()
def test_grads_scalar_handler_wrong_setup():
with pytest.raises(TypeError, match="Argument model should be of type torch.nn.Module"):
GradsScalarHandler(None)
model = MagicMock(spec=torch.nn.Module)
with pytest.raises(TypeError, match="Argument reduction should be callable"):
GradsScalarHandler(model, reduction=123)
wrapper = GradsScalarHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(TypeError, match="Handler GradsScalarHandler works only with NeptuneLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_grads_scalar_handler(dummy_model_factory, norm_mock):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = GradsScalarHandler(model, reduction=norm_mock, tag=tag)
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
norm_mock.reset_mock()
wrapper(mock_engine, logger, Events.EPOCH_STARTED)
tag_prefix = f"{tag}/" if tag else ""
assert logger.exists(tag_prefix + "grads_norm/fc1/weight")
assert logger.exists(tag_prefix + "grads_norm/fc1/bias")
assert logger.exists(tag_prefix + "grads_norm/fc2/weight")
assert logger.exists(tag_prefix + "grads_norm/fc2/bias")
logger.stop()
_test()
_test(tag="tag")
def test_grads_scalar_handler_frozen_layers(dummy_model_factory, norm_mock):
model = dummy_model_factory(with_grads=True, with_frozen_layer=True)
wrapper = GradsScalarHandler(model, reduction=norm_mock)
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
norm_mock.reset_mock()
wrapper(mock_engine, logger, Events.EPOCH_STARTED)
assert logger.exists("grads_norm/fc2/weight")
assert logger.exists("grads_norm/fc2/bias")
assert not logger.exists("grads_norm/fc1/weight")
assert not logger.exists("grads_norm/fc1/bias")
logger.stop()
def test_integration():
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
trainer = Engine(update_fn)
npt_logger = NeptuneLogger(mode="offline")
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
logger["test_value"].append(global_step, step=global_step)
npt_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
npt_logger.close()
def test_integration_as_context_manager():
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
with NeptuneLogger(mode="offline") as npt_logger:
trainer = Engine(update_fn)
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
logger["test_value"].append(global_step, step=global_step)
npt_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
def test_neptune_saver_serializable(dirname):
mock_logger = MagicMock(spec=NeptuneLogger)
mock_logger.upload = MagicMock()
model = torch.nn.Module()
to_save_serializable = {"model": model}
saver = NeptuneSaver(mock_logger)
fname = dirname / "test.pt"
saver(to_save_serializable, fname)
assert mock_logger[dirname / "test.pt"].upload.call_count == 1
@pytest.mark.parametrize("model, serializable", [(lambda x: x, False), (torch.nn.Module().to("cpu"), True)])
def test_neptune_saver(model, serializable):
mock_logger = MagicMock(spec=NeptuneLogger)
mock_logger.upload = MagicMock()
to_save_non_serializable = {"model": model}
saver = NeptuneSaver(mock_logger)
fname = "test.pt"
try:
with warnings.catch_warnings():
# Ignore torch/serialization.py:292: UserWarning: Couldn't retrieve source code for container of type
# DummyModel. It won't be checked for correctness upon loading.
warnings.simplefilter("ignore", category=UserWarning)
saver(to_save_non_serializable, fname)
except Exception:
pass
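# A lambda cannot be pickled by ``torch.save``, so in the non-serializable case the
# saver fails before uploading and ``upload`` is never called; for a plain
# ``torch.nn.Module`` the upload happens exactly once, hence ``int(serializable)`` below.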
assert mock_logger["model"].upload.call_count == int(serializable)
def test_logs_version():
from ignite import __version__
from ignite.contrib.handlers.neptune_logger import _INTEGRATION_VERSION_KEY
logger = NeptuneLogger(
project="tests/dry-run",
mode="debug",
)
assert logger[_INTEGRATION_VERSION_KEY].fetch() == __version__
|
import sys
from unittest.mock import ANY, call, MagicMock, patch
import pytest
import torch
from ignite.contrib.handlers.visdom_logger import (
_DummyExecutor,
global_step_from_engine,
GradsScalarHandler,
OptimizerParamsHandler,
OutputHandler,
VisdomLogger,
WeightsScalarHandler,
)
from ignite.engine import Engine, Events, State
def test_optimizer_params_handler_wrong_setup():
with pytest.raises(TypeError):
OptimizerParamsHandler(optimizer=None)
optimizer = MagicMock(spec=torch.optim.Optimizer)
handler = OptimizerParamsHandler(optimizer=optimizer)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler OptimizerParamsHandler works only with VisdomLogger"):
handler(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_optimizer_params():
optimizer = torch.optim.SGD([torch.tensor(0.0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
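# _DummyExecutor is assumed to run submitted work synchronously (the num_workers=0
# path), so the ``vis.line`` assertions below can be made immediately after the call.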
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
# mock_logger.vis.line.assert_called_once_with("lr/group_0", 0.01, 123)
assert len(wrapper.windows) == 1 and "lr/group_0" in wrapper.windows
assert wrapper.windows["lr/group_0"]["win"] is not None
mock_logger.vis.line.assert_called_once_with(
X=[123],
Y=[0.01],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["lr/group_0"]["opts"],
name="lr/group_0",
)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr", tag="generator")
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert len(wrapper.windows) == 1 and "generator/lr/group_0" in wrapper.windows
assert wrapper.windows["generator/lr/group_0"]["win"] is not None
mock_logger.vis.line.assert_called_once_with(
X=[123],
Y=[0.01],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["generator/lr/group_0"]["opts"],
name="generator/lr/group_0",
)
def test_output_handler_with_wrong_logger_type():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'OutputHandler' works only with VisdomLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_output_handler_output_transform(dirname):
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.output = 12345
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert len(wrapper.windows) == 1 and "tag/output" in wrapper.windows
assert wrapper.windows["tag/output"]["win"] is not None
mock_logger.vis.line.assert_called_once_with(
X=[123],
Y=[12345],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/output"]["opts"],
name="tag/output",
)
wrapper = OutputHandler("another_tag", output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert len(wrapper.windows) == 1 and "another_tag/loss" in wrapper.windows
assert wrapper.windows["another_tag/loss"]["win"] is not None
mock_logger.vis.line.assert_called_once_with(
X=[123],
Y=[12345],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["another_tag/loss"]["opts"],
name="another_tag/loss",
)
def test_output_handler_metric_names(dirname):
wrapper = OutputHandler("tag", metric_names=["a", "b"])
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert len(wrapper.windows) == 2 and "tag/a" in wrapper.windows and "tag/b" in wrapper.windows
assert wrapper.windows["tag/a"]["win"] is not None
assert wrapper.windows["tag/b"]["win"] is not None
assert mock_logger.vis.line.call_count == 2
mock_logger.vis.line.assert_has_calls(
[
call(
X=[5],
Y=[12.23],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/a"]["opts"],
name="tag/a",
),
call(
X=[5],
Y=[23.45],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/b"]["opts"],
name="tag/b",
),
],
any_order=True,
)
wrapper = OutputHandler("tag", metric_names=["a"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": torch.tensor([0.0, 1.0, 2.0, 3.0])})
mock_engine.state.iteration = 5
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert len(wrapper.windows) == 4 and all([f"tag/a/{i}" in wrapper.windows for i in range(4)])
assert wrapper.windows["tag/a/0"]["win"] is not None
assert wrapper.windows["tag/a/1"]["win"] is not None
assert wrapper.windows["tag/a/2"]["win"] is not None
assert wrapper.windows["tag/a/3"]["win"] is not None
assert mock_logger.vis.line.call_count == 4
mock_logger.vis.line.assert_has_calls(
[
call(
X=[5],
Y=[0.0],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/a/0"]["opts"],
name="tag/a/0",
),
call(
X=[5],
Y=[1.0],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/a/1"]["opts"],
name="tag/a/1",
),
call(
X=[5],
Y=[2.0],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/a/2"]["opts"],
name="tag/a/2",
),
call(
X=[5],
Y=[3.0],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/a/3"]["opts"],
name="tag/a/3",
),
],
any_order=True,
)
wrapper = OutputHandler("tag", metric_names=["a", "c"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 55.56, "c": "Some text"})
mock_engine.state.iteration = 7
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
with pytest.warns(UserWarning):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert len(wrapper.windows) == 1 and "tag/a" in wrapper.windows
assert wrapper.windows["tag/a"]["win"] is not None
assert mock_logger.vis.line.call_count == 1
mock_logger.vis.line.assert_has_calls(
[
call(
X=[7],
Y=[55.56],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/a"]["opts"],
name="tag/a",
),
],
any_order=True,
)
# all metrics
wrapper = OutputHandler("tag", metric_names="all")
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert len(wrapper.windows) == 2 and "tag/a" in wrapper.windows and "tag/b" in wrapper.windows
assert wrapper.windows["tag/a"]["win"] is not None
assert wrapper.windows["tag/b"]["win"] is not None
assert mock_logger.vis.line.call_count == 2
mock_logger.vis.line.assert_has_calls(
[
call(
X=[5],
Y=[12.23],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/a"]["opts"],
name="tag/a",
),
call(
X=[5],
Y=[23.45],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/b"]["opts"],
name="tag/b",
),
],
any_order=True,
)
def test_output_handler_both(dirname):
wrapper = OutputHandler("tag", metric_names=["a", "b"], output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.vis.line.call_count == 3
assert (
len(wrapper.windows) == 3
and "tag/a" in wrapper.windows
and "tag/b" in wrapper.windows
and "tag/loss" in wrapper.windows
)
assert wrapper.windows["tag/a"]["win"] is not None
assert wrapper.windows["tag/b"]["win"] is not None
assert wrapper.windows["tag/loss"]["win"] is not None
mock_logger.vis.line.assert_has_calls(
[
call(
X=[5],
Y=[12.23],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/a"]["opts"],
name="tag/a",
),
call(
X=[5],
Y=[23.45],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/b"]["opts"],
name="tag/b",
),
call(
X=[5],
Y=[12345],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/loss"]["opts"],
name="tag/loss",
),
],
any_order=True,
)
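# A second call on the same handler is expected to append to the windows created
# above: ``win`` now carries the stored window id and ``update="append"``.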
mock_engine.state.epoch = 6
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.vis.line.call_count == 6
assert (
len(wrapper.windows) == 3
and "tag/a" in wrapper.windows
and "tag/b" in wrapper.windows
and "tag/loss" in wrapper.windows
)
assert wrapper.windows["tag/a"]["win"] is not None
assert wrapper.windows["tag/b"]["win"] is not None
assert wrapper.windows["tag/loss"]["win"] is not None
mock_logger.vis.line.assert_has_calls(
[
call(
X=[6],
Y=[12.23],
env=mock_logger.vis.env,
win=wrapper.windows["tag/a"]["win"],
update="append",
opts=wrapper.windows["tag/a"]["opts"],
name="tag/a",
),
call(
X=[6],
Y=[23.45],
env=mock_logger.vis.env,
win=wrapper.windows["tag/b"]["win"],
update="append",
opts=wrapper.windows["tag/b"]["opts"],
name="tag/b",
),
call(
X=[6],
Y=[12345],
env=mock_logger.vis.env,
win=wrapper.windows["tag/loss"]["win"],
update="append",
opts=wrapper.windows["tag/loss"]["opts"],
name="tag/loss",
),
],
any_order=True,
)
def test_output_handler_state_attrs():
wrapper = OutputHandler("tag", state_attributes=["alpha", "beta", "gamma"])
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 5
mock_engine.state.alpha = 3.899
mock_engine.state.beta = torch.tensor(12.0)
mock_engine.state.gamma = torch.tensor([21.0, 6.0])
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.vis.line.call_count == 4
assert (
len(wrapper.windows) == 4
and "tag/alpha" in wrapper.windows
and "tag/beta" in wrapper.windows
and "tag/gamma/0" in wrapper.windows
and "tag/gamma/1" in wrapper.windows
)
assert wrapper.windows["tag/alpha"]["win"] is not None
assert wrapper.windows["tag/beta"]["win"] is not None
assert wrapper.windows["tag/gamma/0"]["win"] is not None
assert wrapper.windows["tag/gamma/1"]["win"] is not None
mock_logger.vis.line.assert_has_calls(
[
call(
X=[5],
Y=[3.899],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/alpha"]["opts"],
name="tag/alpha",
),
call(
X=[5],
Y=[12.0],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/beta"]["opts"],
name="tag/beta",
),
call(
X=[5],
Y=[21.0],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/gamma/0"]["opts"],
name="tag/gamma/0",
),
call(
X=[5],
Y=[6.0],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/gamma/1"]["opts"],
name="tag/gamma/1",
),
],
any_order=True,
)
def test_output_handler_with_wrong_global_step_transform_output():
def global_step_transform(*args, **kwargs):
return "a"
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
with pytest.raises(TypeError, match="global_step must be int"):
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
def test_output_handler_with_global_step_transform():
def global_step_transform(*args, **kwargs):
return 10
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.vis.line.call_count == 1
assert len(wrapper.windows) == 1 and "tag/loss" in wrapper.windows
assert wrapper.windows["tag/loss"]["win"] is not None
mock_logger.vis.line.assert_has_calls(
[
call(
X=[10],
Y=[12345],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/loss"]["opts"],
name="tag/loss",
)
]
)
def test_output_handler_with_global_step_from_engine():
mock_another_engine = MagicMock()
mock_another_engine.state = State()
mock_another_engine.state.epoch = 10
mock_another_engine.state.output = 12.345
wrapper = OutputHandler(
"tag",
output_transform=lambda x: {"loss": x},
global_step_transform=global_step_from_engine(mock_another_engine),
)
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 1
mock_engine.state.output = 0.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.vis.line.call_count == 1
assert len(wrapper.windows) == 1 and "tag/loss" in wrapper.windows
assert wrapper.windows["tag/loss"]["win"] is not None
mock_logger.vis.line.assert_has_calls(
[
call(
X=[mock_another_engine.state.epoch],
Y=[mock_engine.state.output],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["tag/loss"]["opts"],
name="tag/loss",
)
]
)
mock_another_engine.state.epoch = 11
mock_engine.state.output = 1.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.vis.line.call_count == 2
assert len(wrapper.windows) == 1 and "tag/loss" in wrapper.windows
assert wrapper.windows["tag/loss"]["win"] is not None
mock_logger.vis.line.assert_has_calls(
[
call(
X=[mock_another_engine.state.epoch],
Y=[mock_engine.state.output],
env=mock_logger.vis.env,
win=wrapper.windows["tag/loss"]["win"],
update="append",
opts=wrapper.windows["tag/loss"]["opts"],
name="tag/loss",
)
]
)
def test_weights_scalar_handler_wrong_setup():
with pytest.raises(TypeError, match="Argument model should be of type torch.nn.Module"):
WeightsScalarHandler(None)
model = MagicMock(spec=torch.nn.Module)
with pytest.raises(TypeError, match="Argument reduction should be callable"):
WeightsScalarHandler(model, reduction=123)
with pytest.raises(TypeError, match="Output of the reduction function should be a scalar"):
WeightsScalarHandler(model, reduction=lambda x: x)
wrapper = WeightsScalarHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'WeightsScalarHandler' works only with VisdomLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_weights_scalar_handler():
class DummyModel(torch.nn.Module):
def __init__(self):
super(DummyModel, self).__init__()
self.fc1 = torch.nn.Linear(10, 10)
self.fc2 = torch.nn.Linear(12, 12)
self.fc1.weight.data.zero_()
self.fc1.bias.data.zero_()
self.fc2.weight.data.fill_(1.0)
self.fc2.bias.data.fill_(1.0)
model = DummyModel()
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = WeightsScalarHandler(model, tag=tag)
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = f"{tag}/" if tag else ""
assert mock_logger.vis.line.call_count == 4
mock_logger.vis.line.assert_has_calls(
[
call(
X=[5],
Y=[0.0],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows[tag_prefix + "weights_norm/fc1/weight"]["opts"],
name=tag_prefix + "weights_norm/fc1/weight",
),
call(
X=[5],
Y=[0.0],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows[tag_prefix + "weights_norm/fc1/bias"]["opts"],
name=tag_prefix + "weights_norm/fc1/bias",
),
call(
X=[5],
Y=[12.0],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows[tag_prefix + "weights_norm/fc2/weight"]["opts"],
name=tag_prefix + "weights_norm/fc2/weight",
),
call(
X=[5],
Y=ANY,
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows[tag_prefix + "weights_norm/fc2/bias"]["opts"],
name=tag_prefix + "weights_norm/fc2/bias",
),
],
any_order=True,
)
_test()
_test(tag="tag")
def test_weights_scalar_handler_custom_reduction():
class DummyModel(torch.nn.Module):
def __init__(self):
super(DummyModel, self).__init__()
self.fc1 = torch.nn.Linear(10, 10)
self.fc2 = torch.nn.Linear(12, 12)
self.fc1.weight.data.zero_()
self.fc1.bias.data.zero_()
self.fc2.weight.data.fill_(1.0)
self.fc2.bias.data.fill_(1.0)
model = DummyModel()
def norm(x):
return 12.34
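# A constant reduction means every logged scalar below is 12.34, so this test only
# checks window naming and routing, not the actual norm values.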
wrapper = WeightsScalarHandler(model, reduction=norm, show_legend=True)
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.vis.line.call_count == 4
mock_logger.vis.line.assert_has_calls(
[
call(
X=[5],
Y=[12.34],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["weights_norm/fc1/weight"]["opts"],
name="weights_norm/fc1/weight",
),
call(
X=[5],
Y=[12.34],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["weights_norm/fc1/bias"]["opts"],
name="weights_norm/fc1/bias",
),
call(
X=[5],
Y=[12.34],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["weights_norm/fc2/weight"]["opts"],
name="weights_norm/fc2/weight",
),
call(
X=[5],
Y=[12.34],
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows["weights_norm/fc2/bias"]["opts"],
name="weights_norm/fc2/bias",
),
],
any_order=True,
)
def test_grads_scalar_handler_wrong_setup():
with pytest.raises(TypeError, match="Argument model should be of type torch.nn.Module"):
GradsScalarHandler(None)
model = MagicMock(spec=torch.nn.Module)
with pytest.raises(TypeError, match="Argument reduction should be callable"):
GradsScalarHandler(model, reduction=123)
wrapper = GradsScalarHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'GradsScalarHandler' works only with VisdomLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_grads_scalar_handler(dummy_model_factory, norm_mock):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = GradsScalarHandler(model, reduction=norm_mock, tag=tag)
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
mock_logger.executor = _DummyExecutor()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = f"{tag}/" if tag else ""
assert mock_logger.vis.line.call_count == 4
mock_logger.vis.line.assert_has_calls(
[
call(
X=[5],
Y=ANY,
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows[tag_prefix + "grads_norm/fc1/weight"]["opts"],
name=tag_prefix + "grads_norm/fc1/weight",
),
call(
X=[5],
Y=ANY,
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows[tag_prefix + "grads_norm/fc1/bias"]["opts"],
name=tag_prefix + "grads_norm/fc1/bias",
),
call(
X=[5],
Y=ANY,
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows[tag_prefix + "grads_norm/fc2/weight"]["opts"],
name=tag_prefix + "grads_norm/fc2/weight",
),
call(
X=[5],
Y=ANY,
env=mock_logger.vis.env,
win=None,
update=None,
opts=wrapper.windows[tag_prefix + "grads_norm/fc2/bias"]["opts"],
name=tag_prefix + "grads_norm/fc2/bias",
),
],
any_order=True,
)
_test()
_test(tag="tag")
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_integration_no_server():
with pytest.raises(ConnectionError, match="Error connecting to Visdom server"):
VisdomLogger()
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_logger_init_hostname_port(visdom_server):
# Explicit hostname, port
vd_logger = VisdomLogger(server=visdom_server[0], port=visdom_server[1], num_workers=0)
assert "main" in vd_logger.vis.get_env_list()
vd_logger.close()
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_logger_init_env_vars(visdom_server):
# As env vars
import os
os.environ["VISDOM_SERVER_URL"] = visdom_server[0]
os.environ["VISDOM_PORT"] = str(visdom_server[1])
vd_logger = VisdomLogger(num_workers=0)
assert "main" in vd_logger.vis.get_env_list()
vd_logger.close()
def _parse_content(content):
import json
return json.loads(content)
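# The integration tests below assume visdom's ``get_window_data`` returns a JSON
# string shaped roughly like (sketch, trimmed to the keys that are asserted on):
#   {"content": {"data": [{"x": [...], "y": [...]}]}}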
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_integration_no_executor(visdom_server):
vd_logger = VisdomLogger(server=visdom_server[0], port=visdom_server[1], num_workers=0)
# close all windows in 'main' environment
vd_logger.vis.close()
n_epochs = 3
data = list(range(10))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
trainer = Engine(update_fn)
output_handler = OutputHandler(tag="training", output_transform=lambda x: {"loss": x})
vd_logger.attach(trainer, log_handler=output_handler, event_name=Events.ITERATION_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
assert len(output_handler.windows) == 1
assert "training/loss" in output_handler.windows
win_name = output_handler.windows["training/loss"]["win"]
# use a separate name so that len(data) below still refers to the training data list
win_data = vd_logger.vis.get_window_data(win=win_name)
win_data = _parse_content(win_data)
assert "content" in win_data and "data" in win_data["content"]
win_data = win_data["content"]["data"][0]
assert "x" in win_data and "y" in win_data
x_vals, y_vals = win_data["x"], win_data["y"]
assert all([int(x) == x_true for x, x_true in zip(x_vals, list(range(1, n_epochs * len(data) + 1)))])
assert all([y == y_true for y, y_true in zip(y_vals, losses)])
vd_logger.close()
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_integration_with_executor(visdom_server):
vd_logger = VisdomLogger(server=visdom_server[0], port=visdom_server[1], num_workers=1)
# close all windows in 'main' environment
vd_logger.vis.close()
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
trainer = Engine(update_fn)
output_handler = OutputHandler(tag="training", output_transform=lambda x: {"loss": x})
vd_logger.attach(trainer, log_handler=output_handler, event_name=Events.ITERATION_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
assert len(output_handler.windows) == 1
assert "training/loss" in output_handler.windows
win_name = output_handler.windows["training/loss"]["win"]
win_data = vd_logger.vis.get_window_data(win=win_name)
win_data = _parse_content(win_data)
assert "content" in win_data and "data" in win_data["content"]
win_data = win_data["content"]["data"][0]
assert "x" in win_data and "y" in win_data
x_vals, y_vals = win_data["x"], win_data["y"]
assert all([int(x) == x_true for x, x_true in zip(x_vals, list(range(1, n_epochs * len(data) + 1)))])
assert all([y == y_true for y, y_true in zip(y_vals, losses)])
vd_logger.close()
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_integration_with_executor_as_context_manager(visdom_server, visdom_server_stop):
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
with VisdomLogger(server=visdom_server[0], port=visdom_server[1], num_workers=1) as vd_logger:
# close all windows in 'main' environment
vd_logger.vis.close()
trainer = Engine(update_fn)
output_handler = OutputHandler(tag="training", output_transform=lambda x: {"loss": x})
vd_logger.attach(trainer, log_handler=output_handler, event_name=Events.ITERATION_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
assert len(output_handler.windows) == 1
assert "training/loss" in output_handler.windows
win_name = output_handler.windows["training/loss"]["win"]
win_data = vd_logger.vis.get_window_data(win=win_name)
win_data = _parse_content(win_data)
assert "content" in win_data and "data" in win_data["content"]
win_data = win_data["content"]["data"][0]
assert "x" in win_data and "y" in win_data
x_vals, y_vals = win_data["x"], win_data["y"]
assert all([int(x) == x_true for x, x_true in zip(x_vals, list(range(1, n_epochs * len(data) + 1)))])
assert all([y == y_true for y, y_true in zip(y_vals, losses)])
@pytest.mark.parametrize("no_site_packages", ["visdom"], indirect=True)
def test_no_visdom(no_site_packages):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires visdom package"):
VisdomLogger()
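# The next test patches ``sys.modules`` with ``{"concurrent.futures": None}`` so that
# importing ``concurrent.futures`` fails inside VisdomLogger, exercising the
# missing-dependency error path without uninstalling anything.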
def test_no_concurrent():
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires concurrent.futures"):
with patch.dict("sys.modules", {"concurrent.futures": None}):
VisdomLogger(num_workers=1)
|
from unittest.mock import call, MagicMock
import pytest
import torch
from ignite.contrib.handlers.wandb_logger import (
global_step_from_engine,
OptimizerParamsHandler,
OutputHandler,
WandBLogger,
)
from ignite.engine import Events, State
def test_optimizer_params_handler_wrong_setup():
with pytest.raises(TypeError):
OptimizerParamsHandler(optimizer=None)
optimizer = MagicMock(spec=torch.optim.Optimizer)
handler = OptimizerParamsHandler(optimizer=optimizer)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler OptimizerParamsHandler works only with WandBLogger"):
handler(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_optimizer_params():
optimizer = torch.optim.SGD([torch.tensor(0.0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log.assert_called_once_with({"lr/group_0": 0.01}, step=123, sync=None)
wrapper = OptimizerParamsHandler(optimizer, param_name="lr", tag="generator")
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log.assert_called_once_with({"generator/lr/group_0": 0.01}, step=123, sync=None)
def test_output_handler_with_wrong_logger_type():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'OutputHandler' works only with WandBLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_output_handler_output_transform():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.output = 12345
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log.assert_called_once_with({"tag/output": 12345}, step=123, sync=None)
wrapper = OutputHandler("another_tag", output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log.assert_called_once_with({"another_tag/loss": 12345}, step=123, sync=None)
def test_output_handler_output_transform_sync():
wrapper = OutputHandler("tag", output_transform=lambda x: x, sync=False)
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.output = 12345
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log.assert_called_once_with({"tag/output": 12345}, step=123, sync=False)
wrapper = OutputHandler("another_tag", output_transform=lambda x: {"loss": x}, sync=True)
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log.assert_called_once_with({"another_tag/loss": 12345}, step=123, sync=True)
def test_output_handler_metric_names():
wrapper = OutputHandler("tag", metric_names=["a", "b"])
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 1, "b": 5})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log.assert_called_once_with({"tag/a": 1, "tag/b": 5}, step=5, sync=None)
wrapper = OutputHandler("tag", metric_names=["a", "c"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 55.56, "c": "Some text"})
mock_engine.state.iteration = 7
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log.assert_called_once_with({"tag/a": 55.56, "tag/c": "Some text"}, step=7, sync=None)
# all metrics
wrapper = OutputHandler("tag", metric_names="all")
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log.assert_called_once_with({"tag/a": 12.23, "tag/b": 23.45}, step=5, sync=None)
# log a torch vector
wrapper = OutputHandler("tag", metric_names="all")
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
vector = torch.tensor([0.1, 0.2, 0.1, 0.2, 0.33])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": vector})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log.assert_called_once_with({f"tag/a/{i}": vector[i].item() for i in range(5)}, step=5, sync=None)
# log warning
wrapper = OutputHandler("tag", metric_names=["a"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": [1, 2, 3, 4]})
mock_engine.state.iteration = 7
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
with pytest.warns(UserWarning, match=r"Logger output_handler can not log metrics value type"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_output_handler_both():
wrapper = OutputHandler("tag", metric_names=["a", "b"], output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.log.assert_called_once_with({"tag/a": 12.23, "tag/b": 23.45, "tag/loss": 12345}, step=5, sync=None)
def test_output_handler_with_wrong_global_step_transform_output():
def global_step_transform(*args, **kwargs):
return "a"
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
with pytest.raises(TypeError, match="global_step must be int"):
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
def test_output_handler_with_global_step_transform():
def global_step_transform(*args, **kwargs):
return 10
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.log.assert_called_once_with({"tag/loss": 12345}, step=10, sync=None)
def test_output_handler_with_global_step_from_engine():
mock_another_engine = MagicMock()
mock_another_engine.state = State()
mock_another_engine.state.epoch = 10
mock_another_engine.state.output = 12.345
wrapper = OutputHandler(
"tag",
output_transform=lambda x: {"loss": x},
global_step_transform=global_step_from_engine(mock_another_engine),
)
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 1
mock_engine.state.output = 0.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.log.assert_called_once_with(
{"tag/loss": mock_engine.state.output}, step=mock_another_engine.state.epoch, sync=None
)
mock_another_engine.state.epoch = 11
mock_engine.state.output = 1.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.log.call_count == 2
mock_logger.log.assert_has_calls(
[call({"tag/loss": mock_engine.state.output}, step=mock_another_engine.state.epoch, sync=None)]
)
def test_output_handler_state_attrs():
wrapper = OutputHandler("tag", state_attributes=["alpha", "beta", "gamma", "delta"])
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 5
mock_engine.state.alpha = 3.899
mock_engine.state.beta = torch.tensor(12.21)
mock_engine.state.gamma = torch.tensor([21.0, 6.0])
mock_engine.state.delta = "Some Text"
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log.assert_called_once_with(
{
"tag/alpha": 3.899,
"tag/beta": torch.tensor(12.21).item(),
"tag/gamma/0": 21.0,
"tag/gamma/1": 6.0,
"tag/delta": "Some Text",
},
step=5,
sync=None,
)
def test_wandb_close():
optimizer = torch.optim.SGD([torch.tensor(0.0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
mock_engine = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.close()
@pytest.mark.parametrize("no_site_packages", ["wandb"], indirect=True)
def test_no_wandb_client(no_site_packages):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires wandb to be installed."):
WandBLogger()
def test_wandb_getattr():
import wandb
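# With ``init=False`` the logger skips ``wandb.init`` and simply proxies unknown
# attributes to the ``wandb`` module, which is why ``logger.log`` resolves to
# ``wandb.log`` below (behaviour inferred from this test).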
logger = WandBLogger(init=False)
assert wandb.log == logger.log
|
import os
from unittest.mock import call, MagicMock
import pytest
import torch
from ignite.contrib.handlers.polyaxon_logger import (
global_step_from_engine,
OptimizerParamsHandler,
OutputHandler,
PolyaxonLogger,
)
from ignite.engine import Engine, Events, State
os.environ["POLYAXON_NO_OP"] = "1"
def test_output_handler_with_wrong_logger_type():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'OutputHandler' works only with PolyaxonLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_output_handler_output_transform():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.output = 12345
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with(step=123, **{"tag/output": 12345})
wrapper = OutputHandler("another_tag", output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with(step=123, **{"another_tag/loss": 12345})
def test_output_handler_metric_names():
wrapper = OutputHandler("tag", metric_names=["a", "b", "c"])
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45, "c": torch.tensor(10.0)})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/c": 10.0})
wrapper = OutputHandler("tag", metric_names=["a"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": torch.tensor([0.0, 1.0, 2.0, 3.0])})
mock_engine.state.iteration = 5
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_has_calls(
[call(step=5, **{"tag/a/0": 0.0, "tag/a/1": 1.0, "tag/a/2": 2.0, "tag/a/3": 3.0})], any_order=True
)
wrapper = OutputHandler("tag", metric_names=["a", "c"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 55.56, "c": "Some text"})
mock_engine.state.iteration = 7
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
with pytest.warns(UserWarning):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_has_calls([call(step=7, **{"tag/a": 55.56})], any_order=True)
# all metrics
wrapper = OutputHandler("tag", metric_names="all")
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45, "c": torch.tensor(10.0)})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/c": 10.0})
def test_output_handler_both():
wrapper = OutputHandler("tag", metric_names=["a", "b"], output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/loss": 12345})
def test_output_handler_with_wrong_global_step_transform_output():
def global_step_transform(*args, **kwargs):
return "a"
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
with pytest.raises(TypeError, match="global_step must be int"):
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
def test_output_handler_with_global_step_transform():
def global_step_transform(*args, **kwargs):
return 10
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.log_metrics.assert_called_once_with(step=10, **{"tag/loss": 12345})
def test_output_handler_with_global_step_from_engine():
mock_another_engine = MagicMock()
mock_another_engine.state = State()
mock_another_engine.state.epoch = 10
mock_another_engine.state.output = 12.345
wrapper = OutputHandler(
"tag",
output_transform=lambda x: {"loss": x},
global_step_transform=global_step_from_engine(mock_another_engine),
)
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 1
mock_engine.state.output = 0.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_has_calls(
[call(step=mock_another_engine.state.epoch, **{"tag/loss": mock_engine.state.output})]
)
mock_another_engine.state.epoch = 11
mock_engine.state.output = 1.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.log_metrics.call_count == 2
mock_logger.log_metrics.assert_has_calls(
[call(step=mock_another_engine.state.epoch, **{"tag/loss": mock_engine.state.output})]
)
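# Illustrative sketch, not executed: as the assertions above imply, global_step_from_engine is
# typically used to log an evaluator's metrics against the trainer's epoch counter. The
# "validation" tag and the "accuracy" metric name are arbitrary examples.
def _example_log_validation_with_trainer_step(trainer, evaluator, plx_logger):
    plx_logger.attach(
        evaluator,
        log_handler=OutputHandler(
            tag="validation",
            metric_names=["accuracy"],
            global_step_transform=global_step_from_engine(trainer),
        ),
        event_name=Events.EPOCH_COMPLETED,
    )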
def test_output_handler_state_attrs():
wrapper = OutputHandler("tag", state_attributes=["alpha", "beta", "gamma"])
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 5
mock_engine.state.alpha = 3.899
mock_engine.state.beta = torch.tensor(12.21)
mock_engine.state.gamma = torch.tensor([21.0, 6.0])
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with(
**{"tag/alpha": 3.899, "tag/beta": torch.tensor(12.21).item(), "tag/gamma/0": 21.0, "tag/gamma/1": 6.0}, step=5
)
def test_optimizer_params_handler_wrong_setup():
with pytest.raises(TypeError):
OptimizerParamsHandler(optimizer=None)
optimizer = MagicMock(spec=torch.optim.Optimizer)
handler = OptimizerParamsHandler(optimizer=optimizer)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler OptimizerParamsHandler works only with PolyaxonLogger"):
handler(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_optimizer_params():
optimizer = torch.optim.SGD([torch.tensor(0.0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with(**{"lr/group_0": 0.01, "step": 123})
wrapper = OptimizerParamsHandler(optimizer, param_name="lr", tag="generator")
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with(**{"generator/lr/group_0": 0.01, "step": 123})
def test_integration():
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
trainer = Engine(update_fn)
plx_logger = PolyaxonLogger()
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
logger.log_metrics(step=global_step, **{"test_value": global_step})
plx_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
plx_logger.close()
def test_integration_as_context_manager():
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
with PolyaxonLogger() as plx_logger:
trainer = Engine(update_fn)
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
logger.log_metrics(step=global_step, **{"test_value": global_step})
plx_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
@pytest.mark.parametrize("no_site_packages", ["polyaxon"], indirect=True)
def test_no_polyaxon_client(no_site_packages):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires polyaxon"):
PolyaxonLogger()
|
# -*- coding: utf-8 -*-
import sys
import time
from argparse import Namespace
from unittest.mock import patch
import numpy as np
import pytest
import torch
from packaging.version import Version
from ignite.contrib.handlers import ProgressBar
from ignite.engine import Engine, Events
from ignite.handlers import TerminateOnNan
from ignite.metrics import RunningAverage
if sys.platform.startswith("win"):
pytest.skip("Skip on Windows", allow_module_level=True)
def get_tqdm_version():
import tqdm
return Version(tqdm.__version__)
def update_fn(engine, batch):
a = 1
engine.state.metrics["a"] = a
return a
def test_pbar_errors():
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires tqdm to be installed"):
with patch.dict("sys.modules", {"tqdm.autonotebook": None}):
ProgressBar()
pbar = ProgressBar()
with pytest.raises(ValueError, match=r"Logging event abc is not in allowed"):
pbar.attach(Engine(lambda e, b: None), event_name=Namespace(name="abc"))
def test_pbar(capsys):
n_epochs = 2
loader = [1, 2]
engine = Engine(update_fn)
pbar = ProgressBar()
pbar.attach(engine, ["a"])
engine.run(loader, max_epochs=n_epochs)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
if get_tqdm_version() < Version("4.49.0"):
expected = "Epoch [2/2]: [1/2] 50%|█████ , a=1 [00:00<00:00]"
else:
expected = "Epoch [2/2]: [1/2] 50%|█████ , a=1 [00:00<?]"
assert err[-1] == expected
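# A minimal sketch of the stderr parsing repeated inline throughout this file: tqdm redraws the
# bar using carriage returns, so the last non-empty "\r"-separated chunk is the final bar state.
# The helper name is hypothetical; the tests deliberately keep this logic inline.
def _last_pbar_line(captured_err):
    chunks = [c.strip() for c in captured_err.split("\r")]
    chunks = [c for c in chunks if c]
    return chunks[-1]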
def test_pbar_file(tmp_path):
n_epochs = 2
loader = [1, 2]
engine = Engine(update_fn)
file_path = tmp_path / "temp.txt"
file = open(str(file_path), "w+")
pbar = ProgressBar(file=file)
pbar.attach(engine, ["a"])
engine.run(loader, max_epochs=n_epochs)
file.close() # Force a flush of the buffer. file.flush() does not work.
file = open(str(file_path), "r")
lines = file.readlines()
if get_tqdm_version() < Version("4.49.0"):
expected = "Epoch [2/2]: [1/2] 50%|█████ , a=1 [00:00<00:00]\n"
else:
expected = "Epoch [2/2]: [1/2] 50%|█████ , a=1 [00:00<?]\n"
assert lines[-2] == expected
def test_pbar_log_message(capsys):
pbar = ProgressBar()
pbar.log_message("test")
captured = capsys.readouterr()
out = captured.out.split("\r")
out = list(map(lambda x: x.strip(), out))
out = list(filter(None, out))
expected = "test"
assert out[-1] == expected
def test_pbar_log_message_file(tmp_path):
file_path = tmp_path / "temp.txt"
file = open(str(file_path), "w+")
pbar = ProgressBar(file=file)
pbar.log_message("test")
file.close() # Force a flush of the buffer. file.flush() does not work.
file = open(str(file_path), "r")
lines = file.readlines()
expected = "test\n"
assert lines[0] == expected
def test_attach_fail_with_string():
engine = Engine(update_fn)
pbar = ProgressBar()
with pytest.raises(TypeError):
pbar.attach(engine, "a")
def test_pbar_batch_indices(capsys):
engine = Engine(lambda e, b: time.sleep(0.1))
@engine.on(Events.ITERATION_STARTED)
def print_iter(_):
print("iteration: ", engine.state.iteration)
ProgressBar(persist=True).attach(engine)
engine.run(list(range(4)), max_epochs=1)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
printed_batch_indices = set(map(lambda x: int(x.split("/")[0][-1]), err))
expected_batch_indices = list(range(1, 5))
assert sorted(list(printed_batch_indices)) == expected_batch_indices
def test_pbar_with_metric(capsys):
n_iters = 2
data = list(range(n_iters))
loss_values = iter(range(n_iters))
def step(engine, batch):
loss_value = next(loss_values)
return loss_value
trainer = Engine(step)
RunningAverage(alpha=0.5, output_transform=lambda x: x).attach(trainer, "batchloss")
pbar = ProgressBar()
pbar.attach(trainer, metric_names=["batchloss"])
trainer.run(data=data, max_epochs=1)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
actual = err[-1]
if get_tqdm_version() < Version("4.49.0"):
expected = "Iteration: [1/2] 50%|█████ , batchloss=0.5 [00:00<00:00]"
else:
expected = "Iteration: [1/2] 50%|█████ , batchloss=0.5 [00:00<?]"
assert actual == expected
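# Illustrative sketch, not executed: the usual pattern behind this test is to smooth the raw
# loss with RunningAverage and let the progress bar display it. The metric name is an example.
def _example_pbar_with_running_loss(trainer):
    RunningAverage(output_transform=lambda loss: loss).attach(trainer, "running_loss")
    ProgressBar(persist=True).attach(trainer, metric_names=["running_loss"])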
def test_pbar_with_all_metric(capsys):
n_iters = 2
data = list(range(n_iters))
loss_values = iter(range(n_iters))
another_loss_values = iter(range(1, n_iters + 1))
def step(engine, batch):
loss_value = next(loss_values)
another_loss_value = next(another_loss_values)
return loss_value, another_loss_value
trainer = Engine(step)
RunningAverage(alpha=0.5, output_transform=lambda x: x[0]).attach(trainer, "batchloss")
RunningAverage(alpha=0.5, output_transform=lambda x: x[1]).attach(trainer, "another batchloss")
pbar = ProgressBar()
pbar.attach(trainer, metric_names="all")
trainer.run(data=data, max_epochs=1)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
actual = err[-1]
if get_tqdm_version() < Version("4.49.0"):
expected = "Iteration: [1/2] 50%|█████ , batchloss=0.5, another batchloss=1.5 [00:00<00:00]"
else:
expected = "Iteration: [1/2] 50%|█████ , batchloss=0.5, another batchloss=1.5 [00:00<?]"
assert actual == expected
def test_pbar_with_state_attrs(capsys):
n_iters = 2
data = list(range(n_iters))
loss_values = iter(range(n_iters))
def step(engine, batch):
loss_value = next(loss_values)
return loss_value
trainer = Engine(step)
trainer.state.alpha = 3.899
trainer.state.beta = torch.tensor(12.21)
trainer.state.gamma = torch.tensor([21.0, 6.0])
RunningAverage(alpha=0.5, output_transform=lambda x: x).attach(trainer, "batchloss")
pbar = ProgressBar()
pbar.attach(trainer, metric_names=["batchloss"], state_attributes=["alpha", "beta", "gamma"])
trainer.run(data=data, max_epochs=1)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
actual = err[-1]
if get_tqdm_version() < Version("4.49.0"):
expected = (
"Iteration: [1/2] 50%|█████ , batchloss=0.5, alpha=3.9, beta=12.2, gamma_0=21, gamma_1=6 [00:00<00:00]"
)
else:
expected = (
"Iteration: [1/2] 50%|█████ , batchloss=0.5, alpha=3.9, beta=12.2, gamma_0=21, gamma_1=6 [00:00<?]"
)
assert actual == expected
def test_pbar_no_metric_names(capsys):
n_epochs = 2
loader = [1, 2]
engine = Engine(update_fn)
pbar = ProgressBar()
pbar.attach(engine)
engine.run(loader, max_epochs=n_epochs)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
actual = err[-1]
if get_tqdm_version() < Version("4.49.0"):
expected = "Epoch [2/2]: [1/2] 50%|█████ [00:00<00:00]"
else:
expected = "Epoch [2/2]: [1/2] 50%|█████ [00:00<?]"
assert actual == expected
def test_pbar_with_output(capsys):
n_epochs = 2
loader = [1, 2]
engine = Engine(update_fn)
pbar = ProgressBar()
pbar.attach(engine, output_transform=lambda x: {"a": x})
engine.run(loader, max_epochs=n_epochs)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
if get_tqdm_version() < Version("4.49.0"):
expected = "Epoch [2/2]: [1/2] 50%|█████ , a=1 [00:00<00:00]"
else:
expected = "Epoch [2/2]: [1/2] 50%|█████ , a=1 [00:00<?]"
assert err[-1] == expected
def test_pbar_fail_with_non_callable_transform():
engine = Engine(update_fn)
pbar = ProgressBar()
with pytest.raises(TypeError):
pbar.attach(engine, output_transform=1)
def test_pbar_with_scalar_output(capsys):
n_epochs = 2
loader = [1, 2]
engine = Engine(update_fn)
pbar = ProgressBar()
pbar.attach(engine, output_transform=lambda x: x)
engine.run(loader, max_epochs=n_epochs)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
if get_tqdm_version() < Version("4.49.0"):
expected = "Epoch [2/2]: [1/2] 50%|█████ , output=1 [00:00<00:00]"
else:
expected = "Epoch [2/2]: [1/2] 50%|█████ , output=1 [00:00<?]"
assert err[-1] == expected
def test_pbar_with_str_output(capsys):
n_epochs = 2
loader = [1, 2]
engine = Engine(update_fn)
pbar = ProgressBar()
pbar.attach(engine, output_transform=lambda x: "red")
engine.run(loader, max_epochs=n_epochs)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
if get_tqdm_version() < Version("4.49.0"):
expected = "Epoch [2/2]: [1/2] 50%|█████ , output=red [00:00<00:00]"
else:
expected = "Epoch [2/2]: [1/2] 50%|█████ , output=red [00:00<?]"
assert err[-1] == expected
def test_pbar_with_tqdm_kwargs(capsys):
n_epochs = 10
loader = [1, 2, 3, 4, 5]
engine = Engine(update_fn)
pbar = ProgressBar(desc="My description: ")
pbar.attach(engine, output_transform=lambda x: x)
engine.run(loader, max_epochs=n_epochs)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
expected = "My description: [10/10]: [4/5] 80%|████████ , output=1 [00:00<00:00]"
assert err[-1] == expected
def test_pbar_for_validation(capsys):
loader = [1, 2, 3, 4, 5]
engine = Engine(update_fn)
pbar = ProgressBar(desc="Validation")
pbar.attach(engine)
engine.run(loader, max_epochs=1)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
expected = "Validation: [4/5] 80%|████████ [00:00<00:00]"
assert err[-1] == expected
def test_pbar_output_tensor(capsys):
def _test(out_tensor, out_msg):
loader = [1, 2, 3, 4, 5]
def update_fn(engine, batch):
return out_tensor
engine = Engine(update_fn)
pbar = ProgressBar(desc="Output tensor")
pbar.attach(engine, output_transform=lambda x: x)
engine.run(loader, max_epochs=1)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
expected = f"Output tensor: [4/5] 80%|████████ , {out_msg} [00:00<00:00]"
assert err[-1] == expected
_test(out_tensor=torch.tensor([5, 0]), out_msg="output_0=5, output_1=0")
_test(out_tensor=torch.tensor(123), out_msg="output=123")
_test(out_tensor=torch.tensor(1.234), out_msg="output=1.23")
def test_pbar_output_warning(capsys):
loader = [1, 2, 3, 4, 5]
def update_fn(engine, batch):
return torch.zeros(1, 2, 3, 4)
engine = Engine(update_fn)
pbar = ProgressBar(desc="Output tensor")
pbar.attach(engine, output_transform=lambda x: x)
with pytest.warns(UserWarning):
engine.run(loader, max_epochs=1)
def test_pbar_on_epochs(capsys):
n_epochs = 10
loader = [1, 2, 3, 4, 5]
engine = Engine(update_fn)
pbar = ProgressBar()
pbar.attach(engine, event_name=Events.EPOCH_STARTED, closing_event_name=Events.COMPLETED)
engine.run(loader, max_epochs=n_epochs)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
actual = err[-1]
expected = "Epoch: [9/10] 90%|█████████ [00:00<00:00]"
assert actual == expected
def test_pbar_with_max_epochs_set_to_one(capsys):
n_epochs = 1
loader = [1, 2]
engine = Engine(update_fn)
pbar = ProgressBar()
pbar.attach(engine, ["a"])
engine.run(loader, max_epochs=n_epochs)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
if get_tqdm_version() < Version("4.49.0"):
expected = "Iteration: [1/2] 50%|█████ , a=1 [00:00<00:00]"
else:
expected = "Iteration: [1/2] 50%|█████ , a=1 [00:00<?]"
assert err[-1] == expected
def test_pbar_wrong_events_order():
engine = Engine(update_fn)
pbar = ProgressBar()
with pytest.raises(ValueError, match="should be called before closing event"):
pbar.attach(engine, event_name=Events.COMPLETED, closing_event_name=Events.COMPLETED)
with pytest.raises(ValueError, match="should be called before closing event"):
pbar.attach(engine, event_name=Events.COMPLETED, closing_event_name=Events.EPOCH_COMPLETED)
with pytest.raises(ValueError, match="should be called before closing event"):
pbar.attach(engine, event_name=Events.COMPLETED, closing_event_name=Events.ITERATION_COMPLETED)
with pytest.raises(ValueError, match="should be called before closing event"):
pbar.attach(engine, event_name=Events.EPOCH_COMPLETED, closing_event_name=Events.EPOCH_COMPLETED)
with pytest.raises(ValueError, match="should be called before closing event"):
pbar.attach(engine, event_name=Events.ITERATION_COMPLETED, closing_event_name=Events.ITERATION_STARTED)
with pytest.raises(ValueError, match="should not be a filtered event"):
pbar.attach(engine, event_name=Events.ITERATION_STARTED, closing_event_name=Events.EPOCH_COMPLETED(every=10))
def test_pbar_with_nan_input():
def update(engine, batch):
x = batch
return x.item()
def create_engine():
engine = Engine(update)
pbar = ProgressBar()
engine.add_event_handler(Events.ITERATION_COMPLETED, TerminateOnNan())
pbar.attach(engine, event_name=Events.EPOCH_COMPLETED, closing_event_name=Events.COMPLETED)
return engine
data = torch.from_numpy(np.array([np.nan] * 25))
engine = create_engine()
engine.run(data)
assert engine.should_terminate
assert engine.state.iteration == 1
assert engine.state.epoch == 1
data = torch.from_numpy(np.array([1] * 1000 + [np.nan] * 25))
engine = create_engine()
engine.run(data)
assert engine.should_terminate
assert engine.state.iteration == 1001
assert engine.state.epoch == 1
def test_pbar_on_callable_events(capsys):
n_epochs = 1
loader = list(range(100))
engine = Engine(update_fn)
pbar = ProgressBar()
pbar.attach(engine, event_name=Events.ITERATION_STARTED(every=10), closing_event_name=Events.EPOCH_COMPLETED)
engine.run(loader, max_epochs=n_epochs)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
actual = err[-1]
expected = "Iteration: [90/100] 90%|█████████ [00:00<00:00]"
assert actual == expected
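# Illustrative sketch, not executed: the configuration exercised above corresponds to a bar that
# advances every 10 iterations and closes at the end of each epoch. The interval is an example.
def _example_coarse_progress_bar(trainer):
    ProgressBar().attach(
        trainer,
        event_name=Events.ITERATION_STARTED(every=10),
        closing_event_name=Events.EPOCH_COMPLETED,
    )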
def test_tqdm_logger_epoch_length(capsys):
loader = list(range(100))
engine = Engine(update_fn)
pbar = ProgressBar(persist=True)
pbar.attach(engine)
engine.run(loader, epoch_length=50)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
actual = err[-1]
expected = "Iteration: [50/50] 100%|██████████ [00:00<00:00]"
assert actual == expected
def test_tqdm_logger_iter_without_epoch_length(capsys):
size = 11
def finite_size_data_iter(size):
for i in range(size):
yield i
def train_step(trainer, batch):
pass
trainer = Engine(train_step)
@trainer.on(Events.ITERATION_COMPLETED(every=size))
def restart_iter():
trainer.state.dataloader = finite_size_data_iter(size)
pbar = ProgressBar(persist=True)
pbar.attach(trainer)
data_iter = finite_size_data_iter(size)
trainer.run(data_iter, max_epochs=5)
captured = capsys.readouterr()
err = captured.err.split("\r")
err = list(map(lambda x: x.strip(), err))
err = list(filter(None, err))
actual = err[-1]
expected = "Epoch [5/5]: [11/11] 100%|██████████ [00:00<00:00]"
assert actual == expected
|
import math
import os
from unittest.mock import ANY, call, MagicMock, patch
import pytest
import torch
from ignite.contrib.handlers.tensorboard_logger import (
global_step_from_engine,
GradsHistHandler,
GradsScalarHandler,
OptimizerParamsHandler,
OutputHandler,
TensorboardLogger,
WeightsHistHandler,
WeightsScalarHandler,
)
from ignite.engine import Engine, Events, State
def test_optimizer_params_handler_wrong_setup():
with pytest.raises(TypeError):
OptimizerParamsHandler(optimizer=None)
optimizer = MagicMock(spec=torch.optim.Optimizer)
handler = OptimizerParamsHandler(optimizer=optimizer)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler OptimizerParamsHandler works only with TensorboardLogger"):
handler(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_getattr_method():
# Create a mock SummaryWriter object
mock_writer = MagicMock()
# Assign the mock object to the writer attribute of a TensorboardLogger instance
logger = TensorboardLogger()
logger.writer = mock_writer
# Test that a method passed through the __getattr__ method calls the corresponding method on the mock object
logger.add_scalar("loss", 0.5)
mock_writer.add_scalar.assert_called_once_with("loss", 0.5)
def test_optimizer_params():
optimizer = torch.optim.SGD([torch.tensor(0.0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.writer.add_scalar.assert_called_once_with("lr/group_0", 0.01, 123)
wrapper = OptimizerParamsHandler(optimizer, param_name="lr", tag="generator")
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.writer.add_scalar.assert_called_once_with("generator/lr/group_0", 0.01, 123)
def test_output_handler_with_wrong_logger_type():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'OutputHandler' works only with TensorboardLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_output_handler_output_transform():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.output = 12345
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.writer.add_scalar.assert_called_once_with("tag/output", 12345, 123)
wrapper = OutputHandler("another_tag", output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.writer.add_scalar.assert_called_once_with("another_tag/loss", 12345, 123)
def test_output_handler_metric_names():
wrapper = OutputHandler("tag", metric_names=["a", "b"])
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.writer.add_scalar.call_count == 2
mock_logger.writer.add_scalar.assert_has_calls([call("tag/a", 12.23, 5), call("tag/b", 23.45, 5)], any_order=True)
wrapper = OutputHandler("tag", metric_names=["a"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": torch.tensor([0.0, 1.0, 2.0, 3.0])})
mock_engine.state.iteration = 5
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.writer.add_scalar.call_count == 4
mock_logger.writer.add_scalar.assert_has_calls(
[call("tag/a/0", 0.0, 5), call("tag/a/1", 1.0, 5), call("tag/a/2", 2.0, 5), call("tag/a/3", 3.0, 5)],
any_order=True,
)
wrapper = OutputHandler("tag", metric_names=["a", "c"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 55.56, "c": "Some text"})
mock_engine.state.iteration = 7
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
with pytest.warns(UserWarning):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.writer.add_scalar.call_count == 1
mock_logger.writer.add_scalar.assert_has_calls([call("tag/a", 55.56, 7)], any_order=True)
# all metrics
wrapper = OutputHandler("tag", metric_names="all")
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.writer.add_scalar.call_count == 2
mock_logger.writer.add_scalar.assert_has_calls([call("tag/a", 12.23, 5), call("tag/b", 23.45, 5)], any_order=True)
# log a torch tensor (ndimension = 0)
wrapper = OutputHandler("tag", metric_names="all")
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": torch.tensor(12.23), "b": torch.tensor(23.45)})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.writer.add_scalar.call_count == 2
mock_logger.writer.add_scalar.assert_has_calls(
[call("tag/a", torch.tensor(12.23).item(), 5), call("tag/b", torch.tensor(23.45).item(), 5)], any_order=True
)
def test_output_handler_both():
wrapper = OutputHandler("tag", metric_names=["a", "b"], output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.writer.add_scalar.call_count == 3
mock_logger.writer.add_scalar.assert_has_calls(
[call("tag/a", 12.23, 5), call("tag/b", 23.45, 5), call("tag/loss", 12345, 5)], any_order=True
)
def test_output_handler_with_wrong_global_step_transform_output():
def global_step_transform(*args, **kwargs):
return "a"
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
with pytest.raises(TypeError, match="global_step must be int"):
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
def test_output_handler_with_global_step_from_engine():
mock_another_engine = MagicMock()
mock_another_engine.state = State()
mock_another_engine.state.epoch = 10
mock_another_engine.state.output = 12.345
wrapper = OutputHandler(
"tag",
output_transform=lambda x: {"loss": x},
global_step_transform=global_step_from_engine(mock_another_engine),
)
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 1
mock_engine.state.output = 0.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.writer.add_scalar.call_count == 1
mock_logger.writer.add_scalar.assert_has_calls(
[call("tag/loss", mock_engine.state.output, mock_another_engine.state.epoch)]
)
mock_another_engine.state.epoch = 11
mock_engine.state.output = 1.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.writer.add_scalar.call_count == 2
mock_logger.writer.add_scalar.assert_has_calls(
[call("tag/loss", mock_engine.state.output, mock_another_engine.state.epoch)]
)
def test_output_handler_with_global_step_transform():
def global_step_transform(*args, **kwargs):
return 10
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.writer.add_scalar.call_count == 1
mock_logger.writer.add_scalar.assert_has_calls([call("tag/loss", 12345, 10)])
def test_weights_scalar_handler_wrong_setup():
model = MagicMock(spec=torch.nn.Module)
wrapper = WeightsScalarHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'WeightsScalarHandler' works only with TensorboardLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_weights_scalar_handler(dummy_model_factory):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = WeightsScalarHandler(model, tag=tag)
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = f"{tag}/" if tag else ""
assert mock_logger.writer.add_scalar.call_count == 4
mock_logger.writer.add_scalar.assert_has_calls(
[
call(tag_prefix + "weights_norm/fc1/weight", 0.0, 5),
call(tag_prefix + "weights_norm/fc1/bias", 0.0, 5),
call(tag_prefix + "weights_norm/fc2/weight", 12.0, 5),
call(tag_prefix + "weights_norm/fc2/bias", pytest.approx(math.sqrt(12.0)), 5),
],
any_order=True,
)
_test()
_test(tag="tag")
def test_weights_scalar_handler_whitelist(dummy_model_factory):
model = dummy_model_factory()
wrapper = WeightsScalarHandler(model, whitelist=["fc2.weight"])
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_scalar.assert_called_once_with("weights_norm/fc2/weight", 12.0, 5)
mock_logger.writer.reset_mock()
wrapper = WeightsScalarHandler(model, tag="model", whitelist=["fc1"])
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_scalar.assert_has_calls(
[
call("model/weights_norm/fc1/weight", 0.0, 5),
call("model/weights_norm/fc1/bias", 0.0, 5),
],
any_order=True,
)
assert mock_logger.writer.add_scalar.call_count == 2
mock_logger.writer.reset_mock()
def weight_selector(n, _):
return "bias" in n
wrapper = WeightsScalarHandler(model, tag="model", whitelist=weight_selector)
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_scalar.assert_has_calls(
[
call("model/weights_norm/fc1/bias", 0.0, 5),
call("model/weights_norm/fc2/bias", pytest.approx(math.sqrt(12.0)), 5),
],
any_order=True,
)
assert mock_logger.writer.add_scalar.call_count == 2
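# Illustrative sketch, not executed: in real code the handler is attached to a live logger, and
# the whitelist can be a list of parameter-name prefixes or a callable over (name, parameter),
# as exercised above. The bias filter and the chosen event are arbitrary examples.
def _example_log_bias_norms(trainer, tb_logger, model):
    tb_logger.attach(
        trainer,
        log_handler=WeightsScalarHandler(model, whitelist=lambda name, _: "bias" in name),
        event_name=Events.EPOCH_COMPLETED,
    )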
def test_weights_hist_handler_wrong_setup():
model = MagicMock(spec=torch.nn.Module)
wrapper = WeightsHistHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'WeightsHistHandler' works only with TensorboardLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_weights_hist_handler(dummy_model_factory):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = WeightsHistHandler(model, tag=tag)
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = f"{tag}/" if tag else ""
assert mock_logger.writer.add_histogram.call_count == 4
mock_logger.writer.add_histogram.assert_has_calls(
[
call(tag=tag_prefix + "weights/fc1/weight", values=ANY, global_step=5),
call(tag=tag_prefix + "weights/fc1/bias", values=ANY, global_step=5),
call(tag=tag_prefix + "weights/fc2/weight", values=ANY, global_step=5),
call(tag=tag_prefix + "weights/fc2/bias", values=ANY, global_step=5),
],
any_order=True,
)
_test()
_test(tag="tag")
def test_weights_hist_handler_whitelist(dummy_model_factory):
model = dummy_model_factory()
wrapper = WeightsHistHandler(model, whitelist=["fc2.weight"])
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_histogram.assert_called_once_with(tag="weights/fc2/weight", values=ANY, global_step=5)
mock_logger.writer.reset_mock()
wrapper = WeightsHistHandler(model, tag="model", whitelist=["fc1"])
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_histogram.assert_has_calls(
[
call(tag="model/weights/fc1/weight", values=ANY, global_step=5),
call(tag="model/weights/fc1/bias", values=ANY, global_step=5),
],
any_order=True,
)
assert mock_logger.writer.add_histogram.call_count == 2
mock_logger.writer.reset_mock()
def weight_selector(n, _):
return "bias" in n
wrapper = WeightsHistHandler(model, tag="model", whitelist=weight_selector)
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_histogram.assert_has_calls(
[
call(tag="model/weights/fc1/bias", values=ANY, global_step=5),
call(tag="model/weights/fc2/bias", values=ANY, global_step=5),
],
any_order=True,
)
assert mock_logger.writer.add_histogram.call_count == 2
def test_grads_scalar_handler_wrong_setup():
model = MagicMock(spec=torch.nn.Module)
wrapper = GradsScalarHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'GradsScalarHandler' works only with TensorboardLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_grads_scalar_handler(dummy_model_factory, norm_mock):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = GradsScalarHandler(model, reduction=norm_mock, tag=tag)
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
norm_mock.reset_mock()
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = f"{tag}/" if tag else ""
mock_logger.writer.add_scalar.assert_has_calls(
[
call(tag_prefix + "grads_norm/fc1/weight", ANY, 5),
call(tag_prefix + "grads_norm/fc1/bias", ANY, 5),
call(tag_prefix + "grads_norm/fc2/weight", ANY, 5),
call(tag_prefix + "grads_norm/fc2/bias", ANY, 5),
],
any_order=True,
)
assert mock_logger.writer.add_scalar.call_count == 4
assert norm_mock.call_count == 4
_test()
_test(tag="tag")
def test_grads_scalar_handler_whitelist(dummy_model_factory, norm_mock):
model = dummy_model_factory()
wrapper = GradsScalarHandler(model, reduction=norm_mock, whitelist=["fc2.weight"])
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_scalar.assert_called_once_with("grads_norm/fc2/weight", ANY, 5)
mock_logger.writer.reset_mock()
wrapper = GradsScalarHandler(model, tag="model", whitelist=["fc1"])
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_scalar.assert_has_calls(
[
call("model/grads_norm/fc1/weight", ANY, 5),
call("model/grads_norm/fc1/bias", ANY, 5),
],
any_order=True,
)
assert mock_logger.writer.add_scalar.call_count == 2
mock_logger.writer.reset_mock()
def weight_selector(n, _):
return "bias" in n
wrapper = GradsScalarHandler(model, tag="model", whitelist=weight_selector)
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_scalar.assert_has_calls(
[
call("model/grads_norm/fc1/bias", ANY, 5),
call("model/grads_norm/fc2/bias", ANY, 5),
],
any_order=True,
)
assert mock_logger.writer.add_scalar.call_count == 2
def test_grads_hist_handler_wrong_setup():
model = MagicMock(spec=torch.nn.Module)
wrapper = GradsHistHandler(model)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'GradsHistHandler' works only with TensorboardLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_grads_hist_handler(dummy_model_factory):
model = dummy_model_factory(with_grads=True, with_frozen_layer=False)
# define test wrapper to test with and without optional tag
def _test(tag=None):
wrapper = GradsHistHandler(model, tag=tag)
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
tag_prefix = f"{tag}/" if tag else ""
assert mock_logger.writer.add_histogram.call_count == 4
mock_logger.writer.add_histogram.assert_has_calls(
[
call(tag=tag_prefix + "grads/fc1/weight", values=ANY, global_step=5),
call(tag=tag_prefix + "grads/fc1/bias", values=ANY, global_step=5),
call(tag=tag_prefix + "grads/fc2/weight", values=ANY, global_step=5),
call(tag=tag_prefix + "grads/fc2/bias", values=ANY, global_step=5),
],
any_order=True,
)
_test()
_test(tag="tag")
def test_grads_hist_handler_whitelist(dummy_model_factory):
model = dummy_model_factory()
wrapper = GradsHistHandler(model, whitelist=["fc2.weight"])
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_histogram.assert_called_once_with(tag="grads/fc2/weight", values=ANY, global_step=5)
mock_logger.writer.reset_mock()
wrapper = GradsHistHandler(model, tag="model", whitelist=["fc1"])
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_histogram.assert_has_calls(
[
call(tag="model/grads/fc1/weight", values=ANY, global_step=5),
call(tag="model/grads/fc1/bias", values=ANY, global_step=5),
],
any_order=True,
)
assert mock_logger.writer.add_histogram.call_count == 2
mock_logger.writer.reset_mock()
def weight_selector(n, _):
return "bias" in n
wrapper = GradsHistHandler(model, tag="model", whitelist=weight_selector)
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_histogram.assert_has_calls(
[
call(tag="model/grads/fc1/bias", values=ANY, global_step=5),
call(tag="model/grads/fc2/bias", values=ANY, global_step=5),
],
any_order=True,
)
assert mock_logger.writer.add_histogram.call_count == 2
def test_integration(dirname):
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
trainer = Engine(update_fn)
tb_logger = TensorboardLogger(log_dir=dirname)
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
logger.writer.add_scalar("test_value", global_step, global_step)
tb_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
tb_logger.close()
# Check if event files are present
written_files = os.listdir(dirname)
written_files = [f for f in written_files if "tfevents" in f]
assert len(written_files) > 0
def test_integration_as_context_manager(dirname):
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
with TensorboardLogger(log_dir=dirname) as tb_logger:
trainer = Engine(update_fn)
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
logger.writer.add_scalar("test_value", global_step, global_step)
tb_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
# Check if event files are present
written_files = os.listdir(dirname)
written_files = [f for f in written_files if "tfevents" in f]
assert len(written_files) > 0
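# Illustrative sketch, not executed: the context-manager form is commonly combined with the
# handlers tested above instead of a custom log_handler; the writer is closed on exit.
# The "training" tag, the loss lambda, `data` and `max_epochs` are placeholder examples.
def _example_context_manager_usage(trainer, data, log_dir):
    with TensorboardLogger(log_dir=log_dir) as tb_logger:
        tb_logger.attach(
            trainer,
            log_handler=OutputHandler(tag="training", output_transform=lambda loss: {"loss": loss}),
            event_name=Events.ITERATION_COMPLETED,
        )
        trainer.run(data, max_epochs=1)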
def test_no_tensorboardX_package(dirname):
from torch.utils.tensorboard import SummaryWriter
with patch.dict("sys.modules", {"tensorboardX": None}):
tb_logger = TensorboardLogger(log_dir=dirname)
assert isinstance(tb_logger.writer, SummaryWriter), type(tb_logger.writer)
tb_logger.close()
def test_no_torch_utils_tensorboard_package(dirname):
from tensorboardX import SummaryWriter
with patch.dict("sys.modules", {"torch.utils.tensorboard": None}):
tb_logger = TensorboardLogger(log_dir=dirname)
assert isinstance(tb_logger.writer, SummaryWriter), type(tb_logger.writer)
tb_logger.close()
def test_no_tensorboardX_nor_torch_utils_tensorboard():
with patch.dict("sys.modules", {"tensorboardX": None, "torch.utils.tensorboard": None}):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires either tensorboardX or torch"):
TensorboardLogger(log_dir=None)
|
import os
import random
import sys
from collections.abc import Mapping
from unittest.mock import patch
import numpy as np
import pytest
import torch
import torch.nn as nn
from torch.optim import SGD
from torch.utils.data import BatchSampler, DataLoader, RandomSampler
import ignite.distributed as idist
from ignite.engine import Events
from ignite.engine.deterministic import (
_set_rng_states,
DeterministicEngine,
keep_random_state,
ReproducibleBatchSampler,
update_dataloader,
)
from ignite.utils import manual_seed
from tests.ignite.engine import BatchChecker, setup_sampler
def test_dengine_setup_seed_div_by_zero():
with pytest.raises(ValueError, match=r"iter_counter should be positive value"):
DeterministicEngine(lambda e, b: None)._setup_seed(iter_counter=0)
def test_update_dataloader():
def _test(sampler_type=None):
num_epochs = 3
total_batch_size = 4
num_iters = 17
data = torch.randint(0, 1000, size=(num_iters * total_batch_size,))
num_workers = 2
sampler, batch_size = setup_sampler(sampler_type, num_iters, total_batch_size)
dataloader = DataLoader(
data,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=False,
sampler=sampler,
drop_last=True,
shuffle=sampler is None,
)
torch.manual_seed(12)
seen_batches = []
for i in range(num_epochs):
t = []
if sampler_type == "distributed":
sampler.set_epoch(i)
for b in dataloader:
t.append(b)
seen_batches.append(t)
sampler, batch_size = setup_sampler(sampler_type, num_iters, total_batch_size)
dataloader = DataLoader(
data,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=False,
sampler=sampler,
drop_last=True,
shuffle=sampler is None,
)
batch_sampler = dataloader.batch_sampler
new_dataloader = update_dataloader(dataloader, ReproducibleBatchSampler(batch_sampler))
torch.manual_seed(12)
new_batches = []
for i in range(num_epochs):
t = []
if sampler_type == "distributed":
sampler.set_epoch(i)
for b in new_dataloader:
t.append(b)
new_batches.append(t)
for i in range(num_epochs):
assert all([(b1 == b2).all() for b1, b2 in zip(seen_batches[i], new_batches[i])])
_test()
_test("weighted")
_test("distributed")
def test_reproducible_batch_sampler_wrong_input():
with pytest.raises(TypeError, match=r"Argument batch_sampler should be torch.utils.data.sampler.BatchSampler"):
ReproducibleBatchSampler("abc")
def test_reproducible_batch_sampler():
data = list(range(100))
dataloader = DataLoader(data, batch_size=12, num_workers=0, shuffle=True, drop_last=True)
torch.manual_seed(12 + 0)
dataloader_ = update_dataloader(dataloader, ReproducibleBatchSampler(dataloader.batch_sampler))
seen_batches = []
num_epochs = 3
for i in range(num_epochs):
t = []
for b in dataloader_:
t.append(b)
seen_batches.append(t)
torch.manual_seed(12 + i + 1)
for i in range(num_epochs - 1):
for j in range(i + 1, num_epochs):
assert not all([(b1 == b2).all() for b1, b2 in zip(seen_batches[i], seen_batches[j])])
for resume_epoch in range(num_epochs):
torch.manual_seed(12 + resume_epoch)
dataloader_ = update_dataloader(dataloader, ReproducibleBatchSampler(dataloader.batch_sampler))
resumed_seen_batches = []
for b in dataloader_:
resumed_seen_batches.append(b)
assert all([(b1 == b2).all() for b1, b2 in zip(seen_batches[resume_epoch], resumed_seen_batches)])
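# Illustrative sketch, not executed: to make an existing DataLoader replay the same batch order
# after a resume, its batch sampler is wrapped exactly as in the test above.
def _example_make_dataloader_reproducible(dataloader):
    return update_dataloader(dataloader, ReproducibleBatchSampler(dataloader.batch_sampler))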
def _test_keep_random_state(with_numpy):
manual_seed(54)
true_values = []
for _ in range(5):
t = [
torch.tensor([random.random()]),
torch.rand(2),
]
if with_numpy:
t.append(torch.from_numpy(np.random.rand(2)))
true_values.append(t)
@keep_random_state
def user_handler():
manual_seed(22)
_ = [
random.random(),
torch.rand(2),
]
if with_numpy:
_ = np.random.rand(2)
manual_seed(54)
res_values = []
for _ in range(5):
r = [
torch.tensor([random.random()]),
torch.rand(2),
]
if with_numpy:
r.append(torch.from_numpy(np.random.rand(2)))
res_values.append(r)
user_handler()
for a, b in zip(true_values, res_values):
for i, j in zip(a, b):
assert (i == j).all()
def test_keep_random_state():
_test_keep_random_state(with_numpy=True)
def test_keep_random_state_without_numpy():
with patch.dict("sys.modules", {"numpy": None}):
_test_keep_random_state(with_numpy=False)
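# Illustrative sketch, not executed: keep_random_state is typically used to decorate an event
# handler whose own RNG usage (e.g. sampling inputs to log) must not perturb the training data
# stream, which is exactly what the check above verifies. The handler body is an example.
def _example_rng_safe_handler(trainer):
    @trainer.on(Events.EPOCH_COMPLETED)
    @keep_random_state
    def _sample_for_logging(engine):
        _ = torch.rand(4)  # RNG state consumed here is restored once the handler returns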
def test_strict_resume_from_iter():
def _test(epoch_length=None):
max_epochs = 5
num_iters = 21
torch.manual_seed(0)
data = torch.randint(0, 1000, size=(num_iters,))
if epoch_length is None:
epoch_length = num_iters
for resume_iteration in range(2, min(num_iters * max_epochs, epoch_length * max_epochs), 4):
batch_checker = BatchChecker(data, init_counter=resume_iteration)
def update_fn(_, batch):
assert batch_checker.check(
batch
), f"{resume_iteration} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
engine = DeterministicEngine(update_fn)
@engine.on(Events.EPOCH_COMPLETED)
def check_iteration(_):
assert engine.state.iteration == batch_checker.counter
resume_state_dict = dict(
iteration=resume_iteration, max_epochs=max_epochs, epoch_length=epoch_length, rng_states=None
)
engine.load_state_dict(resume_state_dict)
engine.run(data)
assert engine.state.epoch == max_epochs
assert engine.state.iteration == epoch_length * max_epochs
_test()
_test(60)
_test(15)
def test_strict_resume_from_epoch():
def _test(epoch_length=None):
max_epochs = 10
num_iters = 21
torch.manual_seed(0)
data = torch.randint(0, 1000, size=(num_iters,))
if epoch_length is None:
epoch_length = num_iters
for resume_epoch in range(1, max_epochs):
batch_checker = BatchChecker(data, init_counter=resume_epoch * epoch_length)
def update_fn(_, batch):
assert batch_checker.check(
batch
), f"{resume_epoch} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
engine = DeterministicEngine(update_fn)
resume_state_dict = dict(
epoch=resume_epoch, max_epochs=max_epochs, epoch_length=epoch_length, rng_states=None
)
engine.load_state_dict(resume_state_dict)
engine.run(data)
assert engine.state.epoch == max_epochs
assert engine.state.iteration == epoch_length * max_epochs
_test()
_test(60)
_test(15)
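# Illustrative sketch, not executed: resuming a DeterministicEngine amounts to loading a state
# dict (epoch or iteration, max_epochs, epoch_length, optional rng_states) and calling run()
# again with the same data pipeline, as the tests above do. The numbers are example values.
def _example_resume_from_epoch(engine, data, epoch_length):
    engine.load_state_dict(dict(epoch=2, max_epochs=5, epoch_length=epoch_length, rng_states=None))
    engine.run(data)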
def _test_resume_random_dataloader_from_epoch(device, _setup_sampler, sampler_type=None):
def _test(epoch_length=None):
max_epochs = 5
total_batch_size = 4
num_iters = 21
torch.manual_seed(0)
data = torch.randint(0, 1000, size=(num_iters * total_batch_size,))
if epoch_length is None:
epoch_length = num_iters
for resume_epoch in range(1, max_epochs, 2):
for num_workers in [0, 2]:
sampler, batch_size = _setup_sampler(sampler_type, num_iters, total_batch_size)
orig_dataloader = DataLoader(
data,
batch_size=batch_size,
num_workers=num_workers,
pin_memory="cuda" in torch.device(device).type,
sampler=sampler,
drop_last=True,
shuffle=sampler is None,
)
seen_batches = []
def update_fn(_, batch):
batch_to_device = batch.to(device)
seen_batches.append(batch)
engine = DeterministicEngine(update_fn)
if sampler_type == "distributed":
@engine.on(Events.EPOCH_STARTED)
def _(engine):
sampler.set_epoch(engine.state.epoch - 1)
torch.manual_seed(87)
engine.run(orig_dataloader, max_epochs=max_epochs, epoch_length=epoch_length)
batch_checker = BatchChecker(seen_batches, init_counter=resume_epoch * epoch_length)
sampler, batch_size = _setup_sampler(sampler_type, num_iters, total_batch_size)
resume_dataloader = DataLoader(
data,
batch_size=batch_size,
num_workers=num_workers,
pin_memory="cuda" in torch.device(device).type,
sampler=sampler,
drop_last=True,
shuffle=sampler is None,
)
def update_fn(_, batch):
batch_to_device = batch.to(device)
assert batch_checker.check(
batch
), f"{num_workers} {resume_epoch} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
engine = DeterministicEngine(update_fn)
if sampler_type == "distributed":
@engine.on(Events.EPOCH_STARTED)
def _(engine):
sampler.set_epoch(engine.state.epoch - 1)
resume_state_dict = dict(
epoch=resume_epoch, max_epochs=max_epochs, epoch_length=epoch_length, rng_states=None
)
engine.load_state_dict(resume_state_dict)
torch.manual_seed(87)
engine.run(resume_dataloader)
assert engine.state.epoch == max_epochs
assert engine.state.iteration == epoch_length * max_epochs
_test()
if sampler_type != "distributed":
_test(60)
_test(15)
@pytest.mark.skipif("win" in sys.platform, reason="Skip extremely slow test on Windows/MacOSX")
def test_resume_random_dataloader_from_epoch():
_test_resume_random_dataloader_from_epoch("cpu", setup_sampler)
_test_resume_random_dataloader_from_epoch("cpu", setup_sampler, sampler_type="weighted")
_test_resume_random_dataloader_from_epoch("cpu", setup_sampler, sampler_type="distributed")
class AugmentedData:
def __init__(self, data, enabled=True):
self.data = data
self.enabled = enabled
def __getitem__(self, i):
dp = self.data[i]
r = torch.randint_like(dp, -100, 100) if self.enabled else 0.0
return dp + r * 0.01
def __len__(self):
return len(self.data)
def _test_resume_random_dataloader_from_iter(device, _setup_sampler, sampler_type=None):
def _test(epoch_length=None):
max_epochs = 3
total_batch_size = 4
num_iters = 17
torch.manual_seed(0)
data = torch.randint(0, 1000, size=(num_iters * total_batch_size,))
if epoch_length is None:
epoch_length = num_iters
for resume_iteration in range(2, min(num_iters * max_epochs, epoch_length * max_epochs), 13):
for num_workers in [0, 2]:
sampler, batch_size = _setup_sampler(sampler_type, num_iters, total_batch_size)
orig_dataloader = DataLoader(
data,
batch_size=batch_size,
num_workers=num_workers,
pin_memory="cuda" in torch.device(device).type,
sampler=sampler,
drop_last=True,
shuffle=sampler is None,
)
seen_batches = []
def update_fn(_, batch):
batch_to_device = batch.to(device)
seen_batches.append(batch)
engine = DeterministicEngine(update_fn)
if sampler_type == "distributed":
@engine.on(Events.EPOCH_STARTED)
def _(engine):
sampler.set_epoch(engine.state.epoch)
torch.manual_seed(12)
engine.run(orig_dataloader, max_epochs=max_epochs, epoch_length=epoch_length)
batch_checker = BatchChecker(seen_batches, init_counter=resume_iteration)
sampler, batch_size = _setup_sampler(sampler_type, num_iters, total_batch_size)
resume_dataloader = DataLoader(
data,
batch_size=batch_size,
num_workers=num_workers,
pin_memory="cuda" in torch.device(device).type,
sampler=sampler,
drop_last=True,
shuffle=sampler is None,
)
def update_fn(_, batch):
batch_to_device = batch.to(device)
cfg_msg = f"{num_workers} {resume_iteration}"
assert batch_checker.check(
batch
), f"{cfg_msg} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
engine = DeterministicEngine(update_fn)
if sampler_type == "distributed":
@engine.on(Events.EPOCH_STARTED)
def _(engine):
sampler.set_epoch(engine.state.epoch)
resume_state_dict = dict(
iteration=resume_iteration, max_epochs=max_epochs, epoch_length=epoch_length, rng_states=None
)
engine.load_state_dict(resume_state_dict)
torch.manual_seed(12)
engine.run(resume_dataloader)
assert engine.state.epoch == max_epochs
assert (
engine.state.iteration == epoch_length * max_epochs
), f"{num_workers}, {resume_iteration} | {engine.state.iteration} vs {epoch_length * max_epochs}"
_test()
if sampler_type != "distributed":
_test(40)
_test(11)
@pytest.mark.skipif("win" in sys.platform, reason="Skip extremely slow test on Windows/MacOSX")
def test_resume_random_dataloader_from_iter():
_test_resume_random_dataloader_from_iter("cpu", setup_sampler)
_test_resume_random_dataloader_from_iter("cpu", setup_sampler, sampler_type="weighted")
_test_resume_random_dataloader_from_iter("cpu", setup_sampler, sampler_type="distributed")
def _test_resume_random_data_iterator_from_epoch(device):
def _test(epoch_length=None):
max_epochs = 5
batch_size = 4
num_iters = 21
def infinite_data_iterator():
while True:
for _ in range(num_iters):
data = torch.randint(0, 1000, size=(batch_size,), device=device)
yield data
if epoch_length is None:
epoch_length = num_iters
for resume_epoch in range(1, max_epochs):
seen_batches = []
def update_fn(_, batch):
# if there is a random op when using the data batch etc., we cannot resume correctly
# torch.rand(1)
seen_batches.append(batch)
engine = DeterministicEngine(update_fn)
torch.manual_seed(121)
engine.run(infinite_data_iterator(), max_epochs=max_epochs, epoch_length=epoch_length)
batch_checker = BatchChecker(seen_batches, init_counter=resume_epoch * epoch_length)
def update_fn(_, batch):
assert batch_checker.check(
batch
), f"{resume_epoch} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
engine = DeterministicEngine(update_fn)
resume_state_dict = dict(
epoch=resume_epoch, max_epochs=max_epochs, epoch_length=epoch_length, rng_states=None
)
engine.load_state_dict(resume_state_dict)
torch.manual_seed(121)
engine.run(infinite_data_iterator())
assert engine.state.epoch == max_epochs
assert engine.state.iteration == epoch_length * max_epochs
_test()
_test(60)
_test(15)
def test_resume_random_data_iterator_from_epoch():
_test_resume_random_data_iterator_from_epoch("cpu")
def _test_resume_random_data_iterator_from_iter(device):
def _test(epoch_length=None):
max_epochs = 3
batch_size = 4
num_iters = 17
def infinite_data_iterator():
while True:
for _ in range(num_iters):
data = torch.randint(0, 1000, size=(batch_size,), device=device)
yield data
if epoch_length is None:
epoch_length = num_iters
for resume_iteration in range(1, min(num_iters * max_epochs, epoch_length * max_epochs), 7):
seen_batches = []
def update_fn(_, batch):
seen_batches.append(batch)
engine = DeterministicEngine(update_fn)
torch.manual_seed(24)
engine.run(infinite_data_iterator(), max_epochs=max_epochs, epoch_length=epoch_length)
batch_checker = BatchChecker(seen_batches, init_counter=resume_iteration)
def update_fn(_, batch):
assert batch_checker.check(
batch
), f"{resume_iteration} | {batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
engine = DeterministicEngine(update_fn)
resume_state_dict = dict(
iteration=resume_iteration, max_epochs=max_epochs, epoch_length=epoch_length, rng_states=None
)
engine.load_state_dict(resume_state_dict)
torch.manual_seed(24)
engine.run(infinite_data_iterator())
assert engine.state.epoch == max_epochs
assert (
engine.state.iteration == epoch_length * max_epochs
), f"{resume_iteration} | {engine.state.iteration} vs {epoch_length * max_epochs}"
_test()
_test(50)
_test(11)
def test_resume_random_data_iterator_from_iter():
_test_resume_random_data_iterator_from_iter("cpu")
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_resume_random_dataloader_from_iter(device, setup_sampler, sampler_type="distributed")
_test_resume_random_dataloader_from_epoch(device, setup_sampler, sampler_type="distributed")
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_resume_random_dataloader_from_iter(device, setup_sampler, sampler_type="distributed")
_test_resume_random_dataloader_from_epoch(device, setup_sampler, sampler_type="distributed")
@pytest.mark.xfail
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_resume_random_dataloader_from_iter(device, setup_sampler, sampler_type="distributed")
_test_resume_random_dataloader_from_epoch(device, setup_sampler, sampler_type="distributed")
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_resume_random_dataloader_from_iter(device, setup_sampler, sampler_type="distributed")
_test_resume_random_dataloader_from_epoch(device, setup_sampler, sampler_type="distributed")
def test_concepts_snippet_resume():
# Commented-out imports required by the snippet
# import torch
# from torch.utils.data import DataLoader
# from ignite.engine import DeterministicEngine
# from ignite.utils import manual_seed
seen_batches = []
manual_seed(seed=15)
def random_train_data_loader(size):
data = torch.arange(0, size)
return DataLoader(data, batch_size=4, shuffle=True)
def print_train_data(engine, batch):
i = engine.state.iteration
e = engine.state.epoch
print("train", e, i, batch.tolist())
seen_batches.append(batch)
trainer = DeterministicEngine(print_train_data)
print("Original Run")
manual_seed(56)
trainer.run(random_train_data_loader(40), max_epochs=2, epoch_length=5)
original_batches = list(seen_batches)
seen_batches = []
print("Resumed Run")
trainer.load_state_dict({"epoch": 1, "epoch_length": 5, "max_epochs": 2, "rng_states": None})
manual_seed(56)
trainer.run(random_train_data_loader(40))
resumed_batches = list(seen_batches)
seen_batches = []
for b1, b2 in zip(original_batches[5:], resumed_batches):
assert (b1 == b2).all()
def test_concepts_snippet_warning():
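# Snippet from the "concepts" documentation illustrating the warning about user handlers
# that touch the global random state while a DeterministicEngine is running.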
def random_train_data_generator():
while True:
yield torch.randint(0, 100, size=(1,))
def print_train_data(engine, batch):
i = engine.state.iteration
e = engine.state.epoch
print("train", e, i, batch.tolist())
trainer = DeterministicEngine(print_train_data)
@trainer.on(Events.ITERATION_COMPLETED(every=3))
def user_handler(_):
# handler synchronizes the random state
torch.manual_seed(12)
a = torch.rand(1)
trainer.run(random_train_data_generator(), max_epochs=3, epoch_length=5)
def _test_gradients_on_resume(
dirname, device, with_dropout=True, with_dataaugs=True, data_size=24, batch_size=4, save_iter=None, save_epoch=None
):
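# Train a small conv net twice: the first run checkpoints model, optimizer and trainer state
# at save_iter / save_epoch; the second run restores that checkpoint, and the data statistics,
# gradient norms and weight norms recorded just after the checkpoint are compared between runs.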
debug = False
def random_train_data_loader(size):
d = AugmentedData(torch.rand(size, 3, 32, 32), enabled=with_dataaugs)
return DataLoader(d, batch_size=batch_size, shuffle=True, num_workers=2)
def _train(save_iter=None, save_epoch=None, sd=None):
w_norms = []
grad_norms = []
data = []
chkpt = []
manual_seed(12)
arch = [
nn.Conv2d(3, 10, 3),
nn.ReLU(),
nn.Conv2d(10, 10, 3),
nn.ReLU(),
nn.AdaptiveAvgPool2d(1),
nn.Flatten(),
nn.Linear(10, 5),
nn.ReLU(),
nn.Linear(5, 2),
]
if with_dropout:
arch.insert(2, nn.Dropout2d())
arch.insert(-2, nn.Dropout())
model = nn.Sequential(*arch).to(device)
opt = SGD(model.parameters(), lr=0.001)
def proc_fn(e, b):
from ignite.engine.deterministic import _get_rng_states, _repr_rng_state
s = _repr_rng_state(_get_rng_states())
model.train()
opt.zero_grad()
y = model(b.to(device))
y.sum().backward()
opt.step()
if debug:
print(
trainer.state.iteration, trainer.state.epoch, "proc_fn - b.shape", b.shape, torch.norm(y).item(), s
)
trainer = DeterministicEngine(proc_fn)
if save_iter is not None:
ev = Events.ITERATION_COMPLETED(once=save_iter)
elif save_epoch is not None:
ev = Events.EPOCH_COMPLETED(once=save_epoch)
save_iter = save_epoch * (data_size // batch_size)
@trainer.on(ev)
def save_chkpt(_):
if debug:
print(trainer.state.iteration, "save_chkpt")
fp = dirname / "test.pt"
from ignite.engine.deterministic import _repr_rng_state
tsd = trainer.state_dict()
if debug:
print("->", _repr_rng_state(tsd["rng_states"]))
torch.save([model.state_dict(), opt.state_dict(), tsd], fp)
chkpt.append(fp)
def log_event_filter(_, event):
if (event // save_iter == 1) and 1 <= (event % save_iter) <= 5:
return True
return False
@trainer.on(Events.ITERATION_COMPLETED(event_filter=log_event_filter))
def write_data_grads_weights(e):
x = e.state.batch
i = e.state.iteration
data.append([i, x.mean().item(), x.std().item()])
total = [0.0, 0.0]
out1 = []
out2 = []
for p in model.parameters():
n1 = torch.norm(p).item()
n2 = torch.norm(p.grad).item()
out1.append(n1)
out2.append(n2)
total[0] += n1
total[1] += n2
w_norms.append([i, total[0]] + out1)
grad_norms.append([i, total[1]] + out2)
if sd is not None:
sd = torch.load(sd)
model.load_state_dict(sd[0])
opt.load_state_dict(sd[1])
from ignite.engine.deterministic import _repr_rng_state
if debug:
print("-->", _repr_rng_state(sd[2]["rng_states"]))
trainer.load_state_dict(sd[2])
manual_seed(32)
trainer.run(random_train_data_loader(size=data_size), max_epochs=5)
return {"sd": chkpt, "data": data, "grads": grad_norms, "weights": w_norms}
out_original = _train(save_iter=save_iter, save_epoch=save_epoch)
assert len(out_original["sd"]) > 0
out_resumed = _train(save_iter=save_iter, save_epoch=save_epoch, sd=out_original["sd"][0])
if debug:
print("Original:")
print(" data:", out_original["data"])
print("grads:", out_original["grads"])
print(" W:", out_original["weights"])
print("Resume:")
print(" data:", out_resumed["data"])
print("grads:", out_resumed["grads"])
print(" W:", out_resumed["weights"])
# check data:
for d1, d2 in zip(out_original["data"], out_resumed["data"]):
assert d1 == d2
# check grads:
for d1, d2 in zip(out_original["grads"], out_resumed["grads"]):
assert d1 == d2
# check weights:
for d1, d2 in zip(out_original["weights"], out_resumed["weights"]):
assert d1 == d2
def test_gradients_on_resume_cpu(dirname):
with pytest.raises(AssertionError):
_test_gradients_on_resume(dirname, "cpu", with_dataaugs=True, save_iter=25)
_test_gradients_on_resume(dirname, "cpu", with_dataaugs=False, save_iter=25)
# resume from epoch
_test_gradients_on_resume(dirname, "cpu", with_dataaugs=True, save_epoch=3)
_test_gradients_on_resume(dirname, "cpu", with_dataaugs=False, save_epoch=3)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_gradients_on_resume_on_cuda(dirname):
with pytest.raises(AssertionError):
_test_gradients_on_resume(dirname, "cuda", with_dataaugs=True, save_iter=25)
with pytest.raises(AssertionError):
_test_gradients_on_resume(dirname, "cuda", with_dataaugs=False, save_iter=25)
# resume from epoch
_test_gradients_on_resume(dirname, "cuda", with_dataaugs=True, save_epoch=3)
_test_gradients_on_resume(dirname, "cuda", with_dataaugs=False, save_epoch=3)
def test_engine_with_dataloader_no_auto_batching():
# tests https://github.com/pytorch/ignite/issues/941
data = torch.rand(64, 4, 10)
data_loader = DataLoader(
data, batch_size=None, sampler=BatchSampler(RandomSampler(data), batch_size=8, drop_last=True)
)
counter = [0]
def foo(e, b):
print(f"{e.state.epoch}-{e.state.iteration}: {b}")
counter[0] += 1
engine = DeterministicEngine(foo)
engine.run(data_loader, epoch_length=10, max_epochs=5)
assert counter[0] == 50
def test_run_finite_iterator_no_epoch_length():
# FR: https://github.com/pytorch/ignite/issues/871
unknown_size = 11
def finite_unk_size_data_iter():
for i in range(unknown_size):
yield i
bc = BatchChecker(data=list(range(unknown_size)))
engine = DeterministicEngine(lambda e, b: bc.check(b))
@engine.on(Events.DATALOADER_STOP_ITERATION)
def restart_iter():
engine.state.dataloader = finite_unk_size_data_iter()
data_iter = finite_unk_size_data_iter()
engine.run(data_iter, max_epochs=5)
assert engine.state.epoch == 5
assert engine.state.iteration == unknown_size * 5
class OldDataLoader(DataLoader):
def __init__(self, dl, *args, **kwargs):
self.dl = dl
self.sampler = self.dl.sampler
self.batch_sampler = self.dl.batch_sampler
def __len__(self):
return len(self.dl)
def __iter__(self):
return iter(self.dl)
def test_dataloader_no_dataset_kind():
# tests issue: https://github.com/pytorch/ignite/issues/1022
engine = DeterministicEngine(lambda e, b: None)
data = torch.randint(0, 1000, size=(100 * 4,))
dataloader = DataLoader(data, batch_size=4)
dataloader = OldDataLoader(dataloader)
engine.run(dataloader)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test__set_rng_states_cuda():
# Checks https://github.com/pytorch/ignite/issues/2076
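# The torch RNG state is deliberately moved to CUDA; _set_rng_states is expected to handle it
# and leave the stored state on CPU (checked below).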
rng_states = [random.getstate(), torch.get_rng_state().cuda(), np.random.get_state()]
_set_rng_states(rng_states)
assert rng_states[1].device.type == "cpu"
def test_engine_no_data_asserts():
trainer = DeterministicEngine(lambda e, b: None)
with pytest.raises(ValueError, match=r"Deterministic engine does not support the option of data=None"):
trainer.run(max_epochs=10, epoch_length=10)
def test_state_dict():
engine = DeterministicEngine(lambda e, b: 1)
sd = engine.state_dict()
assert isinstance(sd, Mapping) and len(sd) == 4
assert "iteration" in sd and sd["iteration"] == 0
assert "max_epochs" in sd and sd["max_epochs"] is None
assert "epoch_length" in sd and sd["epoch_length"] is None
assert "rng_states" in sd and sd["rng_states"] is not None
|
import os
import time
from unittest.mock import call, MagicMock, Mock
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.engine import Engine, Events, State
from ignite.engine.deterministic import keep_random_state
from ignite.metrics import Average
from tests.ignite.engine import BatchChecker, EpochCounter, IterationCounter
class RecordedEngine(Engine):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.called_events = []
def _fire_event(self, event_name, *event_args, **event_kwargs):
self.called_events.append((self.state.epoch, self.state.iteration, event_name.name))
return super()._fire_event(event_name, *event_args, **event_kwargs)
def _create_mock_data_loader(epochs, batches_per_epoch):
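# Build a MagicMock data loader yielding `batches_per_epoch` mocked batches for each of
# `epochs` epochs, with a matching __len__.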
batches = [MagicMock()] * batches_per_epoch
data_loader_manager = MagicMock()
batch_iterators = [iter(batches) for _ in range(epochs)]
data_loader_manager.__iter__.side_effect = batch_iterators
data_loader_manager.__len__.return_value = batches_per_epoch
return data_loader_manager
@pytest.mark.parametrize("interrupt_resume_enabled", [False, True])
class TestEngine:
@pytest.fixture(autouse=True)
def set_interrupt_resume_enabled(self, interrupt_resume_enabled):
Engine.interrupt_resume_enabled = interrupt_resume_enabled
def test_terminate(self):
engine = Engine(lambda e, b: 1)
assert not engine.should_terminate
engine.terminate()
assert engine.should_terminate
def test_invalid_process_raises_with_invalid_signature(self):
with pytest.raises(ValueError, match=r"Engine must be given a processing function in order to run"):
Engine(None)
with pytest.raises(ValueError, match=r"Error adding .+ takes parameters .+ but will be called with"):
Engine(lambda: None)
with pytest.raises(ValueError, match=r"Error adding .+ takes parameters .+ but will be called with"):
Engine(lambda batch: None)
with pytest.raises(ValueError, match=r"Error adding .+ takes parameters .+ but will be called with"):
Engine(lambda engine, batch, extra_arg: None)
def test_invalid_input_data(self):
engine = Engine(lambda e, b: None)
def data():
pass
with pytest.raises(TypeError, match=r"Argument data should be iterable"):
engine.run(data)
@pytest.mark.parametrize("data", [None, [1, 2]])
def test_current_epoch_counter_increases_every_epoch(self, data):
engine = Engine(MagicMock(return_value=1))
max_epochs = 5
counter = EpochCounter()
engine.add_event_handler(Events.EPOCH_STARTED, counter)
state = engine.run(data, max_epochs=max_epochs, epoch_length=2)
assert state.epoch == max_epochs
counter.current_epoch_count = 1
state = engine.run(data, max_epochs=max_epochs, epoch_length=2)
assert state.epoch == max_epochs
@pytest.mark.parametrize("data", [None, [1, 2, 3]])
def test_current_iteration_counter_increases_every_iteration(self, data):
engine = Engine(MagicMock(return_value=1))
max_epochs = 5
counter = IterationCounter()
engine.add_event_handler(Events.ITERATION_STARTED, counter)
epoch_length = 3
state = engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
assert state.iteration == max_epochs * epoch_length
counter.current_iteration_count = 1
state = engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
assert state.iteration == max_epochs * epoch_length
def test_stopping_criterion_is_max_epochs(self):
engine = Engine(MagicMock(return_value=1))
max_epochs = 5
state = engine.run([1], max_epochs=max_epochs)
assert state.epoch == max_epochs
@pytest.mark.parametrize("data", [None, [1, 2]])
def test_terminate_at_end_of_epoch_stops_run(self, data):
max_epochs = 5
last_epoch_to_run = 3
engine = Engine(MagicMock(return_value=1))
def end_of_epoch_handler(engine):
if engine.state.epoch == last_epoch_to_run:
engine.terminate()
engine.add_event_handler(Events.EPOCH_COMPLETED, end_of_epoch_handler)
assert not engine.should_terminate
state = engine.run(data, max_epochs=max_epochs, epoch_length=2)
assert state.epoch == last_epoch_to_run
assert engine.should_terminate
assert engine._dataloader_iter is None
@pytest.mark.parametrize("data, epoch_length", [(None, 10), (range(10), None)])
def test_terminate_at_start_of_epoch(self, data, epoch_length):
max_epochs = 5
epoch_to_terminate_on = 3
real_epoch_length = epoch_length if data is None else len(data)
engine = Engine(MagicMock(return_value=1))
def start_of_epoch_handler(engine):
if engine.state.epoch == epoch_to_terminate_on:
engine.terminate()
engine.add_event_handler(Events.EPOCH_STARTED, start_of_epoch_handler)
assert not engine.should_terminate
state = engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
# the epoch is not completed, so the epoch counter is not incremented
assert state.epoch == epoch_to_terminate_on
assert engine.should_terminate
assert engine._dataloader_iter is None
assert state.iteration == ((epoch_to_terminate_on - 1) * real_epoch_length)
# Engine continues from epoch_to_terminate_on until max_epochs
first_epoch_iter = [None, None]
@engine.on(Events.STARTED)
def check_iter_epoch():
assert engine.state.epoch == first_epoch_iter[0]
assert engine.state.iteration == first_epoch_iter[1]
if data is not None:
expected_data_iter = iter(data)
expected_iter = state.iteration
@engine.on(Events.ITERATION_STARTED)
def check_iter_and_data():
nonlocal expected_data_iter, expected_iter
expected_iter += 1
assert engine.state.iteration == expected_iter
try:
assert engine.state.batch == next(expected_data_iter)
except StopIteration:
expected_data_iter = iter(data)
assert engine.state.batch == next(expected_data_iter)
first_epoch_iter[0], first_epoch_iter[1] = state.epoch, state.iteration
state = engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
assert state.epoch == max_epochs
assert not engine.should_terminate
assert engine._dataloader_iter is None
# As the terminated epoch is skipped, its iterations are not counted
assert state.iteration == real_epoch_length * (max_epochs - 1)
@pytest.mark.parametrize("data, epoch_length", [(None, 10), (range(10), None)])
def test_terminate_stops_run_mid_epoch(self, data, epoch_length):
max_epochs = 5
iteration_to_stop = 13
real_epoch_length = epoch_length if data is None else len(data)
engine = Engine(MagicMock(return_value=1))
def start_of_iteration_handler(engine):
if engine.state.iteration == iteration_to_stop:
engine.terminate()
@engine.on(Events.EXCEPTION_RAISED)
def assert_no_exceptions(ee):
assert False, f"Engine should terminate without raising an exception, got '{type(ee)}'"
engine.add_event_handler(Events.ITERATION_STARTED, start_of_iteration_handler)
state = engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
# completes the iteration but doesn't increment the counter (that happens just before a new iteration starts)
assert state.iteration == iteration_to_stop
assert state.epoch == np.ceil(iteration_to_stop / real_epoch_length) # it starts from 0
assert engine._dataloader_iter is None
# Engine continues from the interrupted epoch until max_epochs
first_epoch_iter = [None, None]
num_calls_check_iter_epoch = 0
@engine.on(Events.STARTED, first_epoch_iter)
def check_iter_epoch(first_epoch_iter):
nonlocal num_calls_check_iter_epoch
assert engine.state.epoch == first_epoch_iter[0]
assert engine.state.iteration == first_epoch_iter[1]
num_calls_check_iter_epoch += 1
if data is not None:
expected_iter = state.iteration
@engine.on(Events.ITERATION_STARTED)
def check_iter_and_data():
nonlocal expected_iter
expected_iter += 1
assert engine.state.iteration == expected_iter
assert engine.state.batch == data[(expected_iter - first_epoch_iter[1] - 1) % len(data)]
first_epoch_iter[0], first_epoch_iter[1] = state.epoch, state.iteration
state = engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
assert state.epoch == max_epochs
assert not engine.should_terminate
assert state.iteration == real_epoch_length * (max_epochs - 1) + (iteration_to_stop % real_epoch_length)
assert num_calls_check_iter_epoch == 1
@pytest.mark.parametrize(
"terminate_event, e, i",
[
(Events.STARTED, 0, 0),
(Events.EPOCH_STARTED(once=2), 2, None),
(Events.EPOCH_COMPLETED(once=2), 2, None),
(Events.GET_BATCH_STARTED(once=12), None, 12),
(Events.GET_BATCH_COMPLETED(once=12), None, 12),
(Events.ITERATION_STARTED(once=14), None, 14),
(Events.ITERATION_COMPLETED(once=14), None, 14),
],
)
def test_terminate_events_sequence(self, terminate_event, e, i):
engine = RecordedEngine(MagicMock(return_value=1))
data = range(10)
max_epochs = 5
@engine.on(terminate_event)
def call_terminate():
engine.terminate()
@engine.on(Events.EXCEPTION_RAISED)
def assert_no_exceptions(ee):
assert False, f"Engine should terminate without raising an exception, got '{type(ee)}'"
engine.run(data, max_epochs=max_epochs)
if i is None:
if terminate_event == Events.EPOCH_STARTED:
i = len(data) * (e - 1)
else:
i = len(data) * e
if e is None:
e = i // len(data) + 1
assert engine.called_events[0] == (0, 0, Events.STARTED)
assert engine.called_events[-1] == (e, i, Events.COMPLETED)
assert engine.called_events[-2] == (e, i, Events.TERMINATE)
assert engine.called_events[-3] == (e, i, terminate_event)
assert engine._dataloader_iter is None
@pytest.mark.parametrize("data, epoch_length", [(None, 10), (range(10), None)])
def test_terminate_epoch_stops_mid_epoch(self, data, epoch_length):
real_epoch_length = epoch_length if data is None else len(data)
iteration_to_stop = real_epoch_length + 4
engine = Engine(MagicMock(return_value=1))
def start_of_iteration_handler(engine):
if engine.state.iteration == iteration_to_stop:
engine.terminate_epoch()
max_epochs = 3
engine.add_event_handler(Events.ITERATION_STARTED, start_of_iteration_handler)
state = engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
# completes the iteration but doesn't increment the counter (that happens just before a new iteration starts)
true_value = real_epoch_length * (max_epochs - 1) + iteration_to_stop % real_epoch_length
assert state.iteration == true_value
assert state.epoch == max_epochs
@pytest.mark.parametrize(
"terminate_epoch_event, i",
[
(Events.GET_BATCH_STARTED(once=12), 12),
(Events.GET_BATCH_COMPLETED(once=12), 12),
(Events.ITERATION_STARTED(once=14), 14),
(Events.ITERATION_COMPLETED(once=14), 14),
],
)
def test_terminate_epoch_events_sequence(self, terminate_epoch_event, i):
engine = RecordedEngine(MagicMock(return_value=1))
data = range(10)
max_epochs = 3
# TODO: Bug: Events.GET_BATCH_STARTED(once=12) is called twice !
# prevent call_terminate_epoch from being called twice
call_count = 0
@engine.on(terminate_epoch_event)
def call_terminate_epoch():
nonlocal call_count
if call_count < 1:
engine.terminate_epoch()
call_count += 1
@engine.on(Events.TERMINATE_SINGLE_EPOCH)
def check_previous_events(iter_counter):
e = i // len(data) + 1
assert engine.called_events[0] == (0, 0, Events.STARTED)
assert engine.called_events[-2] == (e, i, terminate_epoch_event)
assert engine.called_events[-1] == (e, i, Events.TERMINATE_SINGLE_EPOCH)
@engine.on(Events.EPOCH_COMPLETED)
def check_previous_events2():
e = i // len(data) + 1
if e == engine.state.epoch and i == engine.state.iteration:
assert engine.called_events[-3] == (e, i, terminate_epoch_event)
assert engine.called_events[-2] == (e, i, Events.TERMINATE_SINGLE_EPOCH)
assert engine.called_events[-1] == (e, i, Events.EPOCH_COMPLETED)
engine.run(data, max_epochs=max_epochs)
assert engine.state.epoch == max_epochs
assert (max_epochs - 1) * len(data) < engine.state.iteration < max_epochs * len(data)
@pytest.mark.parametrize("data", [None, "mock_data_loader"])
def test_iteration_events_are_fired(self, data):
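# ITERATION_STARTED and ITERATION_COMPLETED handlers must be called once per iteration and
# in strictly alternating order (checked via the attached mock manager).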
max_epochs = 5
num_batches = epoch_length = 3
if isinstance(data, str) and data == "mock_data_loader":
data = _create_mock_data_loader(max_epochs, num_batches)
epoch_length = None
engine = Engine(MagicMock(return_value=1))
mock_manager = Mock()
iteration_started = Mock()
engine.add_event_handler(Events.ITERATION_STARTED, iteration_started)
iteration_complete = Mock()
engine.add_event_handler(Events.ITERATION_COMPLETED, iteration_complete)
mock_manager.attach_mock(iteration_started, "iteration_started")
mock_manager.attach_mock(iteration_complete, "iteration_complete")
engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
assert iteration_started.call_count == num_batches * max_epochs
assert iteration_complete.call_count == num_batches * max_epochs
expected_calls = []
for _ in range(max_epochs * num_batches):
expected_calls.append(call.iteration_started(engine))
expected_calls.append(call.iteration_complete(engine))
assert mock_manager.mock_calls == expected_calls
@pytest.mark.parametrize("data", [None, [1, 2]])
def test_last_event_name(self, data):
engine = Engine(MagicMock(return_value=1))
assert engine.last_event_name is None
@engine.on(Events.STARTED)
def _(_engine):
assert _engine.last_event_name == Events.STARTED
@engine.on(Events.EPOCH_STARTED)
def _(_engine):
assert _engine.last_event_name == Events.EPOCH_STARTED
@engine.on(Events.ITERATION_STARTED)
def _(_engine):
assert _engine.last_event_name == Events.ITERATION_STARTED
@engine.on(Events.ITERATION_COMPLETED)
def _(_engine):
assert _engine.last_event_name == Events.ITERATION_COMPLETED
@engine.on(Events.EPOCH_COMPLETED)
def _(_engine):
assert _engine.last_event_name == Events.EPOCH_COMPLETED
epoch_length = 2 if data is None else None
engine.run(data, epoch_length=epoch_length)
assert engine.last_event_name == Events.COMPLETED
def test_reset_should_terminate(self):
def update_fn(engine, batch):
pass
engine = Engine(update_fn)
@engine.on(Events.ITERATION_COMPLETED)
def terminate_on_iteration_10(engine):
if engine.state.iteration == 10:
engine.terminate()
engine.run([0] * 20)
assert engine.state.iteration == 10
engine.run([0] * 20)
assert engine.state.iteration == 10
def test_batch_values(self):
def _test(data):
# This test checks the content passed to the update function
counter = [0]
num_iters = len(data)
def update_fn(_, batch):
assert batch == data[counter[0] % num_iters]
counter[0] += 1
engine = Engine(update_fn)
engine.run(data, max_epochs=10)
data = torch.randint(0, 1000, size=(256,))
_test(data)
def test_state_repr(self):
data = [0, 1, 2, 3, 4, 5]
max_epochs = 1
metrics = {"accuracy": Mock()}
state = State(dataloader=data, max_epochs=max_epochs, metrics=metrics)
s = repr(state)
assert "iteration" in s
assert "epoch" in s
assert "max_epochs: 1" in s
assert "dataloader" in s
assert "metrics" in s
assert "output" in s
assert "batch" in s
def test_alter_batch(self):
small_shape = (1, 2, 2)
large_shape = (1, 3, 3)
small_loader = torch.randint(0, 256, size=(30,) + small_shape)
large_loader = torch.randint(0, 256, size=(20,) + large_shape)
switch_iteration = 50
def should_take_large_img(i):
return i >= switch_iteration
def update_fn(engine, batch):
i = engine.state.iteration
if i < switch_iteration:
assert batch.shape == small_shape
assert (small_loader[(i - 1) % len(small_loader), ...] == batch).all()
else:
assert batch.shape == large_shape
assert (large_loader[(i - switch_iteration) % len(large_loader), ...] == batch).all()
trainer = Engine(update_fn)
def cycle(seq):
while True:
for i in seq:
yield i
small_loader_iter = cycle(small_loader)
large_loader_iter = cycle(large_loader)
@trainer.on(Events.ITERATION_STARTED)
def choose_batch(engine):
i = engine.state.iteration
if should_take_large_img(i):
batch = next(large_loader_iter)
else:
batch = next(small_loader_iter)
engine.state.batch = batch
num_epochs = 5
num_iters = 25
data = range(num_iters)
trainer.run(data, num_epochs)
def test__is_done(self):
state = State(iteration=10, epoch=1, max_epochs=100, epoch_length=100)
assert not Engine._is_done(state)
state = State(iteration=1000, max_epochs=10, epoch_length=100)
assert Engine._is_done(state)
def test__setup_engine(self):
engine = Engine(lambda e, b: 1)
engine.state = State(iteration=10, epoch=1, max_epochs=100, epoch_length=100)
data = list(range(100))
engine.state.dataloader = data
engine._setup_engine()
assert engine._init_iter == 10
def test_run_asserts(self):
engine = Engine(lambda e, b: 1)
with pytest.raises(ValueError, match=r"Input data has zero size. Please provide non-empty data"):
engine.run([])
def test_state_get_event_attrib_value(self):
state = State()
state.iteration = 10
state.epoch = 9
e = Events.ITERATION_STARTED
assert state.get_event_attrib_value(e) == state.iteration
e = Events.ITERATION_COMPLETED
assert state.get_event_attrib_value(e) == state.iteration
e = Events.EPOCH_STARTED
assert state.get_event_attrib_value(e) == state.epoch
e = Events.EPOCH_COMPLETED
assert state.get_event_attrib_value(e) == state.epoch
e = Events.STARTED
assert state.get_event_attrib_value(e) == state.epoch
e = Events.COMPLETED
assert state.get_event_attrib_value(e) == state.epoch
e = Events.ITERATION_STARTED(every=10)
assert state.get_event_attrib_value(e) == state.iteration
e = Events.ITERATION_COMPLETED(every=10)
assert state.get_event_attrib_value(e) == state.iteration
e = Events.EPOCH_STARTED(once=5)
assert state.get_event_attrib_value(e) == state.epoch
e = Events.EPOCH_COMPLETED(once=5)
assert state.get_event_attrib_value(e) == state.epoch
@pytest.mark.parametrize(
"data, max_epochs, epoch_length", [(range(100), 2, 100), (range(200), 2, 100), (range(200), 5, 100)]
)
def test_time_stored_in_state(self, data, max_epochs, epoch_length):
sleep_time = 0.01
extra_sleep_time = 0.1
engine = Engine(lambda e, b: time.sleep(sleep_time))
@engine.on(Events.EPOCH_COMPLETED)
def check_epoch_time():
assert engine.state.times[Events.EPOCH_COMPLETED.name] >= sleep_time * epoch_length
time.sleep(extra_sleep_time)
@engine.on(Events.COMPLETED)
def check_completed_time():
assert (
engine.state.times[Events.COMPLETED.name] >= (sleep_time * epoch_length + extra_sleep_time) * max_epochs
)
time.sleep(extra_sleep_time)
engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
assert engine.state.times[Events.EPOCH_COMPLETED.name] >= sleep_time * epoch_length + extra_sleep_time
assert (
engine.state.times[Events.COMPLETED.name]
>= (sleep_time * epoch_length + extra_sleep_time) * max_epochs + extra_sleep_time
)
def _test_check_triggered_events(self, data, max_epochs, epoch_length, exp_iter_stops=None):
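# Attach a MagicMock handler to every engine event and verify each handler's call count
# after a full run (exp_iter_stops overrides the expected DATALOADER_STOP_ITERATION count).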
engine = Engine(lambda e, b: 1)
events = [
Events.STARTED,
Events.EPOCH_STARTED,
Events.ITERATION_STARTED,
Events.ITERATION_COMPLETED,
Events.EPOCH_COMPLETED,
Events.COMPLETED,
Events.GET_BATCH_STARTED,
Events.GET_BATCH_COMPLETED,
Events.DATALOADER_STOP_ITERATION,
]
handlers = {e: MagicMock() for e in events}
for e, handler in handlers.items():
engine.add_event_handler(e, handler)
engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
expected_num_calls = {
Events.STARTED: 1,
Events.COMPLETED: 1,
Events.EPOCH_STARTED: max_epochs,
Events.EPOCH_COMPLETED: max_epochs,
Events.ITERATION_STARTED: max_epochs * epoch_length,
Events.ITERATION_COMPLETED: max_epochs * epoch_length,
Events.GET_BATCH_STARTED: max_epochs * epoch_length,
Events.GET_BATCH_COMPLETED: max_epochs * epoch_length,
Events.DATALOADER_STOP_ITERATION: (max_epochs - 1) if exp_iter_stops is None else exp_iter_stops,
}
for n, handler in handlers.items():
assert handler.call_count == expected_num_calls[n], f"{n}: {handler.call_count} vs {expected_num_calls[n]}"
def _test_run_check_triggered_events(self):
# tests issue https://github.com/pytorch/ignite/issues/818
self._test_check_triggered_events(list(range(10)), max_epochs=4, epoch_length=10)
self._test_check_triggered_events(list(range(100)), max_epochs=5, epoch_length=100)
self._test_check_triggered_events(list(range(100)), max_epochs=5, epoch_length=50, exp_iter_stops=50 * 5 // 100)
self._test_check_triggered_events(
list(range(100)), max_epochs=5, epoch_length=150, exp_iter_stops=150 * 5 // 100
)
self._test_check_triggered_events(None, max_epochs=5, epoch_length=150)
def test_run_check_triggered_events_list(self):
self._test_run_check_triggered_events()
def _test_run_check_triggered_events_on_iterator(self):
def infinite_data_iterator():
while True:
for i in range(100):
yield i
self._test_check_triggered_events(infinite_data_iterator(), max_epochs=5, epoch_length=100, exp_iter_stops=0)
self._test_check_triggered_events(infinite_data_iterator(), max_epochs=5, epoch_length=50, exp_iter_stops=0)
self._test_check_triggered_events(infinite_data_iterator(), max_epochs=5, epoch_length=150, exp_iter_stops=0)
def limited_data_iterator():
for i in range(100):
yield i
self._test_check_triggered_events(limited_data_iterator(), max_epochs=1, epoch_length=100, exp_iter_stops=0)
self._test_check_triggered_events(limited_data_iterator(), max_epochs=10, epoch_length=10, exp_iter_stops=0)
# These runs are expected to fail: the finite iterator is exhausted before the requested run finishes
with pytest.raises(AssertionError):
with pytest.warns(UserWarning, match=r"Data iterator can not provide data anymore"):
self._test_check_triggered_events(limited_data_iterator(), max_epochs=3, epoch_length=100)
with pytest.raises(AssertionError):
with pytest.warns(UserWarning, match=r"Data iterator can not provide data anymore"):
self._test_check_triggered_events(limited_data_iterator(), max_epochs=3, epoch_length=75)
with pytest.raises(AssertionError):
with pytest.warns(UserWarning, match=r"Data iterator can not provide data anymore"):
self._test_check_triggered_events(limited_data_iterator(), max_epochs=1, epoch_length=101)
def test_run_check_triggered_events_on_iterator(self):
self._test_run_check_triggered_events_on_iterator()
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(self, distributed_context_single_node_nccl):
self._test_run_check_triggered_events_on_iterator()
self._test_run_check_triggered_events()
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(self, distributed_context_single_node_gloo):
self._test_run_check_triggered_events_on_iterator()
self._test_run_check_triggered_events()
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(self, distributed_context_multi_node_gloo):
self._test_run_check_triggered_events_on_iterator()
self._test_run_check_triggered_events()
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(self, distributed_context_multi_node_nccl):
self._test_run_check_triggered_events_on_iterator()
self._test_run_check_triggered_events()
def test_engine_random_state(self):
def random_data_generator():
while True:
yield torch.randint(0, 100, size=(5,))
def sum_data(_, batch):
result = torch.sum(batch)
return result
def get_engine():
engine = Engine(sum_data)
average = Average()
average.attach(engine, "average")
return engine
torch.manual_seed(34)
engine = get_engine()
state1 = engine.run(random_data_generator(), max_epochs=2, epoch_length=2)
torch.manual_seed(34)
engine = get_engine()
state2 = engine.run(random_data_generator(), max_epochs=2, epoch_length=2)
torch.manual_seed(42)
engine = get_engine()
state3 = engine.run(random_data_generator(), max_epochs=2, epoch_length=2)
assert state1.metrics["average"] == pytest.approx(state2.metrics["average"])
assert state1.metrics["average"] != pytest.approx(state3.metrics["average"])
assert state2.metrics["average"] != pytest.approx(state3.metrics["average"])
def test_altered_random_state(self):
# tests issue https://github.com/pytorch/ignite/issues/795
size = 1
def random_train_data_generator(size):
while True:
yield torch.randint(0, 100, size=(size,))
def random_val_data_generator(size):
while True:
yield torch.randint(0, 100, size=(size,)) + 100
train_only_batches = []
def train_fn(_, batch):
train_only_batches.append(batch[0].item())
torch.manual_seed(1)
epoch_length = 6
trainer = Engine(train_fn)
trainer.run(random_train_data_generator(size), max_epochs=4, epoch_length=epoch_length)
def val_fn(_1, _2):
pass
evaluator = Engine(val_fn)
train_batches = []
def train_fn2(_, batch):
train_batches.append(batch[0].item())
trainer = Engine(train_fn2)
@trainer.on(Events.EPOCH_COMPLETED)
@keep_random_state
def run_evaluation(_):
evaluator.run(random_val_data_generator(size), epoch_length=4)
torch.manual_seed(1)
trainer.run(random_train_data_generator(size), max_epochs=4, epoch_length=epoch_length)
for i in range(epoch_length):
assert train_batches[epoch_length + i] != train_batches[2 * epoch_length + i]
assert train_batches[i] == train_only_batches[i]
def test_engine_with_dataloader_no_auto_batching(self):
# tests https://github.com/pytorch/ignite/issues/941
from torch.utils.data import BatchSampler, DataLoader, RandomSampler
data = torch.rand(64, 4, 10)
data_loader = DataLoader(
data, batch_size=None, sampler=BatchSampler(RandomSampler(data), batch_size=8, drop_last=True)
)
counter = [0]
def foo(e, b):
counter[0] += 1
engine = Engine(foo)
engine.run(data_loader, epoch_length=10, max_epochs=5)
assert counter[0] == 50
def test_run_once_finite_iterator_no_epoch_length(self):
# FR: https://github.com/pytorch/ignite/issues/871
unknown_size = 11
def finite_unk_size_data_iter():
for i in range(unknown_size):
yield i
bc = BatchChecker(data=list(range(unknown_size)))
engine = Engine(lambda e, b: bc.check(b))
completed_handler = MagicMock()
engine.add_event_handler(Events.COMPLETED, completed_handler)
data_iter = finite_unk_size_data_iter()
engine.run(data_iter)
assert engine.state.epoch == 1
assert engine.state.iteration == unknown_size
assert completed_handler.call_count == 1
def test_run_finite_iterator_no_epoch_length(self):
# FR: https://github.com/pytorch/ignite/issues/871
unknown_size = 11
def finite_unk_size_data_iter():
for i in range(unknown_size):
yield i
bc = BatchChecker(data=list(range(unknown_size)))
engine = Engine(lambda e, b: bc.check(b))
@engine.on(Events.DATALOADER_STOP_ITERATION)
def restart_iter():
engine.state.dataloader = finite_unk_size_data_iter()
data_iter = finite_unk_size_data_iter()
engine.run(data_iter, max_epochs=5)
assert engine.state.epoch == 5
assert engine.state.iteration == unknown_size * 5
def test_run_finite_iterator_no_epoch_length_2(self):
# FR: https://github.com/pytorch/ignite/issues/871
known_size = 11
def finite_size_data_iter(size):
for i in range(size):
yield i
bc = BatchChecker(data=list(range(known_size)))
engine = Engine(lambda e, b: bc.check(b))
@engine.on(Events.ITERATION_COMPLETED(every=known_size))
def restart_iter():
engine.state.dataloader = finite_size_data_iter(known_size)
data_iter = finite_size_data_iter(known_size)
engine.run(data_iter, max_epochs=5)
assert engine.state.epoch == 5
assert engine.state.iteration == known_size * 5
def test_faq_inf_iterator_with_epoch_length(self):
# Code snippet from FAQ
# import torch
torch.manual_seed(12)
def infinite_iterator(batch_size):
while True:
batch = torch.rand(batch_size, 3, 32, 32)
yield batch
def train_step(trainer, batch):
# ...
s = trainer.state
print(f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch.norm():.3f}")
trainer = Engine(train_step)
# We need to specify epoch_length to define the epoch
trainer.run(infinite_iterator(4), epoch_length=5, max_epochs=3)
assert trainer.state.epoch == 3
assert trainer.state.iteration == 3 * 5
def test_faq_inf_iterator_no_epoch_length(self):
# Code snippet from FAQ
# import torch
torch.manual_seed(12)
def infinite_iterator(batch_size):
while True:
batch = torch.rand(batch_size, 3, 32, 32)
yield batch
def train_step(trainer, batch):
# ...
s = trainer.state
print(f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch.norm():.3f}")
trainer = Engine(train_step)
@trainer.on(Events.ITERATION_COMPLETED(once=15))
def stop_training():
trainer.terminate()
trainer.run(infinite_iterator(4))
assert trainer.state.epoch == 1
assert trainer.state.iteration == 15
def test_faq_fin_iterator_unknw_size(self):
# Code snippet from FAQ
# import torch
torch.manual_seed(12)
def finite_unk_size_data_iter():
for i in range(11):
yield i
def train_step(trainer, batch):
# ...
s = trainer.state
print(f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch:.3f}")
trainer = Engine(train_step)
@trainer.on(Events.DATALOADER_STOP_ITERATION)
def restart_iter():
trainer.state.dataloader = finite_unk_size_data_iter()
data_iter = finite_unk_size_data_iter()
trainer.run(data_iter, max_epochs=5)
assert trainer.state.epoch == 5
assert trainer.state.iteration == 5 * 11
# Code snippet from FAQ
# import torch
torch.manual_seed(12)
def finite_unk_size_data_iter():
for i in range(11):
yield i
def val_step(evaluator, batch):
# ...
s = evaluator.state
print(f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch:.3f}")
evaluator = Engine(val_step)
data_iter = finite_unk_size_data_iter()
evaluator.run(data_iter)
assert evaluator.state.epoch == 1
assert evaluator.state.iteration == 1 * 11
def test_faq_fin_iterator(self):
# Code snippet from FAQ
# import torch
torch.manual_seed(12)
size = 11
def finite_size_data_iter(size):
for i in range(size):
yield i
def train_step(trainer, batch):
# ...
s = trainer.state
print(f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch:.3f}")
trainer = Engine(train_step)
@trainer.on(Events.ITERATION_COMPLETED(every=size))
def restart_iter():
trainer.state.dataloader = finite_size_data_iter(size)
data_iter = finite_size_data_iter(size)
trainer.run(data_iter, max_epochs=5)
assert trainer.state.epoch == 5
assert trainer.state.iteration == 5 * size
# Code snippet from FAQ
# import torch
torch.manual_seed(12)
size = 11
def finite_size_data_iter(size):
for i in range(size):
yield i
def val_step(evaluator, batch):
# ...
s = evaluator.state
print(f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch:.3f}")
evaluator = Engine(val_step)
data_iter = finite_size_data_iter(size)
evaluator.run(data_iter)
assert evaluator.state.epoch == 1
assert evaluator.state.iteration == size
def test_set_data(self):
# tests FR https://github.com/pytorch/ignite/issues/833
from torch.utils.data import DataLoader
num_iters1 = 10
num_iters2 = 20
batch_size = 4
torch.manual_seed(1)
data1 = DataLoader(torch.rand(num_iters1 * batch_size, 11), batch_size=batch_size)
data2 = DataLoader(torch.rand(num_iters2 * batch_size, 22), batch_size=batch_size)
switch_iteration = 35
def train_fn(e, batch):
if e.state.iteration <= switch_iteration:
assert batch.shape[1] == 11, f"{e.state.iteration}: {batch.shape}"
else:
assert batch.shape[1] == 22, f"{e.state.iteration}: {batch.shape}"
trainer = Engine(train_fn)
@trainer.on(Events.ITERATION_COMPLETED(once=switch_iteration))
def switch_dataloader():
trainer.set_data(data2)
trainer.run(data1, max_epochs=10)
def test_run_with_max_iters(self):
max_iters = 8
engine = Engine(lambda e, b: 1)
engine.run([0] * 20, max_iters=max_iters)
assert engine.state.iteration == max_iters
assert engine.state.max_iters == max_iters
def test_run_with_max_iters_greater_than_epoch_length(self):
max_iters = 73
engine = Engine(lambda e, b: 1)
engine.run([0] * 20, max_iters=max_iters)
assert engine.state.iteration == max_iters
def test_run_with_invalid_max_iters_and_max_epoch(self):
max_iters = 12
max_epochs = 2
engine = Engine(lambda e, b: 1)
with pytest.raises(
ValueError,
match=r"Arguments max_iters and max_epochs are mutually exclusive."
"Please provide only max_epochs or max_iters.",
):
engine.run([0] * 20, max_iters=max_iters, max_epochs=max_epochs)
def test_epoch_events_fired_max_iters(self):
max_iters = 32
engine = Engine(lambda e, b: 1)
@engine.on(Events.EPOCH_COMPLETED)
def fired_event(engine):
assert engine.state.iteration % engine.state.epoch_length == 0
engine.run([0] * 10, max_iters=max_iters)
def test_is_done_with_max_iters(self):
state = State(iteration=100, epoch=1, max_epochs=3, epoch_length=100, max_iters=250)
assert not Engine._is_done(state)
state = State(iteration=250, epoch=1, max_epochs=3, epoch_length=100, max_iters=250)
assert Engine._is_done(state)
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_batch_is_released_before_new_one_is_loaded_on_cuda(self):
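# Record CUDA memory allocation before and after each batch is created: allocation should
# stay flat because the previous batch is released before the next one is loaded.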
torch.cuda.empty_cache()
engine = Engine(lambda e, b: None)
def _test():
mem_consumption = []
def dataloader():
for _ in range(4):
mem_consumption.append(torch.cuda.memory_allocated())
batch = torch.randn(10).cuda()
mem_consumption.append(torch.cuda.memory_allocated())
yield batch
engine.run(dataloader(), max_epochs=2, epoch_length=2)
return mem_consumption
mem_consumption1 = _test()
# mem_consumption should look like [0, 512, 512, 512, 512, 512, 512, 512]
assert len(set(mem_consumption1[1:])) == 1
mem_consumption2 = _test()
assert len(set(mem_consumption2[1:])) == 1
assert mem_consumption1 == mem_consumption2
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_output_is_released_before_new_one_is_assigned_on_cuda(self):
torch.cuda.empty_cache()
def _test():
mem_consumption = []
def update_fn(engine, batch):
mem_consumption.append(torch.cuda.memory_allocated())
output = torch.rand(10).cuda()
mem_consumption.append(torch.cuda.memory_allocated())
return output
engine = Engine(update_fn)
engine.run([0, 1], max_epochs=2)
return mem_consumption
mem_consumption1 = _test()[2:]
# mem_consumption ~ [0, 512, 0, 512, 0, 512, 0, 512]
assert len(set(mem_consumption1)) == 2
mem_consumption2 = _test()[2:]
assert len(set(mem_consumption2)) == 2
assert mem_consumption1 == mem_consumption2
def test_engine_no_data_asserts(self):
trainer = Engine(lambda e, b: None)
with pytest.raises(ValueError, match=r"epoch_length should be provided if data is None"):
trainer.run(max_epochs=10)
def test_engine_no_data(self):
def train_step(engine, batch):
assert batch is None
trainer = Engine(train_step)
trainer.run(max_epochs=10, epoch_length=10)
assert trainer.state.iteration == 10 * 10
assert trainer.state.epoch == 10
assert trainer.state.dataloader is None
# continue
trainer.run(max_epochs=20)
assert trainer.state.iteration == 20 * 10
assert trainer.state.epoch == 20
assert trainer.state.dataloader is None
@pytest.mark.parametrize("data, epoch_length", [(None, 10), (range(10), None)])
def test_engine_run_resume(self, data, epoch_length):
# https://github.com/pytorch/ignite/wiki/Roadmap#runresume-logic-improvements
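# Covers the run/resume rules: a larger max_epochs continues training, a smaller one raises,
# and reaching max_epochs (or setting state.max_epochs = None) restarts from scratch.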
engine = Engine(lambda e, b: None)
real_epoch_length = len(data) if data is not None else epoch_length
first_epoch_iter = [None, None]
@engine.on(Events.STARTED, first_epoch_iter)
def check_iter_epoch(first_epoch_iter):
assert engine.state.epoch == first_epoch_iter[0]
assert engine.state.iteration == first_epoch_iter[1]
# (re)start from 0 to 5
first_epoch_iter[0], first_epoch_iter[1] = 0, 0
# Engine run starting with max_epochs=5 => state.epoch=5
engine.run(data, max_epochs=5, epoch_length=epoch_length)
assert engine.state.epoch == 5
assert engine.state.iteration == 5 * real_epoch_length
# continue from 5 to 7
first_epoch_iter[0], first_epoch_iter[1] = 5, 5 * real_epoch_length
# Engine run resuming from iteration 50, epoch 5 until 7 epochs => state.epoch=7
engine.run(data, max_epochs=7, epoch_length=epoch_length)
assert engine.state.epoch == 7
assert engine.state.iteration == 7 * real_epoch_length
# error
with pytest.raises(ValueError, match="Argument max_epochs should be greater than or equal to the start epoch"):
engine.run(data, max_epochs=4, epoch_length=epoch_length)
# restart from 0 to 7 (since state.epoch == max_epochs (=7),
# this matches the usual pattern: evaluator.run(data) without any extra instructions)
first_epoch_iter[0], first_epoch_iter[1] = 0, 0
# Engine run starting with max_epochs=7 => state.epoch=7
engine.run(data, max_epochs=7, epoch_length=epoch_length)
assert engine.state.epoch == 7
assert engine.state.iteration == 7 * real_epoch_length
# forced restart from 0 to 5
engine.state.max_epochs = None
first_epoch_iter[0], first_epoch_iter[1] = 0, 0
# Engine run starting with max_epochs=5 => state.epoch=5
engine.run(data, max_epochs=5, epoch_length=epoch_length)
assert engine.state.epoch == 5
assert engine.state.iteration == 5 * real_epoch_length
# forced restart from 0 to 9, instead of continuing from state.epoch=5
engine.state.max_epochs = None
first_epoch_iter[0], first_epoch_iter[1] = 0, 0
# Engine run starting with max_epochs=9 => state.epoch=9
engine.run(data, max_epochs=9, epoch_length=epoch_length)
assert engine.state.epoch == 9
assert engine.state.iteration == 9 * real_epoch_length
# continue from 9 until 10
first_epoch_iter[0], first_epoch_iter[1] = 9, 9 * real_epoch_length
# Engine run resuming from iteration 90, epoch 9 until 10 epochs => state.epoch=10
engine.run(data, max_epochs=10, epoch_length=epoch_length)
assert engine.state.epoch == 10
assert engine.state.iteration == 10 * real_epoch_length
@pytest.mark.parametrize(
"interrupt_event, e, i",
[
(Events.EPOCH_STARTED(once=2), 2, None),
(Events.EPOCH_COMPLETED(once=2), 2, None),
(Events.GET_BATCH_STARTED(once=12), None, 12),
(Events.GET_BATCH_COMPLETED(once=12), None, 12),
(Events.ITERATION_STARTED(once=14), None, 14),
(Events.ITERATION_COMPLETED(once=14), None, 14),
],
)
def test_engine_run_interrupt_resume(interrupt_event, e, i):
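# Record the full event sequence of an uninterrupted run first, then interrupt at
# `interrupt_event`, resume, and check that the two partial sequences add up to the original
# one (STARTED is not fired again on resume).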
assert Engine.interrupt_resume_enabled
data = range(10)
max_epochs = 5
def check_input_data(e, b):
i = (e.state.iteration - 1) % len(data)
assert b == data[i]
engine = RecordedEngine(check_input_data)
engine.run(data, max_epochs=max_epochs)
expected_called_events = list(engine.called_events)
engine.called_events = []
@engine.on(interrupt_event)
def call_interrupt():
engine.interrupt()
state = engine.run(data, max_epochs=max_epochs)
if i is None:
if interrupt_event == Events.EPOCH_STARTED:
i = len(data) * (e - 1)
else:
i = len(data) * e
if e is None:
e = i // len(data) + 1
# Check the last events
assert engine.called_events[-1] == (e, i, Events.INTERRUPT)
assert engine.called_events[-2] == (e, i, interrupt_event)
assert state.epoch == e
assert state.iteration == i
assert not engine.should_interrupt
# implementation detail check:
assert engine._dataloader_iter is not None
assert engine._internal_run_generator is not None
le = len(engine.called_events)
# We need to skip the last INTERRUPT event to compare
assert expected_called_events[: le - 1] == engine.called_events[:-1]
engine.called_events = []
@engine.on(Events.STARTED)
def raise_error():
raise RuntimeError("Shouldn't be here")
engine.run(data, max_epochs=max_epochs)
assert expected_called_events[le - 1 :] == engine.called_events
# implementation detail check:
assert engine._dataloader_iter is None
assert engine._internal_run_generator is None
def test_engine_run_multiple_interrupt_resume():
assert Engine.interrupt_resume_enabled
data = range(10)
max_epochs = 3
def check_input_data(e, b):
i = (e.state.iteration - 1) % len(data)
assert b == data[i]
engine = Engine(check_input_data)
can_interrupt = True
@engine.on(Events.ITERATION_COMPLETED(every=6))
def call_interrupt():
if can_interrupt:
engine.interrupt()
state = engine.run(data, max_epochs=max_epochs)
assert state.iteration == 6 * 1 and state.epoch == 1
state = engine.run(data, max_epochs=max_epochs)
assert state.iteration == 6 * 2 and state.epoch == 2
state = engine.run(data, max_epochs=max_epochs)
assert state.iteration == 6 * 3 and state.epoch == 2
state = engine.run(data, max_epochs=max_epochs)
assert state.iteration == 6 * 4 and state.epoch == 3
# We did an interruption on the last epoch
assert state.epoch == max_epochs
# Run remaining iterations without interruptions
can_interrupt = False
state = engine.run(data, max_epochs=max_epochs)
assert state.iteration == max_epochs * len(data) and state.epoch == max_epochs
# Check implementation details
assert engine._dataloader_iter is None
assert engine._internal_run_generator is None
# Rerun the engine from start to end without interruptions
num_calls_check_iter_epoch = 0
@engine.on(Events.STARTED)
def check_iter_epoch():
nonlocal num_calls_check_iter_epoch
assert engine.state.epoch == 0
assert engine.state.iteration == 0
num_calls_check_iter_epoch += 1
state = engine.run(data, max_epochs=max_epochs)
assert state.iteration == max_epochs * len(data) and state.epoch == max_epochs
assert num_calls_check_iter_epoch == 1
def test_engine_should_interrupt_error():
Engine.interrupt_resume_enabled = False
engine = Engine(lambda e, b: None)
with pytest.raises(RuntimeError, match="Engine 'interrupt/resume' feature is disabled"):
engine.interrupt()
Engine.interrupt_resume_enabled = True
def test_engine_interrupt_restart():
assert Engine.interrupt_resume_enabled
data = range(10)
max_epochs = 3
def check_input_data(e, b):
i = (e.state.iteration - 1) % len(data)
assert b == data[i]
engine = Engine(check_input_data)
can_interrupt = True
@engine.on(Events.ITERATION_COMPLETED(every=11))
def call_interrupt():
if can_interrupt:
engine.interrupt()
# Run and interrupt
state = engine.run(data, max_epochs=max_epochs)
assert state.iteration == 11 and state.epoch == 2
num_calls_check_iter_epoch = 0
@engine.on(Events.STARTED)
def check_iter_epoch():
nonlocal num_calls_check_iter_epoch
assert engine.state.epoch == 0
assert engine.state.iteration == 0
num_calls_check_iter_epoch += 1
# Reset and run with interruption
state.max_epochs = None
state = engine.run(data, max_epochs=max_epochs)
assert state.iteration == 11 and state.epoch == 2
assert num_calls_check_iter_epoch == 1
can_interrupt = False
num_calls_check_iter_epoch = 0
# Reset and run without interruption
state.max_epochs = None
state = engine.run(data, max_epochs=max_epochs)
assert state.iteration == max_epochs * len(data) and state.epoch == max_epochs
assert num_calls_check_iter_epoch == 1
|
from collections.abc import Mapping
import pytest
import torch
from ignite.engine import Engine, Events, State
from tests.ignite.engine import BatchChecker, EpochCounter, IterationCounter
def test_state_dict():
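# A fresh Engine state dict has exactly three entries (iteration, max_epochs, epoch_length);
# once a State is assigned, the state dict mirrors its values.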
engine = Engine(lambda e, b: 1)
sd = engine.state_dict()
assert isinstance(sd, Mapping) and len(sd) == 3
assert "iteration" in sd and sd["iteration"] == 0
assert "max_epochs" in sd and sd["max_epochs"] is None
assert "epoch_length" in sd and sd["epoch_length"] is None
def _test(state):
engine.state = state
sd = engine.state_dict()
assert isinstance(sd, Mapping) and len(sd) == len(engine._state_dict_all_req_keys) + 1
assert sd["iteration"] == engine.state.iteration
assert sd["epoch_length"] == engine.state.epoch_length
assert sd["max_epochs"] == engine.state.max_epochs
_test(State(iteration=500, epoch_length=1000, max_epochs=100))
_test(State(epoch=5, epoch_length=1000, max_epochs=100))
def test_state_dict_with_user_keys():
engine = Engine(lambda e, b: 1)
engine.state_dict_user_keys.append("alpha")
engine.state_dict_user_keys.append("beta")
def _test(state):
engine.state = state
sd = engine.state_dict()
assert isinstance(sd, Mapping) and len(sd) == len(engine._state_dict_all_req_keys) + 1 + len(
engine.state_dict_user_keys
)
assert sd["iteration"] == engine.state.iteration
assert sd["epoch_length"] == engine.state.epoch_length
assert sd["max_epochs"] == engine.state.max_epochs
assert sd["alpha"] == engine.state.alpha
assert sd["beta"] == engine.state.beta
_test(State(iteration=500, epoch_length=1000, max_epochs=100, alpha=0.01, beta="Good"))
def test_state_dict_integration():
engine = Engine(lambda e, b: 1)
data = range(100)
engine.run(data, max_epochs=10)
sd = engine.state_dict()
assert isinstance(sd, Mapping) and len(sd) == len(engine._state_dict_all_req_keys) + 1
assert sd["iteration"] == engine.state.iteration == 10 * 100
assert sd["epoch_length"] == engine.state.epoch_length == 100
assert sd["max_epochs"] == engine.state.max_epochs == 10
def test_load_state_dict_asserts():
engine = Engine(lambda e, b: 1)
with pytest.raises(TypeError, match=r"Argument state_dict should be a dictionary"):
engine.load_state_dict("123")
with pytest.raises(ValueError, match=r"is absent in provided state_dict"):
engine.load_state_dict({})
with pytest.raises(ValueError, match=r"state_dict should contain only one of"):
engine.load_state_dict({"max_epochs": 100, "epoch_length": 120})
with pytest.raises(ValueError, match=r"state_dict should contain only one of"):
engine.load_state_dict({"max_epochs": 100, "epoch_length": 120, "iteration": 12, "epoch": 123})
engine = Engine(lambda e, b: 1)
engine.state_dict_user_keys.append("alpha")
with pytest.raises(ValueError, match=r"Required user state attribute"):
engine.load_state_dict({"max_epochs": 100, "epoch_length": 120, "iteration": 12})
engine = Engine(lambda e, b: 1)
with pytest.raises(ValueError, match=r"If epoch is provided in the state dict, epoch_length should not be None"):
engine.load_state_dict({"max_epochs": 100, "epoch": 2, "epoch_length": None})
def test_load_state_dict():
engine = Engine(lambda e, b: 1)
def _test(sd):
engine.load_state_dict(sd)
if "iteration" in sd:
assert sd["iteration"] == engine.state.iteration
elif "epoch" in sd:
assert sd["epoch"] == engine.state.epoch
assert sd["epoch_length"] == engine.state.epoch_length
assert sd["max_epochs"] == engine.state.max_epochs
_test({"max_epochs": 100, "epoch_length": 120, "iteration": 123})
_test({"max_epochs": 100, "epoch_length": 120, "epoch": 5})
def test_load_state_dict_with_user_keys():
engine = Engine(lambda e, b: 1)
engine.state_dict_user_keys.append("alpha")
engine.state_dict_user_keys.append("beta")
def _test(sd):
engine.load_state_dict(sd)
if "iteration" in sd:
assert sd["iteration"] == engine.state.iteration
elif "epoch" in sd:
assert sd["epoch"] == engine.state.epoch
assert sd["epoch_length"] == engine.state.epoch_length
assert sd["max_epochs"] == engine.state.max_epochs
assert sd["alpha"] == engine.state.alpha
assert sd["beta"] == engine.state.beta
_test({"max_epochs": 100, "epoch_length": 120, "iteration": 123, "alpha": 0.1, "beta": "abc"})
def test_load_state_dict_integration():
engine = Engine(lambda e, b: 1)
state_dict = {"max_epochs": 100, "epoch_length": 120, "epoch": 5}
engine.load_state_dict(state_dict)
engine.add_event_handler(Events.ITERATION_COMPLETED, IterationCounter(5 * 120 + 1))
engine.add_event_handler(Events.EPOCH_COMPLETED, EpochCounter(6))
data = range(120)
engine.run(data)
def test_load_state_dict_with_params_overriding_integration():
state_dict = {"max_epochs": 100, "epoch_length": 120, "epoch": 5}
data = range(120)
# Override max_epochs
new_max_epochs = 10
engine = Engine(lambda e, b: 1)
engine.load_state_dict(state_dict)
state = engine.run(data, max_epochs=new_max_epochs)
assert state.max_epochs == new_max_epochs
assert state.iteration == state_dict["epoch_length"] * new_max_epochs
assert state.epoch == new_max_epochs
with pytest.raises(ValueError, match=r"Argument max_epochs should be greater than or equal to the start epoch"):
engine.load_state_dict(state_dict)
engine.run(data, max_epochs=3)
# Override epoch_length
with pytest.raises(ValueError, match=r"Argument epoch_length should be same as in the state"):
engine.load_state_dict(state_dict)
engine.run(data, epoch_length=90)
def test_empty_state_dict_load_state_dict():
engine = Engine(lambda e, b: 1)
sd = engine.state_dict()
engine.load_state_dict(sd)
def test_continue_training():
# Tests issue: https://github.com/pytorch/ignite/issues/993
max_epochs = 2
data = range(10)
engine = Engine(lambda e, b: 1)
state = engine.run(data, max_epochs=max_epochs)
assert state.max_epochs == max_epochs
assert state.iteration == len(data) * max_epochs
assert state.epoch == max_epochs
@engine.on(Events.STARTED)
def assert_continue_training():
assert engine.state.epoch == max_epochs
state = engine.run(data, max_epochs=max_epochs * 2)
assert state.max_epochs == max_epochs * 2
assert state.iteration == len(data) * max_epochs * 2
assert state.epoch == max_epochs * 2
def test_state_dict_with_user_keys_integration(dirname):
engine = Engine(lambda e, b: 1)
engine.state_dict_user_keys.append("alpha")
@engine.on(Events.STARTED)
def init_user_values(_):
engine.state.alpha = 0.1
fp = dirname / "engine.pt"
@engine.on(Events.COMPLETED)
def save_engine(_):
state_dict = engine.state_dict()
assert "alpha" in state_dict
torch.save(state_dict, fp)
engine.run([0, 1])
assert fp.exists()
state_dict = torch.load(fp)
assert "alpha" in state_dict and state_dict["alpha"] == 0.1
def test_epoch_length():
def _test(data, max_epochs, num_iters):
batch_checker = BatchChecker(data)
def update_fn(_, batch):
assert batch_checker.check(batch), f"{batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
engine = Engine(update_fn)
engine.run(data, max_epochs=max_epochs, epoch_length=num_iters)
if num_iters is None:
num_iters = len(data)
assert engine.state.iteration == num_iters * max_epochs
assert engine.state.epoch == max_epochs
def _test_as_iter(data, max_epochs, num_iters):
batch_checker = BatchChecker(data)
def update_fn(_, batch):
assert batch_checker.check(batch), f"{batch_checker.counter}: {batch_checker.true_batch} vs {batch}"
engine = Engine(update_fn)
engine.run(iter(data), max_epochs=max_epochs, epoch_length=num_iters)
if num_iters is None:
num_iters = len(data)
assert engine.state.iteration == num_iters * max_epochs
assert engine.state.epoch == max_epochs
max_epochs = 10
num_iters = 21
data = torch.randint(0, 1000, size=(num_iters,))
_test(data, max_epochs, num_iters=None)
_test(data, max_epochs, num_iters)
_test(data, max_epochs, num_iters // 2)
_test(data, max_epochs, num_iters * 2)
_test_as_iter(data, 1, num_iters)
_test_as_iter(data, 2, num_iters // 2)
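def _expected_total_iterations(data, max_epochs, epoch_length=None):
    # Illustrative helper, an assumption added for readability (not an ignite
    # API): the total number of iterations run above is epoch_length *
    # max_epochs, where epoch_length defaults to len(data) when not given,
    # which is exactly what _test and _test_as_iter assert.
    return (epoch_length if epoch_length is not None else len(data)) * max_epochs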
def test_state_custom_attrs_init():
def _test(with_load_state_dict=False):
engine = Engine(lambda e, b: None)
engine.state.alpha = 0.0
engine.state.beta = 1.0
if with_load_state_dict:
engine.load_state_dict({"iteration": 3, "max_epochs": 5, "epoch_length": 5})
@engine.on(Events.STARTED | Events.EPOCH_STARTED | Events.EPOCH_COMPLETED | Events.COMPLETED)
def check_custom_attr():
assert hasattr(engine.state, "alpha") and engine.state.alpha == 0.0
assert hasattr(engine.state, "beta") and engine.state.beta == 1.0
engine.run([0, 1, 2, 3, 4], max_epochs=5)
_test()
_test(with_load_state_dict=True)
def test_restart_training():
data = range(10)
engine = Engine(lambda e, b: 1)
state = engine.run(data, max_epochs=5)
with pytest.raises(
ValueError,
match=r"Argument max_epochs should be greater than or equal to the start epoch defined in the state: 2 vs 5. "
r"Please, .+ "
r"before calling engine.run\(\) in order to restart the training from the beginning.",
):
state = engine.run(data, max_epochs=2)
state.max_epochs = None
engine.run(data, max_epochs=2)
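def _restart_from_scratch(engine, data, max_epochs):
    # Illustrative sketch mirroring the test above (our reading of the intended
    # usage, not a new ignite API): resetting state.max_epochs to None lets
    # run() restart training from the beginning instead of raising the
    # ValueError matched above.
    engine.state.max_epochs = None
    return engine.run(data, max_epochs=max_epochs)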
|
import torch
try:
from torch.utils.data import IterableDataset
except ImportError:
class IterableDataset:
pass
class BatchChecker:
def __init__(self, data, init_counter=0):
self.counter = init_counter
self.data = data
self.true_batch = None
def check(self, batch):
self.true_batch = self.data[self.counter % len(self.data)]
self.counter += 1
res = self.true_batch == batch
return res.all() if not isinstance(res, bool) else res
class IterationCounter:
def __init__(self, start_value=1):
self.current_iteration_count = start_value
def __call__(self, engine):
assert engine.state.iteration == self.current_iteration_count
self.current_iteration_count += 1
class EpochCounter:
def __init__(self, start_value=1):
self.current_epoch_count = start_value
def __call__(self, engine):
assert engine.state.epoch == self.current_epoch_count
self.current_epoch_count += 1
def setup_sampler(sampler_type, num_iters, batch_size):
if sampler_type is None:
return None, batch_size
if sampler_type == "weighted":
from torch.utils.data.sampler import WeightedRandomSampler
w = torch.ones(num_iters * batch_size, dtype=torch.float)
for i in range(num_iters):
w[batch_size * i : batch_size * (i + 1)] += i * 1.0
return WeightedRandomSampler(w, num_samples=num_iters * batch_size, replacement=True), batch_size
if sampler_type == "distributed":
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
num_replicas = 1
rank = 0
if dist.is_available() and dist.is_initialized():
num_replicas = dist.get_world_size()
rank = dist.get_rank()
dataset = torch.zeros(num_iters * batch_size)
return DistributedSampler(dataset, num_replicas=num_replicas, rank=rank), batch_size // num_replicas
class MyIterableDataset(IterableDataset):
def __init__(self, start, end):
super().__init__()
assert end > start, "this example code only works with end > start"
self.start = start
self.end = end
def __iter__(self):
return iter(range(self.start, self.end))
def get_iterable_dataset(*args, **kwargs):
return MyIterableDataset(*args, **kwargs)
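def _example_iterable_dataset_usage():
    # Illustrative usage sketch (not used by the tests; the helper name is an
    # assumption): MyIterableDataset can be consumed by a DataLoader like any
    # IterableDataset. With num_workers=0 and batch_size=4 this yields the
    # batches [0, 1, 2, 3] and [4, 5, 6, 7].
    loader = torch.utils.data.DataLoader(get_iterable_dataset(0, 8), batch_size=4)
    return [batch.tolist() for batch in loader]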
|
from enum import Enum
from unittest.mock import MagicMock
import pytest
import torch
import ignite.distributed as idist
from ignite.engine import Engine, Events
from ignite.engine.events import CallableEventWithFilter, EventEnum, EventsList
def test_custom_events():
class CustomEvents(EventEnum):
TEST_EVENT = "test_event"
# Dummy engine
engine = Engine(lambda engine, batch: 0)
engine.register_events(*CustomEvents)
engine.register_events("a", "b", "c")
evs = [CustomEvents.TEST_EVENT, "a", "b", "c"]
# Handlers are never called
handlers = [(e, MagicMock()) for e in evs]
for e, h in handlers:
engine.add_event_handler(e, h)
engine.run(range(1))
for _, h in handlers:
assert not h.called
# Advanced engine
def process_func(engine, batch):
for e, _ in handlers:
engine.fire_event(e)
engine = Engine(process_func)
engine.register_events(*CustomEvents)
engine.register_events("a", "b", "c")
# Handlers should be called
handlers = [(e, MagicMock()) for e in evs]
for e, h in handlers:
engine.add_event_handler(e, h)
engine.run(range(1))
for _, h in handlers:
assert h.called
def test_custom_events_asserts():
# Dummy engine
engine = Engine(lambda engine, batch: 0)
class A:
pass
with pytest.raises(TypeError, match=r"Value at \d of event_names should be a str or EventEnum"):
engine.register_events(None)
with pytest.raises(TypeError, match=r"Value at \d of event_names should be a str or EventEnum"):
engine.register_events("str", None)
with pytest.raises(TypeError, match=r"Value at \d of event_names should be a str or EventEnum"):
engine.register_events(1)
with pytest.raises(TypeError, match=r"Value at \d of event_names should be a str or EventEnum"):
engine.register_events(A())
assert Events.EPOCH_COMPLETED != 1
assert Events.EPOCH_COMPLETED != "abc"
assert Events.ITERATION_COMPLETED != Events.EPOCH_COMPLETED
assert Events.ITERATION_COMPLETED != Events.EPOCH_COMPLETED(every=2)
# In current implementation, EPOCH_COMPLETED and EPOCH_COMPLETED with event filter are the same
assert Events.EPOCH_COMPLETED == Events.EPOCH_COMPLETED(every=2)
assert Events.ITERATION_COMPLETED == Events.ITERATION_COMPLETED(every=2)
def test_custom_events_with_event_to_attr():
class CustomEvents(EventEnum):
TEST_EVENT = "test_event"
custom_event_to_attr = {CustomEvents.TEST_EVENT: "test_event"}
# Dummy engine
engine = Engine(lambda engine, batch: 0)
engine.register_events(*CustomEvents, event_to_attr=custom_event_to_attr)
# Handler is never called
handle = MagicMock()
engine.add_event_handler(CustomEvents.TEST_EVENT, handle)
engine.run(range(1))
assert hasattr(engine.state, "test_event")
assert engine.state.test_event == 0
# Advanced engine
def process_func(engine, batch):
engine.fire_event(CustomEvents.TEST_EVENT)
engine = Engine(process_func)
engine.register_events(*CustomEvents, event_to_attr=custom_event_to_attr)
def handle(engine):
engine.state.test_event += 1
engine.add_event_handler(CustomEvents.TEST_EVENT, handle)
engine.run(range(25))
assert engine.state.test_event == 25
custom_event_to_attr = "a"
engine = Engine(lambda engine, batch: 0)
with pytest.raises(ValueError):
engine.register_events(*CustomEvents, event_to_attr=custom_event_to_attr)
def test_custom_events_with_events_list():
class CustomEvents(EventEnum):
TEST_EVENT = "test_event"
def process_func(engine, batch):
engine.fire_event(CustomEvents.TEST_EVENT)
engine = Engine(process_func)
engine.register_events(*CustomEvents)
# Handler should be called
handle = MagicMock()
engine.add_event_handler(CustomEvents.TEST_EVENT | Events.STARTED, handle)
engine.run(range(1))
assert handle.called
def test_callable_events_with_wrong_inputs():
def ef(e, i):
return 1
expected_raise = {
# event_filter, every, once, before, after
(None, None, None, None, None): True, # raises ValueError
(ef, None, None, None, None): False,
(None, 2, None, None, None): False,
(ef, 2, None, None, None): True,
(None, None, 2, None, None): False,
(ef, None, 2, None, None): True,
(None, 2, 2, None, None): True,
(ef, 2, 2, None, None): True,
(None, None, None, 30, None): False,
(ef, None, None, 30, None): True,
(None, 2, None, 30, None): False,
(ef, 2, None, 30, None): True,
(None, None, 2, 30, None): True,
(ef, None, 2, 30, None): True,
(None, 2, 2, 30, None): True,
(ef, 2, 2, 30, None): True,
# event_filter, every, once, before, after
(None, None, None, None, 10): False,
(ef, None, None, None, 10): True,
(None, 2, None, None, 10): False,
(ef, 2, None, None, 10): True,
(None, None, 2, None, 10): True,
(ef, None, 2, None, 10): True,
(None, 2, 2, None, 10): True,
(ef, 2, 2, None, 10): True,
(None, None, None, 25, 8): False,
(ef, None, None, 25, 8): True,
(None, 2, None, 25, 8): False,
(ef, 2, None, 25, 8): True,
(None, None, 2, 25, 8): True,
(ef, None, 2, 25, 8): True,
(None, 2, 2, 25, 8): True,
(ef, 2, 2, 25, 8): True,
}
for event_filter in [None, ef]:
for every in [None, 2]:
for once in [None, 2]:
for before, after in [(None, None), (None, 10), (30, None), (25, 8)]:
if expected_raise[(event_filter, every, once, before, after)]:
with pytest.raises(
ValueError,
match=r"Only one of the input arguments should be specified, "
"except before, after and every",
):
Events.ITERATION_STARTED(
event_filter=event_filter, once=once, every=every, before=before, after=after
)
else:
Events.ITERATION_STARTED(
event_filter=event_filter, once=once, every=every, before=before, after=after
)
with pytest.raises(TypeError, match=r"Argument event_filter should be a callable"):
Events.ITERATION_STARTED(event_filter="123")
with pytest.raises(ValueError, match=r"Argument every should be integer and greater than zero"):
Events.ITERATION_STARTED(every=-1)
with pytest.raises(
ValueError, match=r"Argument once should either be a positive integer or a list of positive integers, got .+"
):
Events.ITERATION_STARTED(once=-1)
with pytest.raises(
ValueError, match=r"Argument once should either be a positive integer or a list of positive integers, got .+"
):
Events.ITERATION_STARTED(once=[1, 10.0, "pytorch"])
with pytest.raises(
ValueError, match=r"Argument once should either be a positive integer or a list of positive integers, got .+"
):
Events.ITERATION_STARTED(once=[])
with pytest.raises(ValueError, match=r"Argument before should be integer and greater or equal to zero"):
Events.ITERATION_STARTED(before=-1)
with pytest.raises(ValueError, match=r"Argument after should be integer and greater or equal to zero"):
Events.ITERATION_STARTED(after=-1)
with pytest.raises(ValueError, match=r"but will be called with"):
Events.ITERATION_STARTED(event_filter=lambda x: x)
with pytest.warns(UserWarning, match=r"default_event_filter is deprecated and will be removed"):
Events.default_event_filter(None, None)
@pytest.mark.parametrize(
"event",
[
Events.ITERATION_STARTED,
Events.ITERATION_COMPLETED,
Events.EPOCH_STARTED,
Events.EPOCH_COMPLETED,
Events.GET_BATCH_STARTED,
Events.GET_BATCH_COMPLETED,
Events.STARTED,
Events.COMPLETED,
],
)
def test_callable_events(event):
assert isinstance(event.value, str)
def foo(engine, _):
return True
ret = event(event_filter=foo)
assert isinstance(ret, CallableEventWithFilter)
assert ret == event
assert ret.filter == foo
assert event.name in f"{ret}"
ret = event(every=10)
assert isinstance(ret, CallableEventWithFilter)
assert ret == event
assert ret.filter is not None
assert event.name in f"{ret}"
ret = event(once=10)
assert isinstance(ret, CallableEventWithFilter)
assert ret == event
assert ret.filter is not None
assert event.name in f"{ret}"
ret = event(once=[1, 10])
assert isinstance(ret, CallableEventWithFilter)
assert ret == event
assert ret.filter is not None
assert event.name in f"{ret}"
ret = event
assert isinstance(ret, CallableEventWithFilter)
assert ret.filter is None
assert event.name in f"{ret}"
def test_callable_events_every_eq_one():
e = Events.ITERATION_STARTED(every=1)
assert isinstance(e, CallableEventWithFilter)
def test_has_handler_on_callable_events():
engine = Engine(lambda e, b: 1)
def foo(e):
pass
assert not engine.has_event_handler(foo)
engine.add_event_handler(Events.EPOCH_STARTED, foo)
assert engine.has_event_handler(foo)
def bar(e):
pass
engine.add_event_handler(Events.EPOCH_COMPLETED(every=3), bar)
assert engine.has_event_handler(bar)
assert engine.has_event_handler(bar, Events.EPOCH_COMPLETED)
assert engine.has_event_handler(bar, Events.EPOCH_COMPLETED(every=3))
def test_remove_event_handler_on_callable_events():
engine = Engine(lambda e, b: 1)
def foo(e):
pass
assert not engine.has_event_handler(foo)
engine.add_event_handler(Events.EPOCH_STARTED, foo)
assert engine.has_event_handler(foo)
engine.remove_event_handler(foo, Events.EPOCH_STARTED)
assert not engine.has_event_handler(foo)
def bar(e):
pass
engine.add_event_handler(Events.EPOCH_COMPLETED(every=3), bar)
assert engine.has_event_handler(bar)
engine.remove_event_handler(bar, Events.EPOCH_COMPLETED)
assert not engine.has_event_handler(bar)
engine.add_event_handler(Events.EPOCH_COMPLETED(every=3), bar)
assert engine.has_event_handler(bar)
engine.remove_event_handler(bar, Events.EPOCH_COMPLETED(every=3))
assert not engine.has_event_handler(bar)
def _test_every_event_filter_with_engine(device="cpu"):
data = torch.rand(100, 4, device=device)
def _test(event_name, event_attr, every, true_num_calls):
engine = Engine(lambda e, b: b)
counter = [0]
counter_every = [0]
num_calls = [0]
@engine.on(event_name(every=every))
def assert_every(engine):
counter_every[0] += every
assert getattr(engine.state, event_attr) % every == 0
assert counter_every[0] == getattr(engine.state, event_attr)
num_calls[0] += 1
@engine.on(event_name(every=every))
def assert_every_no_engine():
assert getattr(engine.state, event_attr) % every == 0
assert counter_every[0] == getattr(engine.state, event_attr)
@engine.on(event_name)
def assert_(engine):
counter[0] += 1
assert getattr(engine.state, event_attr) == counter[0]
@engine.on(event_name)
def assert_no_engine():
assert getattr(engine.state, event_attr) == counter[0]
engine.run(data, max_epochs=5)
assert num_calls[0] == true_num_calls
_test(Events.ITERATION_STARTED, "iteration", 10, 100 * 5 // 10)
_test(Events.ITERATION_COMPLETED, "iteration", 10, 100 * 5 // 10)
_test(Events.EPOCH_STARTED, "epoch", 2, 5 // 2)
_test(Events.EPOCH_COMPLETED, "epoch", 2, 5 // 2)
def test_every_event_filter_with_engine():
_test_every_event_filter_with_engine()
@pytest.mark.parametrize(
"event_name, event_attr, before, expect_calls",
[
(Events.ITERATION_COMPLETED, "iteration", 0, 0),
(Events.ITERATION_COMPLETED, "iteration", 300, 299),
(Events.ITERATION_COMPLETED, "iteration", 501, 500),
(Events.EPOCH_COMPLETED, "epoch", 0, 0),
(Events.EPOCH_COMPLETED, "epoch", 3, 2),
(Events.EPOCH_COMPLETED, "epoch", 6, 5),
],
)
def test_before_event_filter_with_engine(event_name, event_attr, before, expect_calls):
data = range(100)
engine = Engine(lambda e, b: 1)
num_calls = 0
@engine.on(event_name(before=before))
def _before_event():
nonlocal num_calls
num_calls += 1
assert getattr(engine.state, event_attr) < before
engine.run(data, max_epochs=5)
assert num_calls == expect_calls
@pytest.mark.parametrize(
"event_name, event_attr, after, expect_calls",
[
(Events.ITERATION_STARTED, "iteration", 0, 500),
(Events.ITERATION_COMPLETED, "iteration", 300, 200),
(Events.ITERATION_COMPLETED, "iteration", 500, 0),
(Events.EPOCH_STARTED, "epoch", 0, 5),
(Events.EPOCH_COMPLETED, "epoch", 3, 2),
(Events.EPOCH_COMPLETED, "epoch", 5, 0),
],
)
def test_after_event_filter_with_engine(event_name, event_attr, after, expect_calls):
data = range(100)
engine = Engine(lambda e, b: 1)
num_calls = 0
@engine.on(event_name(after=after))
def _after_event():
nonlocal num_calls
num_calls += 1
assert getattr(engine.state, event_attr) > after
engine.run(data, max_epochs=5)
assert num_calls == expect_calls
@pytest.mark.parametrize(
"event_name, event_attr, before, after, expect_calls",
[(Events.ITERATION_STARTED, "iteration", 300, 100, 199), (Events.EPOCH_COMPLETED, "epoch", 4, 1, 2)],
)
def test_before_and_after_event_filter_with_engine(event_name, event_attr, before, after, expect_calls):
data = range(100)
engine = Engine(lambda e, b: 1)
num_calls = 0
@engine.on(event_name(before=before, after=after))
def _before_and_after_event():
nonlocal num_calls
num_calls += 1
assert getattr(engine.state, event_attr) > after
engine.run(data, max_epochs=5)
assert num_calls == expect_calls
@pytest.mark.parametrize(
"event_name, event_attr, every, before, after, expect_calls",
[(Events.ITERATION_STARTED, "iteration", 5, 25, 8, 4), (Events.EPOCH_COMPLETED, "epoch", 2, 5, 1, 2)],
)
def test_every_before_and_after_event_filter_with_engine(event_name, event_attr, every, before, after, expect_calls):
data = range(100)
engine = Engine(lambda e, b: 1)
num_calls = 0
@engine.on(event_name(every=every, before=before, after=after))
def _every_before_and_after_event():
assert getattr(engine.state, event_attr) > after
assert getattr(engine.state, event_attr) < before
assert ((getattr(engine.state, event_attr) - after - 1) % every) == 0
nonlocal num_calls
num_calls += 1
engine.run(data, max_epochs=5)
assert num_calls == expect_calls
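def _expected_calls_for_filter(every, before, after, last_value):
    # Illustrative helper, an assumption added for readability (not part of
    # ignite's API): mirrors the arithmetic asserted above, i.e. an event value
    # v passes the combined filter when after < v < before and
    # (v - after - 1) % every == 0. For every=5, before=25, after=8 this
    # selects iterations 9, 14, 19 and 24, matching expect_calls=4.
    return sum(
        1
        for v in range(1, last_value + 1)
        if after < v < before and (v - after - 1) % every == 0
    )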
@pytest.mark.parametrize(
"event_name, event_attr, once, expect_calls",
[
(Events.ITERATION_STARTED, "iteration", 2, 1),
(Events.ITERATION_COMPLETED, "iteration", 2, 1),
(Events.EPOCH_STARTED, "epoch", 2, 1),
(Events.EPOCH_COMPLETED, "epoch", 2, 1),
(Events.ITERATION_STARTED, "iteration", [1, 5], 2),
(Events.ITERATION_COMPLETED, "iteration", [1, 5], 2),
(Events.EPOCH_STARTED, "epoch", [1, 5], 2),
(Events.EPOCH_COMPLETED, "epoch", [1, 5], 2),
],
)
def test_once_event_filter(event_name, event_attr, once, expect_calls):
data = list(range(100))
engine = Engine(lambda e, b: b)
num_calls = [0]
counter = [0]
test_once = [once] if isinstance(once, int) else once
@engine.on(event_name(once=once))
def assert_once(engine):
assert getattr(engine.state, event_attr) in test_once
num_calls[0] += 1
@engine.on(event_name)
def assert_(engine):
counter[0] += 1
assert getattr(engine.state, event_attr) == counter[0]
engine.run(data, max_epochs=10)
assert num_calls[0] == expect_calls
def test_custom_event_filter_with_engine():
special_events = [1, 2, 5, 7, 17, 20]
def custom_event_filter(engine, event):
if event in special_events:
return True
return False
def _test(event_name, event_attr, true_num_calls):
engine = Engine(lambda e, b: b)
num_calls = [0]
@engine.on(event_name(event_filter=custom_event_filter))
def assert_on_special_event(engine):
assert getattr(engine.state, event_attr) == special_events.pop(0)
num_calls[0] += 1
d = list(range(50))
engine.run(d, max_epochs=25)
assert num_calls[0] == true_num_calls
_test(Events.ITERATION_STARTED, "iteration", len(special_events))
_test(Events.ITERATION_COMPLETED, "iteration", len(special_events))
_test(Events.EPOCH_STARTED, "epoch", len(special_events))
_test(Events.EPOCH_COMPLETED, "epoch", len(special_events))
def test_callable_event_bad_behaviour():
special_events = [1, 2, 5, 7, 17, 20]
def custom_event_filter(engine, event):
if event in special_events:
return True
return False
# Check bad behaviour
engine = Engine(lambda e, b: b)
counter = [0]
# Creating a filtered event must not modify Events.ITERATION_STARTED itself
Events.ITERATION_STARTED(event_filter=custom_event_filter)
@engine.on(Events.ITERATION_STARTED)
def assert_all_iters(engine):
counter[0] += 1
assert engine.state.iteration == counter[0]
d = list(range(50))
engine.run(d, max_epochs=25)
assert counter[0] == engine.state.iteration
def test_custom_callable_events():
class CustomEvents(Enum):
TEST_EVENT = "test_event"
with pytest.raises(TypeError, match=r"object is not callable"):
CustomEvents.TEST_EVENT(every=10)
class CustomEvents2(EventEnum):
TEST_EVENT = "test_event"
CustomEvents2.TEST_EVENT(every=10)
def test_custom_callable_events_with_engine():
class CustomEvents(EventEnum):
TEST_EVENT = "test_event"
event_to_attr = {CustomEvents.TEST_EVENT: "test_event"}
special_events = [1, 2, 5, 7, 17, 20]
def custom_event_filter(engine, event):
if event in special_events:
return True
return False
def _test(event_name, event_attr, true_num_calls):
def update_fn(engine, batch):
engine.state.test_event = engine.state.iteration
engine.fire_event(CustomEvents.TEST_EVENT)
engine = Engine(update_fn)
engine.register_events(*CustomEvents, event_to_attr=event_to_attr)
num_calls = [0]
@engine.on(event_name(event_filter=custom_event_filter))
def assert_on_special_event(engine):
assert getattr(engine.state, event_attr) == special_events.pop(0)
num_calls[0] += 1
d = list(range(50))
engine.run(d, max_epochs=25)
assert num_calls[0] == true_num_calls
_test(CustomEvents.TEST_EVENT, "test_event", len(special_events))
def _test_every_event_filter_with_engine_with_dataloader(device):
def _test(num_workers):
max_epochs = 3
batch_size = 4
num_iters = 21
data = torch.randint(0, 1000, size=(num_iters * batch_size,))
dataloader = torch.utils.data.DataLoader(
data,
batch_size=batch_size,
num_workers=num_workers,
pin_memory="cuda" in torch.device(device).type,
drop_last=True,
shuffle=True,
)
seen_batches = []
def update_fn(_, batch):
# Move the batch to the target device to exercise the transfer path,
# then record the original batch.
batch_to_device = batch.to(device)
seen_batches.append(batch)
engine = Engine(update_fn)
def foo(_):
pass
engine.add_event_handler(Events.EPOCH_STARTED(every=2), foo)
engine.run(dataloader, max_epochs=max_epochs)
engine = None
import gc
gc.collect()
assert len(gc.garbage) == 0
_test(num_workers=0)
_test(num_workers=1)
def test_every_event_filter_with_engine_with_dataloader():
_test_every_event_filter_with_engine_with_dataloader("cpu")
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_every_event_filter_with_engine(device)
_test_every_event_filter_with_engine_with_dataloader(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_every_event_filter_with_engine(device)
_test_every_event_filter_with_engine_with_dataloader(device)
def test_event_list():
e1 = Events.ITERATION_STARTED(once=1)
e2 = Events.ITERATION_STARTED(every=3)
e3 = Events.COMPLETED
event_list = e1 | e2 | e3
assert isinstance(event_list, EventsList)
assert len(event_list) == 3
assert event_list[0] == e1
assert event_list[1] == e2
assert event_list[2] == e3
def test_list_of_events():
def _test(event_list, true_iterations):
engine = Engine(lambda e, b: b)
iterations = []
num_calls = [0]
@engine.on(event_list)
def execute_some_handler(e):
iterations.append(e.state.iteration)
num_calls[0] += 1
engine.run(range(3), max_epochs=5)
assert iterations == true_iterations
assert num_calls[0] == len(true_iterations)
_test(Events.ITERATION_STARTED(once=1) | Events.ITERATION_STARTED(once=1), [1, 1])
_test(Events.ITERATION_STARTED(once=1) | Events.ITERATION_STARTED(once=10), [1, 10])
_test(Events.ITERATION_STARTED(once=1) | Events.ITERATION_STARTED(every=3), [1, 3, 6, 9, 12, 15])
_test(Events.ITERATION_STARTED(once=8) | Events.ITERATION_STARTED(before=3), [1, 2, 8])
_test(Events.ITERATION_STARTED(once=1) | Events.ITERATION_STARTED(after=12), [1, 13, 14, 15])
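def _expected_union_iterations(total_iters):
    # Illustrative helper, an assumption added for readability (not an ignite
    # API): the union ITERATION_STARTED(once=1) | ITERATION_STARTED(every=3)
    # above fires at iteration 1 plus every third iteration, i.e.
    # [1, 3, 6, 9, 12, 15] over 5 epochs of 3 iterations each.
    return sorted({1} | {i for i in range(1, total_iters + 1) if i % 3 == 0})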
|
import os
from importlib.util import find_spec
from typing import Optional, Union
from unittest import mock
from unittest.mock import MagicMock, patch
import pytest
import torch
from packaging.version import Version
from pytest import approx
from torch.nn.functional import mse_loss
from torch.optim import SGD
import ignite.distributed as idist
from ignite.engine import (
_check_arg,
create_supervised_evaluator,
create_supervised_trainer,
Engine,
Events,
supervised_evaluation_step,
supervised_evaluation_step_amp,
supervised_training_step_tpu,
)
from ignite.metrics import MeanSquaredError
class DummyModel(torch.nn.Module):
def __init__(self, output_as_list=False):
super(DummyModel, self).__init__()
self.output_as_list = output_as_list
self.fc = torch.nn.Linear(1, 1, bias=False)
def forward(self, x):
if self.output_as_list:
return self.fc(x), self.fc(x)
return self.fc(x)
def _default_create_supervised_trainer(
gradient_accumulation_steps: int = 1,
model_device: Optional[str] = None,
trainer_device: Optional[str] = None,
trace: bool = False,
amp_mode: str = None,
scaler: Union[bool, "torch.cuda.amp.GradScaler"] = False,
with_model_transform: bool = False,
):
if with_model_transform:
def get_first_element(output):
return output[0]
model = DummyModel(output_as_list=True)
model_transform = get_first_element
else:
model = DummyModel()
model_transform = None
if model_device:
model.to(model_device)
model.fc.weight.data.zero_()
optimizer = SGD(model.parameters(), 0.1)
if trace:
example_input = torch.randn(1)
model = torch.jit.trace(model, example_input)
if amp_mode == "apex" and model_device == trainer_device == "cuda":
from apex import amp
model, optimizer = amp.initialize(model, optimizer, opt_level="O2")
trainer = create_supervised_trainer(
model,
optimizer,
mse_loss,
device=trainer_device,
output_transform=lambda x, y, y_pred, loss: (y_pred, loss.item()),
amp_mode=amp_mode,
scaler=scaler,
gradient_accumulation_steps=gradient_accumulation_steps,
model_transform=model_transform if model_transform is not None else lambda x: x,
)
assert model.fc.weight.data[0, 0].item() == approx(0.0)
return trainer, model
def _test_create_supervised_trainer(
gradient_accumulation_steps: int = 1,
model_device: Optional[str] = None,
trainer_device: Optional[str] = None,
trace: bool = False,
amp_mode: str = None,
scaler: Union[bool, "torch.cuda.amp.GradScaler"] = False,
with_model_transform: bool = False,
):
trainer, model = _default_create_supervised_trainer(
gradient_accumulation_steps=gradient_accumulation_steps,
model_device=model_device,
trainer_device=trainer_device,
trace=trace,
amp_mode=amp_mode,
scaler=scaler,
with_model_transform=with_model_transform,
)
x = torch.tensor([[0.01], [0.02], [0.03], [0.04], [0.05]])
y = torch.tensor([[0.015], [0.025], [0.035], [0.045], [0.055]])
data = [(_x, _y) for _x, _y in zip(x, y)]
theta = [0.0]
accumulation = [0.0]
loss = [0.0]
@trainer.on(Events.ITERATION_COMPLETED)
def _():
assert model.fc.weight.grad != 0
_x, _y = trainer.state.batch
_x, _y = _x.to(model_device), _y.to(model_device)
accumulation[0] += 0.2 * _x.item() * (theta[0] * _x.item() - _y.item())
# value of loss should not be accumulated
if with_model_transform:
loss[0] = mse_loss(model(_x)[0], _y).item()
else:
loss[0] = mse_loss(model(_x), _y).item()
@trainer.on(Events.ITERATION_COMPLETED(every=gradient_accumulation_steps))
def _():
theta[0] -= accumulation[0] / gradient_accumulation_steps
assert pytest.approx(model.fc.weight.data[0, 0].item(), abs=1.0e-5) == theta[0]
assert pytest.approx(trainer.state.output[-1], abs=1e-5) == loss[0]
accumulation[0] = loss[0] = 0.0
if model_device == trainer_device or ((model_device == "cpu") ^ (trainer_device == "cpu")):
state = trainer.run(data)
if amp_mode == "amp":
assert state.output[0].dtype is torch.half
if scaler and isinstance(scaler, bool):
assert hasattr(state, "scaler")
else:
assert not hasattr(state, "scaler")
else:
if Version(torch.__version__) >= Version("1.7.0"):
# This is broken in 1.6.0 but will probably be fixed in 1.7.0
with pytest.raises(RuntimeError, match=r"Expected all tensors to be on the same device"):
trainer.run(data)
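def _manual_sgd_step(theta, x, y, lr=0.1):
    # Illustrative sketch of the closed-form update checked above (our reading
    # of the test, not an ignite API): for the loss (theta * x - y) ** 2 the
    # gradient w.r.t. theta is 2 * x * (theta * x - y), so one SGD step with
    # lr=0.1 subtracts 0.2 * x * (theta * x - y), the term accumulated in the
    # ITERATION_COMPLETED handler.
    return theta - lr * 2 * x * (theta * x - y)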
@pytest.mark.skipif(Version(torch.__version__) < Version("1.6.0"), reason="Skip if < 1.6.0")
def test_create_supervised_training_scalar_assignment():
with mock.patch("ignite.engine._check_arg") as check_arg_mock:
check_arg_mock.return_value = None, torch.cuda.amp.GradScaler(enabled=False)
trainer, _ = _default_create_supervised_trainer(model_device="cpu", trainer_device="cpu", scaler=True)
assert hasattr(trainer.state, "scaler")
assert isinstance(trainer.state.scaler, torch.cuda.amp.GradScaler)
def _test_create_mocked_supervised_trainer(
model_device: Optional[str] = None,
trainer_device: Optional[str] = None,
trace: bool = False,
amp_mode: str = None,
scaler: Union[bool, "torch.cuda.amp.GradScaler"] = False,
):
with mock.patch("ignite.engine.supervised_training_step_amp") as training_step_amp_mock:
with mock.patch("ignite.engine.supervised_training_step_apex") as training_step_apex_mock:
with mock.patch("ignite.engine.supervised_training_step_tpu") as training_step_tpu_mock:
with mock.patch("ignite.engine.supervised_training_step") as training_step_mock:
trainer, _ = _default_create_supervised_trainer(
model_device=model_device,
trainer_device=trainer_device,
trace=trace,
amp_mode=amp_mode,
scaler=scaler,
)
x = torch.tensor([[0.1], [0.2]])
y = torch.tensor([[0.3], [0.5]])
data = [(x, y)]
on_tpu = "xla" in trainer_device if trainer_device is not None else False
mode, _ = _check_arg(on_tpu, amp_mode, scaler)
if model_device == trainer_device or ((model_device == "cpu") ^ (trainer_device == "cpu")):
trainer.run(data)
if mode == "amp":
assert training_step_amp_mock.called
elif mode == "apex":
assert training_step_apex_mock.called
elif mode == "tpu":
assert training_step_tpu_mock.called
else:
assert training_step_mock.called
def _test_create_supervised_trainer_wrong_accumulation(
model_device=None, trainer_device=None, amp_mode=None, trace=False
):
with pytest.raises(ValueError, match="Gradient_accumulation_steps must be strictly positive."):
_default_create_supervised_trainer(
gradient_accumulation_steps=0,
model_device=model_device,
trainer_device=trainer_device,
amp_mode=amp_mode,
trace=trace,
)
def _default_create_supervised_evaluator(
model_device: Optional[str] = None,
evaluator_device: Optional[str] = None,
trace: bool = False,
amp_mode: str = None,
with_model_transform: bool = False,
):
if with_model_transform:
def get_first_element(output):
return output[0]
model = DummyModel(output_as_list=True)
model_transform = get_first_element
else:
model = DummyModel()
model_transform = None
if model_device:
model.to(model_device)
model.fc.weight.data.zero_()
if trace:
example_input = torch.randn(1, 1)
model = torch.jit.trace(model, example_input)
evaluator = create_supervised_evaluator(
model,
device=evaluator_device,
amp_mode=amp_mode,
model_transform=model_transform if model_transform is not None else lambda x: x,
)
assert model.fc.weight.data[0, 0].item() == approx(0.0)
return model, evaluator
def _test_create_supervised_evaluator(
model_device: Optional[str] = None,
evaluator_device: Optional[str] = None,
trace: bool = False,
amp_mode: str = None,
with_model_transform: bool = False,
):
model, evaluator = _default_create_supervised_evaluator(
model_device=model_device,
evaluator_device=evaluator_device,
trace=trace,
amp_mode=amp_mode,
with_model_transform=with_model_transform,
)
x = torch.tensor([[1.0], [2.0]])
y = torch.tensor([[3.0], [5.0]])
data = [(x, y)]
if model_device == evaluator_device or ((model_device == "cpu") ^ (evaluator_device == "cpu")):
state = evaluator.run(data)
y_pred, y = state.output
assert y_pred[0, 0].item() == approx(0.0)
assert y_pred[1, 0].item() == approx(0.0)
assert y[0, 0].item() == approx(3.0)
assert y[1, 0].item() == approx(5.0)
assert model.fc.weight.data[0, 0].item() == approx(0.0)
else:
if Version(torch.__version__) >= Version("1.7.0"):
# This is broken in 1.6.0 but will probably be fixed in 1.7.0
with pytest.raises(RuntimeError, match=r"Expected all tensors to be on the same device"):
evaluator.run(data)
def _test_mocked_supervised_evaluator(
model_device: Optional[str] = None,
evaluator_device: Optional[str] = None,
trace: bool = False,
amp_mode: str = None,
):
with mock.patch("ignite.engine.supervised_evaluation_step") as evaluation_step:
with mock.patch("ignite.engine.supervised_evaluation_step_amp") as evaluation_step_amp:
_, evaluator = _default_create_supervised_evaluator(
model_device=model_device, evaluator_device=evaluator_device, trace=trace, amp_mode=amp_mode
)
x = torch.tensor([[1.0], [2.0]])
y = torch.tensor([[3.0], [5.0]])
data = [(x, y)]
if model_device == evaluator_device or ((model_device == "cpu") ^ (evaluator_device == "cpu")):
evaluator.run(data)
if amp_mode == "amp":
assert evaluation_step_amp.called
assert not evaluation_step.called
else:
assert evaluation_step.called
assert not evaluation_step_amp.called
def _test_create_evaluation_step_amp(
autocast_mock,
model_device: Optional[str] = None,
evaluator_device: Optional[str] = None,
trace: bool = False,
amp_mode: str = None,
):
output_transform_mock = MagicMock()
model = DummyModel()
if model_device:
model.to(model_device)
model.fc.weight.data.zero_()
if trace:
example_input = torch.randn(1, 1)
model = torch.jit.trace(model, example_input)
device_type = evaluator_device.type if isinstance(evaluator_device, torch.device) else evaluator_device
on_tpu = "xla" in device_type if device_type is not None else False
mode, _ = _check_arg(on_tpu, amp_mode, None)
evaluate_step = supervised_evaluation_step_amp(model, evaluator_device, output_transform=output_transform_mock)
x = torch.tensor([[1.0], [2.0]])
y = torch.tensor([[3.0], [5.0]])
data = [(x, y)]
evaluator = Engine(evaluate_step)
evaluator.run(data)
assert autocast_mock.called
assert output_transform_mock.called
def _test_create_evaluation_step(
mock_torch_cuda_amp_module,
model_device: Optional[str] = None,
evaluator_device: Optional[str] = None,
trace: bool = False,
amp_mode: str = None,
):
output_transform_mock = MagicMock()
model = DummyModel()
if model_device:
model.to(model_device)
model.fc.weight.data.zero_()
if trace:
example_input = torch.randn(1, 1)
model = torch.jit.trace(model, example_input)
device_type = evaluator_device.type if isinstance(evaluator_device, torch.device) else evaluator_device
on_tpu = "xla" in device_type if device_type is not None else False
mode, _ = _check_arg(on_tpu, amp_mode, None)
evaluate_step = supervised_evaluation_step(model, evaluator_device, output_transform=output_transform_mock)
x = torch.tensor([[1.0], [2.0]])
y = torch.tensor([[3.0], [5.0]])
data = [(x, y)]
evaluator = Engine(evaluate_step)
evaluator.run(data)
assert not mock_torch_cuda_amp_module.called
assert output_transform_mock.called
@pytest.mark.parametrize("trainer_device", [None, "cpu"])
@pytest.mark.parametrize("trace", [False, True])
def test_create_supervised_trainer(trainer_device, trace):
_test_create_supervised_trainer_wrong_accumulation(trainer_device=trainer_device, trace=trace)
_test_create_supervised_trainer(gradient_accumulation_steps=1, trainer_device=trainer_device, trace=trace)
_test_create_supervised_trainer(gradient_accumulation_steps=3, trainer_device=trainer_device, trace=trace)
_test_create_supervised_trainer(with_model_transform=True, trainer_device=trainer_device, trace=trace)
_test_create_mocked_supervised_trainer(trainer_device=trainer_device, trace=trace)
@pytest.mark.skipif(find_spec("apex"), reason="Skip if APEX")
def test_create_supervised_trainer_apex_error():
with pytest.raises(
ModuleNotFoundError, match="Please install apex from https://github.com/nvidia/apex to use amp_mode='apex'."
):
_test_create_supervised_trainer_wrong_accumulation(trainer_device="cpu", amp_mode="apex")
with pytest.raises(
ModuleNotFoundError, match="Please install apex from https://github.com/nvidia/apex to use amp_mode='apex'."
):
_test_create_supervised_trainer(amp_mode="apex")
@pytest.fixture
def mock_torch_cuda_amp_module():
with patch.dict(
"sys.modules",
{"torch.cuda.amp": None, "torch.cuda.amp.grad_scaler": None, "torch.cuda.amp.autocast_mode": None},
):
yield torch
def test_create_supervised_trainer_amp_error(mock_torch_cuda_amp_module):
with pytest.raises(ImportError, match="Please install torch>=1.6.0 to use amp_mode='amp'."):
_test_create_supervised_trainer_wrong_accumulation(trainer_device="cpu", amp_mode="amp")
with pytest.raises(ImportError, match="Please install torch>=1.6.0 to use amp_mode='amp'."):
_test_create_supervised_trainer(amp_mode="amp")
with pytest.raises(ImportError, match="Please install torch>=1.6.0 to use scaler argument."):
_test_create_supervised_trainer(amp_mode="amp", scaler=True)
@pytest.mark.skipif(Version(torch.__version__) < Version("1.5.0"), reason="Skip if < 1.5.0")
def test_create_supervised_trainer_scaler_not_amp():
scaler = torch.cuda.amp.GradScaler(enabled=torch.cuda.is_available())
with pytest.raises(ValueError, match=f"scaler argument is {scaler}, but amp_mode is None."):
_test_create_supervised_trainer(amp_mode=None, scaler=scaler)
with pytest.raises(ValueError, match="scaler argument is True, but amp_mode is None."):
_test_create_supervised_trainer(amp_mode=None, scaler=True)
with pytest.raises(ValueError, match="scaler argument is True, but amp_mode is apex."):
_test_create_supervised_trainer(amp_mode="apex", scaler=True)
with pytest.raises(ValueError, match=f"scaler argument is {scaler}, but amp_mode is apex."):
_test_create_supervised_trainer(amp_mode="apex", scaler=scaler)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_trainer_on_cuda():
model_device = trainer_device = "cuda"
_test_create_supervised_trainer_wrong_accumulation(model_device=model_device, trainer_device=trainer_device)
_test_create_supervised_trainer(
gradient_accumulation_steps=1, model_device=model_device, trainer_device=trainer_device
)
_test_create_supervised_trainer(
gradient_accumulation_steps=3, model_device=model_device, trainer_device=trainer_device
)
_test_create_mocked_supervised_trainer(model_device=model_device, trainer_device=trainer_device)
@pytest.mark.skipif(Version(torch.__version__) < Version("1.6.0"), reason="Skip if < 1.6.0")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_trainer_on_cuda_amp():
model_device = trainer_device = "cuda"
_test_create_supervised_trainer_wrong_accumulation(
model_device=model_device, trainer_device=trainer_device, amp_mode="amp"
)
_test_create_supervised_trainer(
gradient_accumulation_steps=1, model_device=model_device, trainer_device=trainer_device, amp_mode="amp"
)
_test_create_supervised_trainer(
gradient_accumulation_steps=3, model_device=model_device, trainer_device=trainer_device, amp_mode="amp"
)
_test_create_mocked_supervised_trainer(model_device=model_device, trainer_device=trainer_device, amp_mode="amp")
@pytest.mark.skipif(Version(torch.__version__) < Version("1.6.0"), reason="Skip if < 1.6.0")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_trainer_on_cuda_amp_scaler():
model_device = trainer_device = "cuda"
_test_create_supervised_trainer_wrong_accumulation(
model_device=model_device, trainer_device=trainer_device, amp_mode="amp"
)
_test_create_supervised_trainer(
gradient_accumulation_steps=1,
model_device=model_device,
trainer_device=trainer_device,
amp_mode="amp",
scaler=True,
)
_test_create_supervised_trainer(
gradient_accumulation_steps=3,
model_device=model_device,
trainer_device=trainer_device,
amp_mode="amp",
scaler=True,
)
_test_create_mocked_supervised_trainer(
model_device=model_device, trainer_device=trainer_device, amp_mode="amp", scaler=True
)
scaler = torch.cuda.amp.GradScaler(enabled=torch.cuda.is_available())
_test_create_supervised_trainer(
gradient_accumulation_steps=1,
model_device=model_device,
trainer_device=trainer_device,
amp_mode="amp",
scaler=scaler,
)
_test_create_supervised_trainer(
gradient_accumulation_steps=3,
model_device=model_device,
trainer_device=trainer_device,
amp_mode="amp",
scaler=scaler,
)
_test_create_mocked_supervised_trainer(
model_device=model_device, trainer_device=trainer_device, amp_mode="amp", scaler=scaler
)
# @pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
# @pytest.mark.skipif(not find_spec("apex"), reason="Skip if no APEX")
@pytest.mark.skip(reason="Temporarily disabled, as it fails because of an issue from apex side")
def test_create_supervised_trainer_on_cuda_apex():
model_device = trainer_device = "cuda"
_test_create_supervised_trainer_wrong_accumulation(
model_device=model_device, trainer_device=trainer_device, amp_mode="apex"
)
_test_create_supervised_trainer(
gradient_accumulation_steps=1, model_device=model_device, trainer_device=trainer_device, amp_mode="apex"
)
_test_create_supervised_trainer(
gradient_accumulation_steps=3, model_device=model_device, trainer_device=trainer_device, amp_mode="apex"
)
_test_create_mocked_supervised_trainer(model_device=model_device, trainer_device=trainer_device, amp_mode="apex")
@pytest.mark.skipif(idist.has_xla_support, reason="Skip if has PyTorch XLA package")
def test_supervised_training_step_tpu_no_xla():
with pytest.raises(ModuleNotFoundError, match="torch_xla cannot be imported, please install PyTorch XLA."):
supervised_training_step_tpu(model=None, optimizer=None, loss_fn=None)
@pytest.mark.skipif(idist.has_xla_support, reason="Skip if has PyTorch XLA package")
def test_create_supervised_trainer_on_tpu_no_xla():
model_device = "cpu"
trainer_device = "xla"
with pytest.raises(RuntimeError, match=r"In order to run on TPU, please install PyTorch XLA"):
_test_create_supervised_trainer(model_device=model_device, trainer_device=trainer_device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_create_supervised_trainer_on_tpu():
model_device = trainer_device = "xla"
_test_create_supervised_trainer_wrong_accumulation(model_device=model_device, trainer_device=trainer_device)
_test_create_supervised_trainer(
gradient_accumulation_steps=1, model_device=model_device, trainer_device=trainer_device
)
_test_create_supervised_trainer(
gradient_accumulation_steps=3, model_device=model_device, trainer_device=trainer_device
)
_test_create_mocked_supervised_trainer(model_device=model_device, trainer_device=trainer_device)
@pytest.mark.tpu
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_create_supervised_trainer_on_tpu_amp():
model_device = trainer_device = "xla"
with pytest.raises(ValueError, match="amp_mode cannot be used with xla device."):
_test_create_supervised_trainer(model_device=model_device, trainer_device=trainer_device, amp_mode="amp")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_trainer_on_cuda_with_model_on_cpu():
_test_create_supervised_trainer_wrong_accumulation(trainer_device="cuda")
_test_create_supervised_trainer(gradient_accumulation_steps=1, trainer_device="cuda")
_test_create_supervised_trainer(gradient_accumulation_steps=3, trainer_device="cuda")
_test_create_mocked_supervised_trainer(trainer_device="cuda")
def test_create_supervised_evaluator():
_test_create_supervised_evaluator()
_test_mocked_supervised_evaluator()
# older versions didn't have the autocast method so we skip the test for older builds
if Version(torch.__version__) >= Version("1.6.0"):
with mock.patch("torch.cuda.amp.autocast") as mock_torch_cuda_amp_module:
_test_create_evaluation_step_amp(mock_torch_cuda_amp_module)
def test_create_supervised_evaluator_on_cpu():
_test_create_supervised_evaluator(evaluator_device="cpu")
_test_mocked_supervised_evaluator(evaluator_device="cpu")
# older versions didn't have the autocast method so we skip the test for older builds
if Version(torch.__version__) >= Version("1.6.0"):
with mock.patch("torch.cuda.amp.autocast") as mock_torch_cuda_amp_module:
_test_create_evaluation_step(mock_torch_cuda_amp_module, evaluator_device="cpu")
_test_create_evaluation_step_amp(mock_torch_cuda_amp_module, evaluator_device="cpu")
def test_create_supervised_evaluator_traced_on_cpu():
_test_create_supervised_evaluator(evaluator_device="cpu", trace=True)
_test_mocked_supervised_evaluator(evaluator_device="cpu", trace=True)
# older versions didn't have the autocast method so we skip the test for older builds
if Version(torch.__version__) >= Version("1.6.0"):
with mock.patch("torch.cuda.amp.autocast") as mock_torch_cuda_amp_module:
_test_create_evaluation_step(mock_torch_cuda_amp_module, evaluator_device="cpu", trace=True)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_evaluator_on_cuda():
model_device = evaluator_device = "cuda"
_test_create_supervised_evaluator(model_device=model_device, evaluator_device=evaluator_device)
_test_mocked_supervised_evaluator(model_device=model_device, evaluator_device=evaluator_device)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_evaluator_on_cuda_with_model_on_cpu():
_test_create_supervised_evaluator(evaluator_device="cuda")
_test_mocked_supervised_evaluator(evaluator_device="cuda")
@pytest.mark.skipif(Version(torch.__version__) < Version("1.6.0"), reason="Skip if < 1.6.0")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_evaluator_on_cuda_amp():
model_device = evaluator_device = "cuda"
_test_create_supervised_evaluator(model_device=model_device, evaluator_device=evaluator_device, amp_mode="amp")
_test_mocked_supervised_evaluator(model_device=model_device, evaluator_device=evaluator_device, amp_mode="amp")
def test_create_supervised_evaluator_amp_error(mock_torch_cuda_amp_module):
with pytest.raises(ImportError, match="Please install torch>=1.6.0 to use amp_mode='amp'."):
_test_create_supervised_evaluator(amp_mode="amp")
def test_create_supervised_evaluator_with_metrics():
model = DummyModel()
model.fc.weight.data.zero_()
evaluator = create_supervised_evaluator(model, metrics={"mse": MeanSquaredError()})
x = torch.tensor([[1.0], [2.0]])
y = torch.tensor([[3.0], [4.0]])
data = [(x, y)]
state = evaluator.run(data)
assert state.metrics["mse"] == 12.5
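def _expected_zero_model_mse(targets):
    # Illustrative check, an assumption added for readability: with the model's
    # single weight zeroed every prediction is 0, so the mean squared error
    # over the targets [3.0, 4.0] is (3 ** 2 + 4 ** 2) / 2 == 12.5, the value
    # asserted in test_create_supervised_evaluator_with_metrics above.
    return sum(t ** 2 for t in targets) / len(targets)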
|
import functools
import gc
from unittest.mock import call, create_autospec, MagicMock
import pytest
from pytest import raises
from ignite.engine import Engine, Events, State
from ignite.engine.events import EventsList
class DummyEngine(Engine):
def __init__(self):
super(DummyEngine, self).__init__(lambda e, b: 1)
def run(self, num_times):
self.state = State()
for _ in range(num_times):
self.fire_event(Events.STARTED)
self.fire_event(Events.COMPLETED)
return self.state
def test_add_event_handler_raises_with_invalid_event():
engine = Engine(lambda e, b: 1)
with pytest.raises(ValueError, match=r"is not a valid event for this Engine"):
engine.add_event_handler("incorrect", lambda engine: None)
def test_add_event_handler_raises_with_invalid_signature():
engine = Engine(MagicMock())
def handler(engine):
pass
engine.add_event_handler(Events.STARTED, handler)
engine.add_event_handler(Events.STARTED, handler, 1)
def handler_with_args(engine, a):
pass
engine.add_event_handler(Events.STARTED, handler_with_args, 1)
with pytest.raises(ValueError):
engine.add_event_handler(Events.STARTED, handler_with_args)
def handler_with_kwargs(engine, b=42):
pass
engine.add_event_handler(Events.STARTED, handler_with_kwargs, b=2)
with pytest.raises(ValueError):
engine.add_event_handler(Events.STARTED, handler_with_kwargs, c=3)
engine.add_event_handler(Events.STARTED, handler_with_kwargs, 1, b=2)
def handler_with_args_and_kwargs(engine, a, b=42):
pass
engine.add_event_handler(Events.STARTED, handler_with_args_and_kwargs, 1, b=2)
engine.add_event_handler(Events.STARTED, handler_with_args_and_kwargs, 1, 2, b=2)
with pytest.raises(ValueError):
engine.add_event_handler(Events.STARTED, handler_with_args_and_kwargs, 1, b=2, c=3)
def test_add_event_handler():
engine = DummyEngine()
class Counter(object):
def __init__(self, count=0):
self.count = count
started_counter = Counter()
def handle_iteration_started(engine, counter):
counter.count += 1
engine.add_event_handler(Events.STARTED, handle_iteration_started, started_counter)
completed_counter = Counter()
def handle_iteration_completed(engine, counter):
counter.count += 1
engine.add_event_handler(Events.COMPLETED, handle_iteration_completed, completed_counter)
engine.run(15)
assert started_counter.count == 15
assert completed_counter.count == 15
def test_add_event_handler_without_engine():
engine = DummyEngine()
class Counter(object):
def __init__(self, count=0):
self.count = count
started_counter = Counter()
def handle_iteration_started():
started_counter.count += 1
engine.add_event_handler(Events.STARTED, handle_iteration_started)
completed_counter = Counter()
def handle_iteration_completed(counter):
counter.count += 1
engine.add_event_handler(Events.COMPLETED, handle_iteration_completed, completed_counter)
engine.run(15)
assert started_counter.count == 15
assert completed_counter.count == 15
def test_adding_multiple_event_handlers():
mock_fn_1 = create_autospec(spec=lambda x: None)
mock_fn_2 = create_autospec(spec=lambda x: None)
engine = DummyEngine()
handlers = [mock_fn_1, mock_fn_2]
for handler in handlers:
engine.add_event_handler(Events.STARTED, handler)
engine.run(1)
for handler in handlers:
handler.assert_called_once_with(engine)
@pytest.mark.parametrize(
"event1, event2",
[
(Events.STARTED, Events.COMPLETED),
(Events.EPOCH_STARTED, Events.EPOCH_COMPLETED),
(Events.ITERATION_STARTED, Events.ITERATION_COMPLETED),
(Events.ITERATION_STARTED(every=2), Events.ITERATION_COMPLETED(every=2)),
],
)
def test_event_removable_handle(event1, event2):
# Removable handle removes event from engine.
engine = Engine(lambda e, b: None)
handler = create_autospec(spec=lambda x: None)
assert not hasattr(handler, "_parent")
removable_handle = engine.add_event_handler(event1, handler)
assert engine.has_event_handler(handler, event1)
engine.run([1, 2])
handler.assert_any_call(engine)
num_calls = handler.call_count
removable_handle.remove()
assert not engine.has_event_handler(handler, event1)
# A second engine run does not fire the handler again.
engine.run([1, 2])
# Assert that the handler wasn't called
assert handler.call_count == num_calls
# Removable handle can be used as a context manager
handler = create_autospec(spec=lambda x: None)
with engine.add_event_handler(event1, handler):
assert engine.has_event_handler(handler, event1)
engine.run([1, 2])
assert not engine.has_event_handler(handler, event1)
handler.assert_any_call(engine)
num_calls = handler.call_count
engine.run([1, 2])
# Assert that the handler wasn't called
assert handler.call_count == num_calls
# Removable handle only affects a single event registration
handler = MagicMock(spec_set=True)
with engine.add_event_handler(event1, handler):
with engine.add_event_handler(event2, handler):
assert engine.has_event_handler(handler, event1)
assert engine.has_event_handler(handler, event2)
assert engine.has_event_handler(handler, event1)
assert not engine.has_event_handler(handler, event2)
assert not engine.has_event_handler(handler, event1)
assert not engine.has_event_handler(handler, event2)
# Removable handle can be re-entered and re-exited
handler = MagicMock(spec_set=True)
remove = engine.add_event_handler(event1, handler)
with remove:
with remove:
assert engine.has_event_handler(handler, event1)
assert not engine.has_event_handler(handler, event1)
assert not engine.has_event_handler(handler, event1)
# Removable handle is a weakref, does not keep engine or handler alive
def _add_in_closure():
_engine = Engine(lambda e, b: None)
def _handler(_):
pass
_handle = _engine.add_event_handler(event1, _handler)
assert _handle.engine() is _engine
if event1.filter is None:
assert _handle.handler() is _handler
else:
assert _handle.handler()._parent() is _handler
return _handle
removable_handle = _add_in_closure()
# gc.collect resolves reference cycles in engine/state;
# required to ensure object deletion in Python 2
gc.collect()
assert removable_handle.engine() is None
assert removable_handle.handler() is None
def test_events_list_removable_handle():
# Removable handle removes event from engine.
engine = DummyEngine()
handler = create_autospec(spec=lambda x: None)
assert not hasattr(handler, "_parent")
events_list = Events.STARTED | Events.COMPLETED
removable_handle = engine.add_event_handler(events_list, handler)
for e in events_list:
assert engine.has_event_handler(handler, e)
engine.run(1)
calls = [call(engine), call(engine)]
handler.assert_has_calls(calls)
assert handler.call_count == 2
removable_handle.remove()
for e in events_list:
assert not engine.has_event_handler(handler, e)
# A second engine run does not fire the handler again.
engine.run(1)
handler.assert_has_calls(calls)
assert handler.call_count == 2
# Removable handle can be used as a context manager
handler = create_autospec(spec=lambda x: None)
with engine.add_event_handler(events_list, handler):
for e in events_list:
assert engine.has_event_handler(handler, e)
engine.run(1)
for e in events_list:
assert not engine.has_event_handler(handler, e)
handler.assert_has_calls(calls)
assert handler.call_count == 2
engine.run(1)
handler.assert_has_calls(calls)
assert handler.call_count == 2
# Removable handle only affects a single event registration
handler = create_autospec(spec=lambda x: None)
other_events_list = Events.EPOCH_STARTED | Events.EPOCH_COMPLETED
with engine.add_event_handler(events_list, handler):
with engine.add_event_handler(other_events_list, handler):
for e in events_list:
assert engine.has_event_handler(handler, e)
for e in other_events_list:
assert engine.has_event_handler(handler, e)
for e in events_list:
assert engine.has_event_handler(handler, e)
for e in other_events_list:
assert not engine.has_event_handler(handler, e)
for e in events_list:
assert not engine.has_event_handler(handler, e)
for e in other_events_list:
assert not engine.has_event_handler(handler, e)
# Removable handle can be re-entered and re-exited
handler = create_autospec(spec=lambda x: None)
remove = engine.add_event_handler(events_list, handler)
with remove:
with remove:
for e in events_list:
assert engine.has_event_handler(handler, e)
for e in events_list:
assert not engine.has_event_handler(handler, e)
for e in events_list:
assert not engine.has_event_handler(handler, e)
# Removable handle is a weakref, does not keep engine or handler alive
def _add_in_closure():
_engine = DummyEngine()
def _handler(_):
pass
_handle = _engine.add_event_handler(events_list, _handler)
assert _handle.engine() is _engine
assert _handle.handler() is _handler
return _handle
removable_handle = _add_in_closure()
# gc.collect resolves reference cycles in engine/state;
# required to ensure object deletion in Python 2
gc.collect()
assert removable_handle.engine() is None
assert removable_handle.handler() is None
def test_eventslist__append_raises():
ev_list = EventsList()
with pytest.raises(TypeError, match=r"Argument event should be Events or CallableEventWithFilter"):
ev_list._append("abc")
def test_has_event_handler():
engine = DummyEngine()
handlers = [MagicMock(spec_set=True), MagicMock(spec_set=True)]
m = MagicMock(spec_set=True)
for handler in handlers:
engine.add_event_handler(Events.STARTED, handler)
engine.add_event_handler(Events.COMPLETED, m)
for handler in handlers:
assert engine.has_event_handler(handler, Events.STARTED)
assert engine.has_event_handler(handler)
assert not engine.has_event_handler(handler, Events.COMPLETED)
assert not engine.has_event_handler(handler, Events.EPOCH_STARTED)
assert not engine.has_event_handler(m, Events.STARTED)
assert engine.has_event_handler(m, Events.COMPLETED)
assert engine.has_event_handler(m)
assert not engine.has_event_handler(m, Events.EPOCH_STARTED)
def test_remove_event_handler():
engine = DummyEngine()
with pytest.raises(ValueError, match=r"Input event name"):
engine.remove_event_handler(lambda x: x, "an event")
def on_started(engine):
return 0
engine.add_event_handler(Events.STARTED, on_started)
with pytest.raises(ValueError, match=r"Input handler"):
engine.remove_event_handler(lambda x: x, Events.STARTED)
h1 = MagicMock(spec_set=True)
h2 = MagicMock(spec_set=True)
handlers = [h1, h2]
m = MagicMock(spec_set=True)
for handler in handlers:
engine.add_event_handler(Events.EPOCH_STARTED, handler)
engine.add_event_handler(Events.EPOCH_COMPLETED, m)
assert len(engine._event_handlers[Events.EPOCH_STARTED]) == 2
engine.remove_event_handler(h1, Events.EPOCH_STARTED)
assert len(engine._event_handlers[Events.EPOCH_STARTED]) == 1
assert engine._event_handlers[Events.EPOCH_STARTED][0][0] == h2
assert len(engine._event_handlers[Events.EPOCH_COMPLETED]) == 1
engine.remove_event_handler(m, Events.EPOCH_COMPLETED)
assert len(engine._event_handlers[Events.EPOCH_COMPLETED]) == 0
def test_args_and_kwargs_are_passed_to_event():
engine = DummyEngine()
kwargs = {"a": "a", "b": "b"}
args = (1, 2, 3)
handlers = []
for event in [Events.STARTED, Events.COMPLETED]:
handler = create_autospec(spec=lambda e, x1, x2, x3, a, b: None)
engine.add_event_handler(event, handler, *args, **kwargs)
handlers.append(handler)
engine.run(1)
called_handlers = [handle for handle in handlers if handle.called]
assert len(called_handlers) == 2
for handler in called_handlers:
handler_args, handler_kwargs = handler.call_args
assert handler_args[0] == engine
assert handler_args[1::] == args
assert handler_kwargs == kwargs
def test_on_decorator_raises_with_invalid_event():
engine = DummyEngine()
with pytest.raises(ValueError):
@engine.on("incorrect")
def f(engine):
pass
def test_on_decorator():
engine = DummyEngine()
class Counter(object):
def __init__(self, count=0):
self.count = count
started_counter = Counter()
@engine.on(Events.STARTED, started_counter)
def handle_iteration_started(engine, started_counter):
started_counter.count += 1
completed_counter = Counter()
@engine.on(Events.COMPLETED, completed_counter)
def handle_iteration_completed(engine, completed_counter):
completed_counter.count += 1
engine.run(15)
assert started_counter.count == 15
assert completed_counter.count == 15
def test_returns_state():
engine = Engine(MagicMock(return_value=1))
state = engine.run([0])
assert isinstance(state, State)
def test_state_attributes():
dataloader = [1, 2, 3]
engine = Engine(MagicMock(return_value=1))
state = engine.run(dataloader, max_epochs=3)
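# 3 epochs over the 3-element dataloader -> 9 iterations; the last batch seen is 3 and the update output is 1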
assert state.iteration == 9
assert state.output == 1
assert state.batch == 3
assert state.dataloader == dataloader
assert state.epoch == 3
assert state.max_epochs == 3
assert state.metrics == {}
with pytest.raises(RuntimeError, match=r"Unknown event name"):
state.get_event_attrib_value("abc")
def test_default_exception_handler():
update_function = MagicMock(side_effect=ValueError())
engine = Engine(update_function)
with raises(ValueError):
engine.run([1])
def test_custom_exception_handler():
value_error = ValueError()
update_function = MagicMock(side_effect=value_error)
engine = Engine(update_function)
class ExceptionCounter(object):
def __init__(self):
self.exceptions = []
def __call__(self, engine, e):
self.exceptions.append(e)
counter = ExceptionCounter()
engine.add_event_handler(Events.EXCEPTION_RAISED, counter)
engine.run([1])
# only one call from _run_once_over_data, since the exception is swallowed
assert len(counter.exceptions) == 1 and counter.exceptions[0] == value_error
def test_event_handlers_with_decoration():
engine = Engine(lambda e, b: b)
def decorated(fun):
@functools.wraps(fun)
def wrapper(*args, **kwargs):
return fun(*args, **kwargs)
return wrapper
values = []
def foo():
values.append("foo")
@decorated
def decorated_foo():
values.append("decorated_foo")
engine.add_event_handler(Events.EPOCH_STARTED, foo)
engine.add_event_handler(Events.EPOCH_STARTED(every=2), foo)
engine.add_event_handler(Events.EPOCH_STARTED, decorated_foo)
engine.add_event_handler(Events.EPOCH_STARTED(every=2), decorated_foo)
def foo_args(e):
values.append("foo_args")
values.append(e.state.iteration)
@decorated
def decorated_foo_args(e):
values.append("decorated_foo_args")
values.append(e.state.iteration)
engine.add_event_handler(Events.EPOCH_STARTED, foo_args)
engine.add_event_handler(Events.EPOCH_STARTED(every=2), foo_args)
engine.add_event_handler(Events.EPOCH_STARTED, decorated_foo_args)
engine.add_event_handler(Events.EPOCH_STARTED(every=2), decorated_foo_args)
class Foo:
def __init__(self):
self.values = []
def foo(self):
self.values.append("foo")
@decorated
def decorated_foo(self):
self.values.append("decorated_foo")
def foo_args(self, e):
self.values.append("foo_args")
self.values.append(e.state.iteration)
@decorated
def decorated_foo_args(self, e):
self.values.append("decorated_foo_args")
self.values.append(e.state.iteration)
foo = Foo()
engine.add_event_handler(Events.EPOCH_STARTED, foo.foo)
engine.add_event_handler(Events.EPOCH_STARTED(every=2), foo.foo)
engine.add_event_handler(Events.EPOCH_STARTED, foo.decorated_foo)
engine.add_event_handler(Events.EPOCH_STARTED(every=2), foo.decorated_foo)
engine.add_event_handler(Events.EPOCH_STARTED, foo.foo_args)
engine.add_event_handler(Events.EPOCH_STARTED(every=2), foo.foo_args)
engine.add_event_handler(Events.EPOCH_STARTED, foo.decorated_foo_args)
engine.add_event_handler(Events.EPOCH_STARTED(every=2), foo.decorated_foo_args)
engine.run([0], max_epochs=2)
assert values == foo.values
|
import sys
import time
import pytest
from ignite.engine import Engine, Events
from ignite.handlers import Timer
if sys.platform.startswith("darwin"):
pytest.skip("Skip if on MacOS", allow_module_level=True)
def test_timer():
sleep_t = 0.2
n_iter = 3
def _train_func(engine, batch):
time.sleep(sleep_t)
def _test_func(engine, batch):
time.sleep(sleep_t)
trainer = Engine(_train_func)
tester = Engine(_test_func)
t_total = Timer()
t_batch = Timer(average=True)
t_train = Timer()
t_total.attach(trainer)
t_batch.attach(
trainer, pause=Events.ITERATION_COMPLETED, resume=Events.ITERATION_STARTED, step=Events.ITERATION_COMPLETED
)
t_train.attach(trainer, pause=Events.EPOCH_COMPLETED, resume=Events.EPOCH_STARTED)
@trainer.on(Events.EPOCH_COMPLETED)
def run_validation(trainer):
tester.run(range(n_iter))
# Run "training"
trainer.run(range(n_iter))
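# Expected timings: t_total covers the full run (n_iter training steps plus n_iter validation steps, each ~sleep_t),
# t_batch is the average per-iteration training time (~sleep_t), and t_train excludes validation because it is
# paused at EPOCH_COMPLETED, giving ~n_iter * sleep_t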
assert pytest.approx(t_total.value(), abs=1e-1) == 2 * n_iter * sleep_t
assert pytest.approx(t_batch.value(), abs=1e-1) == sleep_t
assert pytest.approx(t_train.value(), abs=1e-1) == n_iter * sleep_t
t_total.reset()
assert pytest.approx(t_total.value(), abs=1e-1) == 0.0
|
import pytest
import torch
@pytest.fixture()
def dummy_model_factory():
class DummyModel(torch.nn.Module):
def __init__(self):
super(DummyModel, self).__init__()
self.fc1 = torch.nn.Linear(10, 10)
self.fc2 = torch.nn.Linear(12, 12)
self.fc1.weight.data.zero_()
self.fc1.bias.data.zero_()
self.fc2.weight.data.fill_(1.0)
self.fc2.bias.data.fill_(1.0)
def get_dummy_model(with_grads=True, with_frozen_layer=False):
model = DummyModel()
if with_grads:
model.fc2.weight.grad = torch.zeros_like(model.fc2.weight)
model.fc2.bias.grad = torch.zeros_like(model.fc2.bias)
if not with_frozen_layer:
model.fc1.weight.grad = torch.zeros_like(model.fc1.weight)
model.fc1.bias.grad = torch.zeros_like(model.fc1.bias)
if with_frozen_layer:
for param in model.fc1.parameters():
param.requires_grad = False
return model
return get_dummy_model
|
import os
import pytest
import torch
import ignite.distributed as idist
from ignite.engine import Engine, Events
from ignite.handlers import EarlyStopping
def do_nothing_update_fn(engine, batch):
pass
def test_args_validation():
trainer = Engine(do_nothing_update_fn)
with pytest.raises(ValueError, match=r"Argument patience should be positive integer."):
EarlyStopping(patience=-1, score_function=lambda engine: 0, trainer=trainer)
with pytest.raises(ValueError, match=r"Argument min_delta should not be a negative number."):
EarlyStopping(patience=2, min_delta=-0.1, score_function=lambda engine: 0, trainer=trainer)
with pytest.raises(TypeError, match=r"Argument score_function should be a function."):
EarlyStopping(patience=2, score_function=12345, trainer=trainer)
with pytest.raises(TypeError, match=r"Argument trainer should be an instance of Engine."):
EarlyStopping(patience=2, score_function=lambda engine: 0, trainer=None)
def test_simple_early_stopping():
scores = iter([1.0, 0.8, 0.88])
def score_function(engine):
return next(scores)
trainer = Engine(do_nothing_update_fn)
h = EarlyStopping(patience=2, score_function=score_function, trainer=trainer)
# Call 3 times and check if stopped
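# 0.8 and 0.88 never improve on the best score 1.0, so the counter reaches patience=2 on the third call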
assert not trainer.should_terminate
h(None)
assert not trainer.should_terminate
h(None)
assert not trainer.should_terminate
h(None)
assert trainer.should_terminate
def test_state_dict():
scores = iter([1.0, 0.8, 0.88])
def score_function(engine):
return next(scores)
trainer = Engine(do_nothing_update_fn)
h = EarlyStopping(patience=2, score_function=score_function, trainer=trainer)
# Call 3 times and check if stopped
assert not trainer.should_terminate
h(None)
assert not trainer.should_terminate
# Swap to new object, but maintain state
h2 = EarlyStopping(patience=2, score_function=score_function, trainer=trainer)
h2.load_state_dict(h.state_dict())
h2(None)
assert not trainer.should_terminate
h2(None)
assert trainer.should_terminate
def test_early_stopping_on_delta():
scores = iter([1.0, 2.0, 2.01, 3.0, 3.01, 3.02])
trainer = Engine(do_nothing_update_fn)
h = EarlyStopping(patience=2, min_delta=0.1, score_function=lambda _: next(scores), trainer=trainer)
assert not trainer.should_terminate
h(None) # counter == 0
assert not trainer.should_terminate
h(None) # delta == 1.0; counter == 0
assert not trainer.should_terminate
h(None) # delta == 0.01; counter == 1
assert not trainer.should_terminate
h(None) # delta == 0.99; counter == 0
assert not trainer.should_terminate
h(None) # delta == 0.01; counter == 1
assert not trainer.should_terminate
h(None) # delta == 0.01; counter == 2
assert trainer.should_terminate
def test_early_stopping_on_last_event_delta():
scores = iter([0.0, 0.3, 0.6])
trainer = Engine(do_nothing_update_fn)
h = EarlyStopping(
patience=2, min_delta=0.4, cumulative_delta=False, score_function=lambda _: next(scores), trainer=trainer
)
assert not trainer.should_terminate
h(None) # counter == 0
assert not trainer.should_terminate
h(None) # delta == 0.3; counter == 1
assert not trainer.should_terminate
h(None) # delta == 0.3; counter == 2
assert trainer.should_terminate
def test_early_stopping_on_cumulative_delta():
scores = iter([0.0, 0.3, 0.6])
trainer = Engine(do_nothing_update_fn)
h = EarlyStopping(
patience=2, min_delta=0.4, cumulative_delta=True, score_function=lambda _: next(scores), trainer=trainer
)
assert not trainer.should_terminate
h(None) # counter == 0
assert not trainer.should_terminate
h(None) # delta == 0.3; counter == 1
assert not trainer.should_terminate
h(None) # delta == 0.6; counter == 0
assert not trainer.should_terminate
def test_simple_early_stopping_on_plateau():
def score_function(engine):
return 42
trainer = Engine(do_nothing_update_fn)
h = EarlyStopping(patience=1, score_function=score_function, trainer=trainer)
# Call 2 times and check if stopped
assert not trainer.should_terminate
h(None)
assert not trainer.should_terminate
h(None)
assert trainer.should_terminate
def test_simple_no_early_stopping():
scores = iter([1.0, 0.8, 1.2])
def score_function(engine):
return next(scores)
trainer = Engine(do_nothing_update_fn)
h = EarlyStopping(patience=2, score_function=score_function, trainer=trainer)
# Call 3 times and check if not stopped
assert not trainer.should_terminate
h(None)
h(None)
h(None)
assert not trainer.should_terminate
def test_with_engine_early_stopping():
class Counter(object):
def __init__(self, count=0):
self.count = count
n_epochs_counter = Counter()
scores = iter([1.0, 0.8, 1.2, 1.5, 0.9, 1.0, 0.99, 1.1, 0.9])
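# The best score 1.5 is reached at epoch 4; the next three epochs (0.9, 1.0, 0.99) do not improve it,
# so with patience=3 training stops after epoch 7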
def score_function(engine):
return next(scores)
trainer = Engine(do_nothing_update_fn)
evaluator = Engine(do_nothing_update_fn)
early_stopping = EarlyStopping(patience=3, score_function=score_function, trainer=trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def evaluation(engine):
evaluator.run([0])
n_epochs_counter.count += 1
evaluator.add_event_handler(Events.COMPLETED, early_stopping)
trainer.run([0], max_epochs=10)
assert n_epochs_counter.count == 7
assert trainer.state.epoch == 7
def test_with_engine_early_stopping_on_plateau():
class Counter(object):
def __init__(self, count=0):
self.count = count
n_epochs_counter = Counter()
def score_function(engine):
return 0.047
trainer = Engine(do_nothing_update_fn)
evaluator = Engine(do_nothing_update_fn)
early_stopping = EarlyStopping(patience=4, score_function=score_function, trainer=trainer)
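# With a constant score there is no improvement after epoch 1, so the counter reaches patience=4 at epoch 5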
@trainer.on(Events.EPOCH_COMPLETED)
def evaluation(engine):
evaluator.run([0])
n_epochs_counter.count += 1
evaluator.add_event_handler(Events.COMPLETED, early_stopping)
trainer.run([0], max_epochs=10)
assert n_epochs_counter.count == 5
assert trainer.state.epoch == 5
def test_with_engine_no_early_stopping():
class Counter(object):
def __init__(self, count=0):
self.count = count
n_epochs_counter = Counter()
scores = iter([1.0, 0.8, 1.2, 1.23, 0.9, 1.0, 1.1, 1.253, 1.26, 1.2])
def score_function(engine):
return next(scores)
trainer = Engine(do_nothing_update_fn)
evaluator = Engine(do_nothing_update_fn)
early_stopping = EarlyStopping(patience=5, score_function=score_function, trainer=trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def evaluation(engine):
evaluator.run([0])
n_epochs_counter.count += 1
evaluator.add_event_handler(Events.COMPLETED, early_stopping)
trainer.run([0], max_epochs=10)
assert n_epochs_counter.count == 10
assert trainer.state.epoch == 10
def _test_distrib_with_engine_early_stopping(device):
if device is None:
device = idist.device()
if isinstance(device, str):
device = torch.device(device)
torch.manual_seed(12)
class Counter(object):
def __init__(self, count=0):
self.count = count
n_epochs_counter = Counter()
scores = torch.tensor([1.0, 0.8, 1.2, 1.5, 0.9, 1.0, 0.99, 1.1, 0.9], requires_grad=False).to(device)
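# Every rank holds the same score sequence; score_function below averages the all-reduced score over the
# world size, which simply reproduces the per-rank value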
def score_function(engine):
i = trainer.state.epoch - 1
v = scores[i]
idist.all_reduce(v)
v /= idist.get_world_size()
return v.item()
trainer = Engine(do_nothing_update_fn)
evaluator = Engine(do_nothing_update_fn)
early_stopping = EarlyStopping(patience=3, score_function=score_function, trainer=trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def evaluation(engine):
evaluator.run([0])
n_epochs_counter.count += 1
evaluator.add_event_handler(Events.COMPLETED, early_stopping)
trainer.run([0], max_epochs=10)
assert trainer.state.epoch == 7
assert n_epochs_counter.count == 7
def _test_distrib_integration_engine_early_stopping(device):
from ignite.metrics import Accuracy
if device is None:
device = idist.device()
if isinstance(device, str):
device = torch.device(device)
metric_device = device
if device.type == "xla":
metric_device = "cpu"
rank = idist.get_rank()
ws = idist.get_world_size()
torch.manual_seed(12)
n_epochs = 10
n_iters = 20
y_preds = (
[torch.randint(0, 2, size=(n_iters, ws)).to(device)]
+ [torch.ones(n_iters, ws).to(device)]
+ [torch.randint(0, 2, size=(n_iters, ws)).to(device) for _ in range(n_epochs - 2)]
)
y_true = (
[torch.randint(0, 2, size=(n_iters, ws)).to(device)]
+ [torch.ones(n_iters, ws).to(device)]
+ [torch.randint(0, 2, size=(n_iters, ws)).to(device) for _ in range(n_epochs - 2)]
)
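# Epoch 2 uses all-ones predictions and targets, so accuracy hits 1.0 and becomes the best score;
# the later random epochs cannot beat it, and patience=3 stops training at epoch 5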
def update(engine, _):
e = trainer.state.epoch - 1
i = engine.state.iteration - 1
return y_preds[e][i, rank], y_true[e][i, rank]
evaluator = Engine(update)
acc = Accuracy(device=metric_device)
acc.attach(evaluator, "acc")
def score_function(engine):
return engine.state.metrics["acc"]
trainer = Engine(lambda e, b: None)
early_stopping = EarlyStopping(patience=3, score_function=score_function, trainer=trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def evaluation(engine):
data = list(range(n_iters))
evaluator.run(data=data)
evaluator.add_event_handler(Events.COMPLETED, early_stopping)
trainer.run([0], max_epochs=10)
assert trainer.state.epoch == 5
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_with_engine_early_stopping(device)
_test_distrib_integration_engine_early_stopping(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_with_engine_early_stopping(device)
_test_distrib_integration_engine_early_stopping(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_with_engine_early_stopping, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration_engine_early_stopping, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_with_engine_early_stopping(device)
_test_distrib_integration_engine_early_stopping(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_with_engine_early_stopping(device)
_test_distrib_integration_engine_early_stopping(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_with_engine_early_stopping(device)
_test_distrib_integration_engine_early_stopping(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_with_engine_early_stopping(device)
_test_distrib_integration_engine_early_stopping(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
from unittest.mock import MagicMock
from ignite.engine import Engine, Events
from ignite.handlers import global_step_from_engine
def test_global_step_from_engine():
iteration = 12
epoch = 23
trainer = Engine(lambda e, b: None)
trainer.state.iteration = iteration
trainer.state.epoch = epoch
gst = global_step_from_engine(trainer)
assert gst(MagicMock(), Events.EPOCH_COMPLETED) == epoch
gst = global_step_from_engine(trainer, custom_event_name=Events.ITERATION_COMPLETED)
assert gst(MagicMock(), Events.EPOCH_COMPLETED) == iteration
|
import os
from typing import Any, Callable, Union
import pytest
import torch
import torch.nn as nn
from torch.nn.parallel import DataParallel, DistributedDataParallel
import ignite.distributed as idist
from ignite.engine import Engine, Events
from ignite.handlers import EMAHandler
def _get_dummy_model() -> nn.Module:
model = nn.Linear(2, 1, bias=False)
model.weight.data.fill_(1)
return model
def _unwrap_model(model):
if isinstance(model, (DataParallel, DistributedDataParallel)):
return model.module
else:
return model
@pytest.fixture(scope="module")
def get_dummy_model():
"""Returns a function since the fixture is needed multiple times in a single test"""
yield _get_dummy_model
def _get_dummy_step_fn(model: Union[nn.Module, DataParallel, DistributedDataParallel]) -> Callable:
"""Get a dummy step function, given model is a (wrapper of) dummy model returned from _get_dummy_model"""
def step_fn(engine, batch):
"""Increment the weight by 1 at each iteration"""
_unwrap_model(model).weight.data.add_(1)
return 0
return step_fn
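# Reference-only sketch (not used by the tests below): reproduces the EMA arithmetic behind the
# 4.0625 / 3.5 expectations, assuming an update of the form ema = (1 - momentum) * ema + momentum * value
# (for momentum=0.5 the direction of the interpolation does not matter).
def _expected_ema_value(values, momentum=0.5, start=1.0):
    ema = start
    for v in values:
        ema = (1 - momentum) * ema + momentum * v
    return ema
# e.g. _expected_ema_value([2, 3, 4, 5]) == 4.0625 and _expected_ema_value([3, 5]) == 3.5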
@pytest.mark.parametrize("momentum", [-1, 2])
def test_ema_invalid_momentum(get_dummy_model, momentum):
with pytest.raises(ValueError, match="Invalid momentum"):
EMAHandler(get_dummy_model(), momentum=momentum)
def test_has_momentum_scheduler(get_dummy_model):
"""Test the handler has attribute `momentum_scheduler` and `_momentum_lambda_obj`"""
momentum_warmup = 0.0
warmup_iters = 10
ema_handler = EMAHandler(get_dummy_model(), momentum_warmup=momentum_warmup, warmup_iters=warmup_iters)
assert hasattr(ema_handler, "momentum_scheduler")
assert hasattr(ema_handler, "_momentum_lambda_obj")
def test_ema_warmup_func(get_dummy_model):
"""Test the built-in linear warmup function for the EMA momentum"""
momentum = 0.5
momentum_warmup_1 = 0.0
momentum_warmup_2 = 1.0
warmup_iters = 5
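# During the first warmup_iters iterations the momentum should stay between momentum_warmup and the final
# momentum; after warmup it should settle at the final momentum, which check_ema_momentum asserts below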
def check_ema_momentum(engine: Engine, momentum_warmup, final_momentum, warmup_iters):
if engine.state.iteration == 1:
assert engine.state.ema_momentum == momentum_warmup
elif engine.state.iteration >= 1 + warmup_iters:
assert engine.state.ema_momentum == final_momentum
else:
min_momentum = min(momentum, momentum_warmup)
max_momentum = max(momentum, momentum_warmup)
assert min_momentum <= engine.state.ema_momentum <= max_momentum
# momentum_warmup < momentum
model_1 = get_dummy_model()
engine_1 = Engine(_get_dummy_step_fn(model_1))
ema_handler_1 = EMAHandler(model_1, momentum, momentum_warmup_1, warmup_iters)
ema_handler_1.attach(engine_1)
engine_1.add_event_handler(
Events.ITERATION_COMPLETED, check_ema_momentum, momentum_warmup_1, momentum, warmup_iters
)
engine_1.run(range(10))
# momentum_warmup > momentum
model_2 = get_dummy_model()
engine_2 = Engine(_get_dummy_step_fn(model_2))
ema_handler_2 = EMAHandler(model_2, momentum, momentum_warmup_2, warmup_iters)
ema_handler_2.attach(engine_2)
engine_2.add_event_handler(
Events.ITERATION_COMPLETED, check_ema_momentum, momentum_warmup_2, momentum, warmup_iters
)
engine_2.run(range(10))
def test_ema_invalid_model():
with pytest.raises(ValueError, match="model should be an instance of nn.Module or its subclasses"):
model = "Invalid Model"
EMAHandler(model) # type: ignore
@pytest.mark.distributed
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_ema_ema_model_on_cuda(get_dummy_model):
"""Test if ema_handler.ema_model is nn.Module or nn.DataParallel and under eval mode"""
model = get_dummy_model().to(idist.device())
model = idist.auto_model(model)
ema_handler = EMAHandler(model)
ema_model = ema_handler.ema_model
assert not ema_model.training
if isinstance(model, DataParallel):
assert isinstance(ema_model, DataParallel)
else:
assert (
isinstance(ema_model, nn.Module)
and (not isinstance(ema_model, DataParallel))
and (not isinstance(ema_model, DistributedDataParallel))
)
def test_ema_load_state_dict(get_dummy_model):
model_1 = get_dummy_model()
model_1.weight.data.fill_(2)
state_dict_1 = model_1.state_dict()
model_2 = get_dummy_model()
ema_handler = EMAHandler(model_2)
ema_model = ema_handler.ema_model
ema_model.load_state_dict(state_dict_1)
assert ema_model.weight.data.allclose(model_1.weight.data)
def test_ema_get_const_momentum(get_dummy_model):
"""Test if momentum retrieved from the engine is constant and equal to the handler's momentum"""
model = get_dummy_model()
step_fn = _get_dummy_step_fn(model)
engine = Engine(step_fn)
def assert_const_momentum(engine: Engine, const_momentum):
assert engine.state.ema_momentum == const_momentum
ema_handler = EMAHandler(model, momentum=0.002)
ema_handler.attach(engine)
engine.add_event_handler(Events.ITERATION_COMPLETED, assert_const_momentum, ema_handler.momentum)
engine.run(range(10))
@pytest.mark.parametrize("handle_buffers", ["copy", "update", "ema_train", "invalid"])
def test_ema_buffer(handle_buffers):
"""Test if the tensors in buffer are also correctly updated"""
model = nn.BatchNorm2d(2)
model.running_mean.data.fill_(1.5)
model.running_var.data.fill_(1.5)
# manually register a buffer to test if it will be correctly updated
model.register_buffer("dummy_buffer", tensor=torch.tensor(1.0, dtype=torch.float32))
if handle_buffers == "invalid":
with pytest.raises(ValueError, match="handle_buffers can only"):
_ = EMAHandler(model, momentum=0.5, handle_buffers=handle_buffers)
else:
ema_handler = EMAHandler(model, momentum=0.5, handle_buffers=handle_buffers)
def _bn_step_fn(engine, batch):
x = torch.rand(4, 2, 32, 32)
_ = model(x)
# manually increment the dummy_buffer at every step
model.dummy_buffer += 1.0
return 1
engine = Engine(_bn_step_fn)
ema_handler.attach(engine)
ema_model = ema_handler.ema_model
if handle_buffers == "ema_train":
assert ema_model.training
else:
assert not ema_model.training
@engine.on(Events.ITERATION_COMPLETED)
def check_buffers():
if handle_buffers == "update":
# the buffers with torch.int64 data type should be directly copied
assert ema_model.num_batches_tracked.allclose(model.num_batches_tracked)
# buffers with floating type will be updated rather than copied
assert not ema_model.dummy_buffer.allclose(model.dummy_buffer)
assert not ema_model.running_mean.allclose(model.running_mean)
assert not ema_model.running_var.allclose(model.running_var)
elif handle_buffers == "copy":
# the buffers with torch.int64 data type should be directly copied
assert ema_model.num_batches_tracked.allclose(model.num_batches_tracked)
assert ema_model.dummy_buffer.allclose(model.dummy_buffer)
assert ema_model.running_mean.allclose(model.running_mean)
assert ema_model.running_var.allclose(model.running_var)
else:
# buffers will not be copied or EMA updated
assert ema_model.num_batches_tracked.allclose(torch.tensor(0, dtype=torch.int64))
assert ema_model.dummy_buffer.allclose(torch.tensor(1.0, dtype=torch.float32))
# engine will run 4 iterations
engine.run([0, 1], max_epochs=2)
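# With momentum=0.5, dummy_buffer in the model takes the values 2, 3, 4, 5 over the four iterations;
# in 'update' mode the EMA copy therefore evolves 1.0 -> 1.5 -> 2.25 -> 3.125 -> 4.0625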
if handle_buffers == "update":
assert ema_model.num_batches_tracked.allclose(model.num_batches_tracked)
assert ema_model.dummy_buffer.allclose(torch.tensor(4.0625, dtype=torch.float32))
assert not ema_model.dummy_buffer.allclose(model.dummy_buffer)
assert not ema_model.running_mean.allclose(model.running_mean)
assert not ema_model.running_var.allclose(model.running_var)
elif handle_buffers == "copy":
assert ema_model.num_batches_tracked.allclose(model.num_batches_tracked)
assert ema_model.dummy_buffer.allclose(model.dummy_buffer)
assert ema_model.running_mean.allclose(model.running_mean)
assert ema_model.running_var.allclose(model.running_var)
else:
# buffers will not be copied or EMA updated
assert ema_model.num_batches_tracked.allclose(torch.tensor(0, dtype=torch.int64))
assert ema_model.dummy_buffer.allclose(torch.tensor(1.0, dtype=torch.float32))
def test_ema_two_handlers(get_dummy_model):
"""Test when two EMA handlers are attached to a trainer"""
model_1 = get_dummy_model()
ema_handler_1 = EMAHandler(model_1, momentum=0.5)
model_2 = get_dummy_model()
ema_handler_2 = EMAHandler(model_2, momentum=0.5)
def _step_fn(engine: Engine, batch: Any):
model_1.weight.data.add_(1)
model_2.weight.data.add_(1)
return 0
engine = Engine(_step_fn)
assert not hasattr(engine.state, "ema_momentum_1")
# handler_1 updates the EMA model of model_1 every iteration
ema_handler_1.attach(engine, "ema_momentum_1", event=Events.ITERATION_COMPLETED)
assert hasattr(engine.state, "ema_momentum_1")
# handler_2 updates the EMA model of model_2 every 2 iterations
ema_handler_2.attach(engine, "ema_momentum_2", event=Events.ITERATION_COMPLETED(every=2))
assert hasattr(engine.state, "ema_momentum_2")
# engine will run 4 iterations
engine.run(range(2), max_epochs=2)
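# The model weights go 2, 3, 4, 5 over the four iterations. handler_1 (every iteration) gives an EMA of
# 1.0 -> 1.5 -> 2.25 -> 3.125 -> 4.0625; handler_2 updates only at iterations 2 and 4 (weights 3 and 5),
# giving 1.0 -> 2.0 -> 3.5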
# explicitly cast to float32 to avoid test failure on XLA devices
ema_weight_1 = ema_handler_1.ema_model.weight.data.to(torch.float32)
ema_weight_2 = ema_handler_2.ema_model.weight.data.to(torch.float32)
assert ema_weight_1.allclose(ema_weight_1.new_full((1, 2), 4.0625))
assert ema_weight_2.allclose(ema_weight_2.new_full((1, 2), 3.5))
assert engine.state.ema_momentum_1 == 0.5
assert engine.state.ema_momentum_2 == 0.5
model_3 = get_dummy_model()
ema_handler_3 = EMAHandler(model_3)
with pytest.warns(UserWarning, match="Attribute 'ema_momentum_1' already exists"):
ema_handler_3.attach(engine, name="ema_momentum_1")
def _test_ema_final_weight(model, device=None, ddp=False, interval=1):
"""Test if final smoothed weights are correct"""
if device is None:
# let Horovod decide the device
device = idist.device()
if isinstance(device, str):
device = torch.device(device)
model = model.to(device)
if ddp:
model = idist.auto_model(model)
step_fn = _get_dummy_step_fn(model)
engine = Engine(step_fn)
ema_handler = EMAHandler(model, momentum=0.5)
ema_handler.attach(engine, "model", event=Events.ITERATION_COMPLETED(every=interval))
# engine will run 4 iterations
engine.run(range(2), max_epochs=2)
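# The model weight ends at 5.0 (four +1 updates on an initial value of 1.0). With momentum=0.5 the EMA weight
# reaches 4.0625 when updated every iteration and 3.5 when updated every second iteration (at weights 3 and 5)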
# ema_model and model can be DP or DDP
# explicitly cast to float32 to avoid test failure on XLA devices
ema_weight = _unwrap_model(ema_handler.ema_model).weight.data.to(torch.float32)
model_weight = _unwrap_model(model).weight.data.to(torch.float32)
assert ema_weight.device == device
assert model_weight.device == device
if interval == 1:
assert ema_weight.allclose(ema_weight.new_full((1, 2), 4.0625))
elif interval == 2:
assert ema_weight.allclose(ema_weight.new_full((1, 2), 3.5))
else:
pass
assert model_weight.allclose(model_weight.new_full((1, 2), 5.0))
@pytest.mark.parametrize("interval", [1, 2])
def test_ema_final_weight_cpu(get_dummy_model, interval):
device = torch.device("cpu")
_test_ema_final_weight(get_dummy_model(), device=device, ddp=False, interval=interval)
@pytest.mark.parametrize("interval", [1, 2])
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_ema_final_weight_cuda(get_dummy_model, interval):
device = torch.device("cuda:0")
_test_ema_final_weight(get_dummy_model(), device=device, ddp=False, interval=interval)
@pytest.mark.distributed
@pytest.mark.parametrize("interval", [1, 2])
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_ema_final_weight_distrib_nccl_gpu(get_dummy_model, distributed_context_single_node_nccl, interval):
device = idist.device()
_test_ema_final_weight(get_dummy_model(), device=device, ddp=True, interval=interval)
@pytest.mark.distributed
@pytest.mark.parametrize("interval", [1, 2])
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_ema_final_weight_distrib_gloo_cpu_or_gpu(get_dummy_model, distributed_context_single_node_gloo, interval):
device = idist.device()
_test_ema_final_weight(get_dummy_model(), device=device, ddp=True, interval=interval)
@pytest.mark.distributed
@pytest.mark.parametrize("interval", [1, 2])
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_ema_final_weight_distrib_hvd(get_dummy_model, gloo_hvd_executor, interval):
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
# Pass device=None to the executor. Unlike other distributed tests, where the processes are
# already spawned by the context, the processes here are explicitly spawned by the executor, so we
# pass None to the function and call idist.device() inside the function to get the corresponding device.
gloo_hvd_executor(_test_ema_final_weight, (get_dummy_model(), None, True, interval), np=nproc, do_init=True)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_ema_final_weight_distrib_single_device_xla(get_dummy_model):
device = idist.device()
_test_ema_final_weight(get_dummy_model(), device=device, ddp=True)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_ema_final_weight_distrib_xla_nprocs(get_dummy_model, xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
def _test_ema_final_weight_xla_nprocs(index):
device = idist.device()
_test_ema_final_weight(get_dummy_model(), device=device, ddp=True)
xmp_executor(_test_ema_final_weight_xla_nprocs, args=(), nprocs=n)
@pytest.mark.multinode_distributed
@pytest.mark.parametrize("interval", [1, 2])
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_ema_final_weight_distrib_multinode_gloo_cpu_or_gpu(
get_dummy_model, distributed_context_multi_node_gloo, interval
):
device = idist.device()
_test_ema_final_weight(get_dummy_model(), device=device, ddp=True, interval=interval)
@pytest.mark.multinode_distributed
@pytest.mark.parametrize("interval", [1, 2])
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_ema_final_weight_distrib_multinode_nccl_gpu(get_dummy_model, distributed_context_multi_node_nccl, interval):
device = idist.device()
_test_ema_final_weight(get_dummy_model(), device=device, ddp=True, interval=interval)
|
import re
from pathlib import Path
from unittest.mock import patch
import pytest
import torch
import torch.nn as nn
from packaging.version import Version
from ignite.engine import Engine, Events
from ignite.handlers.state_param_scheduler import (
ExpStateScheduler,
LambdaStateScheduler,
MultiStepStateScheduler,
PiecewiseLinearStateScheduler,
StepStateScheduler,
)
config1 = (3, [(2, 0), (5, 10)], True, [0.0, 0.0, 3.3333333333333335])
expected_hist2 = [0.0] * 10 + [float(i) for i in range(1, 11)] + [10.0] * 10
config2 = (30, [(10, 0), (20, 10)], True, expected_hist2)
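# config1: ramp from 0 at epoch 2 to 10 at epoch 5 -> per-epoch history [0.0, 0.0, 10/3] after 3 epochs
# config2: the value stays at 0 until epoch 10, ramps linearly to 10 by epoch 20, then holds 10 until epoch 30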
config3 = (
PiecewiseLinearStateScheduler,
{"param_name": "linear_scheduled_param", "milestones_values": [(3, 12), (5, 10)], "create_new": True},
)
config4 = (
ExpStateScheduler,
{"param_name": "exp_scheduled_param", "initial_value": 10, "gamma": 0.99, "create_new": True},
)
config5 = (
MultiStepStateScheduler,
{
"param_name": "multistep_scheduled_param",
"initial_value": 10,
"gamma": 0.99,
"milestones": [3, 6],
"create_new": True,
},
)
if Version(torch.__version__) < Version("1.9.0"):
torch_testing_assert_close = torch.testing.assert_allclose
else:
torch_testing_assert_close = torch.testing.assert_close
class LambdaState:
def __init__(self, initial_value, gamma):
self.initial_value = initial_value
self.gamma = gamma
def __call__(self, event_index):
return self.initial_value * self.gamma ** (event_index % 9)
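# LambdaState decays the value by gamma per event and restarts the decay every 9 events (event_index % 9)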
config6 = (
LambdaStateScheduler,
{
"param_name": "custom_scheduled_param",
"lambda_obj": LambdaState(initial_value=10, gamma=0.99),
"create_new": True,
},
)
config7 = (
StepStateScheduler,
{"param_name": "step_scheduled_param", "initial_value": 10, "gamma": 0.99, "step_size": 5, "create_new": True},
)
@pytest.mark.parametrize("max_epochs, milestones_values, save_history, expected_param_history", [config1, config2])
def test_pwlinear_scheduler_linear_increase_history(
max_epochs, milestones_values, save_history, expected_param_history
):
# Testing linear increase
engine = Engine(lambda e, b: None)
pw_linear_step_parameter_scheduler = PiecewiseLinearStateScheduler(
param_name="pwlinear_scheduled_param",
milestones_values=milestones_values,
save_history=save_history,
create_new=True,
)
pw_linear_step_parameter_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=max_epochs)
assert hasattr(engine.state, "param_history")
state_param = engine.state.param_history["pwlinear_scheduled_param"]
assert len(state_param) == len(expected_param_history)
assert state_param == expected_param_history
state_dict = pw_linear_step_parameter_scheduler.state_dict()
pw_linear_step_parameter_scheduler.load_state_dict(state_dict)
@pytest.mark.parametrize("max_epochs, milestones_values", [(3, [(3, 12), (5, 10)]), (5, [(10, 12), (20, 10)])])
def test_pwlinear_scheduler_step_constant(max_epochs, milestones_values):
# Testing step_constant
engine = Engine(lambda e, b: None)
linear_state_parameter_scheduler = PiecewiseLinearStateScheduler(
param_name="pwlinear_scheduled_param", milestones_values=milestones_values, create_new=True
)
linear_state_parameter_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=max_epochs)
torch_testing_assert_close(getattr(engine.state, "pwlinear_scheduled_param"), float(milestones_values[0][1]))
state_dict = linear_state_parameter_scheduler.state_dict()
linear_state_parameter_scheduler.load_state_dict(state_dict)
@pytest.mark.parametrize(
"max_epochs, milestones_values, expected_val",
[(2, [(0, 0), (3, 10)], 6.666666666666667), (10, [(0, 0), (20, 10)], 5.0)],
)
def test_pwlinear_scheduler_linear_increase(max_epochs, milestones_values, expected_val):
# Testing linear increase
engine = Engine(lambda e, b: None)
linear_state_parameter_scheduler = PiecewiseLinearStateScheduler(
param_name="pwlinear_scheduled_param", milestones_values=milestones_values, create_new=True
)
linear_state_parameter_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=max_epochs)
torch_testing_assert_close(getattr(engine.state, "pwlinear_scheduled_param"), expected_val, atol=0.001, rtol=0.0)
state_dict = linear_state_parameter_scheduler.state_dict()
linear_state_parameter_scheduler.load_state_dict(state_dict)
@pytest.mark.parametrize("max_epochs, milestones_values,", [(3, [(0, 0), (3, 10)]), (40, [(0, 0), (20, 10)])])
def test_pwlinear_scheduler_max_value(max_epochs, milestones_values):
# Testing max_value
engine = Engine(lambda e, b: None)
linear_state_parameter_scheduler = PiecewiseLinearStateScheduler(
param_name="linear_scheduled_param", milestones_values=milestones_values, create_new=True
)
linear_state_parameter_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=max_epochs)
torch_testing_assert_close(getattr(engine.state, "linear_scheduled_param"), float(milestones_values[-1][1]))
state_dict = linear_state_parameter_scheduler.state_dict()
linear_state_parameter_scheduler.load_state_dict(state_dict)
def test_piecewiselinear_asserts():
with pytest.raises(TypeError, match=r"Argument milestones_values should be a list or tuple"):
PiecewiseLinearStateScheduler(param_name="linear_scheduled_param", milestones_values=None)
with pytest.raises(ValueError, match=r"Argument milestones_values should be with at least one value"):
PiecewiseLinearStateScheduler(param_name="linear_scheduled_param", milestones_values=[])
with pytest.raises(ValueError, match=r"Argument milestones_values should be a list of pairs"):
PiecewiseLinearStateScheduler(param_name="linear_scheduled_param", milestones_values=[(0.5,)])
with pytest.raises(ValueError, match=r"Argument milestones_values should be a list of pairs"):
PiecewiseLinearStateScheduler(param_name="linear_scheduled_param", milestones_values=[(10, 0.5), (0.6,)])
with pytest.raises(ValueError, match=r"Milestones should be increasing integers"):
PiecewiseLinearStateScheduler(param_name="linear_scheduled_param", milestones_values=[(10, 0.5), (5, 0.6)])
with pytest.raises(TypeError, match=r"Value of a milestone should be integer"):
PiecewiseLinearStateScheduler(param_name="linear_scheduled_param", milestones_values=[(0.5, 1)])
@pytest.mark.parametrize("max_epochs, initial_value, gamma", [(3, 10, 0.99), (40, 5, 0.98)])
def test_exponential_scheduler(max_epochs, initial_value, gamma):
engine = Engine(lambda e, b: None)
exp_state_parameter_scheduler = ExpStateScheduler(
param_name="exp_scheduled_param", initial_value=initial_value, gamma=gamma, create_new=True
)
exp_state_parameter_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=max_epochs)
torch_testing_assert_close(getattr(engine.state, "exp_scheduled_param"), initial_value * gamma**max_epochs)
state_dict = exp_state_parameter_scheduler.state_dict()
exp_state_parameter_scheduler.load_state_dict(state_dict)
@pytest.mark.parametrize("max_epochs, initial_value, gamma, step_size", [(3, 10, 0.99, 5), (40, 5, 0.98, 22)])
def test_step_scheduler(max_epochs, initial_value, gamma, step_size):
engine = Engine(lambda e, b: None)
step_state_parameter_scheduler = StepStateScheduler(
param_name="step_scheduled_param",
initial_value=initial_value,
gamma=gamma,
step_size=step_size,
create_new=True,
)
step_state_parameter_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=max_epochs)
torch_testing_assert_close(
getattr(engine.state, "step_scheduled_param"), initial_value * gamma ** (max_epochs // step_size)
)
state_dict = step_state_parameter_scheduler.state_dict()
step_state_parameter_scheduler.load_state_dict(state_dict)
from bisect import bisect_right
@pytest.mark.parametrize(
"max_epochs, initial_value, gamma, milestones", [(3, 10, 0.99, [3, 6]), (40, 5, 0.98, [3, 6, 9, 10, 11])]
)
def test_multistep_scheduler(max_epochs, initial_value, gamma, milestones):
engine = Engine(lambda e, b: None)
multi_step_state_parameter_scheduler = MultiStepStateScheduler(
param_name="multistep_scheduled_param",
initial_value=initial_value,
gamma=gamma,
milestones=milestones,
create_new=True,
)
multi_step_state_parameter_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=max_epochs)
torch_testing_assert_close(
getattr(engine.state, "multistep_scheduled_param"),
initial_value * gamma ** bisect_right(milestones, max_epochs),
)
state_dict = multi_step_state_parameter_scheduler.state_dict()
multi_step_state_parameter_scheduler.load_state_dict(state_dict)
def test_custom_scheduler():
engine = Engine(lambda e, b: None)
class LambdaState:
def __init__(self, initial_value, gamma):
self.initial_value = initial_value
self.gamma = gamma
def __call__(self, event_index):
return self.initial_value * self.gamma ** (event_index % 9)
lambda_state_parameter_scheduler = LambdaStateScheduler(
param_name="custom_scheduled_param", lambda_obj=LambdaState(initial_value=10, gamma=0.99), create_new=True
)
lambda_state_parameter_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=2)
torch_testing_assert_close(
getattr(engine.state, "custom_scheduled_param"), LambdaState(initial_value=10, gamma=0.99)(2)
)
engine.run([0] * 8, max_epochs=20)
torch_testing_assert_close(
getattr(engine.state, "custom_scheduled_param"), LambdaState(initial_value=10, gamma=0.99)(20)
)
state_dict = lambda_state_parameter_scheduler.state_dict()
lambda_state_parameter_scheduler.load_state_dict(state_dict)
def test_custom_scheduler_asserts():
class LambdaState:
def __init__(self, initial_value, gamma):
self.initial_value = initial_value
self.gamma = gamma
with pytest.raises(ValueError, match=r"Expected lambda_obj to be callable."):
lambda_state_parameter_scheduler = LambdaStateScheduler(
param_name="custom_scheduled_param", lambda_obj=LambdaState(initial_value=10, gamma=0.99), create_new=True
)
@pytest.mark.parametrize("scheduler_cls, scheduler_kwargs", [config3, config4, config5, config6])
def test_simulate_and_plot_values(scheduler_cls, scheduler_kwargs):
import matplotlib
matplotlib.use("Agg")
event = Events.EPOCH_COMPLETED
max_epochs = 2
data = [0] * 10
scheduler = scheduler_cls(**scheduler_kwargs)
trainer = Engine(lambda engine, batch: None)
scheduler.attach(trainer, event)
trainer.run(data, max_epochs=max_epochs)
# launch plot values
scheduler_cls.plot_values(num_events=len(data) * max_epochs, **scheduler_kwargs)
@pytest.mark.parametrize("save_history", [False, True])
@pytest.mark.parametrize("scheduler_cls, scheduler_kwargs", [config3, config4, config5, config6])
def test_simulate_values(scheduler_cls, scheduler_kwargs, save_history):
max_epochs = 2
data = [0] * 10
scheduler_kwargs["save_history"] = save_history
scheduler_cls.simulate_values(num_events=len(data) * max_epochs, **scheduler_kwargs)
def test_torch_save_load(dirname):
lambda_state_parameter_scheduler = LambdaStateScheduler(
param_name="custom_scheduled_param", lambda_obj=LambdaState(initial_value=10, gamma=0.99), create_new=True
)
filepath = Path(dirname) / "dummy_lambda_state_parameter_scheduler.pt"
torch.save(lambda_state_parameter_scheduler, filepath)
loaded_lambda_state_parameter_scheduler = torch.load(filepath)
engine1 = Engine(lambda e, b: None)
lambda_state_parameter_scheduler.attach(engine1, Events.EPOCH_COMPLETED)
engine1.run([0] * 8, max_epochs=2)
torch_testing_assert_close(
getattr(engine1.state, "custom_scheduled_param"), LambdaState(initial_value=10, gamma=0.99)(2)
)
engine2 = Engine(lambda e, b: None)
loaded_lambda_state_parameter_scheduler.attach(engine2, Events.EPOCH_COMPLETED)
engine2.run([0] * 8, max_epochs=2)
torch_testing_assert_close(
getattr(engine2.state, "custom_scheduled_param"), LambdaState(initial_value=10, gamma=0.99)(2)
)
torch_testing_assert_close(
getattr(engine1.state, "custom_scheduled_param"), getattr(engine2.state, "custom_scheduled_param")
)
def test_simulate_and_plot_values_no_matplotlib():
with pytest.raises(ModuleNotFoundError, match=r"This method requires matplotlib to be installed."):
with patch.dict("sys.modules", {"matplotlib.pyplot": None}):
event = Events.EPOCH_COMPLETED
max_epochs = 2
data = [0] * 10
kwargs = {
"param_name": "multistep_scheduled_param",
"initial_value": 10,
"gamma": 0.99,
"milestones": [3, 6],
"create_new": True,
}
scheduler = MultiStepStateScheduler(**kwargs)
trainer = Engine(lambda engine, batch: None)
scheduler.attach(trainer, event)
trainer.run(data, max_epochs=max_epochs)
# launch plot values
MultiStepStateScheduler.plot_values(num_events=len(data) * max_epochs, **kwargs)
def test_multiple_scheduler_with_save_history():
engine_multiple_schedulers = Engine(lambda e, b: None)
configs = [config3, config4, config5, config6, config7]
for scheduler, config in configs:
if "save_history" in config:
del config["save_history"]
_scheduler = scheduler(**config, save_history=True)
_scheduler.attach(engine_multiple_schedulers)
engine_multiple_schedulers.run([0] * 8, max_epochs=2)
for scheduler, config in configs:
engine = Engine(lambda e, b: None)
_scheduler = scheduler(**config, save_history=True)
_scheduler.attach(engine)
engine.run([0] * 8, max_epochs=2)
torch_testing_assert_close(
engine_multiple_schedulers.state.param_history[config["param_name"]],
engine.state.param_history[config["param_name"]],
)
def test_docstring_examples():
# LambdaStateScheduler
engine = Engine(lambda e, b: None)
class LambdaState:
def __init__(self, initial_value, gamma):
self.initial_value = initial_value
self.gamma = gamma
def __call__(self, event_index):
return self.initial_value * self.gamma ** (event_index % 9)
param_scheduler = LambdaStateScheduler(param_name="param", lambda_obj=LambdaState(10, 0.99), create_new=True)
param_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=2)
# PiecewiseLinearStateScheduler
engine = Engine(lambda e, b: None)
param_scheduler = PiecewiseLinearStateScheduler(
param_name="param", milestones_values=[(10, 0.5), (20, 0.45), (21, 0.3), (30, 0.1), (40, 0.1)], create_new=True
)
param_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=40)
# ExpStateScheduler
engine = Engine(lambda e, b: None)
param_scheduler = ExpStateScheduler(param_name="param", initial_value=10, gamma=0.99, create_new=True)
param_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=2)
# StepStateScheduler
engine = Engine(lambda e, b: None)
param_scheduler = StepStateScheduler(param_name="param", initial_value=10, gamma=0.99, step_size=5, create_new=True)
param_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=10)
# MultiStepStateScheduler
engine = Engine(lambda e, b: None)
param_scheduler = MultiStepStateScheduler(
param_name="param", initial_value=10, gamma=0.99, milestones=[3, 6], create_new=True
)
param_scheduler.attach(engine, Events.EPOCH_COMPLETED)
engine.run([0] * 8, max_epochs=10)
def test_param_scheduler_attach_exception():
trainer = Engine(lambda e, b: None)
param_name = "state_param"
setattr(trainer.state, param_name, None)
save_history = True
create_new = True
param_scheduler = PiecewiseLinearStateScheduler(
param_name=param_name,
milestones_values=[(0, 0.0), (10, 0.999)],
save_history=save_history,
create_new=create_new,
)
with pytest.raises(
ValueError,
match=r"Attribute '" + re.escape(param_name) + "' already exists in the engine.state. "
r"This may be a conflict between multiple handlers. "
r"Please choose another name.",
):
param_scheduler.attach(trainer, Events.ITERATION_COMPLETED)
def test_param_scheduler_attach_warning():
trainer = Engine(lambda e, b: None)
param_name = "state_param"
save_history = True
create_new = False
param_scheduler = PiecewiseLinearStateScheduler(
param_name=param_name,
milestones_values=[(0, 0.0), (10, 0.999)],
save_history=save_history,
create_new=create_new,
)
with pytest.warns(
UserWarning,
match=r"Attribute '" + re.escape(param_name) + "' is not defined in the engine.state. "
r"PiecewiseLinearStateScheduler will create it. Remove this warning by setting create_new=True.",
):
param_scheduler.attach(trainer, Events.ITERATION_COMPLETED)
def test_param_scheduler_with_ema_handler():
from ignite.handlers import EMAHandler
model = nn.Linear(2, 1)
trainer = Engine(lambda e, b: model(b))
data = torch.rand(100, 2)
param_name = "ema_decay"
ema_handler = EMAHandler(model)
ema_handler.attach(trainer, name=param_name, event=Events.ITERATION_COMPLETED)
ema_decay_scheduler = PiecewiseLinearStateScheduler(
param_name=param_name, milestones_values=[(0, 0.0), (10, 0.999)], save_history=True
)
ema_decay_scheduler.attach(trainer, Events.ITERATION_COMPLETED)
trainer.run(data, max_epochs=20)
|
import time
import pytest
from ignite.engine import Engine, Events
from ignite.handlers import TimeLimit
def test_arg_validation():
with pytest.raises(ValueError, match=r"Argument limit_sec should be a positive integer."):
TimeLimit(limit_sec=-5)
with pytest.raises(TypeError, match=r"Argument limit_sec should be an integer."):
TimeLimit(limit_sec="abc")
def _train_func(engine, batch):
time.sleep(1)
@pytest.mark.parametrize("n_iters, limit", [(20, 10), (5, 10)])
def test_terminate_on_time_limit(n_iters, limit):
started = time.time()
trainer = Engine(_train_func)
@trainer.on(Events.TERMINATE)
def _():
trainer.state.is_terminated = True
trainer.add_event_handler(Events.ITERATION_COMPLETED, TimeLimit(limit))
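# Each iteration sleeps for ~1 s, so the TimeLimit handler terminates the run only when n_iters exceeds
# the limit, which is what the is_terminated flag records below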
trainer.state.is_terminated = False
trainer.run(range(n_iters))
elapsed = round(time.time() - started)
assert elapsed <= limit + 1
assert trainer.state.is_terminated == (n_iters > limit)
|
# Needed to collect coverage data
|
from unittest.mock import MagicMock, patch
import numpy as np
import pytest
import torch
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, ExponentialLR, StepLR
from ignite.engine import Engine, Events
from ignite.handlers.param_scheduler import (
ConcatScheduler,
CosineAnnealingScheduler,
create_lr_scheduler_with_warmup,
LinearCyclicalScheduler,
LRScheduler,
ParamGroupScheduler,
ParamScheduler,
PiecewiseLinear,
ReduceLROnPlateauScheduler,
)
from tests.ignite.contrib.handlers import MockFP16DeepSpeedZeroOptimizer
try:
from torch.optim.lr_scheduler import MultiplicativeLR
except ImportError:
has_multiplicative_lr = False
else:
from packaging.version import Version
# https://github.com/pytorch/pytorch/issues/32756
has_multiplicative_lr = Version(torch.__version__) >= Version("1.5.0")
class FakeParamScheduler(ParamScheduler):
def get_param(self):
return [0]
def test_param_scheduler_asserts():
t1 = torch.zeros([1], requires_grad=True)
t2 = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([{"params": t1, "lr": 0.1}, {"params": t2, "lr": 0.1}])
lr_scheduler = FakeParamScheduler(optimizer, "lr")
with pytest.raises(ValueError, match=r"size of value is different than optimizer_param_groups"):
lr_scheduler(None)
with pytest.raises(TypeError, match=r"Argument state_dict should be a dictionary, but given"):
lr_scheduler.load_state_dict(None)
with pytest.raises(ValueError, match=r"Required state attribute 'event_index' is absent in provided state_dict"):
lr_scheduler.load_state_dict({})
with pytest.raises(TypeError, match=r"Argument optimizer should be torch.optim.Optimizer"):
FakeParamScheduler({}, "lr")
def test_linear_scheduler():
with pytest.raises(TypeError, match=r"Argument optimizer should be torch.optim.Optimizer"):
LinearCyclicalScheduler({}, "lr", 1, 0, cycle_size=0)
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0.0)
with pytest.raises(ValueError, match=r"Argument cycle_size should be positive and larger than 1"):
LinearCyclicalScheduler(optimizer, "lr", 1, 0, cycle_size=0)
with pytest.raises(ValueError, match=r"Argument cycle_size should be positive and larger than 1"):
LinearCyclicalScheduler(optimizer, "lr", 1, 0, cycle_size=1)
scheduler = LinearCyclicalScheduler(optimizer, "lr", 1, 0, 10)
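# cycle_size=10, start=1.0, end=0.0: the lr decreases linearly from 1.0 to 0.0 over the first half of each
# cycle (per iteration) and increases back towards 1.0 over the second half, as the expected list below shows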
state_dict = scheduler.state_dict()
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
for _ in range(2):
lrs = []
trainer.run([0] * 9, max_epochs=2)
assert lrs == list(
map(
pytest.approx,
[
# Cycle 1
1.0,
0.8,
0.6,
0.4,
0.2,
0.0,
0.2,
0.4,
0.6,
0.8,
# Cycle 2
1.0,
0.8,
0.6,
0.4,
0.2,
0.0,
0.2,
0.4, # 0.6, 0.8,
],
)
)
scheduler.load_state_dict(state_dict)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler = LinearCyclicalScheduler(optimizer, "lr", 1, 0, 10, cycle_mult=2)
state_dict = scheduler.state_dict()
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
for _ in range(2):
lrs = []
trainer.run([0] * 10, max_epochs=3)
assert lrs == list(
map(
pytest.approx,
[
# Cycle 1
1.0,
0.8,
0.6,
0.4,
0.2,
0.0,
0.2,
0.4,
0.6,
0.8,
# Cycle 2
1.0,
0.9,
0.8,
0.7,
0.6,
0.5,
0.4,
0.3,
0.2,
0.1,
0.0,
0.1,
0.2,
0.3,
0.4,
0.5,
0.6,
0.7,
0.8,
0.9,
],
)
)
scheduler.load_state_dict(state_dict)
# With float cycle_size
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler = LinearCyclicalScheduler(
optimizer, "lr", start_value=1.2, end_value=0.2, cycle_size=10.00000012, cycle_mult=1.0
)
state_dict = scheduler.state_dict()
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
for _ in range(2):
lrs = []
trainer.run([0] * 9, max_epochs=2)
assert lrs == list(
map(
pytest.approx,
[
# Cycle 1
1.2,
1.0,
0.8,
0.6,
0.4,
0.2,
0.4,
0.6,
0.8,
1.0,
# Cycle 2
1.2,
1.0,
0.8,
0.6,
0.4,
0.2,
0.4,
0.6, # 0.8, 1.0,
],
)
)
scheduler.load_state_dict(state_dict)
def test_linear_scheduler_cycle_size_two():
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler = LinearCyclicalScheduler(optimizer, "lr", 1, 0, cycle_size=2)
data = [0] * 10
max_epochs = 2
simulated_values = LinearCyclicalScheduler.simulate_values(
num_events=len(data) * max_epochs, param_name="lr", start_value=1, end_value=0, cycle_size=2
)
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
lrs = []
trainer.run(data, max_epochs=max_epochs)
assert lrs == list(
map(
pytest.approx,
[1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0],
)
)
assert lrs == pytest.approx([v for i, v in simulated_values])
def test_cosine_annealing_scheduler():
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler = CosineAnnealingScheduler(optimizer, "lr", 0, 1, 10)
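# start_value=0, end_value=1, cycle_size=10: the lr follows a half-cosine from 0 towards 1 over each
# 10-iteration cycle and restarts at 0, matching the expected values below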
state_dict = scheduler.state_dict()
data = [0] * 9
max_epochs = 2
simulated_values = CosineAnnealingScheduler.simulate_values(
num_events=len(data) * max_epochs, param_name="lr", start_value=0, end_value=1, cycle_size=10
)
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
for _ in range(2):
lrs = []
trainer.run(data, max_epochs=max_epochs)
assert lrs == list(
map(
pytest.approx,
[
0.0,
0.02447174185242318,
0.09549150281252627,
0.20610737385376332,
0.3454915028125263,
0.5,
0.6545084971874737,
0.7938926261462365,
0.9045084971874737,
0.9755282581475768,
0.0,
0.02447174185242318,
0.09549150281252627,
0.20610737385376332,
0.3454915028125263,
0.5,
0.6545084971874737,
0.7938926261462365, # 0.9045084971874737, 0.9755282581475768
],
)
)
scheduler.load_state_dict(state_dict)
assert lrs == pytest.approx([v for i, v in simulated_values])
def test_concat_scheduler_asserts():
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.0, cycle_size=10)
scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", start_value=0.0, end_value=1.0, cycle_size=10)
with pytest.raises(TypeError, match=r"Argument schedulers should be a sequence"):
ConcatScheduler(schedulers=None, durations=[])
with pytest.raises(ValueError, match=r"Argument schedulers should be of more than one parameter schedulers"):
ConcatScheduler(schedulers=[], durations=[])
with pytest.raises(ValueError, match=r"Argument schedulers should be of more than one parameter schedulers"):
ConcatScheduler(schedulers=[scheduler_1], durations=[10])
with pytest.raises(TypeError, match=r"Value at index 1 of schedulers should be a parameter scheduler"):
ConcatScheduler(schedulers=[scheduler_1, 12], durations=[10])
with pytest.raises(ValueError, match=r"Incorrect number schedulers or duration values"):
ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=[10, 5])
with pytest.raises(ValueError, match=r"Argument durations should be list/tuple of integers"):
ConcatScheduler(schedulers=[scheduler_1, scheduler_2, scheduler_2], durations=[15, 12.0])
with pytest.raises(TypeError, match=r"Argument durations should be list/tuple"):
ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations="abc")
with pytest.raises(TypeError, match=r"Argument param_names should be list or tuple"):
ConcatScheduler.simulate_values(
num_events=123, schedulers=[scheduler_1, scheduler_2], durations=[15], param_names="abc"
)
with pytest.raises(ValueError, match=r"Argument param_names should be list or tuple of strings"):
ConcatScheduler.simulate_values(
num_events=123, schedulers=[scheduler_1, scheduler_2], durations=[15], param_names=[1]
)
optimizer_2 = torch.optim.SGD([tensor], lr=0)
scheduler_3 = CosineAnnealingScheduler(optimizer_2, "lr", start_value=0.0, end_value=1.0, cycle_size=10)
with pytest.raises(ValueError, match=r"schedulers should be related to same optimizer"):
ConcatScheduler([scheduler_1, scheduler_3], durations=[30])
scheduler_4 = CosineAnnealingScheduler(optimizer, "lr2", start_value=0.0, end_value=1.0, cycle_size=10)
with pytest.raises(ValueError, match=r"schedulers should be related to same param_name"):
ConcatScheduler([scheduler_1, scheduler_4], durations=[30])
with pytest.raises(ValueError, match=r"schedulers should be related to same optimizer"):
ConcatScheduler.simulate_values(3, [scheduler_1, scheduler_3], durations=[30])
def test_concat_scheduler_state_dict():
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.0, cycle_size=10)
scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", start_value=0.0, end_value=1.0, cycle_size=10)
durations = [10]
concat_scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=durations, save_history=False)
state_dict = concat_scheduler.state_dict()
assert state_dict["durations"] == durations
assert state_dict["_current_duration"] == durations[0]
assert state_dict["_scheduler_index"] == 0
for _ in range(20):
concat_scheduler(None, None)
concat_scheduler.load_state_dict(state_dict)
assert concat_scheduler.durations == durations
assert concat_scheduler._current_duration == durations[0]
assert id(concat_scheduler._current_scheduler) == id(scheduler_1)
with pytest.raises(ValueError, match=r"Required state attribute 'schedulers' is absent in provided state_dict"):
concat_scheduler.load_state_dict({"a": 1})
with pytest.raises(ValueError, match=r"Input state_dict contains 0 state_dicts of concatenated schedulers"):
concat_scheduler.load_state_dict({"schedulers": []})
with pytest.raises(TypeError, match=r"Argument state_dict should be a dictionary, but given"):
concat_scheduler.load_state_dict(None)
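# durations=[10]: scheduler_1 drives "lr" for the first 10 events, then scheduler_2 takes over for the rest of the run.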
@pytest.mark.parametrize("duration_vals_as_np_int", [False, True])
def test_concat_scheduler_two_schedulers(duration_vals_as_np_int):
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.0, cycle_size=10)
scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", start_value=0.0, end_value=1.0, cycle_size=10)
durations = [10]
if duration_vals_as_np_int:
durations = [np.int64(t) for t in durations]
concat_scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=durations, save_history=True)
state_dict = concat_scheduler.state_dict()
data = [0] * 10
max_epochs = 2
simulated_values = ConcatScheduler.simulate_values(
num_events=len(data) * max_epochs, schedulers=[scheduler_1, scheduler_2], durations=durations
)
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, concat_scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
for _ in range(2):
lrs = []
trainer.run(data, max_epochs=max_epochs)
assert lrs == list(
map(
pytest.approx,
[
# Cycle 1 of the LinearCyclicalScheduler
1.0,
0.8,
0.6,
0.4,
0.2,
0.0,
0.2,
0.4,
0.6,
0.8,
# Cycle 1 of the CosineAnnealingScheduler
0.0,
0.02447174185242318,
0.09549150281252627,
0.20610737385376332,
0.3454915028125263,
0.5,
0.6545084971874737,
0.7938926261462365,
0.9045084971874737,
0.9755282581475768,
],
)
)
state_lrs = trainer.state.param_history["lr"]
assert len(state_lrs) == len(lrs)
# Unpack singleton lists
assert [group[0] for group in state_lrs] == lrs
assert lrs == pytest.approx([v for i, v in simulated_values])
concat_scheduler.load_state_dict(state_dict)
trainer.state.param_history = None
def test_concat_scheduler_two_linear():
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=0.0, end_value=0.1, cycle_size=2)
scheduler_2 = LinearCyclicalScheduler(optimizer, "lr", start_value=0.2, end_value=1.0, cycle_size=2)
durations = [5]
concat_scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=durations, save_history=True)
state_dict = concat_scheduler.state_dict()
assert concat_scheduler.get_param() == 0.0
data = [0] * 10
max_epochs = 2
simulated_values = ConcatScheduler.simulate_values(
num_events=len(data) * max_epochs, schedulers=[scheduler_1, scheduler_2], durations=durations
)
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, concat_scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
for _ in range(2):
lrs = []
trainer.run(data, max_epochs=max_epochs)
assert lrs == list(
map(
pytest.approx,
[
# first LinearCyclicalScheduler
0.0,
0.1,
0.0,
0.1,
0.0,
# second LinearCyclicalScheduler
0.2,
1.0,
0.2,
1.0,
0.2,
1.0,
0.2,
1.0,
0.2,
1.0,
0.2,
1.0,
0.2,
1.0,
0.2,
],
)
)
state_lrs = trainer.state.param_history["lr"]
assert len(state_lrs) == len(lrs)
# Unpack singleton lists
assert [group[0] for group in state_lrs] == lrs
assert lrs == pytest.approx([v for i, v in simulated_values])
concat_scheduler.load_state_dict(state_dict)
trainer.state.param_history = None
def test_concat_scheduler_3_schedulers():
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.5, cycle_size=20)
scheduler_2 = LinearCyclicalScheduler(optimizer, "lr", start_value=0.5, end_value=0.45, cycle_size=10)
scheduler_3 = LinearCyclicalScheduler(optimizer, "lr", start_value=0.5, end_value=0.0, cycle_size=20)
durations = [10, 5]
concat_scheduler = ConcatScheduler(
schedulers=[scheduler_1, scheduler_2, scheduler_3], durations=durations, save_history=True
)
state_dict = concat_scheduler.state_dict()
data = [0] * 10
max_epochs = 2
simulated_values = ConcatScheduler.simulate_values(
num_events=len(data) * max_epochs, schedulers=[scheduler_1, scheduler_2, scheduler_3], durations=durations
)
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, concat_scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
for _ in range(2):
lrs = []
trainer.run(data, max_epochs=max_epochs)
assert lrs == list(
map(
pytest.approx,
[
# Cycle 1 of the first LinearCyclicalScheduler
1.0,
0.95,
0.9,
0.85,
0.8,
0.75,
0.7,
0.65,
0.6,
0.55,
# Cycle 1 of the second LinearCyclicalScheduler
0.5,
0.49,
0.48,
0.47,
0.46,
# Cycle 1 of the third LinearCyclicalScheduler
0.5,
0.45,
0.4,
0.35,
0.3,
],
)
)
state_lrs = trainer.state.param_history["lr"]
assert len(state_lrs) == len(lrs)
# Unpack singleton lists
assert [group[0] for group in state_lrs] == lrs
assert lrs == pytest.approx([v for i, v in simulated_values])
concat_scheduler.load_state_dict(state_dict)
trainer.state.param_history = None
def test_save_param_history():
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler = LinearCyclicalScheduler(optimizer, "lr", 1, 0, 10, save_history=True)
lrs = []
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
assert not hasattr(trainer.state, "param_history")
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
trainer.run([0] * 10, max_epochs=2)
state_lrs = trainer.state.param_history["lr"]
assert len(state_lrs) == len(lrs)
# Unpack singleton lists
assert [group[0] for group in state_lrs] == lrs
def test_lr_scheduler_asserts():
err_msg = r"Argument lr_scheduler should be a subclass of torch.optim.lr_scheduler.(_LRScheduler|LRScheduler)"
with pytest.raises(TypeError, match=err_msg):
LRScheduler(123)
with pytest.raises(TypeError, match=err_msg):
LRScheduler.simulate_values(1, None)
@pytest.mark.parametrize(
"torch_lr_scheduler_cls, kwargs",
[
(StepLR, ({"step_size": 5, "gamma": 0.5})),
(ExponentialLR, ({"gamma": 0.78})),
(MultiplicativeLR if has_multiplicative_lr else None, ({"lr_lambda": lambda epoch: 0.95})),
],
)
def test_lr_scheduler(torch_lr_scheduler_cls, kwargs):
if torch_lr_scheduler_cls is None:
pytest.skip("MultiplicativeLR is not available in this version of torch")
tensor = torch.zeros([1], requires_grad=True)
optimizer1 = torch.optim.SGD([tensor], lr=0.01)
optimizer2 = torch.optim.SGD([tensor], lr=0.01)
optimizer3 = torch.optim.SGD([tensor], lr=0.01)
opt_state_dict1 = optimizer1.state_dict()
opt_state_dict2 = optimizer2.state_dict()
opt_state_dict3 = optimizer3.state_dict()
torch_lr_scheduler1 = torch_lr_scheduler_cls(optimizer=optimizer1, **kwargs)
scheduler1 = LRScheduler(torch_lr_scheduler1)
state_dict1 = scheduler1.state_dict()
torch_lr_scheduler2 = torch_lr_scheduler_cls(optimizer=optimizer2, **kwargs)
with pytest.warns(UserWarning, match=r"the first lr value from the optimizer, otherwise it will be skipped"):
scheduler2 = LRScheduler(torch_lr_scheduler2, use_legacy=True)
state_dict2 = scheduler2.state_dict()
torch_lr_scheduler3 = torch_lr_scheduler_cls(optimizer=optimizer3, **kwargs)
state_dict3 = torch_lr_scheduler3.state_dict()
def dummy_update(engine, batch):
optimizer1.step()
optimizer2.step()
optimizer3.step()
trainer = Engine(dummy_update)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler1)
@trainer.on(Events.ITERATION_STARTED)
def save_lr1(engine):
lrs1.append(optimizer1.param_groups[0]["lr"])
@trainer.on(Events.ITERATION_STARTED)
def save_lr2(engine):
lrs2.append(optimizer2.param_groups[0]["lr"])
@trainer.on(Events.ITERATION_STARTED)
def save_true_lr(engine):
lrs_true.append(optimizer3.param_groups[0]["lr"])
@trainer.on(Events.ITERATION_COMPLETED)
def torch_lr_scheduler_step(engine):
torch_lr_scheduler3.step()
trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler2)
for _ in range(2):
lrs1 = []
lrs2 = []
lrs_true = []
data = [0] * 10
max_epochs = 2
trainer.run(data, max_epochs=max_epochs)
assert lrs_true == pytest.approx(lrs1), f"{_}: {lrs_true} ({len(lrs_true)}) vs {lrs1} ({len(lrs1)})"
assert lrs_true == pytest.approx(lrs2), f"{_}: {lrs_true} ({len(lrs_true)}) vs {lrs2} ({len(lrs2)})"
optimizer1.load_state_dict(opt_state_dict1)
scheduler1.load_state_dict(state_dict1)
optimizer2.load_state_dict(opt_state_dict2)
scheduler2.load_state_dict(state_dict2)
optimizer3.load_state_dict(opt_state_dict3)
torch_lr_scheduler3.load_state_dict(state_dict3)
optimizer4 = torch.optim.SGD([tensor], lr=0.01)
torch_lr_scheduler4 = torch_lr_scheduler_cls(optimizer=optimizer4, **kwargs)
simulated_values = LRScheduler.simulate_values(num_events=len(data) * max_epochs, lr_scheduler=torch_lr_scheduler4)
assert lrs1 == pytest.approx([v for i, v in simulated_values])
assert lrs2 == pytest.approx([v for i, v in simulated_values])
def test_piecewiselinear_asserts():
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
with pytest.raises(TypeError, match=r"Argument milestones_values should be a list or tuple"):
PiecewiseLinear(optimizer, "lr", milestones_values=None)
with pytest.raises(ValueError, match=r"Argument milestones_values should be with at least one value"):
PiecewiseLinear(optimizer, "lr", milestones_values=[])
with pytest.raises(ValueError, match=r"Argument milestones_values should be a list of pairs"):
PiecewiseLinear(optimizer, "lr", milestones_values=[(0.5,)])
with pytest.raises(ValueError, match=r"Argument milestones_values should be a list of pairs"):
PiecewiseLinear(optimizer, "lr", milestones_values=[(10, 0.5), (0.6,)])
with pytest.raises(ValueError, match=r"Milestones should be increasing integers"):
PiecewiseLinear(optimizer, "lr", milestones_values=[(10, 0.5), (5, 0.6)])
with pytest.raises(TypeError, match=r"Value of a milestone should be integer"):
PiecewiseLinear(optimizer, "lr", milestones_values=[(0.5, 1)])
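# PiecewiseLinear holds the first milestone value until its milestone, interpolates linearly between
# consecutive milestones, and keeps the last value afterwards; the expected list below mirrors that.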
@pytest.mark.parametrize("milestones_as_np_int", [True, False])
def test_piecewiselinear(milestones_as_np_int):
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
milestones_values = [(5, 0.5), (15, 1.0), (25, 0.0), (35, 1.0), (40, 0.5)]
if milestones_as_np_int:
milestones_values = [(np.int64(t), v) for t, v in milestones_values]
scheduler = PiecewiseLinear(optimizer, "lr", milestones_values=milestones_values)
state_dict = scheduler.state_dict()
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
for _ in range(2):
lrs = []
trainer.run([0] * 25, max_epochs=2)
assert lrs == list(
map(
pytest.approx,
[
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.55,
0.6,
0.65,
0.7,
0.75,
0.8,
0.85,
0.9,
0.95,
1.0,
0.9,
0.8,
0.7,
0.6,
0.5,
0.4,
0.3,
0.2,
0.1,
0.0,
0.1,
0.2,
0.3,
0.4,
0.5,
0.6,
0.7,
0.8,
0.9,
1.0,
0.9,
0.8,
0.7,
0.6,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
],
)
)
scheduler.load_state_dict(state_dict)
def test_simulate_and_plot_values():
import matplotlib
matplotlib.use("Agg")
def _test(scheduler_cls, **scheduler_kwargs):
if scheduler_cls == LRScheduler:
optimizer = scheduler_kwargs["lr_scheduler"].optimizer
elif scheduler_cls == ConcatScheduler:
optimizer = scheduler_kwargs["optimizer"]
del scheduler_kwargs["optimizer"]
else:
tensor = torch.zeros([1], requires_grad=True)
scheduler_kwargs["optimizer"] = torch.optim.SGD([tensor], lr=0.1)
optimizer = scheduler_kwargs["optimizer"]
max_epochs = 2
data = [0] * 10
simulated_values = scheduler_cls.simulate_values(num_events=len(data) * max_epochs, **scheduler_kwargs)
scheduler = scheduler_cls(**scheduler_kwargs)
lrs = []
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
trainer.add_event_handler(Events.ITERATION_STARTED, save_lr)
trainer.run(data, max_epochs=max_epochs)
assert lrs == pytest.approx([v for i, v in simulated_values])
# Re-execute simulate_values to check that it does not modify the scheduler's internal state
# simulated_values = scheduler_cls.simulate_values(num_events=len(data) * max_epochs,
# save_history=True, # this will be removed
# **scheduler_kwargs)
# assert lrs == pytest.approx([v for i, v in simulated_values])
# Check that plot_values runs without raising
scheduler_cls.plot_values(num_events=len(data) * max_epochs, **scheduler_kwargs)
# LinearCyclicalScheduler
_test(LinearCyclicalScheduler, param_name="lr", start_value=1.0, end_value=0.0, cycle_size=10)
# CosineAnnealingScheduler
_test(CosineAnnealingScheduler, param_name="lr", start_value=1.0, end_value=0.0, cycle_size=10)
# LRScheduler
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0.1)
torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.5)
_test(LRScheduler, lr_scheduler=torch_lr_scheduler)
# ConcatScheduler = [LinearCyclicalScheduler, CosineAnnealingScheduler]
scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.0, cycle_size=20)
scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", start_value=0.0, end_value=1.0, cycle_size=10)
durations = [10]
_test(ConcatScheduler, optimizer=optimizer, schedulers=[scheduler_1, scheduler_2], durations=durations)
# ConcatScheduler = [LinearCyclicalScheduler, LRScheduler]
tensor = torch.ones([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0.001)
torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=1.5)
scheduler_1 = LRScheduler(torch_lr_scheduler)
scheduler_2 = LinearCyclicalScheduler(optimizer, "lr", start_value=0.1, end_value=0.0, cycle_size=10)
durations = [10]
_test(ConcatScheduler, optimizer=optimizer, schedulers=[scheduler_1, scheduler_2], durations=durations)
# PiecewiseLinear
tensor = torch.ones([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0.001)
_test(
PiecewiseLinear,
optimizer=optimizer,
param_name="lr",
milestones_values=[(10, 0.5), (20, 0.45), (21, 0.3), (30, 0.1), (40, 0.1)],
)
with pytest.raises(ModuleNotFoundError, match=r"This method requires matplotlib to be installed."):
with patch.dict("sys.modules", {"matplotlib.pyplot": None}):
_test(
PiecewiseLinear,
optimizer=optimizer,
param_name="lr",
milestones_values=[(10, 0.5), (20, 0.45), (21, 0.3), (30, 0.1), (40, 0.1)],
)
def test_create_lr_scheduler_with_warmup_asserts():
with pytest.raises(TypeError, match=r"Argument lr_scheduler should be a subclass of"):
create_lr_scheduler_with_warmup(12, warmup_start_value=0.0, warmup_end_value=0.1, warmup_duration=10)
t1 = torch.zeros([1], requires_grad=True)
# A) opt lr != warmup_end_value
optimizer = torch.optim.SGD([t1], lr=0.2)
torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.98)
with pytest.raises(ValueError, match=r"Argument warmup_duration should be at least 2 events"):
create_lr_scheduler_with_warmup(
torch_lr_scheduler, warmup_start_value=0.0, warmup_end_value=0.1, warmup_duration=1
)
with pytest.raises(TypeError, match=r"Argument warmup_duration should be integer"):
create_lr_scheduler_with_warmup(
torch_lr_scheduler, warmup_start_value=0.0, warmup_end_value=0.1, warmup_duration="abc"
)
with pytest.raises(TypeError, match=r"Argument output_simulated_values should be a list of None"):
simulated_values = ()
create_lr_scheduler_with_warmup(
torch_lr_scheduler,
warmup_start_value=0.0,
warmup_end_value=0.1,
warmup_duration=10,
output_simulated_values=simulated_values,
)
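# warmup_end_next_value is the value expected on the first event after warmup: the wrapped scheduler's
# first value when warmup_end_value differs from it, or its second value when warmup already ends there.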
@pytest.mark.parametrize(
"lr_scheduler_name, warmup_start_value, warmup_end_value, warmup_duration, warmup_end_next_value",
[
# A) opt lr != warmup_end_value
("ExponentialLR", 0.01, 0.05, 10, 0.2),
("ExponentialLR", 0.01, 0.05, 2, 0.2),
# B) opt lr == warmup_end_value
("ExponentialLR", 0.01, 0.2, 10, 0.2 * 0.98),
("ExponentialLR", 0.01, 0.2, 2, 0.2 * 0.98),
# C) lr_scheduler start_value != warmup_end_value
("LinearCyclicalScheduler", 0.01, 0.05, 10, 0.8),
("LinearCyclicalScheduler", 0.01, 0.05, 2, 0.8),
# D) lr_scheduler start_value == warmup_end_value
("LinearCyclicalScheduler", 0.01, 0.8, 10, 0.8 - (0.8 / 5.0)),
("LinearCyclicalScheduler", 0.01, 0.8, 2, 0.8 - (0.8 / 5.0)),
# E) warmup_end_value is None: fall back to case B)
("ExponentialLR", 0.01, None, 10, 0.2 * 0.98),
],
)
def test_create_lr_scheduler_with_warmup(
lr_scheduler_name, warmup_start_value, warmup_end_value, warmup_duration, warmup_end_next_value
):
t1 = torch.zeros([1], requires_grad=True)
if lr_scheduler_name == "ExponentialLR":
optimizer = torch.optim.SGD([t1], lr=0.2)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.98)
elif lr_scheduler_name == "LinearCyclicalScheduler":
optimizer = torch.optim.SGD([t1], lr=0.0)
lr_scheduler = LinearCyclicalScheduler(
optimizer=optimizer, param_name="lr", start_value=0.8, end_value=0.0, cycle_size=10
)
else:
raise ValueError(f"Unknown name: {lr_scheduler_name}")
num_iterations = 10
max_epochs = 20
if warmup_end_value is None:
expected_warmup_end_value = optimizer.param_groups[0]["lr"]
else:
expected_warmup_end_value = warmup_end_value
simulated_values = [None] * (num_iterations * max_epochs)
scheduler = create_lr_scheduler_with_warmup(
lr_scheduler,
warmup_start_value=warmup_start_value,
warmup_end_value=warmup_end_value,
warmup_duration=warmup_duration,
output_simulated_values=simulated_values,
)
state_dict = scheduler.state_dict()
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
@trainer.on(Events.ITERATION_STARTED)
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
data = [0] * num_iterations
for _ in range(2):
lrs = []
trainer.run(data, max_epochs=max_epochs)
assert lrs == pytest.approx([v for _, v in simulated_values])
assert lrs[0] == pytest.approx(warmup_start_value), f"lrs={lrs[: warmup_duration + num_iterations]}"
assert lrs[warmup_duration - 1] == pytest.approx(
expected_warmup_end_value
), f"lrs={lrs[: warmup_duration + num_iterations]}"
assert lrs[warmup_duration] == pytest.approx(
warmup_end_next_value
), f"lrs={lrs[: warmup_duration + num_iterations]}"
scheduler.load_state_dict(state_dict)
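# Warmup into a ConcatScheduler: 5 epochs of linear warmup to lr_max, a slow linear decay towards
# 0.9 * lr_max over the middle of training, then a 5-epoch linear cooldown from lr_max to zero.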
@pytest.mark.parametrize("save_history", [False, True])
def test_create_lr_scheduler_with_warmup_on_combined_scheduler(save_history):
# Test with a complex scheduler
tensor = torch.ones([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0.001)
max_epochs = 25
lr_max_value = 0.4
num_iterations_per_epoch = 128
num_iterations = max_epochs * num_iterations_per_epoch
warmup_duration = 5 * num_iterations_per_epoch
cooldown_duration = 5 * num_iterations_per_epoch
scheduler_1 = LinearCyclicalScheduler(
optimizer,
"lr",
start_value=lr_max_value,
end_value=lr_max_value * 0.9,
cycle_size=(num_iterations - warmup_duration - cooldown_duration) * 2,
)
scheduler_2 = LinearCyclicalScheduler(
optimizer, "lr", start_value=lr_max_value, end_value=0.0, cycle_size=cooldown_duration * 2
)
lr_scheduler = ConcatScheduler(
schedulers=[scheduler_1, scheduler_2],
durations=[num_iterations - warmup_duration - cooldown_duration],
save_history=False,
)
lr_values = [None] * num_iterations
scheduler = create_lr_scheduler_with_warmup(
lr_scheduler,
warmup_start_value=0.0,
warmup_end_value=lr_max_value,
warmup_duration=warmup_duration,
save_history=save_history,
output_simulated_values=lr_values,
)
state_dict = scheduler.state_dict()
trainer = Engine(lambda engine, batch: None)
@trainer.on(Events.ITERATION_COMPLETED)
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
data = [0] * num_iterations_per_epoch
for _ in range(2):
lrs = []
trainer.run(data, max_epochs=max_epochs)
assert lrs == pytest.approx([v for i, v in lr_values])
if save_history:
param_history = trainer.state.param_history["lr"]
assert lrs == pytest.approx([v[0] for v in param_history])
trainer.state.param_history = None
scheduler.load_state_dict(state_dict)
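# Warmup ends at scaled_lr, which differs from the optimizer's init_lr, so the first post-warmup event
# jumps to init_lr and StepLR then decays it by gamma every step_size events.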
def test_create_lr_scheduler_with_warmup_with_real_model(dummy_model_factory):
model = dummy_model_factory(with_grads=False, with_frozen_layer=False)
init_lr = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=init_lr)
scaled_lr = 0.02
warmup_duration = 5
step_size = 2
gamma = 0.97
output_simulated_values = [None] * 50
create_lr_scheduler_with_warmup(
torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma),
warmup_start_value=0.0,
warmup_end_value=scaled_lr,
warmup_duration=warmup_duration,
output_simulated_values=output_simulated_values,
)
assert output_simulated_values[0] == [0, 0.0]
assert output_simulated_values[warmup_duration - 1] == [warmup_duration - 1, scaled_lr]
assert output_simulated_values[warmup_duration] == [warmup_duration, init_lr]
v = [warmup_duration + step_size, init_lr * gamma]
assert output_simulated_values[warmup_duration + step_size] == v
def test_param_group_scheduler_asserts():
t1 = torch.zeros([1], requires_grad=True)
t2 = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([{"params": t1, "lr": 0.1}, {"params": t2, "lr": 0.1}])
lr_scheduler1 = LinearCyclicalScheduler(
optimizer, "lr", param_group_index=0, start_value=1.0, end_value=0.0, cycle_size=10
)
lr_scheduler2 = LinearCyclicalScheduler(
optimizer, "lr", param_group_index=1, start_value=1.0, end_value=0.0, cycle_size=10
)
with pytest.raises(TypeError, match=r"Argument schedulers should be a list/tuple"):
ParamGroupScheduler(schedulers=None, names=["a", "b", "c"])
with pytest.raises(ValueError, match=r"Argument schedulers should be a list/tuple of parameter schedulers"):
ParamGroupScheduler(schedulers=[0, 1, 2], names=["a", "b", "c"])
with pytest.raises(ValueError, match=r"Argument schedulers should be a list/tuple of parameter schedulers"):
ParamGroupScheduler(schedulers=[lr_scheduler1, "2"], names=["a", "b"])
with pytest.raises(TypeError, match=r"Argument names should be a list/tuple"):
ParamGroupScheduler(schedulers=[lr_scheduler1, lr_scheduler2], names="ab")
with pytest.raises(ValueError, match=r"Argument names should be a list/tuple of parameter scheduler's names"):
ParamGroupScheduler(schedulers=[lr_scheduler1, lr_scheduler2], names=[1, 2])
with pytest.raises(ValueError, match=r"\d should be equal \d"):
ParamGroupScheduler(schedulers=[lr_scheduler1, lr_scheduler2], names=["a"])
scheduler = ParamGroupScheduler(schedulers=[lr_scheduler1, lr_scheduler2], names=["a", "b"])
with pytest.raises(TypeError, match=r"Argument state_dict should be a dictionary"):
scheduler.load_state_dict(None)
with pytest.raises(ValueError, match=r"Required state attribute 'schedulers' is absent in provided state_dict"):
scheduler.load_state_dict({"a": 1})
with pytest.raises(ValueError, match=r"Input state_dict contains 0 state_dicts of param group schedulers"):
scheduler.load_state_dict({"schedulers": []})
with pytest.raises(ValueError, match=r"Required state attribute 'schedulers' is absent in provided state_dict"):
scheduler.load_state_dict({})
with pytest.raises(
ValueError, match=r"Name of scheduler from input state dict does not " r"correspond to required one"
):
scheduler.load_state_dict({"schedulers": [("a", lr_scheduler1.state_dict()), ("bad_name", {})]})
@pytest.mark.parametrize("param_groups_setting", ["single_optim", "multi_optim"])
def test_param_group_scheduler(param_groups_setting):
t1 = torch.zeros([1], requires_grad=True)
t2 = torch.zeros([1], requires_grad=True)
if param_groups_setting == "single_optim":
optimizer = torch.optim.SGD([{"params": t1, "lr": 0.1}, {"params": t2, "lr": 0.1}])
lr_scheduler1 = LinearCyclicalScheduler(
optimizer, "lr", param_group_index=0, start_value=1.0, end_value=0.0, cycle_size=10
)
lr_scheduler2 = LinearCyclicalScheduler(
optimizer, "lr", param_group_index=1, start_value=1.0, end_value=0.0, cycle_size=10
)
else:
optimizer_1 = torch.optim.SGD(params=[t1], lr=0.1)
optimizer_2 = torch.optim.SGD(params=[t2], lr=0.1)
lr_scheduler1 = LinearCyclicalScheduler(optimizer_1, "lr", start_value=1.0, end_value=0.0, cycle_size=10)
lr_scheduler2 = LinearCyclicalScheduler(optimizer_2, "lr", start_value=1.0, end_value=0.0, cycle_size=10)
lr_schedulers = [lr_scheduler1, lr_scheduler2]
num_iterations = 10
max_epochs = 20
scheduler = ParamGroupScheduler(lr_schedulers, names=[f"s_{i}" for i in range(len(lr_schedulers))])
state_dict = scheduler.state_dict()
trainer = Engine(lambda engine, batch: None)
lrs = []
@trainer.on(Events.ITERATION_STARTED, lrs)
def save_lr(_, lrs):
lrs.append(scheduler.get_param())
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
data = [0] * num_iterations
for _ in range(2):
lrs.clear()
trainer.run(data, max_epochs=max_epochs)
assert [lr[0] for lr in lrs] == pytest.approx([lr[1] for lr in lrs])
scheduler.load_state_dict(state_dict)
values = ParamGroupScheduler.simulate_values(max_epochs * num_iterations, lr_schedulers)
assert [lr[1] for lr in values] == pytest.approx([lr[2] for lr in values])
assert [lr[0] for lr in lrs] == pytest.approx([lr[1] for lr in values])
@pytest.mark.parametrize(
"scheduler_cls, kwargs",
[
(LinearCyclicalScheduler, {"param_name": "lr", "start_value": 1.0, "end_value": 0.0, "cycle_size": 10}),
(
PiecewiseLinear,
{"param_name": "lr", "milestones_values": [(5, 0.5), (15, 1.0), (25, 0.0), (35, 1.0), (40, 0.5)]},
),
(CosineAnnealingScheduler, {"param_name": "lr", "start_value": 0.0, "end_value": 1.0, "cycle_size": 10}),
(ExponentialLR, {"gamma": 0.98}),
(StepLR, {"step_size": 50, "gamma": 0.5}),
],
)
def test_scheduler_with_param_groups(scheduler_cls, kwargs):
t1 = torch.zeros([1], requires_grad=True)
t2 = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([{"params": t1, "lr": 0.1}, {"params": t2, "lr": 0.1}])
lr_scheduler = scheduler_cls(optimizer, **kwargs)
if not isinstance(lr_scheduler, ParamScheduler):
lr_scheduler = LRScheduler(lr_scheduler)
num_iterations = 10
max_epochs = 20
state_dict = lr_scheduler.state_dict()
trainer = Engine(lambda engine, batch: None)
@trainer.on(Events.ITERATION_COMPLETED)
def save_lr():
lrs.append((optimizer.param_groups[0]["lr"], optimizer.param_groups[1]["lr"]))
trainer.add_event_handler(Events.ITERATION_STARTED, lr_scheduler)
data = [0] * num_iterations
for _ in range(2):
lrs = []
trainer.run(data, max_epochs=max_epochs)
assert [lr[0] for lr in lrs] == pytest.approx([lr[1] for lr in lrs])
lr_scheduler.load_state_dict(state_dict)
def test_lr_scheduling_on_non_torch_optimizers():
# tests https://github.com/pytorch/ignite/issues/1162
optimizer = MagicMock()
optimizer.param_groups = [{"params": 0}]
FakeParamScheduler(optimizer, "lr")
tensor = torch.zeros([1], requires_grad=True)
base_optimizer = torch.optim.SGD([tensor], lr=0)
optimizer = MockFP16DeepSpeedZeroOptimizer(base_optimizer)
milestones_values = [(5, 0.5), (15, 1.0)]
scheduler = PiecewiseLinear(optimizer, "lr", milestones_values=milestones_values)
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
lrs = []
trainer.run([0] * 15, max_epochs=1)
assert lrs == list(
map(pytest.approx, [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95])
)
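# With mode="max", threshold_mode="abs" and threshold=1.99, an epoch only counts as an improvement if
# acc exceeds the previous best by more than 1.99; after more than `patience` such non-improving epochs
# the lr of param group 0 is multiplied by factor=0.5, while param group 1 (no param_group_index) is untouched.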
def test_reduce_lr_on_plateau_scheduler():
tensor1 = torch.zeros([1], requires_grad=True)
tensor2 = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([{"params": [tensor1]}, {"params": [tensor2]}], lr=1)
data = [0] * 8
max_epochs = 10
trainer = Engine(lambda engine, batch: None)
@trainer.on(Events.EPOCH_COMPLETED)
def evaluate():
evaluator.run(data)
scheduler = ReduceLROnPlateauScheduler(
optimizer,
metric_name="acc",
mode="max",
factor=0.5,
patience=1,
threshold_mode="abs",
threshold=1.99,
min_lr=1e-7,
save_history=True,
trainer=trainer,
param_group_index=0,
)
evaluator = Engine(lambda engine, batch: None)
evaluator.state.metrics = {"acc": 0.0}
generate_acc = iter([3, 7, 7, 9, 10, 11, 8, 8, 4, 7])
@evaluator.on(Events.COMPLETED)
def set_acc():
evaluator.state.metrics["acc"] = next(generate_acc)
evaluator.add_event_handler(Events.COMPLETED, scheduler)
trainer.run(data, max_epochs=max_epochs)
lrs = [param[0] for param in trainer.state.param_history["lr"]]
assert lrs == list(
map(
pytest.approx,
[1, 1, 1, 1, 1, 1, 1, 0.5, 0.5, 0.25],
)
)
assert optimizer.param_groups[1]["lr"] == 1
values = ReduceLROnPlateauScheduler.simulate_values(
5, [10, 9, 9, 9, 8.1], 1.0, save_history=True, factor=0.5, patience=2, threshold=0.1
)
values = np.array(values)[:, 1].tolist()
assert values == list(
map(
pytest.approx,
[1.0, 1.0, 1.0, 0.5, 0.5],
)
)
def test_reduce_lr_on_plateau_scheduler_asserts():
tensor1 = torch.zeros([1], requires_grad=True)
tensor2 = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([{"params": [tensor1]}, {"params": [tensor2]}], lr=1)
with pytest.raises(TypeError, match=r"When param_group_index is given, min_lr should be a float, but given"):
ReduceLROnPlateauScheduler(
optimizer,
metric_name="acc",
min_lr=[1e-7, 1e-8],
param_group_index=0,
)
with pytest.raises(
ValueError, match=r"Argument engine should have in its 'state', attribute 'metrics' which itself has the metric"
):
scheduler = ReduceLROnPlateauScheduler(optimizer, metric_name="acc")
evaluator = Engine(lambda engine, batch: None)
scheduler(evaluator)
with pytest.raises(ValueError, match=r"Length of argument metric_values should be equal to num_events."):
metric_values = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
ReduceLROnPlateauScheduler.simulate_values(5, metric_values, 0.01)
@pytest.mark.parametrize("warmup_end_value", [0.23, None])
@pytest.mark.parametrize("T_0", [1, 12])
@pytest.mark.parametrize("T_mult", [1, 3])
def test_create_lr_scheduler_with_warmup_cosine(warmup_end_value, T_0, T_mult):
lr = 0.2
steps = 200
warm_steps = 50
warm_start = 0.023
def get_optim():
t1 = torch.zeros([1], requires_grad=True)
return torch.optim.SGD([t1], lr=lr)
def get_cos_sched():
return CosineAnnealingWarmRestarts(optimizer, T_0=T_0, T_mult=T_mult)
optimizer = get_optim()
scheduler = get_cos_sched()
cosine_lrs = []
for i in range(steps):
cosine_lrs.append(optimizer.param_groups[0]["lr"])
scheduler.step()
optimizer = get_optim()
scheduler = create_lr_scheduler_with_warmup(
get_cos_sched(), warmup_start_value=warm_start, warmup_end_value=warmup_end_value, warmup_duration=warm_steps
)
warm_lrs = []
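# When warmup_end_value is None the warmup presumably ends at the optimizer's initial lr, so the last
# warmup event coincides with the scheduler's first value and one fewer warmup event precedes the cosine phase.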
real_warm_steps = warm_steps if warmup_end_value is not None else (warm_steps - 1)
for epoch in range(real_warm_steps + steps):
scheduler(None)
warm_lrs.append(optimizer.param_groups[0]["lr"])
if warmup_end_value is not None:
np.testing.assert_allclose(np.linspace(warm_start, warmup_end_value, warm_steps), warm_lrs[:warm_steps])
assert warm_lrs[real_warm_steps:] == cosine_lrs
else:
np.testing.assert_allclose(np.linspace(warm_start, lr, warm_steps), warm_lrs[:warm_steps])
assert warm_lrs[real_warm_steps:] == cosine_lrs
|
import sys
import time
from unittest.mock import patch
import pytest
from pytest import approx
from ignite.engine import Engine, EventEnum, Events
from ignite.handlers.time_profilers import BasicTimeProfiler, HandlersTimeProfiler
if sys.platform.startswith("darwin"):
pytest.skip("Skip if on MacOS", allow_module_level=True)
def _do_nothing_update_fn(engine, batch):
pass
def get_prepared_engine_for_basic_profiler(true_event_handler_time):
dummy_trainer = Engine(_do_nothing_update_fn)
@dummy_trainer.on(Events.STARTED)
def delay_start(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.COMPLETED)
def delay_complete(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.EPOCH_STARTED)
def delay_epoch_start(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.EPOCH_COMPLETED)
def delay_epoch_complete(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.ITERATION_STARTED)
def delay_iter_start(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.ITERATION_COMPLETED)
def delay_iter_complete(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.GET_BATCH_STARTED)
def delay_get_batch_started(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.GET_BATCH_COMPLETED)
def delay_get_batch_completed(engine):
time.sleep(true_event_handler_time)
return dummy_trainer
def get_prepared_engine_for_handlers_profiler(true_event_handler_time):
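# HANDLERS_SLEEP_COUNT: number of attached handlers below that sleep once per trigger.
# PROCESSING_SLEEP_COUNT: sleeps that land inside the processing phase (the step itself plus the two
# custom-event handlers it fires), presumably used by callers to estimate expected totals.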
HANDLERS_SLEEP_COUNT = 11
PROCESSING_SLEEP_COUNT = 3
class CustomEvents(EventEnum):
CUSTOM_STARTED = "custom_started"
CUSTOM_COMPLETED = "custom_completed"
def dummy_train_step(engine, batch):
engine.fire_event(CustomEvents.CUSTOM_STARTED)
time.sleep(true_event_handler_time)
engine.fire_event(CustomEvents.CUSTOM_COMPLETED)
dummy_trainer = Engine(dummy_train_step)
dummy_trainer.register_events(*CustomEvents)
@dummy_trainer.on(Events.STARTED)
def delay_start(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.COMPLETED)
def delay_complete(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.EPOCH_STARTED)
def delay_epoch_start(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.EPOCH_COMPLETED)
def delay_epoch_complete(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.ITERATION_STARTED)
def delay_iter_start(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.ITERATION_COMPLETED)
def delay_iter_complete(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.GET_BATCH_STARTED)
def delay_get_batch_started(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.GET_BATCH_COMPLETED)
def delay_get_batch_completed(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(CustomEvents.CUSTOM_STARTED)
def delay_custom_started(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(CustomEvents.CUSTOM_COMPLETED)
def delay_custom_completed(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.EPOCH_STARTED(once=1))
def do_something_once_on_1_epoch():
time.sleep(true_event_handler_time)
return dummy_trainer, HANDLERS_SLEEP_COUNT, PROCESSING_SLEEP_COUNT
def test_profilers_wrong_inputs():
profiler = BasicTimeProfiler()
with pytest.raises(TypeError, match=r"Argument engine should be ignite.engine.Engine"):
profiler.attach(None)
with pytest.raises(ModuleNotFoundError, match=r"Need pandas to write results as files"):
with patch.dict("sys.modules", {"pandas": None}):
profiler.write_results("")
profiler = HandlersTimeProfiler()
with pytest.raises(TypeError, match=r"Argument engine should be ignite.engine.Engine"):
profiler.attach(None)
with pytest.raises(ModuleNotFoundError, match=r"Need pandas to write results as files"):
with patch.dict("sys.modules", {"pandas": None}):
profiler.write_results("")
def test_dataflow_timer_basic_profiler():
true_dataflow_time_per_ele = 0.1
true_max_epochs = 1
true_num_iters = 2
def dummy_data_loader(data):
while True:
for d in data:
time.sleep(true_dataflow_time_per_ele)
yield d
dummy_data = range(true_num_iters)
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
dummy_trainer.run(dummy_data_loader(dummy_data), max_epochs=true_max_epochs, epoch_length=true_num_iters)
results = profiler.get_results()
dataflow_results = results["dataflow_stats"]
assert dataflow_results["min/index"][0] == approx(true_dataflow_time_per_ele, abs=1e-1)
assert dataflow_results["max/index"][0] == approx(true_dataflow_time_per_ele, abs=1e-1)
assert dataflow_results["mean"] == approx(true_dataflow_time_per_ele, abs=1e-1)
assert dataflow_results["std"] == approx(0.0, abs=1e-1)
assert dataflow_results["total"] == approx(true_num_iters * true_dataflow_time_per_ele, abs=1e-1)
def test_dataflow_timer_handlers_profiler():
true_dataflow_time_per_ele = 0.1
true_max_epochs = 1
true_num_iters = 2
def dummy_data_loader(data):
while True:
for d in data:
time.sleep(true_dataflow_time_per_ele)
yield d
dummy_data = range(true_num_iters)
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
dummy_trainer.run(dummy_data_loader(dummy_data), max_epochs=true_max_epochs, epoch_length=true_num_iters)
results = profiler.get_results()
dataflow_results = results[-1]
assert dataflow_results[0] == "Dataflow"
# event name
assert dataflow_results[1] == "None"
# total
assert dataflow_results[2] == approx(true_num_iters * true_dataflow_time_per_ele, abs=1e-1)
# min
assert dataflow_results[3][0] == approx(true_dataflow_time_per_ele, abs=1e-1)
# max
assert dataflow_results[4][0] == approx(true_dataflow_time_per_ele, abs=1e-1)
# mean
assert dataflow_results[5] == approx(true_dataflow_time_per_ele, abs=1e-1)
# stddev
assert dataflow_results[6] == approx(0.0, abs=1e-1)
def test_processing_timer_basic_profiler():
true_processing_time = 0.1
true_max_epochs = 2
true_num_iters = 2
def train_updater(engine, batch):
time.sleep(true_processing_time)
profiler = BasicTimeProfiler()
dummy_trainer = Engine(train_updater)
profiler.attach(dummy_trainer)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
processing_results = results["processing_stats"]
assert processing_results["min/index"][0] == approx(true_processing_time, abs=1e-1)
assert processing_results["max/index"][0] == approx(true_processing_time, abs=1e-1)
assert processing_results["mean"] == approx(true_processing_time, abs=1e-1)
assert processing_results["std"] == approx(0.0, abs=1e-1)
assert processing_results["total"] == approx(true_max_epochs * true_num_iters * true_processing_time, abs=1e-1)
def test_processing_timer_handlers_profiler():
true_processing_time = 0.1
true_max_epochs = 2
true_num_iters = 2
def train_updater(engine, batch):
time.sleep(true_processing_time)
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(train_updater)
profiler.attach(dummy_trainer)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
processing_results = results[-2]
assert processing_results[0] == "Processing"
# event name
assert processing_results[1] == "None"
# total
assert processing_results[2] == approx(true_max_epochs * true_num_iters * true_processing_time, abs=1e-1)
# min
assert processing_results[3][0] == approx(true_processing_time, abs=1e-1)
# max
assert processing_results[4][0] == approx(true_processing_time, abs=1e-1)
# mean
assert processing_results[5] == approx(true_processing_time, abs=1e-1)
# stddev
assert processing_results[6] == approx(0.0, abs=1e-1)
def test_event_handler_started_basic_profiler():
true_event_handler_time = 0.1
true_max_epochs = 2
true_num_iters = 2
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.STARTED)
def delay_start(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]["STARTED"]
assert event_results["total"] == approx(true_event_handler_time, abs=1e-1)
def test_event_handler_started_handlers_profiler():
true_event_handler_time = 0.1
true_max_epochs = 2
true_num_iters = 2
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.STARTED)
def delay_start(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results[0]
assert "delay_start" in event_results[0]
assert event_results[1] == "STARTED"
assert event_results[2] == approx(true_event_handler_time, abs=1e-1) # total
def test_event_handler_completed_basic_profiler():
true_event_handler_time = 0.1
true_max_epochs = 2
true_num_iters = 2
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.COMPLETED)
def delay_complete(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]["COMPLETED"]
assert event_results["total"] == approx(true_event_handler_time, abs=1e-1)
def test_event_handler_completed_handlers_profiler():
true_event_handler_time = 0.1
true_max_epochs = 2
true_num_iters = 2
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.COMPLETED)
def delay_complete(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results[0]
assert "delay_complete" in event_results[0]
assert event_results[1] == "COMPLETED"
assert event_results[2] == approx(true_event_handler_time, abs=1e-1) # total
def test_event_handler_epoch_started_basic_profiler():
true_event_handler_time = 0.1
true_max_epochs = 2
true_num_iters = 1
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.EPOCH_STARTED)
def delay_epoch_start(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]["EPOCH_STARTED"]
assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1)
assert event_results["std"] == approx(0.0, abs=1e-1)
assert event_results["total"] == approx(true_max_epochs * true_event_handler_time, abs=1e-1)
def test_event_handler_epoch_started_handlers_profiler():
true_event_handler_time = 0.1
true_max_epochs = 2
true_num_iters = 1
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.EPOCH_STARTED)
def delay_epoch_start(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results[0]
assert "delay_epoch_start" in event_results[0]
assert event_results[1] == "EPOCH_STARTED"
assert event_results[2] == approx(true_max_epochs * true_event_handler_time, abs=1e-1) # total
assert event_results[3][0] == approx(true_event_handler_time, abs=1e-1) # min
assert event_results[4][0] == approx(true_event_handler_time, abs=1e-1) # max
assert event_results[5] == approx(true_event_handler_time, abs=1e-1) # mean
assert event_results[6] == approx(0.0, abs=1e-1) # stddev
def test_event_handler_epoch_completed_basic_profiler():
true_event_handler_time = 0.1
true_max_epochs = 2
true_num_iters = 1
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.EPOCH_COMPLETED)
def delay_epoch_complete(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]["EPOCH_COMPLETED"]
assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1)
assert event_results["std"] == approx(0.0, abs=1e-1)
assert event_results["total"] == approx(true_max_epochs * true_event_handler_time, abs=1e-1)
def test_event_handler_epoch_completed_handlers_profiler():
true_event_handler_time = 0.1
true_max_epochs = 2
true_num_iters = 1
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.EPOCH_COMPLETED)
def delay_epoch_complete(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results[0]
assert "delay_epoch_complete" in event_results[0]
assert event_results[1] == "EPOCH_COMPLETED"
assert event_results[2] == approx(true_max_epochs * true_event_handler_time, abs=1e-1) # total
assert event_results[3][0] == approx(true_event_handler_time, abs=1e-1) # min
assert event_results[4][0] == approx(true_event_handler_time, abs=1e-1) # max
assert event_results[5] == approx(true_event_handler_time, abs=1e-1) # mean
assert event_results[6] == approx(0.0, abs=1e-1) # stddev
def test_event_handler_iteration_started_basic_profiler():
true_event_handler_time = 0.1
true_max_epochs = 1
true_num_iters = 2
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.ITERATION_STARTED)
def delay_iter_start(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]["ITERATION_STARTED"]
assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1)
assert event_results["std"] == approx(0.0, abs=1e-1)
assert event_results["total"] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1)
def test_event_handler_iteration_started_handlers_profiler():
true_event_handler_time = 0.1
true_max_epochs = 1
true_num_iters = 2
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.ITERATION_STARTED)
def delay_iter_start(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results[0]
assert "delay_iter_start" in event_results[0]
assert event_results[1] == "ITERATION_STARTED"
assert event_results[2] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1) # total
assert event_results[3][0] == approx(true_event_handler_time, abs=1e-1) # min
assert event_results[4][0] == approx(true_event_handler_time, abs=1e-1) # max
assert event_results[5] == approx(true_event_handler_time, abs=1e-1) # mean
assert event_results[6] == approx(0.0, abs=1e-1) # stddev
def test_event_handler_iteration_completed_basic_profiler():
true_event_handler_time = 0.1
true_max_epochs = 1
true_num_iters = 2
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.ITERATION_COMPLETED)
def delay_iter_complete(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]["ITERATION_COMPLETED"]
assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1)
assert event_results["std"] == approx(0.0, abs=1e-1)
assert event_results["total"] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1)
def test_event_handler_iteration_completed_handlers_profiler():
true_event_handler_time = 0.1
true_max_epochs = 1
true_num_iters = 2
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.ITERATION_COMPLETED)
def delay_iter_complete(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results[0]
assert "delay_iter_complete" in event_results[0]
assert event_results[1] == "ITERATION_COMPLETED"
assert event_results[2] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1) # total
assert event_results[3][0] == approx(true_event_handler_time, abs=1e-1) # min
assert event_results[4][0] == approx(true_event_handler_time, abs=1e-1) # max
assert event_results[5] == approx(true_event_handler_time, abs=1e-1) # mean
assert event_results[6] == approx(0.0, abs=1e-1) # stddev
def test_event_handler_get_batch_started_basic_profiler():
true_event_handler_time = 0.1
true_max_epochs = 1
true_num_iters = 2
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.GET_BATCH_STARTED)
def delay_get_batch_started(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]["GET_BATCH_STARTED"]
assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1)
assert event_results["std"] == approx(0.0, abs=1e-1)
assert event_results["total"] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1)
def test_event_handler_get_batch_started_handlers_profiler():
true_event_handler_time = 0.1
true_max_epochs = 1
true_num_iters = 2
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.GET_BATCH_STARTED)
def delay_get_batch_started(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results[0]
assert "delay_get_batch_started" in event_results[0]
assert event_results[1] == "GET_BATCH_STARTED"
assert event_results[2] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1) # total
assert event_results[3][0] == approx(true_event_handler_time, abs=1e-1) # min
assert event_results[4][0] == approx(true_event_handler_time, abs=1e-1) # max
assert event_results[5] == approx(true_event_handler_time, abs=1e-1) # mean
assert event_results[6] == approx(0.0, abs=1e-1) # stddev
def test_event_handler_get_batch_completed_basic_profiler():
true_event_handler_time = 0.1
true_max_epochs = 1
true_num_iters = 2
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.GET_BATCH_COMPLETED)
def delay_get_batch_completed(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]["GET_BATCH_COMPLETED"]
assert event_results["min/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["max/index"][0] == approx(true_event_handler_time, abs=1e-1)
assert event_results["mean"] == approx(true_event_handler_time, abs=1e-1)
assert event_results["std"] == approx(0.0, abs=1e-1)
assert event_results["total"] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1)
def test_event_handler_get_batch_completed_handlers_profiler():
true_event_handler_time = 0.1
true_max_epochs = 1
true_num_iters = 2
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.GET_BATCH_COMPLETED)
def delay_get_batch_completed(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results[0]
assert "delay_get_batch_completed" in event_results[0]
assert event_results[1] == "GET_BATCH_COMPLETED"
assert event_results[2] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1) # total
assert event_results[3][0] == approx(true_event_handler_time, abs=1e-1) # min
assert event_results[4][0] == approx(true_event_handler_time, abs=1e-1) # max
assert event_results[5] == approx(true_event_handler_time, abs=1e-1) # mean
assert event_results[6] == approx(0.0, abs=1e-1) # stddev
def test_neg_event_filter_threshold_handlers_profiler():
true_event_handler_time = 0.1
true_max_epochs = 1
true_num_iters = 1
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.EPOCH_STARTED(once=2))
def do_something_once_on_2_epoch():
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results[0]
assert "do_something_once_on_2_epoch" in event_results[0]
assert event_results[1] == "EPOCH_STARTED"
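    # EPOCH_STARTED(once=2) never fires here because the run lasts only a single epoch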
assert event_results[2] == "not triggered"
def test_pos_event_filter_threshold_handlers_profiler():
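    # note: the constant name below is spelled as defined on HandlersTimeProfiler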
true_event_handler_time = HandlersTimeProfiler.EVENT_FILTER_THESHOLD_TIME
true_max_epochs = 2
true_num_iters = 1
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.EPOCH_STARTED(once=2))
def do_something_once_on_2_epoch():
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results[0]
assert "do_something_once_on_2_epoch" in event_results[0]
assert event_results[1] == "EPOCH_STARTED"
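    # the handler fires only once (on epoch 2) over the two-epoch run, hence the division by 2 below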
assert event_results[2] == approx(
(true_max_epochs * true_num_iters * true_event_handler_time) / 2, abs=1e-1
) # total
def test_custom_event_with_arg_handlers_profiler():
true_event_handler_time = 0.1
true_max_epochs = 1
true_num_iters = 2
profiler = HandlersTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
dummy_trainer.register_events("custom_event")
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.ITERATION_COMPLETED(every=1))
def trigger_custom_event():
dummy_trainer.fire_event("custom_event")
args = [122, 324]
@dummy_trainer.on("custom_event", args)
def on_custom_event(args):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = None
for row in results:
if row[1] == "custom_event":
event_results = row
break
assert event_results is not None
assert "on_custom_event" in event_results[0]
assert event_results[2] == approx(true_max_epochs * true_num_iters * true_event_handler_time, abs=1e-1) # total
assert event_results[3][0] == approx(true_event_handler_time, abs=1e-1) # min
assert event_results[4][0] == approx(true_event_handler_time, abs=1e-1) # max
assert event_results[5] == approx(true_event_handler_time, abs=1e-1) # mean
assert event_results[6] == approx(0.0, abs=1e-1) # stddev
def test_event_handler_total_time_basic_profiler():
true_event_handler_time = 0.125
true_max_epochs = 1
true_num_iters = 1
profiler = BasicTimeProfiler()
dummy_trainer = Engine(_do_nothing_update_fn)
profiler.attach(dummy_trainer)
@dummy_trainer.on(Events.STARTED)
def delay_start(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.COMPLETED)
def delay_complete(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.EPOCH_STARTED)
def delay_epoch_start(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.EPOCH_COMPLETED)
def delay_epoch_complete(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.ITERATION_STARTED)
def delay_iter_start(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.ITERATION_COMPLETED)
def delay_iter_complete(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.GET_BATCH_STARTED)
def delay_get_batch_started(engine):
time.sleep(true_event_handler_time)
@dummy_trainer.on(Events.GET_BATCH_COMPLETED)
def delay_get_batch_completed(engine):
time.sleep(true_event_handler_time)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
event_results = results["event_handlers_stats"]
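    # eight handlers are attached and each sleeps once during the single-iteration run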
assert event_results["total_time"].item() == approx(true_event_handler_time * 8, abs=1e-1)
def test_event_handler_total_time_handlers_profiler():
true_event_handler_time = 0.125
true_max_epochs = 1
true_num_iters = 1
profiler = HandlersTimeProfiler()
dummy_trainer, handlers_sleep_count, processing_sleep_count = get_prepared_engine_for_handlers_profiler(
true_event_handler_time
)
profiler.attach(dummy_trainer)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
results = profiler.get_results()
total_handler_stats = results[-3] # total result row
total_processing_stats = results[-2] # processing result row
assert total_handler_stats[2] == approx(true_event_handler_time * handlers_sleep_count, abs=1e-1) # total time
assert total_processing_stats[2] == approx(true_event_handler_time * processing_sleep_count, abs=1e-1) # total time
def test_write_results_basic_profiler(dirname):
true_event_handler_time = 0.125
true_max_epochs = 3
true_num_iters = 2
profiler = BasicTimeProfiler()
dummy_trainer = get_prepared_engine_for_basic_profiler(true_event_handler_time)
profiler.attach(dummy_trainer)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
fp = dirname / "test_log.csv"
profiler.write_results(fp)
assert fp.is_file()
file_length = 0
with open(fp) as f:
for _ in f:
file_length += 1
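    # one CSV row per iteration plus the header row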
assert file_length == (true_max_epochs * true_num_iters) + 1
def test_write_results_handlers_profiler(dirname):
true_event_handler_time = 0.125
true_max_epochs = 3
true_num_iters = 2
profiler = HandlersTimeProfiler()
dummy_trainer, _, _ = get_prepared_engine_for_handlers_profiler(true_event_handler_time)
profiler.attach(dummy_trainer)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
fp = dirname / "test_log.csv"
profiler.write_results(fp)
assert fp.is_file()
file_length = 0
with open(fp) as f:
for _ in f:
file_length += 1
assert file_length == (true_max_epochs * true_num_iters) + 1
def test_print_results_basic_profiler(capsys):
true_max_epochs = 1
true_num_iters = 5
profiler = BasicTimeProfiler()
dummy_trainer = get_prepared_engine_for_basic_profiler(true_event_handler_time=0.0125)
profiler.attach(dummy_trainer)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
BasicTimeProfiler.print_results(profiler.get_results())
captured = capsys.readouterr()
out = captured.out
assert "BasicTimeProfiler._" not in out
assert "nan" not in out
def test_print_results_handlers_profiler(capsys):
true_max_epochs = 1
true_num_iters = 5
profiler = HandlersTimeProfiler()
dummy_trainer, _, _ = get_prepared_engine_for_handlers_profiler(true_event_handler_time=0.0125)
profiler.attach(dummy_trainer)
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
HandlersTimeProfiler.print_results(profiler.get_results())
captured = capsys.readouterr()
out = captured.out
assert "HandlersTimeProfiler." not in out
assert "Timer." not in out
def test_get_intermediate_results_during_run_basic_profiler(capsys):
true_event_handler_time = 0.0645
true_max_epochs = 2
true_num_iters = 5
profiler = BasicTimeProfiler()
dummy_trainer = get_prepared_engine_for_basic_profiler(true_event_handler_time)
profiler.attach(dummy_trainer)
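    # intermediate results fetched mid-run must already be well-formed (no "nan", no zero-valued min/index entries)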
@dummy_trainer.on(Events.ITERATION_COMPLETED(every=3))
def log_results(_):
results = profiler.get_results()
profiler.print_results(results)
captured = capsys.readouterr()
out = captured.out
assert "BasicTimeProfiler._" not in out
assert "nan" not in out
assert " min/index: (0.0, " not in out, out
dummy_trainer.run(range(true_num_iters), max_epochs=true_max_epochs)
|
import copy
import os
from pathlib import Path
from unittest.mock import MagicMock
import matplotlib
import pytest
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
import ignite.distributed as idist
from ignite.contrib.handlers import FastaiLRFinder
from ignite.engine import create_supervised_trainer, Engine, Events
matplotlib.use("agg")
@pytest.fixture
def no_site_packages():
import sys
matplotlib = sys.modules["matplotlib"]
del sys.modules["matplotlib"]
prev_path = list(sys.path)
sys.path = [p for p in sys.path if "site-packages" not in p]
yield "no_site_packages"
sys.path = prev_path
sys.modules["matplotlib"] = matplotlib
class DummyModel(nn.Module):
def __init__(self, n_channels=10, out_channels=1, flatten_input=False):
super(DummyModel, self).__init__()
self.net = nn.Sequential(nn.Flatten() if flatten_input else nn.Identity(), nn.Linear(n_channels, out_channels))
def forward(self, x):
return self.net(x)
class DummyModelMulipleParamGroups(nn.Module):
def __init__(self):
super(DummyModelMulipleParamGroups, self).__init__()
self.fc1 = nn.Linear(10, 20)
self.fc2 = nn.Linear(20, 10)
self.fc3 = nn.Linear(10, 10)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
return self.fc3(x)
@pytest.fixture
def model():
model = DummyModel(out_channels=10)
yield model
@pytest.fixture
def model_multiple_param_groups():
model_multiple_param_groups = DummyModelMulipleParamGroups()
yield model_multiple_param_groups
@pytest.fixture
def mnist_model():
model = DummyModel(n_channels=784, out_channels=10, flatten_input=True)
yield model
@pytest.fixture
def optimizer(model):
yield SGD(model.parameters(), lr=1e-4, momentum=0.0)
@pytest.fixture
def optimizer_multiple_param_groups(model_multiple_param_groups):
optimizer_multiple_param_groups = SGD(
[
{"params": model_multiple_param_groups.fc1.parameters(), "lr": 4e-1},
{"params": model_multiple_param_groups.fc2.parameters(), "lr": 3e-2},
{"params": model_multiple_param_groups.fc3.parameters(), "lr": 3e-3},
]
)
yield optimizer_multiple_param_groups
@pytest.fixture
def mnist_optimizer(mnist_model):
yield SGD(mnist_model.parameters(), lr=1e-4, momentum=0.0)
@pytest.fixture
def to_save(model, optimizer):
yield {"model": model, "optimizer": optimizer}
@pytest.fixture
def mnist_to_save(mnist_model, mnist_optimizer):
yield {"model": mnist_model, "optimizer": mnist_optimizer}
@pytest.fixture
def to_save_mulitple_param_groups(model_multiple_param_groups, optimizer_multiple_param_groups):
yield {"model": model_multiple_param_groups, "optimizer": optimizer_multiple_param_groups}
@pytest.fixture
def lr_finder():
yield FastaiLRFinder()
@pytest.fixture
def dummy_engine(model, optimizer):
engine = create_supervised_trainer(model, optimizer, nn.MSELoss())
yield engine
@pytest.fixture
def dummy_engine_mnist(mnist_model, mnist_optimizer):
mnist_engine = create_supervised_trainer(mnist_model, mnist_optimizer, nn.CrossEntropyLoss())
yield mnist_engine
@pytest.fixture
def dummy_engine_mulitple_param_groups(model_multiple_param_groups, optimizer_multiple_param_groups):
engine_multiple_param_groups = create_supervised_trainer(
model_multiple_param_groups, optimizer_multiple_param_groups, nn.MSELoss()
)
yield engine_multiple_param_groups
@pytest.fixture
def dataloader():
yield torch.rand(100, 2, 10)
@pytest.fixture
def dataloader_plot():
yield torch.rand(500, 2, 10)
@pytest.fixture
def mnist_dataloader():
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root="/tmp", transform=data_transform, train=True), batch_size=256, shuffle=True
)
yield train_loader
def test_attach_incorrect_input_args(lr_finder, dummy_engine, model, optimizer, dataloader):
with pytest.raises(TypeError, match=r"Argument to_save should be a mapping"):
with lr_finder.attach(dummy_engine, to_save=123):
pass
with pytest.raises(TypeError, match=r"Object <class 'int'> should have `state_dict` method"):
with lr_finder.attach(dummy_engine, to_save={1: 2}):
pass
with pytest.raises(ValueError, match=r"Mapping to_save should contain 'optimizer' key"):
with lr_finder.attach(dummy_engine, to_save={"model": model}):
pass
to_save = {"model": model, "optimizer": optimizer}
with pytest.raises(ValueError, match=r"smooth_f is outside the range \[0, 1\]"):
with lr_finder.attach(dummy_engine, to_save=to_save, smooth_f=234):
pass
with pytest.raises(ValueError, match=r"diverge_th should be larger than 1"):
with lr_finder.attach(dummy_engine, to_save=to_save, diverge_th=0.0):
pass
with pytest.raises(TypeError, match=r"if provided, num_iter should be an integer"):
with lr_finder.attach(dummy_engine, to_save=to_save, num_iter=0.0):
pass
with pytest.raises(ValueError, match=r"if provided, num_iter should be positive"):
with lr_finder.attach(dummy_engine, to_save=to_save, num_iter=0):
pass
with pytest.raises(TypeError, match=r"Object to_save\['optimizer'] should be torch optimizer"):
with lr_finder.attach(dummy_engine, {"model": to_save["model"], "optimizer": to_save["model"]}):
pass
with pytest.raises(ValueError, match=r"step_mode should be 'exp' or 'linear'"):
with lr_finder.attach(dummy_engine, to_save=to_save, step_mode="abc"):
pass
with lr_finder.attach(dummy_engine, to_save) as trainer_with_finder:
trainer_with_finder.run(dataloader)
with pytest.raises(ValueError, match=r"skip_start cannot be negative"):
lr_finder.plot(skip_start=-1)
with pytest.raises(ValueError, match=r"skip_end cannot be negative"):
lr_finder.plot(skip_end=-1)
with pytest.raises(ValueError, match=r"Number of values of start_lr should be equal to optimizer values."):
with lr_finder.attach(dummy_engine, to_save, start_lr=[0.1, 0.1]):
pass
with pytest.raises(ValueError, match=r"Number of values of end_lr should be equal to optimizer values."):
with lr_finder.attach(dummy_engine, to_save, end_lr=[0.1, 0.1]):
pass
with pytest.raises(TypeError, match=r"start_lr should be a float or list of floats"):
with lr_finder.attach(dummy_engine, to_save, start_lr=1):
pass
with pytest.raises(TypeError, match=r"end_lr should be a float or list of floats"):
with lr_finder.attach(dummy_engine, to_save, end_lr=1):
pass
def test_attach_without_with(lr_finder, dummy_engine, to_save):
_ = lr_finder.attach(dummy_engine, to_save=to_save)
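    # calling attach() without entering the returned context manager must not register any handlers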
for event in dummy_engine._event_handlers:
assert len(dummy_engine._event_handlers[event]) == 0
with lr_finder.attach(dummy_engine, to_save=to_save) as _:
assert any([len(dummy_engine._event_handlers[event]) != 0 for event in dummy_engine._event_handlers])
with pytest.raises(
RuntimeError, match=r"learning rate finder didn't run yet so lr_suggestion can't be returned"
):
lr_finder.lr_suggestion()
with pytest.raises(RuntimeError, match=r"learning rate finder didn't run yet so results can't be plotted"):
lr_finder.plot()
def test_with_attach(lr_finder, to_save, dummy_engine, dataloader):
with lr_finder.attach(dummy_engine, to_save=to_save) as trainer_with_finder:
trainer_with_finder.run(dataloader)
assert lr_finder.get_results() is not None
for event in dummy_engine._event_handlers:
assert len(dummy_engine._event_handlers[event]) == 0
def test_wrong_values_start_lr_and_end_lr(
lr_finder, dummy_engine, to_save, dummy_engine_mulitple_param_groups, to_save_mulitple_param_groups
):
with pytest.raises(ValueError, match=r"start_lr must be less than end_lr"):
with lr_finder.attach(dummy_engine, to_save=to_save, start_lr=10.0, end_lr=1.0):
pass
with pytest.raises(ValueError, match=r"start_lr must be less than end_lr"):
with lr_finder.attach(
dummy_engine_mulitple_param_groups,
to_save=to_save_mulitple_param_groups,
start_lr=[1.0, 10.0, 5.0],
end_lr=[10.0, 10.0, 10.0],
):
pass
def test_model_optimizer_reset(lr_finder, to_save, dummy_engine, dataloader):
optimizer = to_save["optimizer"]
model = to_save["model"]
init_optimizer_sd = copy.deepcopy(optimizer.state_dict())
init_model_sd = copy.deepcopy(model.state_dict())
init_trainer_sd = copy.deepcopy(dummy_engine.state_dict())
with pytest.warns(UserWarning, match=r"Run completed without loss diverging"):
with lr_finder.attach(dummy_engine, to_save=to_save, diverge_th=float("inf")) as trainer_with_finder:
trainer_with_finder.run(dataloader)
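    # once the finder context exits, model, optimizer and trainer state must be restored to their initial values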
assert init_optimizer_sd == optimizer.state_dict()
for tensor1, tensor2 in zip(init_model_sd.values(), model.state_dict().values()):
assert torch.all(torch.eq(tensor1, tensor2))
assert init_trainer_sd == dummy_engine.state_dict()
def test_lr_policy(lr_finder, to_save, dummy_engine, dataloader):
with lr_finder.attach(dummy_engine, to_save=to_save, step_mode="linear") as trainer_with_finder:
trainer_with_finder.run(dataloader)
lr = lr_finder.get_results()["lr"]
assert all([lr[i - 1] < lr[i] for i in range(1, len(lr))])
with lr_finder.attach(dummy_engine, to_save=to_save, step_mode="exp") as trainer_with_finder:
trainer_with_finder.run(dataloader)
lr = lr_finder.get_results()["lr"]
assert all([lr[i - 1] < lr[i] for i in range(1, len(lr))])
@pytest.mark.parametrize("step_mode", ["exp", "linear"])
def test_multiple_optimizers(
lr_finder, dummy_engine_mulitple_param_groups, to_save_mulitple_param_groups, dataloader, step_mode
):
start_lr = [0.1, 0.1, 0.01]
end_lr = [1.0, 1.0, 1.0]
with lr_finder.attach(
dummy_engine_mulitple_param_groups,
to_save_mulitple_param_groups,
start_lr=start_lr,
end_lr=end_lr,
step_mode=step_mode,
) as trainer:
trainer.run(dataloader)
groups_lrs = lr_finder.get_results()["lr"]
    # every param group's lr must increase monotonically across iterations (history rows are per-iteration)
    assert all(all(g[i - 1] < g[i] for i in range(1, len(g))) for g in zip(*groups_lrs))
def assert_output_sizes(lr_finder, dummy_engine):
iteration = dummy_engine.state.iteration
lr_finder_results = lr_finder.get_results()
lr, loss = lr_finder_results["lr"], lr_finder_results["loss"]
assert len(lr) == len(loss) == iteration
def test_num_iter_is_none(lr_finder, to_save, dummy_engine, dataloader):
with pytest.warns(UserWarning, match=r"Run completed without loss diverging"):
with lr_finder.attach(dummy_engine, to_save=to_save, diverge_th=float("inf")) as trainer_with_finder:
trainer_with_finder.run(dataloader)
assert_output_sizes(lr_finder, dummy_engine)
assert dummy_engine.state.iteration == len(dataloader)
def test_num_iter_is_enough(lr_finder, to_save, dummy_engine, dataloader):
with pytest.warns(UserWarning, match=r"Run completed without loss diverging"):
with lr_finder.attach(
dummy_engine, to_save=to_save, num_iter=50, diverge_th=float("inf")
) as trainer_with_finder:
trainer_with_finder.run(dataloader)
assert_output_sizes(lr_finder, dummy_engine)
# -1 because it terminates when state.iteration > num_iter
assert dummy_engine.state.iteration - 1 == 50
def test_num_iter_is_not_enough(lr_finder, to_save, dummy_engine, dataloader):
with lr_finder.attach(dummy_engine, to_save, num_iter=150, diverge_th=float("inf")) as trainer_with_finder:
with pytest.warns(UserWarning):
trainer_with_finder.run(dataloader)
assert_output_sizes(lr_finder, dummy_engine)
assert dummy_engine.state.iteration != len(dataloader)
assert dummy_engine.state.iteration == 150
def test_detach_terminates(lr_finder, to_save, dummy_engine, dataloader):
with lr_finder.attach(dummy_engine, to_save, end_lr=100.0, diverge_th=2) as trainer_with_finder:
trainer_with_finder.run(dataloader)
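    # once the finder context has exited, the original engine should run normally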
dummy_engine.run(dataloader, max_epochs=3)
assert dummy_engine.state.epoch == 3
def test_different_num_iters(lr_finder, to_save, dummy_engine, dataloader):
with pytest.warns(UserWarning, match=r"Run completed without loss diverging"):
with lr_finder.attach(dummy_engine, to_save, num_iter=200, diverge_th=float("inf")) as trainer_with_finder:
trainer_with_finder.run(dataloader)
assert trainer_with_finder.state.iteration == 200 # num_iter
with pytest.warns(UserWarning, match=r"Run completed without loss diverging"):
with lr_finder.attach(dummy_engine, to_save, num_iter=1000, diverge_th=float("inf")) as trainer_with_finder:
trainer_with_finder.run(dataloader)
assert trainer_with_finder.state.iteration == 1000 # num_iter
@pytest.mark.parametrize("step_mode", ["exp", "linear"])
def test_start_lr(lr_finder, to_save, dummy_engine, dataloader, step_mode):
with lr_finder.attach(
dummy_engine, to_save, start_lr=0.01, end_lr=10.0, num_iter=5, step_mode=step_mode, diverge_th=1
) as trainer_with_finder:
trainer_with_finder.run(dataloader)
history = lr_finder.get_results()
if step_mode == "exp":
assert 0.01 < history["lr"][0] < 0.16
else:
assert pytest.approx(history["lr"][0]) == 0.01
def test_engine_output_type(lr_finder, dummy_engine, optimizer):
from ignite.handlers.param_scheduler import PiecewiseLinear
dummy_engine.state.iteration = 1
dummy_engine.state.output = [10]
with pytest.raises(TypeError, match=r"output of the engine should be of type float or 0d torch.Tensor"):
lr_finder._log_lr_and_loss(dummy_engine, output_transform=lambda x: x, smooth_f=0, diverge_th=1)
dummy_engine.state.output = (10, 5)
with pytest.raises(TypeError, match=r"output of the engine should be of type float or 0d torch.Tensor"):
lr_finder._log_lr_and_loss(dummy_engine, output_transform=lambda x: x, smooth_f=0, diverge_th=1)
dummy_engine.state.output = torch.tensor([1, 2], dtype=torch.float32)
with pytest.raises(ValueError, match=r"if output of the engine is torch.Tensor"):
lr_finder._log_lr_and_loss(dummy_engine, output_transform=lambda x: x, smooth_f=0, diverge_th=1)
lr_finder._lr_schedule = PiecewiseLinear(
optimizer, param_name="lr", milestones_values=[(0, optimizer.param_groups[0]["lr"]), (100, 10)]
)
dummy_engine.state.output = torch.tensor(10.0, dtype=torch.float32)
lr_finder._history = {"lr": [], "loss": []}
lr_finder._log_lr_and_loss(dummy_engine, output_transform=lambda x: x, smooth_f=0, diverge_th=1)
loss = lr_finder._history["loss"][-1]
assert type(loss) is float
dummy_engine.state.output = torch.tensor([10.0], dtype=torch.float32)
lr_finder._history = {"lr": [], "loss": []}
lr_finder._log_lr_and_loss(dummy_engine, output_transform=lambda x: x, smooth_f=0, diverge_th=1)
loss = lr_finder._history["loss"][-1]
assert type(loss) is float
def test_lr_suggestion_unexpected_curve(lr_finder, to_save, dummy_engine, dataloader):
with lr_finder.attach(dummy_engine, to_save) as trainer_with_finder:
trainer_with_finder.run(dataloader)
lr_finder._history["loss"].insert(0, 0)
with pytest.raises(
RuntimeError, match=r"FastaiLRFinder got unexpected curve shape, the curve should be somehow U-shaped"
):
lr_finder.lr_suggestion()
def test_lr_suggestion_single_param_group(lr_finder):
import numpy as np
noise = 0.05
lr_finder._history["loss"] = np.linspace(-5.0, 5.0, num=100) ** 2 + noise
lr_finder._history["lr"] = np.linspace(0.01, 10, num=100)
    # lr_finder.lr_suggestion() normally returns a plain value, but since loss and lr are
    # assigned as arrays here (instead of lists), it returns an array element, hence .item() below
suggested_lr = lr_finder.lr_suggestion()
assert pytest.approx(suggested_lr.item()) == 0.110909089
def test_lr_suggestion_multiple_param_groups(lr_finder):
import numpy as np
noise = 0.06
lr_finder._history["loss"] = np.linspace(-5.0, 5, num=50) ** 2 + noise
# 2 param_groups
lr_finder._history["lr"] = np.linspace(0.01, 10, num=100).reshape(50, 2)
    # lr_finder.lr_suggestion() normally returns one value per param group, but since loss and lr
    # are assigned as arrays here (instead of lists), it returns array elements, hence .item() below
suggested_lrs = lr_finder.lr_suggestion()
assert pytest.approx(suggested_lrs[0].item()) == 0.21181818
assert pytest.approx(suggested_lrs[1].item()) == 0.31272727
def test_lr_suggestion_mnist(lr_finder, mnist_to_save, dummy_engine_mnist, mnist_dataloader):
max_iters = 50
with lr_finder.attach(dummy_engine_mnist, mnist_to_save, diverge_th=2, step_mode="linear") as trainer_with_finder:
with trainer_with_finder.add_event_handler(
Events.ITERATION_COMPLETED(once=max_iters), lambda _: trainer_with_finder.terminate()
):
trainer_with_finder.run(mnist_dataloader)
assert 1e-4 <= lr_finder.lr_suggestion() <= 2
def test_apply_suggested_lr_unmatched_optimizers(
lr_finder, mnist_to_save, dummy_engine_mnist, optimizer_multiple_param_groups, mnist_dataloader
):
with lr_finder.attach(dummy_engine_mnist, mnist_to_save) as trainer_with_finder:
trainer_with_finder.run(mnist_dataloader)
sug_lr = lr_finder.lr_suggestion()
with pytest.raises(RuntimeError, match=r"The number of parameter groups does not match"):
lr_finder.apply_suggested_lr(optimizer_multiple_param_groups)
def test_apply_suggested_lr_single_param_groups(
lr_finder, mnist_to_save, dummy_engine_mnist, mnist_optimizer, mnist_dataloader
):
with lr_finder.attach(dummy_engine_mnist, mnist_to_save) as trainer_with_finder:
trainer_with_finder.run(mnist_dataloader)
sug_lr = lr_finder.lr_suggestion()
lr_finder.apply_suggested_lr(mnist_optimizer)
assert mnist_optimizer.param_groups[0]["lr"] == sug_lr
def test_apply_suggested_lr_multiple_param_groups(
lr_finder,
to_save_mulitple_param_groups,
dummy_engine_mulitple_param_groups,
optimizer_multiple_param_groups,
dataloader_plot,
):
with lr_finder.attach(dummy_engine_mulitple_param_groups, to_save_mulitple_param_groups) as trainer_with_finder:
trainer_with_finder.run(dataloader_plot)
sug_lr = lr_finder.lr_suggestion()
lr_finder.apply_suggested_lr(optimizer_multiple_param_groups)
for i in range(len(sug_lr)):
assert optimizer_multiple_param_groups.param_groups[i]["lr"] == sug_lr[i]
def test_no_matplotlib(no_site_packages, lr_finder):
with pytest.raises(ModuleNotFoundError, match=r"This method requires matplotlib to be installed"):
lr_finder.plot()
def test_plot_single_param_group(dirname, lr_finder, mnist_to_save, dummy_engine_mnist, mnist_dataloader):
with lr_finder.attach(dummy_engine_mnist, mnist_to_save, end_lr=20.0, smooth_f=0.04) as trainer_with_finder:
trainer_with_finder.run(mnist_dataloader)
def _test(ax):
assert ax is not None
assert ax.get_xscale() == "log"
assert ax.get_xlabel() == "Learning rate"
assert ax.get_ylabel() == "Loss"
filepath = Path(dirname) / "dummy.jpg"
ax.figure.savefig(filepath)
assert filepath.exists()
filepath.unlink()
lr_finder.plot()
ax = lr_finder.plot(skip_end=0)
_test(ax)
# Passing axes object
from matplotlib import pyplot as plt
_, ax = plt.subplots()
lr_finder.plot(skip_end=0, ax=ax)
_test(ax)
def test_plot_multiple_param_groups(
dirname, lr_finder, to_save_mulitple_param_groups, dummy_engine_mulitple_param_groups, dataloader_plot
):
with lr_finder.attach(
dummy_engine_mulitple_param_groups, to_save_mulitple_param_groups, end_lr=20.0, smooth_f=0.04
) as trainer_with_finder:
trainer_with_finder.run(dataloader_plot)
def _test(ax):
assert ax is not None
assert ax.get_xscale() == "log"
assert ax.get_xlabel() == "Learning rate"
assert ax.get_ylabel() == "Loss"
filepath = Path(dirname) / "dummy_muliple_param_groups.jpg"
ax.figure.savefig(filepath)
assert filepath.exists()
filepath.unlink()
ax = lr_finder.plot(skip_start=0, skip_end=0)
_test(ax)
# Passing axes object
from matplotlib import pyplot as plt
_, ax = plt.subplots()
lr_finder.plot(skip_start=0, skip_end=0, ax=ax)
_test(ax)
def _test_distrib_log_lr_and_loss(device):
from ignite.handlers import ParamScheduler
lr_finder = FastaiLRFinder()
_lr_schedule = MagicMock(spec=ParamScheduler)
# minimal setup for lr_finder to make _log_lr_and_loss work
rank = idist.get_rank()
loss = 0.01 * (rank + 1)
engine = Engine(lambda e, b: None)
engine.state.output = loss
engine.state.iteration = 1
lr_finder._lr_schedule = _lr_schedule
lr_finder._history["loss"] = []
lr_finder._history["lr"] = []
lr_finder._log_lr_and_loss(engine, output_transform=lambda x: x, smooth_f=0.1, diverge_th=10.0)
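    # the finder reduces the logged loss across processes, so compare against the all-reduced per-rank loss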
expected_loss = idist.all_reduce(loss)
assert pytest.approx(lr_finder._history["loss"][-1]) == expected_loss
def _test_distrib_integration_mnist(dirname, device):
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root="/tmp", transform=data_transform, train=True), batch_size=256, shuffle=True
)
class DummyModel(nn.Module):
def __init__(self, n_channels=10, out_channels=1, flatten_input=False):
super(DummyModel, self).__init__()
self.net = nn.Sequential(
nn.Flatten() if flatten_input else nn.Identity(), nn.Linear(n_channels, out_channels)
)
def forward(self, x):
return self.net(x)
model = DummyModel(n_channels=784, out_channels=10, flatten_input=True)
model = model.to(device)
optimizer = SGD(model.parameters(), lr=1e-4, momentum=0.0)
to_save = {"model": model, "optimizer": optimizer}
engine = create_supervised_trainer(model, optimizer, nn.CrossEntropyLoss(), device=device)
lr_finder = FastaiLRFinder()
with lr_finder.attach(engine, to_save) as trainer_with_finder:
trainer_with_finder.run(train_loader)
lr_finder.plot()
if idist.get_rank() == 0:
ax = lr_finder.plot(skip_end=0)
filepath = Path(dirname) / "distrib_dummy.jpg"
ax.figure.savefig(filepath)
assert filepath.exists()
sug_lr = lr_finder.lr_suggestion()
assert 1e-3 <= sug_lr <= 1
lr_finder.apply_suggested_lr(optimizer)
assert optimizer.param_groups[0]["lr"] == sug_lr
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(dirname, distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_log_lr_and_loss(device)
_test_distrib_integration_mnist(dirname, device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(dirname, distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_log_lr_and_loss(device)
_test_distrib_integration_mnist(dirname, device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Not on TPU device")
def test_distrib_single_device_xla(dirname):
device = idist.device()
assert "xla" in device.type
_test_distrib_log_lr_and_loss(device)
_test_distrib_integration_mnist(dirname, device)
def _test_distrib_log_lr_and_loss_xla_nprocs(index, dirname):
device = idist.device()
_test_distrib_log_lr_and_loss(device)
_test_distrib_integration_mnist(dirname, device)
import time
    # hack to let all processes sync properly:
time.sleep(1)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Not on TPU device")
def test_distrib_xla_nprocs(dirname, xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_log_lr_and_loss_xla_nprocs, args=(dirname,), nprocs=n)
|
import pytest
from ignite.engine.engine import Engine, Events
from ignite.handlers import EpochOutputStore
@pytest.fixture
def dummy_evaluator():
def dummy_process_function(engine, batch):
return 1, 0
dummy_evaluator = Engine(dummy_process_function)
return dummy_evaluator
@pytest.fixture
def eos():
return EpochOutputStore()
def test_no_transform(dummy_evaluator, eos):
eos.attach(dummy_evaluator)
dummy_evaluator.run(range(1))
assert eos.data == [(1, 0)]
def test_transform(dummy_evaluator):
eos = EpochOutputStore(output_transform=lambda x: x[0])
eos.attach(dummy_evaluator)
dummy_evaluator.run(range(1))
assert eos.data == [1]
def test_reset(dummy_evaluator, eos):
eos.attach(dummy_evaluator)
dummy_evaluator.run(range(2))
eos.reset()
assert eos.data == []
def test_update_one_iteration(dummy_evaluator, eos):
eos.attach(dummy_evaluator)
dummy_evaluator.run(range(1))
assert len(eos.data) == 1
def test_update_five_iterations(dummy_evaluator, eos):
eos.attach(dummy_evaluator)
dummy_evaluator.run(range(5))
assert len(eos.data) == 5
def test_attach(dummy_evaluator, eos):
eos.attach(dummy_evaluator)
assert dummy_evaluator.has_event_handler(eos.reset, Events.EPOCH_STARTED)
assert dummy_evaluator.has_event_handler(eos.update, Events.ITERATION_COMPLETED)
def test_store_data(dummy_evaluator, eos):
eos.attach(dummy_evaluator, name="eval_data")
dummy_evaluator.run(range(1))
assert dummy_evaluator.state.eval_data == eos.data
|
import numpy as np
import pytest
import torch
from ignite.engine import Engine, Events, State
from ignite.handlers import TerminateOnNan
@pytest.mark.parametrize(
"state_output,should_terminate",
[
(1.0, False),
(torch.tensor(123.45), False),
(torch.asin(torch.tensor([1.0, 2.0, 0.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0])), True),
(torch.asin(torch.randn(4, 4)), True),
((10.0, 1.0 / torch.tensor([1.0, 2.0, 0.0, 3.0]), 1.0), True),
((1.0, torch.tensor(1.0), "abc"), False),
(1.0 / torch.randint(0, 2, size=(4, 4)).type(torch.float), True),
((float("nan"), 10.0), True),
(float("inf"), True),
([float("nan"), 10.0], True),
(np.array([1.0, 2.0]), False),
],
)
def test_terminate_on_nan_and_inf(state_output, should_terminate):
torch.manual_seed(12)
def update_fn(engine, batch):
pass
trainer = Engine(update_fn)
trainer.state = State()
h = TerminateOnNan()
trainer.state.output = state_output
if isinstance(state_output, np.ndarray):
h._output_transform = lambda x: x.tolist()
h(trainer)
assert trainer.should_terminate == should_terminate
def test_with_terminate_on_nan():
torch.manual_seed(12)
data = [1.0, 0.8, (torch.rand(4, 4), torch.rand(4, 4)), torch.rand(5), torch.asin(torch.randn(4, 4)), 0.0, 1.0]
def update_fn(engine, batch):
return batch
trainer = Engine(update_fn)
h = TerminateOnNan()
trainer.add_event_handler(Events.ITERATION_COMPLETED, h)
trainer.run(data, max_epochs=2)
assert trainer.state.iteration == 5
def test_with_terminate_on_inf():
torch.manual_seed(12)
data = [
1.0,
0.8,
torch.rand(4, 4),
(1.0 / torch.randint(0, 2, size=(4,)).type(torch.float), torch.tensor(1.234)),
torch.rand(5),
torch.asin(torch.randn(4, 4)),
0.0,
1.0,
]
def update_fn(engine, batch):
return batch
trainer = Engine(update_fn)
h = TerminateOnNan()
trainer.add_event_handler(Events.ITERATION_COMPLETED, h)
trainer.run(data, max_epochs=2)
assert trainer.state.iteration == 4
def test_without_terminate_on_nan_inf():
data = [1.0, 0.8, torch.rand(4, 4), (torch.rand(5), torch.rand(5, 4)), 0.0, 1.0]
def update_fn(engine, batch):
return batch
trainer = Engine(update_fn)
h = TerminateOnNan()
trainer.add_event_handler(Events.ITERATION_COMPLETED, h)
trainer.run(data, max_epochs=2)
assert trainer.state.iteration == len(data) * 2
|
import os
import stat
import warnings
from collections import OrderedDict
from collections.abc import Mapping
from pathlib import Path
from unittest.mock import MagicMock
import pytest
import torch
import torch.nn as nn
from packaging.version import Version
import ignite.distributed as idist
from ignite.engine import Engine, Events, State
from ignite.handlers import Checkpoint, DiskSaver, EarlyStopping, global_step_from_engine, ModelCheckpoint
from ignite.handlers.checkpoint import BaseSaveHandler
_PREFIX = "PREFIX"
class DummyModel(nn.Module):
def __init__(self):
super(DummyModel, self).__init__()
self.net = nn.Linear(1, 1)
def forward(self, x):
return self.net(x)
model = DummyModel()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
class DummyPretrainedModel(nn.Module):
def __init__(self):
super(DummyPretrainedModel, self).__init__()
self.features = nn.Linear(4, 2, bias=False)
self.fc = nn.Linear(2, 1)
def forward(self, x):
x = self.features(x)
x = self.fc(x)
return x
def test_checkpoint_wrong_input():
with pytest.raises(TypeError, match=r"Argument `to_save` should be a dictionary"):
Checkpoint(12, lambda x: x, "prefix")
with pytest.raises(TypeError, match=r"Argument `to_save` should be a dictionary"):
Checkpoint([12], lambda x: x, "prefix")
to_save = {"model": model}
with pytest.raises(
TypeError,
match=r"Argument `save_handler` should be a string or Path object or callable or inherit from BaseSaveHandler",
):
Checkpoint(to_save, 12, "prefix")
with pytest.raises(TypeError, match=r"global_step_transform should be a function."):
Checkpoint(to_save, lambda x: x, score_function=lambda e: 123, score_name="acc", global_step_transform=123)
with pytest.raises(ValueError, match=r"Cannot have key 'checkpointer' if `include_self` is True"):
Checkpoint({"checkpointer": model}, lambda x: x, include_self=True)
class ImmutableMapping(Mapping):
def __getitem__(self, key):
return to_save[key]
def __iter__(self):
return iter(to_save)
def __len__(self):
return len(to_save)
with pytest.raises(TypeError, match="If `include_self` is True, then `to_save` must be mutable"):
Checkpoint(ImmutableMapping(), lambda x: x, include_self=True)
checkpoint = Checkpoint(to_save, lambda x: x)
with pytest.raises(AttributeError, match="Checkpoint's `save_handler` should be of type `DiskSaver`"):
checkpoint.reload_objects(to_save)
def test_save_handler_as_str(dirname):
to_save = {"model": model}
checkpointer = Checkpoint(to_save, save_handler=dirname)
assert isinstance(checkpointer.save_handler, DiskSaver)
def test_checkpoint_score_function_wrong_output():
to_save = {"model": model}
checkpointer = Checkpoint(to_save, lambda x: x, score_function=lambda e: {"1": 1}, score_name="acc")
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
with pytest.raises(ValueError, match=r"Output of score_function should be a number"):
checkpointer(trainer)
@pytest.mark.parametrize(
"to_save, obj, name",
[
({"model": model}, model.state_dict(), "model"),
(
{"model": model, "optimizer": optimizer},
{"model": model.state_dict(), "optimizer": optimizer.state_dict()},
"checkpoint",
),
],
)
def test_checkpoint_default(to_save, obj, name):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(to_save, save_handler=save_handler)
assert checkpointer.last_checkpoint is None
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
checkpointer(trainer)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": None, "priority": 0}
save_handler.assert_called_with(obj, f"{name}_0.pt", metadata)
trainer.state.epoch = 12
trainer.state.iteration = 1234
checkpointer(trainer)
assert save_handler.call_count == 2
metadata["priority"] = 1234
save_handler.assert_called_with(obj, f"{name}_1234.pt", metadata)
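    # with the default n_saved=1, saving the new checkpoint removes the previous one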
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with(f"{name}_0.pt")
assert checkpointer.last_checkpoint == f"{name}_1234.pt"
@pytest.mark.parametrize(
"to_save, obj, name",
[
({"model": model}, model.state_dict(), "model"),
(
{"model": model, "optimizer": optimizer},
{"model": model.state_dict(), "optimizer": optimizer.state_dict()},
"checkpoint",
),
],
)
def test_checkpoint_include_self_state_dict(to_save, obj, name):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(to_save, save_handler=save_handler, include_self=True)
assert checkpointer.last_checkpoint is None
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
checkpointer(trainer)
assert save_handler.call_count == 1
fname = f"{name}_0.pt"
obj["checkpointer"] = OrderedDict([("saved", [(0, fname)])])
metadata = {"basename": name, "score_name": None, "priority": 0}
save_handler.assert_called_with(obj, fname, metadata)
# Swap object, state should be maintained
checkpointer2 = Checkpoint(to_save, save_handler=save_handler, include_self=True)
checkpointer2.load_state_dict(checkpointer.state_dict())
assert checkpointer2.last_checkpoint == fname
trainer.state.epoch = 12
trainer.state.iteration = 1234
checkpointer2(trainer)
assert save_handler.call_count == 2
metadata["priority"] = 1234
# This delete only happens if state was restored correctly.
save_handler.remove.assert_called_with(f"{name}_0.pt")
fname = f"{name}_1234.pt"
obj["checkpointer"] = OrderedDict([("saved", [(1234, fname)])])
save_handler.assert_called_with(obj, fname, metadata)
assert save_handler.remove.call_count == 1
assert checkpointer2.last_checkpoint == fname
def test_checkpoint_with_dp():
dp_model = nn.DataParallel(model)
to_save = {"model": dp_model}
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(to_save, save_handler=save_handler)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
checkpointer(trainer)
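    # the DataParallel wrapper is stripped: the underlying module's state_dict is what gets saved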
assert save_handler.call_count == 1
metadata = {"basename": "model", "score_name": None, "priority": 0}
save_handler.assert_called_with(model.state_dict(), "model_0.pt", metadata)
@pytest.mark.parametrize("filename_prefix", ["", "dummytask"])
@pytest.mark.parametrize(
"to_save, obj, name",
[
({"model": model}, model.state_dict(), "model"),
(
{"model": model, "optimizer": optimizer},
{"model": model.state_dict(), "optimizer": optimizer.state_dict()},
"checkpoint",
),
],
)
def test_checkpoint_with_global_step_transform(filename_prefix, to_save, obj, name):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(
to_save,
save_handler=save_handler,
filename_prefix=filename_prefix,
global_step_transform=lambda e, _: e.state.epoch,
)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=2, iteration=1)
checkpointer(trainer)
assert save_handler.call_count == 1
if len(filename_prefix) > 0:
filename_prefix += "_"
metadata = {"basename": f"{filename_prefix}{name}", "score_name": None, "priority": 2}
save_handler.assert_called_with(obj, f"{filename_prefix}{name}_2.pt", metadata)
trainer.state.epoch = 12
trainer.state.iteration = 1234
checkpointer(trainer)
assert save_handler.call_count == 2
metadata["priority"] = 12
save_handler.assert_called_with(obj, f"{filename_prefix}{name}_12.pt", metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with(f"{filename_prefix}{name}_2.pt")
assert checkpointer.last_checkpoint == f"{filename_prefix}{name}_12.pt"
@pytest.mark.parametrize(
"to_save, obj, name",
[
({"model": model}, model.state_dict(), "model"),
(
{"model": model, "optimizer": optimizer},
{"model": model.state_dict(), "optimizer": optimizer.state_dict()},
"checkpoint",
),
],
)
def test_checkpoint_with_score_function(to_save, obj, name):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(to_save, save_handler=save_handler, score_function=lambda e: e.state.score)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=1, iteration=1, score=0.77)
checkpointer(trainer)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": None, "priority": 0.77}
save_handler.assert_called_with(obj, f"{name}_0.7700.pt", metadata)
trainer.state.epoch = 12
trainer.state.iteration = 1234
trainer.state.score = 0.78
checkpointer(trainer)
assert save_handler.call_count == 2
metadata["priority"] = 0.78
save_handler.assert_called_with(obj, f"{name}_0.7800.pt", metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with(f"{name}_0.7700.pt")
assert checkpointer.last_checkpoint == f"{name}_0.7800.pt"
def test_checkpoint_with_score_name_only():
to_save = {"model": model}
obj = model.state_dict()
name = "model"
save_handler = MagicMock(spec=BaseSaveHandler)
trainer = Engine(lambda e, b: None)
evaluator = Engine(lambda e, b: None)
trainer.state = State(epoch=11, iteration=1)
checkpointer = Checkpoint(
to_save,
save_handler=save_handler,
global_step_transform=lambda _1, _2: trainer.state.epoch,
score_name="val_acc",
)
evaluator.state = State(epoch=1, iteration=1000, metrics={"val_acc": 0.77})
checkpointer(evaluator)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": "val_acc", "priority": 0.77}
save_handler.assert_called_with(obj, f"{name}_11_val_acc=0.7700.pt", metadata)
trainer.state.epoch = 12
evaluator.state.metrics["val_acc"] = 0.78
checkpointer(evaluator)
assert save_handler.call_count == 2
metadata["priority"] = 0.78
save_handler.assert_called_with(obj, f"{name}_12_val_acc=0.7800.pt", metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with(f"{name}_11_val_acc=0.7700.pt")
assert checkpointer.last_checkpoint == f"{name}_12_val_acc=0.7800.pt"
@pytest.mark.parametrize(
"to_save, obj, name",
[
({"model": model}, model.state_dict(), "model"),
(
{"model": model, "optimizer": optimizer},
{"model": model.state_dict(), "optimizer": optimizer.state_dict()},
"checkpoint",
),
],
)
def test_checkpoint_with_score_name_and_function(to_save, obj, name):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(
to_save, save_handler=save_handler, score_name="loss", score_function=lambda e: e.state.score
)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=1, iteration=1, score=-0.77)
checkpointer(trainer)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": "loss", "priority": -0.77}
save_handler.assert_called_with(obj, f"{name}_loss=-0.7700.pt", metadata)
trainer.state.epoch = 12
trainer.state.iteration = 1234
trainer.state.score = -0.76
checkpointer(trainer)
assert save_handler.call_count == 2
metadata["priority"] = -0.76
save_handler.assert_called_with(obj, f"{name}_loss=-0.7600.pt", metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with(f"{name}_loss=-0.7700.pt")
assert checkpointer.last_checkpoint == f"{name}_loss=-0.7600.pt"
def test_checkpoint_with_int_score():
def _test(to_save, obj, name, score_name=None):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(
to_save, save_handler=save_handler, score_name=score_name, score_function=lambda e: e.state.epoch
)
if score_name is None:
score_name = ""
else:
score_name += "="
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=1, iteration=1)
checkpointer(trainer)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": score_name[:-1] if len(score_name) > 0 else None, "priority": 1}
save_handler.assert_called_with(obj, f"{name}_{score_name}1.pt", metadata)
trainer.state.epoch = 12
trainer.state.iteration = 1234
checkpointer(trainer)
assert save_handler.call_count == 2
metadata["priority"] = 12
save_handler.assert_called_with(obj, f"{name}_{score_name}12.pt", metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with(f"{name}_{score_name}1.pt")
assert checkpointer.last_checkpoint == f"{name}_{score_name}12.pt"
model = DummyModel()
to_save = {"model": model}
_test(to_save, model.state_dict(), "model")
_test(to_save, model.state_dict(), "model", "epoch")
model = DummyModel()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
to_save = {"model": model, "optimizer": optimizer}
_test(to_save, {"model": model.state_dict(), "optimizer": optimizer.state_dict()}, "checkpoint")
_test(to_save, {"model": model.state_dict(), "optimizer": optimizer.state_dict()}, "checkpoint", "epoch")
def test_checkpoint_with_score_function_and_trainer_epoch():
to_save = {"model": model}
obj = model.state_dict()
name = "model"
save_handler = MagicMock(spec=BaseSaveHandler)
trainer = Engine(lambda e, b: None)
evaluator = Engine(lambda e, b: None)
trainer.state = State(epoch=11, iteration=1)
checkpointer = Checkpoint(
to_save,
save_handler=save_handler,
global_step_transform=lambda _1, _2: trainer.state.epoch,
score_function=lambda e: e.state.metrics["val_acc"],
)
evaluator.state = State(epoch=1, iteration=1000, metrics={"val_acc": 0.77})
checkpointer(evaluator)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": None, "priority": 0.77}
save_handler.assert_called_with(obj, f"{name}_11_0.7700.pt", metadata)
trainer.state.epoch = 12
evaluator.state.metrics["val_acc"] = 0.78
checkpointer(evaluator)
assert save_handler.call_count == 2
metadata["priority"] = 0.78
save_handler.assert_called_with(obj, f"{name}_12_0.7800.pt", metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with(f"{name}_11_0.7700.pt")
assert checkpointer.last_checkpoint == f"{name}_12_0.7800.pt"
def test_checkpoint_with_score_name_and_function_and_trainer_epoch():
to_save = {"model": model}
obj = model.state_dict()
name = "model"
save_handler = MagicMock(spec=BaseSaveHandler)
trainer = Engine(lambda e, b: None)
evaluator = Engine(lambda e, b: None)
trainer.state = State(epoch=11, iteration=1)
checkpointer = Checkpoint(
to_save,
save_handler=save_handler,
global_step_transform=lambda _1, _2: trainer.state.epoch,
score_name="val_acc",
score_function=lambda e: e.state.metrics["val_acc"],
)
evaluator.state = State(epoch=1, iteration=1000, metrics={"val_acc": 0.77})
checkpointer(evaluator)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": "val_acc", "priority": 0.77}
save_handler.assert_called_with(obj, f"{name}_11_val_acc=0.7700.pt", metadata)
trainer.state.epoch = 12
evaluator.state.metrics["val_acc"] = 0.78
checkpointer(evaluator)
assert save_handler.call_count == 2
metadata["priority"] = 0.78
save_handler.assert_called_with(obj, f"{name}_12_val_acc=0.7800.pt", metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with(f"{name}_11_val_acc=0.7700.pt")
assert checkpointer.last_checkpoint == f"{name}_12_val_acc=0.7800.pt"
def test_checkpoint_last_checkpoint():
save_handler = MagicMock(spec=BaseSaveHandler)
to_save = {"model": DummyModel()}
checkpointer = Checkpoint(to_save, save_handler=save_handler, n_saved=None)
trainer = Engine(lambda e, b: None)
for i in range(10):
trainer.state = State(epoch=1, iteration=i)
checkpointer(trainer)
assert save_handler.call_count == 10
assert checkpointer.last_checkpoint == "model_9.pt"
def test_checkpoint_last_checkpoint_on_score():
save_handler = MagicMock(spec=BaseSaveHandler)
to_save = {"model": DummyModel()}
checkpointer = Checkpoint(
to_save,
save_handler=save_handler,
n_saved=None,
score_name="val_acc",
score_function=lambda e: e.state.metrics["val_acc"],
)
trainer = Engine(lambda e, b: None)
val_acc = 0.0
for i in range(10):
val_acc = i * 0.1
trainer.state = State(epoch=1, iteration=i, metrics={"val_acc": val_acc})
checkpointer(trainer)
assert save_handler.call_count == 10
assert checkpointer.last_checkpoint == "model_val_acc=0.9000.pt"
def test_checkpoint_save_handler_callable():
def save_handler(c, f):
assert f == "model_12.pt"
to_save = {"model": DummyModel()}
checkpointer = Checkpoint(to_save, save_handler=save_handler)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=1, iteration=12)
checkpointer(trainer)
def test_model_checkpoint_args_validation(dirname):
existing = dirname / "existing_dir"
nonempty = dirname / "nonempty"
existing.mkdir(parents=True)
nonempty.mkdir(parents=True)
with open(nonempty / f"{_PREFIX}_name_0.pt", "w"):
pass
with pytest.raises(ValueError, match=r"with extension '.pt' are already present "):
ModelCheckpoint(nonempty, _PREFIX)
with pytest.raises(ValueError, match=r"Directory path '\S+' is not found"):
ModelCheckpoint(dirname / "non_existing_dir", _PREFIX, create_dir=False)
with pytest.raises(TypeError, match=r"global_step_transform should be a function"):
ModelCheckpoint(existing, _PREFIX, create_dir=False, global_step_transform=1234)
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False)
assert h.last_checkpoint is None
with pytest.raises(RuntimeError, match=r"No objects to checkpoint found."):
h(None, [])
def test_model_checkpoint_simple_recovery(dirname):
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=1)
model = DummyModel()
to_save = {"model": model}
h(engine, to_save)
fname = h.last_checkpoint
assert isinstance(fname, Path)
assert str(dirname / _PREFIX) in str(fname)
assert fname.exists()
loaded_objects = torch.load(fname)
assert loaded_objects == model.state_dict()
to_load = {"model": DummyModel()}
h.reload_objects(to_load=to_load, global_step=1)
assert to_load["model"].state_dict() == model.state_dict()
@pytest.mark.parametrize("ext, require_empty", [(".txt", True), (".pt", False)])
def test_model_checkpoint_simple_recovery_from_existing_non_empty(ext, require_empty, dirname):
previous_fname = dirname / f"{_PREFIX}_obj_{1}{ext}"
with open(previous_fname, "w") as f:
f.write("test")
h = ModelCheckpoint(dirname, _PREFIX, create_dir=True, require_empty=require_empty)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=1)
to_save = {"model": model}
h(engine, to_save)
fname = h.last_checkpoint
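    # checkpoints are always written with the .pt extension, regardless of the pre-existing file's extension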
ext = ".pt"
assert isinstance(fname, Path)
assert dirname / f"{_PREFIX}_model_{1}{ext}" == fname
assert fname.exists()
assert previous_fname.exists()
loaded_objects = torch.load(fname)
assert loaded_objects == model.state_dict()
to_load = {"model": DummyModel()}
h.reload_objects(to_load=to_load, global_step=1)
assert to_load["model"].state_dict() == model.state_dict()
fname.unlink()
def test_model_checkpoint_invalid_save_handler(dirname):
h = ModelCheckpoint(dirname, _PREFIX)
to_save = {"model": DummyModel()}
# Redefine save_handler
h.save_handler = lambda x, y: None
h(Engine(lambda x, y: None), to_save)
with pytest.raises(
RuntimeError, match=rf"Internal error, save_handler should be DiskSaver, but has {type(h.save_handler)}."
):
h.last_checkpoint
def test_disk_saver_atomic(dirname):
model = DummyModel()
to_save_serializable = {"model": model}
to_save_non_serializable = {"model": lambda x: x}
def _test_existence(atomic, _to_save, expected):
saver = DiskSaver(dirname, atomic=atomic, create_dir=False, require_empty=False)
fname = "test.pt"
try:
with warnings.catch_warnings():
# Ignore torch/serialization.py:292: UserWarning: Couldn't retrieve source code for container of type
# DummyModel. It won't be checked for correctness upon loading.
warnings.simplefilter("ignore", category=UserWarning)
saver(_to_save, fname)
except Exception:
pass
fp = saver.dirname / fname
assert fp.exists() == expected
if expected:
# related to https://github.com/pytorch/ignite/issues/1876
mode = stat.filemode(fp.stat().st_mode)
assert [mode[1], mode[4], mode[7]] == ["r", "r", "r"], mode
if expected:
saver.remove(fname)
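    # with atomic=True a failed save must not leave a partially written file behind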
_test_existence(atomic=False, _to_save=to_save_serializable, expected=True)
_test_existence(atomic=False, _to_save=to_save_non_serializable, expected=True)
_test_existence(atomic=True, _to_save=to_save_serializable, expected=True)
_test_existence(atomic=True, _to_save=to_save_non_serializable, expected=False)
@pytest.mark.skipif(
Version(torch.__version__) < Version("1.4.0"), reason="Zipfile serialization was introduced in 1.4.0"
)
def test_disk_saver_zipfile_serialization_keyword(dirname):
model = DummyModel()
to_save = {"model": model}
saver = DiskSaver(dirname, create_dir=False, _use_new_zipfile_serialization=False)
fname = "test.pt"
saver(to_save, fname)
fp = saver.dirname / fname
assert fp.exists()
saver.remove(fname)
def test_disk_saver_unknown_keyword(dirname):
model = DummyModel()
to_save = {"model": model}
saver = DiskSaver(dirname, create_dir=False, unknown_keyword="")
fname = "test.pt"
with pytest.raises(TypeError, match=r"got an unexpected keyword argument 'unknown_keyword'"):
saver(to_save, fname)
def test_last_k(dirname):
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=0)
model = DummyModel()
to_save = {"model": model}
h(engine, to_save)
for i in range(1, 9):
engine.state.iteration = i
h(engine, to_save)
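    # n_saved=2 keeps only the two most recent checkpoints (iterations 7 and 8)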
expected = [f"{_PREFIX}_model_{i}.pt" for i in [7, 8]]
assert sorted(os.listdir(dirname)) == expected, f"{sorted(os.listdir(dirname))} vs {expected}"
def test_disabled_n_saved(dirname):
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=None)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=0)
model = DummyModel()
to_save = {"model": model}
num_iters = 100
for i in range(num_iters):
engine.state.iteration = i
h(engine, to_save)
saved_files = sorted(os.listdir(dirname))
assert len(saved_files) == num_iters, f"{saved_files}"
expected = sorted([f"{_PREFIX}_model_{i}.pt" for i in range(num_iters)])
assert saved_files == expected, f"{saved_files} vs {expected}"
def test_best_k(dirname):
scores = iter([1.2, -2.0, 3.1, -4.0])
def score_function(_):
return next(scores)
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2, score_function=score_function)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=0)
model = DummyModel()
to_save = {"model": model}
for _ in range(4):
h(engine, to_save)
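    # n_saved=2 keeps only the checkpoints with the two highest scores (1.2 and 3.1)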
expected = [f"{_PREFIX}_model_{i:.4f}.pt" for i in [1.2, 3.1]]
assert sorted(os.listdir(dirname)) == expected
def test_best_k_with_suffix(dirname):
scores = [0.3456789, 0.1234, 0.4567, 0.134567]
scores_iter = iter(scores)
def score_function(engine):
return next(scores_iter)
h = ModelCheckpoint(
dirname, _PREFIX, create_dir=False, n_saved=2, score_function=score_function, score_name="val_loss"
)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=0)
model = DummyModel()
to_save = {"model": model}
for _ in range(4):
engine.state.epoch += 1
h(engine, to_save)
expected = [f"{_PREFIX}_model_val_loss={scores[e - 1]:.4}.pt" for e in [1, 3]]
assert sorted(os.listdir(dirname)) == expected
def test_removes_each_score_at_most_once(dirname):
scores = [0, 1, 1, 2, 3]
scores_iter = iter(scores)
def score_function(_):
return next(scores_iter)
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2, score_function=score_function)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=0)
model = DummyModel()
to_save = {"model": model}
for _ in range(len(scores)):
h(engine, to_save)
# If a score had been removed multiple times, the code above would have raised a
# FileNotFoundError. So this just tests the absence of such a failure,
# without further assertions.
def test_with_engine(dirname):
def update_fn(_1, _2):
pass
name = "model"
engine = Engine(update_fn)
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2)
model = DummyModel()
to_save = {"model": model}
engine.add_event_handler(Events.EPOCH_COMPLETED, handler, to_save)
engine.run([0, 1], max_epochs=4)
expected = sorted([f"{_PREFIX}_{name}_{i}.pt" for i in [3 * 2, 4 * 2]])
assert sorted(os.listdir(dirname)) == expected
def test_with_state_dict(dirname):
def update_fn(_1, _2):
pass
engine = Engine(update_fn)
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
model = DummyModel()
to_save = {"model": model}
engine.add_event_handler(Events.EPOCH_COMPLETED, handler, to_save)
engine.run([0, 1, 2], max_epochs=4)
saved_model = dirname / os.listdir(dirname)[0]
load_model = torch.load(saved_model)
assert not isinstance(load_model, DummyModel)
assert isinstance(load_model, dict)
model_state_dict = model.state_dict()
loaded_model_state_dict = load_model
for key in model_state_dict.keys():
assert key in loaded_model_state_dict
model_value = model_state_dict[key]
loaded_model_value = loaded_model_state_dict[key]
assert model_value.numpy() == loaded_model_value.numpy()
def test_valid_state_dict_save(dirname):
model = DummyModel()
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=0)
to_save = {"name": 42}
with pytest.raises(TypeError, match=r"should have `state_dict` method"):
h(engine, to_save)
to_save = {"name": model}
try:
h(engine, to_save)
except ValueError:
pytest.fail("Unexpected ValueError")
def _test_save_model_optimizer_lr_scheduler_with_state_dict(device, dirname, just_on_zero_rank=False):
torch.manual_seed(23)
model = DummyModel().to(device)
optim = torch.optim.SGD(model.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=0.5)
def update_fn(engine, batch):
x = torch.rand((4, 1)).to(device)
optim.zero_grad()
y = model(x)
# Below code raises: RuntimeError: torch_xla/csrc/tensor_impl.cpp:144 : XLA tensors do not have storage
# Probably related to https://github.com/pytorch/xla/issues/2576
# loss = y.pow(2.0).sum()
loss = y.sum()
print(loss.device, y.device, x.device)
loss.backward()
if idist.has_xla_support:
import torch_xla.core.xla_model as xm
xm.optimizer_step(optim, barrier=True)
else:
optim.step()
lr_scheduler.step()
engine = Engine(update_fn)
if (not just_on_zero_rank) or (just_on_zero_rank and idist.get_rank() == 0):
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=True, n_saved=1)
engine.add_event_handler(
Events.EPOCH_COMPLETED, handler, {"model": model, "optimizer": optim, "lr_scheduler": lr_scheduler}
)
engine.run([0, 1, 2], max_epochs=4)
idist.barrier()
saved_objects = sorted(os.listdir(dirname))
# saved_objects looks like ['PREFIX_checkpoint_3.pt']
saved_checkpoint = dirname / saved_objects[0]
if idist.has_xla_support:
device = "cpu"
loaded_obj = torch.load(saved_checkpoint, map_location=device)
for f in ["model", "optimizer", "lr_scheduler"]:
assert f in loaded_obj
loaded_model_state_dict = loaded_obj["model"]
loaded_optimizer_state_dict = loaded_obj["optimizer"]
loaded_lr_scheduler_state_dict = loaded_obj["lr_scheduler"]
assert isinstance(loaded_model_state_dict, dict)
assert isinstance(loaded_optimizer_state_dict, dict)
assert isinstance(loaded_lr_scheduler_state_dict, dict)
# Explicitly move the model to CPU before comparing state dicts
model_state_dict = model.cpu().state_dict()
for key in model_state_dict.keys():
assert key in loaded_model_state_dict
model_value = model_state_dict[key]
loaded_model_value = loaded_model_state_dict[key]
assert model_value.cpu().numpy() == loaded_model_value.cpu().numpy()
optim_state_dict = optim.state_dict()
for key in optim_state_dict.keys():
assert key in loaded_optimizer_state_dict
optim_value = optim_state_dict[key]
loaded_optim_value = loaded_optimizer_state_dict[key]
if idist.get_rank() == 0:
assert optim_value == loaded_optim_value
lr_scheduler_state_dict = lr_scheduler.state_dict()
for key in lr_scheduler_state_dict.keys():
assert key in loaded_lr_scheduler_state_dict
lr_scheduler_value = lr_scheduler_state_dict[key]
loaded_lr_scheduler_value = loaded_lr_scheduler_state_dict[key]
assert lr_scheduler_value == loaded_lr_scheduler_value
def test_save_model_optimizer_lr_scheduler_with_state_dict(dirname):
_test_save_model_optimizer_lr_scheduler_with_state_dict("cpu", dirname)
def _test_save_model_optimizer_lr_scheduler_with_validation(device, dirname, just_on_zero_rank=False):
torch.manual_seed(23)
def _build_objects(acc_list):
model = DummyModel().to(device)
optim = torch.optim.SGD(model.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=0.5)
def update_fn(engine, batch):
x = torch.rand((4, 1)).to(device)
optim.zero_grad()
y = model(x)
loss = y.pow(2.0).sum()
loss.backward()
if idist.has_xla_support:
import torch_xla.core.xla_model as xm
xm.optimizer_step(optim, barrier=True)
else:
optim.step()
lr_scheduler.step()
trainer = Engine(update_fn)
evaluator = Engine(lambda e, b: None)
acc_iter = iter(acc_list)
@evaluator.on(Events.EPOCH_COMPLETED)
def setup_result():
evaluator.state.metrics["accuracy"] = next(acc_iter)
@trainer.on(Events.EPOCH_COMPLETED)
def run_eval():
evaluator.run([0, 1, 2])
def score_function(engine):
return engine.state.metrics["accuracy"]
save_handler = DiskSaver(dirname, create_dir=True, require_empty=False)
early_stop = EarlyStopping(score_function=score_function, patience=2, trainer=trainer)
evaluator.add_event_handler(Events.COMPLETED, early_stop)
checkpointer = Checkpoint(
{
"trainer": trainer,
"model": model,
"optim": optim,
"lr_scheduler": lr_scheduler,
"early_stop": early_stop,
},
save_handler,
include_self=True,
global_step_transform=global_step_from_engine(trainer),
)
evaluator.add_event_handler(Events.COMPLETED, checkpointer)
return trainer, evaluator, model, optim, lr_scheduler, early_stop, checkpointer
trainer, evaluator, model, optim, scheduler, early, checkpointer = _build_objects([0.2, 0.3, 0.2])
trainer.run([0, 1, 2], max_epochs=3)
saved_objects = sorted(os.listdir(dirname))
saved_checkpoint = dirname / saved_objects[0]
loaded_obj = torch.load(saved_checkpoint, map_location=device)
for f in ["trainer", "model", "optim", "lr_scheduler", "early_stop", "checkpointer"]:
assert f in loaded_obj
trainer2, evaluator2, model2, optim2, scheduler2, early2, checkpointer2 = _build_objects([0.1, 0.1, 0.1])
Checkpoint.load_objects(
{
"trainer": trainer2,
"model": model2,
"optim": optim2,
"lr_scheduler": scheduler2,
"early_stop": early2,
"checkpointer": checkpointer2,
},
loaded_obj,
)
assert checkpointer2.last_checkpoint == checkpointer.last_checkpoint
model_state_dict = model.cpu().state_dict()
loaded_model_state_dict = model2.cpu().state_dict()
for key in model_state_dict.keys():
assert key in loaded_model_state_dict
model_value = model_state_dict[key]
loaded_model_value = loaded_model_state_dict[key]
assert model_value.cpu().numpy() == loaded_model_value.cpu().numpy()
optim_state_dict = optim.state_dict()
loaded_optimizer_state_dict = optim2.state_dict()
# "params" contains tensor IDs, which are different
del optim_state_dict["param_groups"][0]["params"]
del loaded_optimizer_state_dict["param_groups"][0]["params"]
for key in optim_state_dict.keys():
assert key in loaded_optimizer_state_dict
optim_value = optim_state_dict[key]
loaded_optim_value = loaded_optimizer_state_dict[key]
if idist.get_rank() == 0:
assert optim_value == loaded_optim_value
def _check_state_dict(original, loaded):
original_state_dict = original.state_dict()
loaded_state_dict = loaded.state_dict()
for key in original_state_dict.keys():
assert key in loaded_state_dict
original_value = original_state_dict[key]
loaded_value = loaded_state_dict[key]
assert original_value == loaded_value
_check_state_dict(trainer, trainer2)
_check_state_dict(scheduler, scheduler2)
_check_state_dict(early, early2)
_check_state_dict(checkpointer, checkpointer2)
trainer2.run([0, 1, 2], max_epochs=6)
# early stopping should have triggered
assert trainer2.state.epoch == 4
# If Checkpoint's state was restored correctly, it should continue to respect n_saved
# and delete old checkpoints, and have the correct last_checkpoint.
assert os.listdir(dirname) == ["checkpoint_4.pt"]
assert checkpointer2.last_checkpoint == dirname / "checkpoint_4.pt"
def test_save_model_optimizer_lr_scheduler_with_validation(dirname):
_test_save_model_optimizer_lr_scheduler_with_validation("cpu", dirname)
def test_checkpoint_load_objects():
with pytest.raises(TypeError, match=r"Argument checkpoint should be a string or a dictionary"):
Checkpoint.load_objects({}, [])
with pytest.raises(TypeError, match=r"should have `load_state_dict` method"):
Checkpoint.load_objects({"a": None}, {"a": None})
model = DummyModel()
to_load = {"model": model, "another_model": model}
with pytest.raises(ValueError, match=r"from `to_load` is not found in the checkpoint"):
Checkpoint.load_objects(to_load, {})
model = DummyModel()
to_load = {"model": model}
model2 = DummyModel()
chkpt = {"model": model2.state_dict()}
Checkpoint.load_objects(to_load, chkpt)
assert model.state_dict() == model2.state_dict()
def test_checkpoint_load_objects_from_saved_file(dirname):
def _get_single_obj_to_save():
model = DummyModel()
to_save = {"model": model}
return to_save
def _get_multiple_objs_to_save():
model = DummyModel()
optim = torch.optim.SGD(model.parameters(), lr=0.001)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=0.5)
to_save = {"model": model, "optimizer": optim, "lr_scheduler": lr_scheduler}
return to_save
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
# case: load from filepath
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
to_save = _get_multiple_objs_to_save()
handler(trainer, to_save)
fname = handler.last_checkpoint
assert isinstance(fname, Path)
assert str(dirname / _PREFIX) in str(fname)
assert fname.exists()
Checkpoint.load_objects(to_save, str(fname))
Checkpoint.load_objects(to_save, fname)
fname.unlink()
# case: multiple objects
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
to_save = _get_multiple_objs_to_save()
handler(trainer, to_save)
fname = handler.last_checkpoint
assert isinstance(fname, Path)
assert str(dirname / _PREFIX) in str(fname)
assert fname.exists()
loaded_objects = torch.load(fname)
Checkpoint.load_objects(to_save, loaded_objects)
fname.unlink()
# case: saved multiple objects, loaded single object
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
to_save = _get_multiple_objs_to_save()
handler(trainer, to_save)
fname = handler.last_checkpoint
assert isinstance(fname, Path)
assert str(dirname / _PREFIX) in str(fname)
assert fname.exists()
loaded_objects = torch.load(fname)
to_load = {"model": to_save["model"]}
Checkpoint.load_objects(to_load, loaded_objects)
fname.unlink()
# case: single object
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
to_save = _get_single_obj_to_save()
handler(trainer, to_save)
fname = handler.last_checkpoint
assert isinstance(fname, Path)
assert str(dirname / _PREFIX) in str(fname)
assert fname.exists()
loaded_objects = torch.load(fname)
Checkpoint.load_objects(to_save, loaded_objects)
fname.unlink()
def test_load_checkpoint_with_different_num_classes(dirname):
model = DummyPretrainedModel()
to_save_single_object = {"model": model}
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
handler(trainer, to_save_single_object)
fname = handler.last_checkpoint
loaded_checkpoint = torch.load(fname)
to_load_single_object = {"pretrained_features": model.features}
with pytest.raises(RuntimeError):
Checkpoint.load_objects(to_load_single_object, loaded_checkpoint)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
Checkpoint.load_objects(to_load_single_object, loaded_checkpoint, strict=False, blah="blah")
loaded_weights = to_load_single_object["pretrained_features"].state_dict()["weight"]
assert torch.all(model.state_dict()["features.weight"].eq(loaded_weights))
def test_disksaver_wrong_input(dirname):
with pytest.raises(ValueError, match=r"Directory path '\S+' is not found"):
DiskSaver("/tmp/non-existing-folder", create_dir=False)
def _test(ext):
previous_fname = dirname / f"{_PREFIX}_obj_{1}{ext}"
with open(previous_fname, "w") as f:
f.write("test")
with pytest.raises(ValueError, match=r"with extension '.pt' are already present"):
DiskSaver(dirname, require_empty=True)
_test(".pt")
def _test_checkpoint_with_ddp(device):
torch.manual_seed(0)
model = DummyModel().to(device)
device_ids = None if "cpu" in device.type else [device]
ddp_model = nn.parallel.DistributedDataParallel(model, device_ids=device_ids)
to_save = {"model": ddp_model}
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(to_save, save_handler=save_handler)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
checkpointer(trainer)
assert save_handler.call_count == 1
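# The DDP wrapper is expected to be unwrapped: what gets saved is the underlying
# model's state_dict, not the DistributedDataParallel object itself.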
metadata = {"basename": "model", "score_name": None, "priority": 0}
save_handler.assert_called_with(model.state_dict(), "model_0.pt", metadata)
def _test_checkpoint_load_objects_ddp(device):
model = DummyModel().to(device)
device_ids = None if "cpu" in device.type else [device]
ddp_model = nn.parallel.DistributedDataParallel(model, device_ids=device_ids)
opt = torch.optim.SGD(ddp_model.parameters(), lr=0.01)
# single object:
to_load = {"model": ddp_model}
checkpoint = ddp_model.module.state_dict()
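# The checkpoint holds the unwrapped module's keys; load_objects is expected to
# load it into the DDP-wrapped model transparently.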
Checkpoint.load_objects(to_load, checkpoint)
# multiple objects:
to_load = {"model": ddp_model, "opt": opt}
checkpoint = {"model": ddp_model.module.state_dict(), "opt": opt.state_dict()}
Checkpoint.load_objects(to_load, checkpoint)
def _test_checkpoint_with_ZeRO(device, dirname, local_rank):
from torch.distributed.optim import ZeroRedundancyOptimizer
model = DummyModel().to(device)
opt = ZeroRedundancyOptimizer(model.parameters(), torch.optim.SGD, lr=0.01)
mocked_opt = MagicMock(ZeroRedundancyOptimizer, wraps=opt)
# A `step` should be called so that the optimizer state gets populated.
out = model(torch.tensor([1.0], device=device))
out.backward()
mocked_opt.step()
to_save = {"model": model, "optim": mocked_opt}
checkpointer = Checkpoint(to_save, dirname, save_on_rank=1)
engine = Engine(lambda e, b: None)
checkpointer(engine)
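# ZeroRedundancyOptimizer shards optimizer state across ranks, so Checkpoint is
# expected to consolidate it on the saving rank (save_on_rank=1) before writing,
# as asserted below.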
mocked_opt.consolidate_state_dict.assert_called_once_with(to=1)
if local_rank == 1:
loaded_state_dict = torch.load(dirname / "checkpoint_0.pt", map_location=device)["optim"]
state_dict = opt.state_dict()
assert loaded_state_dict == state_dict
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo, dirname, get_rank_zero_dirname, local_rank):
device = idist.device()
rank_zero_dirname = get_rank_zero_dirname()
_test_save_model_optimizer_lr_scheduler_with_state_dict(device, rank_zero_dirname / "1")
_test_save_model_optimizer_lr_scheduler_with_state_dict(device, rank_zero_dirname / "2", just_on_zero_rank=True)
_test_checkpoint_with_ddp(device)
_test_checkpoint_load_objects_ddp(device)
from ignite.handlers.checkpoint import HAVE_ZERO
if HAVE_ZERO:
_test_checkpoint_with_ZeRO(device, dirname, local_rank)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl, get_rank_zero_dirname):
device = idist.device()
dirname = get_rank_zero_dirname()
_test_save_model_optimizer_lr_scheduler_with_state_dict(device, dirname / "1")
_test_save_model_optimizer_lr_scheduler_with_state_dict("cpu", dirname / "2", just_on_zero_rank=True)
_test_checkpoint_with_ddp(device=device)
_test_checkpoint_load_objects_ddp(device=device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor, get_rank_zero_dirname):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
dirname = get_rank_zero_dirname()
gloo_hvd_executor(
_test_save_model_optimizer_lr_scheduler_with_state_dict,
(device, dirname / "1"),
np=nproc,
do_init=True,
)
gloo_hvd_executor(
_test_save_model_optimizer_lr_scheduler_with_state_dict,
("cpu", dirname / "2", True),
np=nproc,
do_init=True,
)
def _test_tpu_saves_to_cpu(device, dirname):
torch.manual_seed(0)
h = ModelCheckpoint(dirname, _PREFIX)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=1)
model = DummyModel().to(device)
to_save = {"model": model}
h(engine, to_save)
idist.barrier()
fname = h.last_checkpoint
assert isinstance(fname, Path)
assert str(dirname / _PREFIX) in str(fname)
assert fname.exists()
loaded_objects = torch.load(fname)
assert loaded_objects == model.cpu().state_dict()
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Not on TPU device")
def test_distrib_single_device_xla(dirname):
assert "xla" in idist.device().type
_test_tpu_saves_to_cpu(idist.device(), dirname / "1")
_test_save_model_optimizer_lr_scheduler_with_state_dict(idist.device(), dirname / "2")
def _test_tpu_saves_to_cpu_nprocs(index, dirname):
device = idist.device()
_test_tpu_saves_to_cpu(device, dirname / "1")
_test_save_model_optimizer_lr_scheduler_with_state_dict(device, dirname / "2")
import time
# hack to let all processes properly synchronize:
time.sleep(1)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Not on TPU device")
def test_distrib_xla_nprocs(xmp_executor, dirname):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_tpu_saves_to_cpu_nprocs, args=(dirname,), nprocs=n)
def _test_checkpoint_filename_pattern_helper(
to_save,
filename_prefix="",
score_function=None,
score_name=None,
global_step_transform=None,
filename_pattern=None,
dirname=None,
):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(
to_save,
save_handler=save_handler,
filename_prefix=filename_prefix,
score_function=score_function,
score_name=score_name,
global_step_transform=global_step_transform,
filename_pattern=filename_pattern,
)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=12, iteration=203, score=0.9999)
checkpointer(trainer)
return checkpointer.last_checkpoint
def _test_model_checkpoint_filename_pattern_helper(
to_save,
filename_prefix="",
score_function=None,
score_name=None,
global_step_transform=None,
filename_pattern=None,
dirname=None,
):
checkpointer = ModelCheckpoint(
dirname=dirname,
filename_prefix=filename_prefix,
score_function=score_function,
score_name=score_name,
global_step_transform=global_step_transform,
filename_pattern=filename_pattern,
require_empty=False,
)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=12, iteration=203, score=0.9999)
checkpointer(trainer, to_save)
return Path(checkpointer.last_checkpoint).name
@pytest.mark.parametrize("test_class", ["checkpoint", "model_checkpoint"])
def test_checkpoint_filename_pattern(test_class, dirname):
if test_class == "checkpoint":
_test = _test_checkpoint_filename_pattern_helper
elif test_class == "model_checkpoint":
_test = _test_model_checkpoint_filename_pattern_helper
model = DummyModel()
to_save = {"model": model}
assert _test(to_save, dirname=dirname) == "model_203.pt"
assert _test(to_save, "best", dirname=dirname) == "best_model_203.pt"
assert _test(to_save, score_function=lambda e: e.state.score, dirname=dirname) == "model_0.9999.pt"
res = _test(
to_save,
score_function=lambda e: e.state.score,
global_step_transform=lambda e, _: e.state.epoch,
dirname=dirname,
)
assert res == "model_12_0.9999.pt"
assert (
_test(to_save, score_function=lambda e: e.state.score, score_name="acc", dirname=dirname)
== "model_acc=0.9999.pt"
)
res = _test(
to_save,
score_function=lambda e: e.state.score,
score_name="acc",
global_step_transform=lambda e, _: e.state.epoch,
dirname=dirname,
)
assert res == "model_12_acc=0.9999.pt"
assert _test(to_save, "best", score_function=lambda e: e.state.score, dirname=dirname) == "best_model_0.9999.pt"
res = _test(
to_save,
"best",
score_function=lambda e: e.state.score,
global_step_transform=lambda e, _: e.state.epoch,
dirname=dirname,
)
assert res == "best_model_12_0.9999.pt"
res = _test(to_save, "best", score_function=lambda e: e.state.score, score_name="acc", dirname=dirname)
assert res == "best_model_acc=0.9999.pt"
res = _test(
to_save,
"best",
score_function=lambda e: e.state.score,
score_name="acc",
global_step_transform=lambda e, _: e.state.epoch,
dirname=dirname,
)
assert res == "best_model_12_acc=0.9999.pt"
pattern = "{name}.{ext}"
assert _test(to_save, filename_pattern=pattern, dirname=dirname) == "model.pt"
pattern = "chk-{name}--{global_step}.{ext}"
assert _test(to_save, filename_pattern=pattern, dirname=dirname) == "chk-model--203.pt"
pattern = "chk-{filename_prefix}--{name}--{global_step}.{ext}"
assert _test(to_save, "best", filename_pattern=pattern, dirname=dirname) == "chk-best--model--203.pt"
pattern = "chk-{name}--{score}.{ext}"
assert (
_test(to_save, score_function=lambda e: e.state.score, filename_pattern=pattern, dirname=dirname)
== "chk-model--0.9999.pt"
)
pattern = "{global_step}-{name}-{score}.chk.{ext}"
res = _test(
to_save,
score_function=lambda e: e.state.score,
global_step_transform=lambda e, _: e.state.epoch,
filename_pattern=pattern,
dirname=dirname,
)
assert res == "12-model-0.9999.chk.pt"
pattern = "chk-{name}--{score_name}--{score}.{ext}"
res = _test(
to_save, score_function=lambda e: e.state.score, score_name="acc", filename_pattern=pattern, dirname=dirname
)
assert res == "chk-model--acc--0.9999.pt"
pattern = "chk-{name}-{global_step}-{score_name}-{score}.{ext}"
res = _test(
to_save,
score_function=lambda e: e.state.score,
score_name="acc",
global_step_transform=lambda e, _: e.state.epoch,
filename_pattern=pattern,
dirname=dirname,
)
assert res == "chk-model-12-acc-0.9999.pt"
pattern = "{filename_prefix}-{name}-{score}.chk"
res = _test(to_save, "best", score_function=lambda e: e.state.score, filename_pattern=pattern, dirname=dirname)
assert res == "best-model-0.9999.chk"
pattern = "resnet-{filename_prefix}-{name}-{global_step}-{score}.chk"
res = _test(
to_save,
"best",
score_function=lambda e: e.state.score,
global_step_transform=lambda e, _: e.state.epoch,
filename_pattern=pattern,
dirname=dirname,
)
assert res == "resnet-best-model-12-0.9999.chk"
pattern = "{filename_prefix}-{name}-{score_name}-{score}.chk"
res = _test(
to_save,
"best",
score_function=lambda e: e.state.score,
score_name="acc",
filename_pattern=pattern,
dirname=dirname,
)
assert res == "best-model-acc-0.9999.chk"
pattern = "{global_step}-{filename_prefix}-{name}-{score_name}-{score}"
res = _test(
to_save,
"best",
score_function=lambda e: e.state.score,
score_name="acc",
global_step_transform=lambda e, _: e.state.epoch,
filename_pattern=pattern,
dirname=dirname,
)
assert res == "12-best-model-acc-0.9999"
pattern = "SAVE-{name}-{score_name}-{score}.pth"
res = _test(
to_save,
"best",
score_function=lambda e: e.state.score,
score_name="acc",
global_step_transform=lambda e, _: e.state.epoch,
filename_pattern=pattern,
dirname=dirname,
)
assert res == "SAVE-model-acc-0.9999.pth"
pattern = "{global_step}-chk-{filename_prefix}-{name}-{score_name}-{score}.{ext}"
assert _test(to_save, filename_pattern=pattern, dirname=dirname) == "203-chk--model-None-None.pt"
with pytest.raises(KeyError, match=r"random_key"):
pattern = "SAVE-{random_key}.{ext}"
_test(to_save, filename_pattern=pattern, dirname=dirname)
def test_setup_filename_pattern():
# default filename pattern
assert Checkpoint.setup_filename_pattern() == "{filename_prefix}_{name}_{global_step}_{score_name}={score}.{ext}"
assert Checkpoint.setup_filename_pattern(False) == "{name}_{global_step}_{score_name}={score}.{ext}"
assert Checkpoint.setup_filename_pattern(False, False, False) == "{name}_{global_step}.{ext}"
assert Checkpoint.setup_filename_pattern(False, True, False) == "{name}_{global_step}_{score}.{ext}"
assert Checkpoint.setup_filename_pattern(False, True, False, False) == "{name}_{score}.{ext}"
assert Checkpoint.setup_filename_pattern(False, True, True, False) == "{name}_{score_name}={score}.{ext}"
with pytest.raises(ValueError, match=r"At least one of with_score and with_global_step should be True."):
Checkpoint.setup_filename_pattern(False, False, False, False)
with pytest.raises(ValueError, match=r"If with_score_name is True, with_score should be also True"):
Checkpoint.setup_filename_pattern(True, False, True, True)
def _setup_checkpoint():
save_handler = MagicMock(spec=BaseSaveHandler)
model = DummyModel()
to_save = {"model": model}
checkpointer = Checkpoint(to_save, save_handler=save_handler, n_saved=None)
assert checkpointer.last_checkpoint is None
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
checkpointer(trainer)
trainer.state.iteration = 10
checkpointer(trainer)
trainer.state.iteration = 20
checkpointer(trainer)
assert save_handler.call_count == 3
return checkpointer
def test_checkpoint_state_dict():
checkpointer = _setup_checkpoint()
sd = checkpointer.state_dict()
assert "saved" in sd
assert isinstance(sd["saved"], list) and len(sd["saved"]) == len(checkpointer._saved)
for saved_item, true_item in zip(sd["saved"], checkpointer._saved):
assert saved_item[0] == true_item.priority
assert saved_item[1] == true_item.filename
def test_checkpoint_load_state_dict():
true_checkpointer = _setup_checkpoint()
save_handler = MagicMock(spec=BaseSaveHandler)
model = DummyModel()
to_save = {"model": model}
checkpointer = Checkpoint(to_save, save_handler=save_handler, n_saved=None)
sd = {"saved": [(0, "model_0.pt"), (10, "model_10.pt"), (20, "model_20.pt")]}
checkpointer.load_state_dict(sd)
assert checkpointer._saved == true_checkpointer._saved
def test_checkpoint_fixed_filename():
model = DummyModel()
to_save = {"model": model}
def _test(n_saved):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(to_save, save_handler=save_handler, n_saved=n_saved, filename_pattern="{name}.{ext}")
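# With the fixed pattern "{name}.{ext}" the checkpoint name never changes, so each
# save targets the same "model.pt" regardless of n_saved.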
trainer = Engine(lambda e, b: None)
for i in range(10):
trainer.state = State(epoch=i, iteration=i)
checkpointer(trainer)
assert save_handler.call_count == i + 1
metadata = {"basename": "model", "score_name": None, "priority": i}
save_handler.assert_called_with(model.state_dict(), "model.pt", metadata)
_test(None)
_test(1)
_test(3)
def test_checkpoint_reset():
model = DummyModel()
to_save = {"model": model}
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(to_save, save_handler=save_handler, n_saved=2)
assert checkpointer.last_checkpoint is None
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=123)
checkpointer(trainer)
trainer.state.iteration = 234
checkpointer(trainer)
assert save_handler.call_count == 2
assert checkpointer.last_checkpoint == "model_234.pt"
assert len(checkpointer._saved) == 2
assert sorted([item.filename for item in checkpointer._saved]) == sorted(["model_123.pt", "model_234.pt"])
checkpointer.reset()
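# reset() clears the internal bookkeeping: previously saved checkpoints are
# forgotten and the n_saved count starts over.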
assert len(checkpointer._saved) == 0
trainer.state.iteration = 124
checkpointer(trainer)
assert save_handler.call_count == 3
assert checkpointer.last_checkpoint == "model_124.pt"
assert len(checkpointer._saved) == 1
assert sorted([item.filename for item in checkpointer._saved]) == sorted(["model_124.pt"])
def test_checkpoint_reset_with_engine(dirname):
name = "model"
engine = Engine(lambda e, b: None)
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2)
model = DummyModel()
to_save = {"model": model}
engine.add_event_handler(Events.EPOCH_COMPLETED, handler, to_save)
engine.run([0, 1], max_epochs=10)
expected = sorted([f"{_PREFIX}_{name}_{i}.pt" for i in [9 * 2, 10 * 2]])
assert sorted(os.listdir(dirname)) == expected
assert "PREFIX_model_20.pt" in str(handler.last_checkpoint)
handler.reset()
engine.state.max_epochs = None
engine.run([0, 1], max_epochs=2)
expected += [f"{_PREFIX}_{name}_{i}.pt" for i in [1 * 2, 2 * 2]]
assert sorted(os.listdir(dirname)) == sorted(expected)
assert "PREFIX_model_4.pt" in str(handler.last_checkpoint)
def test_greater_or_equal():
scores = iter([1, 2, 2, 2])
def score_function(_):
return next(scores)
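# With greater_or_equal=True, a score equal to the current best still triggers a
# save, so every call after the first writes "model_2.pt".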
class Saver:
def __init__(self):
self.counter = 0
def __call__(self, c, f, m):
if self.counter == 0:
assert f == "model_1.pt"
else:
assert f == "model_2.pt"
self.counter += 1
handler = Saver()
checkpointer = Checkpoint(
to_save={"model": DummyModel()},
save_handler=handler,
score_function=score_function,
n_saved=2,
greater_or_equal=True,
)
trainer = Engine(lambda e, b: None)
for _ in range(4):
checkpointer(trainer)
assert handler.counter == 4
def test_greater_or_equal_model_checkpoint(dirname):
scores = iter([1, 2, 2, 2])
def score_function(_):
return next(scores)
checkpointer = ModelCheckpoint(
dirname,
score_function=score_function,
n_saved=2,
greater_or_equal=True,
)
trainer = Engine(lambda e, b: None)
to_save = {"model": DummyModel()}
for i in range(4):
checkpointer(trainer, to_save)
if i == 0:
assert Path(checkpointer.last_checkpoint).name == "model_1.pt"
else:
assert Path(checkpointer.last_checkpoint).name == "model_2.pt"
def test_get_default_score_fn():
with pytest.raises(ValueError, match=r"Argument score_sign should be 1 or -1"):
Checkpoint.get_default_score_fn("acc", 2.0)
engine = Engine(lambda e, b: None)
engine.state.metrics["acc"] = 0.9
engine.state.metrics["loss"] = 0.123
score_fn = Checkpoint.get_default_score_fn("acc")
score = score_fn(engine)
assert score == 0.9
score_fn = Checkpoint.get_default_score_fn("loss", -1)
score = score_fn(engine)
assert score == -0.123
@pytest.mark.parametrize("obj_to_save", ["optim", "trainer"])
def test_load_single_object(obj_to_save, dirname):
# Checks https://github.com/pytorch/ignite/issues/2479
trainer = Engine(lambda e, b: None)
if obj_to_save == "optim":
t = torch.tensor(0.0)
optim = torch.optim.SGD([t], lr=0.1)
to_save = {"optim": optim}
elif obj_to_save == "trainer":
to_save = {"trainer": trainer}
c = Checkpoint(to_save, save_handler=dirname)
c(trainer)
checkpoint_fp = dirname / c.last_checkpoint
Checkpoint.load_objects(to_load=to_save, checkpoint=str(checkpoint_fp))
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.parametrize("atomic", [False, True])
def test_disksaver_distrib(distributed_context_single_node_gloo, dirname, local_rank, atomic):
saver = DiskSaver(dirname, atomic, save_on_rank=1)
mocked_saver = MagicMock(wraps=saver)
mocked_saver(checkpoint={}, filename="test_disksaver_distrib.pt")
if local_rank == 1:
assert (dirname / "test_disksaver_distrib.pt").exists()
else:
mocked_saver._save_func.assert_not_called()
|
import pytest
from ignite.base import Serializable
def test_state_dict():
s = Serializable()
with pytest.raises(NotImplementedError):
s.state_dict()
def test_load_state_dict():
s = Serializable()
s.load_state_dict({})
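# A minimal sketch (not part of the test suite) of how a component might build on
# Serializable: the base class leaves state_dict() unimplemented and, as the test
# above shows, load_state_dict() accepts a mapping. The _CounterExample class and
# its "count" key are hypothetical, introduced here only for illustration.
class _CounterExample(Serializable):
    def __init__(self):
        super().__init__()
        self.count = 0
    def state_dict(self):
        # Return a plain dict so the object can be handled like other stateful components.
        return {"count": self.count}
    def load_state_dict(self, state_dict):
        # Let the base class do its basic validation of the mapping, then restore state.
        super().load_state_dict(state_dict)
        self.count = state_dict["count"]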
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, as shown here.
#
import os
import shutil
import sys
sys.path.insert(0, os.path.abspath("../.."))
from datetime import datetime
import pytorch_sphinx_theme
import ignite
# -- Project information -----------------------------------------------------
project = "PyTorch-Ignite"
author = "PyTorch-Ignite Contributors"
copyright = f"{datetime.now().year}, {author}"
# The short X.Y version
try:
version = os.environ["code_version"]
except KeyError:
version = ignite.__version__
# The full version, including alpha/beta/rc tags
release = version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinxcontrib.katex",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"sphinx.ext.autosectionlabel",
"sphinx_copybutton",
"sphinx_togglebutton",
"sphinx_design",
]
# toggle button hint text
togglebutton_hint = "Show default setup"
togglebutton_hint_hide = "Hide default setup"
# Copy defaults.rst to source/generated to be discoverable in docstrings
# Skip this step for previous versions of ignite
if os.path.exists("defaults.rst"):
src_folder = os.path.dirname(__file__)
gen_folder = os.path.join(src_folder, "generated")
os.makedirs(gen_folder, exist_ok=True)
shutil.copy(os.path.join(src_folder, "defaults.rst"), gen_folder)
# katex options
katex_prerender = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_title = f"{project} {version} Documentation"
html_theme = "pytorch_sphinx_theme"
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
"canonical_url": "https://pytorch.org/ignite/",
"collapse_navigation": False,
"display_version": True,
"logo_only": True,
"navigation_with_keys": True,
}
html_logo = "_templates/_static/img/ignite_logo.svg"
html_favicon = "_templates/_static/img/ignite_logomark.svg"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static", "_templates/_static"]
html_context = {
"extra_css_files": [
# 'https://fonts.googleapis.com/css?family=Lato',
# '_static/css/pytorch_theme.css'
"_static/css/ignite_theme.css",
"https://cdn.jsdelivr.net/npm/@docsearch/css@3",
],
}
html_last_updated_fmt = "%m/%d/%Y, %X"
html_permalinks = True
html_permalinks_icon = "#"
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "ignitedoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "ignite.tex", "ignite Documentation", "Torch Contributors", "manual"),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "ignite", "ignite Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"ignite",
"ignite Documentation",
author,
"ignite",
"One line description of project.",
"Miscellaneous",
),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"torch": ("https://pytorch.org/docs/stable/", None),
}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Type hints configs ------------------------------------------------------
autodoc_inherit_docstrings = True
autoclass_content = "both"
autodoc_typehints = "description"
napoleon_attr_annotations = True
# -- Autosummary patch to get list of a classes, funcs automatically ----------
from importlib import import_module
from inspect import getmembers, isclass, isfunction
from docutils.parsers.rst import directives
from docutils.statemachine import StringList
from sphinx.ext.autosummary import Autosummary
class AutolistAutosummary(Autosummary):
"""Autosummary with autolisting for modules.
By default it tries to import all public names (__all__),
otherwise it lists all classes and/or functions defined in the module.
Options:
- :autolist: option to get list of classes and functions from currentmodule.
- :autolist-classes: option to get list of classes from currentmodule.
- :autolist-functions: option to get list of functions from currentmodule.
Example Usage:
.. currentmodule:: ignite.metrics
.. autosummary::
:nosignatures:
:autolist:
"""
# Add new option
_option_spec = Autosummary.option_spec.copy()
_option_spec.update(
{
"autolist": directives.unchanged,
"autolist-classes": directives.unchanged,
"autolist-functions": directives.unchanged,
}
)
option_spec = _option_spec
def run(self):
for auto in ("autolist", "autolist-classes", "autolist-functions"):
if auto in self.options:
# Get current module name
module_name = self.env.ref_context.get("py:module")
# Import module
module = import_module(module_name)
# Get public names (if possible)
try:
names = getattr(module, "__all__")
except AttributeError:
# Get classes defined in the module
cls_names = [
name[0]
for name in getmembers(module, isclass)
if name[-1].__module__ == module_name and not (name[0].startswith("_"))
]
# Get functions defined in the module
fn_names = [
name[0]
for name in getmembers(module, isfunction)
if (name[-1].__module__ == module_name) and not (name[0].startswith("_"))
]
names = cls_names + fn_names
# It may happen that the module doesn't define any class or function
if not names:
names = [name[0] for name in getmembers(module)]
# Filter out members w/o doc strings
names = [name for name in names if getattr(module, name).__doc__ is not None]
if auto == "autolist":
# Get list of all classes and functions inside module
names = [
name for name in names if (isclass(getattr(module, name)) or isfunction(getattr(module, name)))
]
else:
if auto == "autolist-classes":
# Get only classes
check = isclass
elif auto == "autolist-functions":
# Get only functions
check = isfunction
else:
raise NotImplementedError
names = [name for name in names if check(getattr(module, name))]
# Update content
self.content = StringList(names)
return super().run()
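# Illustrative (hypothetical) usage of the extra options registered above, e.g. to
# list only the classes of a module in an autosummary table:
#
#   .. currentmodule:: ignite.handlers
#   .. autosummary::
#      :nosignatures:
#      :autolist-classes: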
# --- autosummary config -----------------------------------------------------
autosummary_generate = True
# --- nitpicky config : check internal links are correct or not --------------
nitpicky = True
# ignore links which can't be referenced
nitpick_ignore = [
("py:class", ".."),
("py:class", "TextIO"),
("py:class", "torch.device"),
("py:class", "_MpDeviceLoader"),
("py:class", "torch.nn.modules.module.Module"),
("py:class", "torch.optim.optimizer.Optimizer"),
("py:class", "torch.utils.data.dataset.Dataset"),
("py:class", "torch.utils.data.sampler.BatchSampler"),
("py:class", "torch.cuda.amp.grad_scaler.GradScaler"),
("py:class", "torch.optim.lr_scheduler._LRScheduler"),
("py:class", "torch.optim.lr_scheduler.LRScheduler"),
("py:class", "torch.utils.data.dataloader.DataLoader"),
]
linkcheck_ignore = [
"https://github.com/fossasia/visdom#visdom-arguments-python-only",
"https://github.com/pytorch/ignite/tree/master/examples/cifar10#check-resume-training",
"https://github.com/pytorch/ignite/tree/master/examples/mnist#training-save--resume",
]
def setup(app):
app.add_directive("autosummary", AutolistAutosummary, override=True)
|
"""
MNIST example with training and validation monitoring using Neptune.
Requirements:
Neptune: `pip install neptune`
Usage:
Run the example:
```bash
python mnist_with_neptune_logger.py
```
Go to https://neptune.ai and explore your run.
Note:
You can view example runs here:
https://app.neptune.ai/o/common/org/pytorch-ignite-integration/
"""
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from ignite.contrib.handlers.neptune_logger import (
global_step_from_engine,
GradsScalarHandler,
NeptuneLogger,
NeptuneSaver,
WeightsScalarHandler,
)
from ignite.engine import create_supervised_evaluator, create_supervised_trainer, Events
from ignite.handlers import Checkpoint
from ignite.metrics import Accuracy, Loss
from ignite.utils import setup_logger
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True
)
val_loader = DataLoader(
MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False
)
return train_loader, val_loader
def run(train_batch_size, val_batch_size, epochs, lr, momentum):
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
model.to(device) # Move model before creating optimizer
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
criterion = nn.CrossEntropyLoss()
trainer = create_supervised_trainer(model, optimizer, criterion, device=device)
trainer.logger = setup_logger("Trainer")
metrics = {"accuracy": Accuracy(), "loss": Loss(criterion)}
train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
train_evaluator.logger = setup_logger("Train Evaluator")
validation_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
validation_evaluator.logger = setup_logger("Val Evaluator")
@trainer.on(Events.EPOCH_COMPLETED)
def compute_metrics(engine):
train_evaluator.run(train_loader)
validation_evaluator.run(val_loader)
npt_logger = NeptuneLogger(
api_token="ANONYMOUS",
project="common/pytorch-ignite-integration",
name="ignite-mnist-example",
)
npt_logger.experiment["params"] = {
"train_batch_size": train_batch_size,
"val_batch_size": val_batch_size,
"epochs": epochs,
"lr": lr,
"momentum": momentum,
}
npt_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED(every=100),
tag="training",
output_transform=lambda loss: {"batchloss": loss},
)
for tag, evaluator in [("training", train_evaluator), ("validation", validation_evaluator)]:
npt_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag=tag,
metric_names=["loss", "accuracy"],
global_step_transform=global_step_from_engine(trainer),
)
npt_logger.attach_opt_params_handler(trainer, event_name=Events.ITERATION_COMPLETED(every=100), optimizer=optimizer)
npt_logger.attach(
trainer, log_handler=WeightsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100)
)
npt_logger.attach(trainer, log_handler=GradsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100))
def score_function(engine):
return engine.state.metrics["accuracy"]
handler = Checkpoint(
{"model": model},
NeptuneSaver(npt_logger),
n_saved=2,
filename_prefix="best",
score_function=score_function,
score_name="validation_accuracy",
global_step_transform=global_step_from_engine(trainer),
)
validation_evaluator.add_event_handler(Events.COMPLETED, handler)
# kick everything off
trainer.run(train_loader, max_epochs=epochs)
npt_logger.close()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)")
parser.add_argument(
"--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)"
)
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)")
parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)")
parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)")
args = parser.parse_args()
run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum)
|
"""
MNIST example with training and validation monitoring using TensorboardX and Tensorboard.
Requirements:
Optionally TensorboardX (https://github.com/lanpa/tensorboard-pytorch): `pip install tensorboardX`
Tensorboard: `pip install tensorflow` (or just install tensorboard without the rest of tensorflow)
Usage:
Start tensorboard:
```bash
tensorboard --logdir=/tmp/tensorboard_logs/
```
Run the example:
```bash
python mnist_with_tensorboard_logger.py --log_dir=/tmp/tensorboard_logs
```
"""
import sys
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from ignite.contrib.handlers.tensorboard_logger import (
global_step_from_engine,
GradsHistHandler,
GradsScalarHandler,
TensorboardLogger,
WeightsHistHandler,
WeightsScalarHandler,
)
from ignite.engine import create_supervised_evaluator, create_supervised_trainer, Events
from ignite.handlers import ModelCheckpoint
from ignite.metrics import Accuracy, Loss
from ignite.utils import setup_logger
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True
)
val_loader = DataLoader(
MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False
)
return train_loader, val_loader
def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_dir):
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
model.to(device) # Move model before creating optimizer
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
criterion = nn.CrossEntropyLoss()
trainer = create_supervised_trainer(model, optimizer, criterion, device=device)
trainer.logger = setup_logger("Trainer")
if sys.version_info > (3,):
from ignite.contrib.metrics.gpu_info import GpuInfo
try:
GpuInfo().attach(trainer)
except RuntimeError:
print(
"INFO: By default, in this example it is possible to log GPU information (used memory, utilization). "
"As there is no pynvml python package installed, GPU information won't be logged. Otherwise, please "
"install it : `pip install pynvml`"
)
metrics = {"accuracy": Accuracy(), "loss": Loss(criterion)}
train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
train_evaluator.logger = setup_logger("Train Evaluator")
validation_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
validation_evaluator.logger = setup_logger("Val Evaluator")
@trainer.on(Events.EPOCH_COMPLETED)
def compute_metrics(engine):
train_evaluator.run(train_loader)
validation_evaluator.run(val_loader)
tb_logger = TensorboardLogger(log_dir=log_dir)
tb_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED(every=100),
tag="training",
output_transform=lambda loss: {"batchloss": loss},
metric_names="all",
)
for tag, evaluator in [("training", train_evaluator), ("validation", validation_evaluator)]:
tb_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag=tag,
metric_names=["loss", "accuracy"],
global_step_transform=global_step_from_engine(trainer),
)
tb_logger.attach_opt_params_handler(trainer, event_name=Events.ITERATION_COMPLETED(every=100), optimizer=optimizer)
tb_logger.attach(
trainer,
log_handler=WeightsScalarHandler(model, whitelist=["fc1"]),
event_name=Events.ITERATION_COMPLETED(every=100),
)
def is_conv(n, _):
return "conv" in n
tb_logger.attach(
trainer,
log_handler=WeightsHistHandler(model, whitelist=is_conv),
event_name=Events.ITERATION_COMPLETED(every=100),
)
tb_logger.attach(trainer, log_handler=GradsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100))
tb_logger.attach(
trainer,
log_handler=GradsHistHandler(model, whitelist=["fc2.weight"]),
event_name=Events.ITERATION_COMPLETED(every=100),
)
def score_function(engine):
return engine.state.metrics["accuracy"]
model_checkpoint = ModelCheckpoint(
log_dir,
n_saved=2,
filename_prefix="best",
score_function=score_function,
score_name="validation_accuracy",
global_step_transform=global_step_from_engine(trainer),
)
validation_evaluator.add_event_handler(Events.COMPLETED, model_checkpoint, {"model": model})
# kick everything off
trainer.run(train_loader, max_epochs=epochs)
tb_logger.close()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)")
parser.add_argument(
"--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)"
)
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)")
parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)")
parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)")
parser.add_argument(
"--log_dir", type=str, default="tensorboard_logs", help="log directory for Tensorboard log output"
)
args = parser.parse_args()
run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum, args.log_dir)
|
"""
MNIST example with training and validation monitoring using ClearML.
Requirements:
ClearML: `pip install clearml`
Usage:
Run the example:
```bash
python mnist_with_clearml_logger.py
```
"""
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from ignite.contrib.handlers.clearml_logger import (
ClearMLLogger,
ClearMLSaver,
global_step_from_engine,
GradsHistHandler,
GradsScalarHandler,
WeightsHistHandler,
WeightsScalarHandler,
)
from ignite.engine import create_supervised_evaluator, create_supervised_trainer, Events
from ignite.handlers import Checkpoint
from ignite.metrics import Accuracy, Loss
from ignite.utils import setup_logger
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True
)
val_loader = DataLoader(
MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False
)
return train_loader, val_loader
def run(train_batch_size, val_batch_size, epochs, lr, momentum):
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
model.to(device) # Move model before creating optimizer
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
criterion = nn.CrossEntropyLoss()
trainer = create_supervised_trainer(model, optimizer, criterion, device=device)
trainer.logger = setup_logger("Trainer")
metrics = {"accuracy": Accuracy(), "loss": Loss(criterion)}
train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
train_evaluator.logger = setup_logger("Train Evaluator")
validation_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
validation_evaluator.logger = setup_logger("Val Evaluator")
@trainer.on(Events.EPOCH_COMPLETED)
def compute_metrics(engine):
train_evaluator.run(train_loader)
validation_evaluator.run(val_loader)
clearml_logger = ClearMLLogger(project_name="examples", task_name="ignite")
clearml_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED(every=100),
tag="training",
output_transform=lambda loss: {"batchloss": loss},
)
for tag, evaluator in [("training metrics", train_evaluator), ("validation metrics", validation_evaluator)]:
clearml_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag=tag,
metric_names=["loss", "accuracy"],
global_step_transform=global_step_from_engine(trainer),
)
clearml_logger.attach_opt_params_handler(
trainer, event_name=Events.ITERATION_COMPLETED(every=100), optimizer=optimizer
)
clearml_logger.attach(
trainer,
log_handler=WeightsScalarHandler(model, whitelist=["fc1"]),
event_name=Events.ITERATION_COMPLETED(every=100),
)
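    # the whitelist argument also accepts a callable (name, parameter) -> bool selecting which parameters to log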
def is_conv(n, _):
return "conv" in n
clearml_logger.attach(
trainer,
log_handler=WeightsHistHandler(model, whitelist=is_conv),
event_name=Events.ITERATION_COMPLETED(every=100),
)
clearml_logger.attach(
trainer, log_handler=GradsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100)
)
clearml_logger.attach(
trainer,
log_handler=GradsHistHandler(model, whitelist=["fc2.weight"]),
event_name=Events.ITERATION_COMPLETED(every=100),
)
handler = Checkpoint(
{"model": model},
ClearMLSaver(),
n_saved=1,
score_function=lambda e: e.state.metrics["accuracy"],
score_name="val_acc",
filename_prefix="best",
global_step_transform=global_step_from_engine(trainer),
)
validation_evaluator.add_event_handler(Events.EPOCH_COMPLETED, handler)
# kick everything off
trainer.run(train_loader, max_epochs=epochs)
clearml_logger.close()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)")
parser.add_argument(
"--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)"
)
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)")
parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)")
parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)")
args = parser.parse_args()
run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum)
|
"""
MNIST example with training and validation monitoring using Tensorboard on TPU
Requirements:
- PyTorch >= 1.5
- PyTorch XLA >= 1.5
- Tensorboard: `pip install tensorflow` (or just install tensorboard without the rest of tensorflow)
Usage:
Start tensorboard:
```bash
tensorboard --logdir=/tmp/tensorboard_logs/
```
Run the example:
```bash
python mnist_with_tensorboard_on_tpu.py --log_dir=/tmp/tensorboard_logs
```
"""
from argparse import ArgumentParser
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from ignite.engine import create_supervised_evaluator, create_supervised_trainer, Events
from ignite.metrics import Accuracy, Loss, RunningAverage
try:
import torch_xla.core.xla_model as xm
except ImportError:
raise ModuleNotFoundError(
"In order to run PyTorch on TPU we need to install PyTorch XLA:"
"\n\t- curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o xla-setup.py"
"\n\t- python xla-setup.py --version 1.5"
)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True
)
val_loader = DataLoader(
MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False
)
return train_loader, val_loader
def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval, log_dir):
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
writer = SummaryWriter(log_dir=log_dir)
# Use TPU device
device = xm.xla_device()
model.to(device) # Move model before creating optimizer
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
criterion = nn.NLLLoss()
# Create trainer and evaluator
trainer = create_supervised_trainer(
model, optimizer, criterion, device=device, output_transform=lambda x, y, y_pred, loss: [loss.item()]
)
val_metrics = {"accuracy": Accuracy(), "nll": Loss(criterion)}
evaluator = create_supervised_evaluator(model, metrics=val_metrics, device=device)
tracker = xm.RateTracker()
# Add RateTracker as an output of the training step
@trainer.on(Events.ITERATION_COMPLETED)
def add_rate_tracker(engine):
        tracker.add(len(engine.state.batch[0]))  # count samples in the batch, not the length of the (x, y) pair
engine.state.output.append(tracker.global_rate())
# Setup output values of the training step as EMA metrics
RunningAverage(output_transform=lambda x: x[0]).attach(trainer, "batch_loss")
RunningAverage(output_transform=lambda x: x[1]).attach(trainer, "global_rate")
# Let's log the EMA metrics every `log_interval` iterations
@trainer.on(Events.ITERATION_COMPLETED(every=log_interval))
def log_training_loss(engine):
writer.add_scalar("training/batch_loss", engine.state.metrics["batch_loss"], engine.state.iteration)
writer.add_scalar("training/global_rate", engine.state.metrics["global_rate"], engine.state.iteration)
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
evaluator.run(train_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
print(
f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
writer.add_scalar("training/avg_loss", avg_nll, engine.state.epoch)
writer.add_scalar("training/avg_accuracy", avg_accuracy, engine.state.epoch)
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
evaluator.run(val_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
print(
f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
writer.add_scalar("valdation/avg_loss", avg_nll, engine.state.epoch)
writer.add_scalar("valdation/avg_accuracy", avg_accuracy, engine.state.epoch)
# kick everything off
trainer.run(train_loader, max_epochs=epochs)
writer.close()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)")
parser.add_argument(
"--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)"
)
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)")
parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)")
parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)")
parser.add_argument(
"--log_interval", type=int, default=10, help="how many batches to wait before logging training status"
)
parser.add_argument(
"--log_dir", type=str, default="tensorboard_logs", help="log directory for Tensorboard log output"
)
args = parser.parse_args()
run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum, args.log_interval, args.log_dir)
|
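"""
MNIST example with checkpointing and resuming from a checkpoint.
Training is monitored with Tensorboard and a tqdm progress bar; a crash can be
simulated at a given iteration to exercise the save/resume workflow.
"""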
from argparse import ArgumentParser
from pathlib import Path
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from tqdm import tqdm
from ignite.engine import create_supervised_evaluator, create_supervised_trainer, Events
from ignite.handlers import Checkpoint, DiskSaver
from ignite.metrics import Accuracy, Loss
from ignite.utils import manual_seed
try:
from tensorboardX import SummaryWriter
except ImportError:
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
raise ModuleNotFoundError(
"This module requires either tensorboardX or torch >= 1.2.0. "
"You may install tensorboardX with command: \n pip install tensorboardX \n"
"or upgrade PyTorch using your package manager of choice (pip or conda)."
)
# Basic model's definition
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
"""Method to setup data loaders: train_loader and val_loader"""
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root=".", transform=data_transform, train=True),
batch_size=train_batch_size,
shuffle=True,
num_workers=4,
)
val_loader = DataLoader(
MNIST(download=False, root=".", transform=data_transform, train=False),
batch_size=val_batch_size,
shuffle=False,
num_workers=4,
)
return train_loader, val_loader
def log_model_weights(engine, model=None, fp=None, **kwargs):
"""Helper method to log norms of model weights: print and dump into a file"""
assert model and fp
output = {"total": 0.0}
max_counter = 5
for name, p in model.named_parameters():
name = name.replace(".", "/")
n = torch.norm(p)
if max_counter > 0:
output[name] = n
output["total"] += n
max_counter -= 1
output_items = " - ".join([f"{m}:{v:.4f}" for m, v in output.items()])
msg = f"{engine.state.epoch} | {engine.state.iteration}: {output_items}"
with open(fp, "a") as h:
h.write(msg)
h.write("\n")
def log_model_grads(engine, model=None, fp=None, **kwargs):
"""Helper method to log norms of model gradients: print and dump into a file"""
assert model and fp
output = {"grads/total": 0.0}
max_counter = 5
for name, p in model.named_parameters():
if p.grad is None:
continue
name = name.replace(".", "/")
n = torch.norm(p.grad)
if max_counter > 0:
output[f"grads/{name}"] = n
output["grads/total"] += n
max_counter -= 1
output_items = " - ".join([f"{m}:{v:.4f}" for m, v in output.items()])
msg = f"{engine.state.epoch} | {engine.state.iteration}: {output_items}"
with open(fp, "a") as h:
h.write(msg)
h.write("\n")
def log_data_stats(engine, fp=None, **kwargs):
"""Helper method to log mean/std of input batch of images and median of batch of targets."""
assert fp
x, y = engine.state.batch
output = {
"batch xmean": x.mean().item(),
"batch xstd": x.std().item(),
"batch ymedian": y.median().item(),
}
output_items = " - ".join([f"{m}:{v:.4f}" for m, v in output.items()])
msg = f"{engine.state.epoch} | {engine.state.iteration}: {output_items}"
with open(fp, "a") as h:
h.write(msg)
h.write("\n")
def run(
train_batch_size,
val_batch_size,
epochs,
lr,
momentum,
log_interval,
log_dir,
checkpoint_every,
resume_from,
crash_iteration=-1,
deterministic=False,
):
# Setup seed to have same model's initialization:
manual_seed(75)
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
writer = SummaryWriter(log_dir=log_dir)
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
model.to(device) # Move model before creating optimizer
criterion = nn.NLLLoss()
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
lr_scheduler = StepLR(optimizer, step_size=1, gamma=0.5)
# Setup trainer and evaluator
if deterministic:
tqdm.write("Setup deterministic trainer")
trainer = create_supervised_trainer(model, optimizer, criterion, device=device, deterministic=deterministic)
evaluator = create_supervised_evaluator(
model, metrics={"accuracy": Accuracy(), "nll": Loss(criterion)}, device=device
)
# Apply learning rate scheduling
@trainer.on(Events.EPOCH_COMPLETED)
def lr_step(engine):
lr_scheduler.step()
pbar = tqdm(initial=0, leave=False, total=len(train_loader), desc=f"Epoch {0} - loss: {0:.4f} - lr: {lr:.4f}")
@trainer.on(Events.ITERATION_COMPLETED(every=log_interval))
def log_training_loss(engine):
lr = optimizer.param_groups[0]["lr"]
pbar.desc = f"Epoch {engine.state.epoch} - loss: {engine.state.output:.4f} - lr: {lr:.4f}"
pbar.update(log_interval)
writer.add_scalar("training/loss", engine.state.output, engine.state.iteration)
writer.add_scalar("lr", lr, engine.state.iteration)
if crash_iteration > 0:
@trainer.on(Events.ITERATION_COMPLETED(once=crash_iteration))
def _(engine):
raise Exception(f"STOP at {engine.state.iteration}")
if resume_from is not None:
@trainer.on(Events.STARTED)
def _(engine):
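            # when resuming, fast-forward the progress bar to the position within the current epoch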
pbar.n = engine.state.iteration % engine.state.epoch_length
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
pbar.refresh()
evaluator.run(train_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
tqdm.write(
f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
writer.add_scalar("training/avg_loss", avg_nll, engine.state.epoch)
writer.add_scalar("training/avg_accuracy", avg_accuracy, engine.state.epoch)
# Compute and log validation metrics
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
evaluator.run(val_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
tqdm.write(
f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
pbar.n = pbar.last_print_n = 0
writer.add_scalar("valdation/avg_loss", avg_nll, engine.state.epoch)
writer.add_scalar("valdation/avg_accuracy", avg_accuracy, engine.state.epoch)
# Setup object to checkpoint
objects_to_checkpoint = {"trainer": trainer, "model": model, "optimizer": optimizer, "lr_scheduler": lr_scheduler}
training_checkpoint = Checkpoint(
to_save=objects_to_checkpoint,
save_handler=DiskSaver(log_dir, require_empty=False),
n_saved=None,
global_step_transform=lambda *_: trainer.state.epoch,
)
trainer.add_event_handler(Events.EPOCH_COMPLETED(every=checkpoint_every), training_checkpoint)
# Setup logger to print and dump into file: model weights, model grads and data stats
# - first 3 iterations
# - 4 iterations after checkpointing
# This helps to compare resumed training with checkpointed training
def log_event_filter(e, event):
if event in [1, 2, 3]:
return True
elif 0 <= (event % (checkpoint_every * e.state.epoch_length)) < 5:
return True
return False
fp = Path(log_dir) / ("run.log" if resume_from is None else "resume_run.log")
fp = fp.as_posix()
for h in [log_data_stats, log_model_weights, log_model_grads]:
trainer.add_event_handler(Events.ITERATION_COMPLETED(event_filter=log_event_filter), h, model=model, fp=fp)
if resume_from is not None:
tqdm.write(f"Resume from the checkpoint: {resume_from}")
checkpoint = torch.load(resume_from)
Checkpoint.load_objects(to_load=objects_to_checkpoint, checkpoint=checkpoint)
try:
# Synchronize random states
manual_seed(15)
trainer.run(train_loader, max_epochs=epochs)
except Exception as e:
import traceback
print(traceback.format_exc())
pbar.close()
writer.close()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)")
parser.add_argument(
"--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)"
)
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)")
parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)")
parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)")
parser.add_argument(
"--log_interval", type=int, default=10, help="how many batches to wait before logging training status"
)
parser.add_argument(
"--log_dir", type=str, default="/tmp/mnist_save_resume", help="log directory for Tensorboard log output"
)
parser.add_argument("--checkpoint_every", type=int, default=1, help="Checkpoint training every X epochs")
parser.add_argument(
"--resume_from", type=str, default=None, help="Path to the checkpoint .pt file to resume training from"
)
parser.add_argument("--crash_iteration", type=int, default=-1, help="Iteration at which to raise an exception")
parser.add_argument(
"--deterministic", action="store_true", help="Deterministic training with dataflow synchronization"
)
args = parser.parse_args()
run(
args.batch_size,
args.val_batch_size,
args.epochs,
args.lr,
args.momentum,
args.log_interval,
args.log_dir,
args.checkpoint_every,
args.resume_from,
args.crash_iteration,
args.deterministic,
)
|
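"""
MNIST example with training and validation monitoring using Visdom (plots are
created and updated directly through the visdom client).
"""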
from argparse import ArgumentParser
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from ignite.engine import create_supervised_evaluator, create_supervised_trainer, Events
from ignite.metrics import Accuracy, Loss
try:
import visdom
except ImportError:
raise ModuleNotFoundError("No visdom package is found. Please install it with command: \n pip install visdom")
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True
)
val_loader = DataLoader(
MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False
)
return train_loader, val_loader
def create_plot_window(vis, xlabel, ylabel, title):
return vis.line(X=np.array([1]), Y=np.array([np.nan]), opts=dict(xlabel=xlabel, ylabel=ylabel, title=title))
def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval):
vis = visdom.Visdom()
# if not vis.check_connection():
# raise RuntimeError("Visdom server not running. Please run python -m visdom.server")
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
model.to(device) # Move model before creating optimizer
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
trainer = create_supervised_trainer(model, optimizer, F.nll_loss, device=device)
evaluator = create_supervised_evaluator(
model, metrics={"accuracy": Accuracy(), "nll": Loss(F.nll_loss)}, device=device
)
train_loss_window = create_plot_window(vis, "#Iterations", "Loss", "Training Loss")
train_avg_loss_window = create_plot_window(vis, "#Iterations", "Loss", "Training Average Loss")
train_avg_accuracy_window = create_plot_window(vis, "#Iterations", "Accuracy", "Training Average Accuracy")
val_avg_loss_window = create_plot_window(vis, "#Epochs", "Loss", "Validation Average Loss")
val_avg_accuracy_window = create_plot_window(vis, "#Epochs", "Accuracy", "Validation Average Accuracy")
@trainer.on(Events.ITERATION_COMPLETED(every=log_interval))
def log_training_loss(engine):
print(
f"Epoch[{engine.state.epoch}] Iteration[{engine.state.iteration}/{len(train_loader)}] "
f"Loss: {engine.state.output:.2f}"
)
vis.line(
X=np.array([engine.state.iteration]),
Y=np.array([engine.state.output]),
update="append",
win=train_loss_window,
)
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
evaluator.run(train_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
print(
f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
vis.line(
X=np.array([engine.state.epoch]), Y=np.array([avg_accuracy]), win=train_avg_accuracy_window, update="append"
)
vis.line(X=np.array([engine.state.epoch]), Y=np.array([avg_nll]), win=train_avg_loss_window, update="append")
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
evaluator.run(val_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
print(
f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
vis.line(
X=np.array([engine.state.epoch]), Y=np.array([avg_accuracy]), win=val_avg_accuracy_window, update="append"
)
vis.line(X=np.array([engine.state.epoch]), Y=np.array([avg_nll]), win=val_avg_loss_window, update="append")
# kick everything off
trainer.run(train_loader, max_epochs=epochs)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)")
parser.add_argument(
"--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)"
)
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)")
parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)")
parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)")
parser.add_argument(
"--log_interval", type=int, default=10, help="how many batches to wait before logging training status"
)
parser.add_argument("--log_file", type=str, default=None, help="log file to log output to")
args = parser.parse_args()
run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum, args.log_interval)
|
"""
MNIST example with training and validation monitoring using Weights & Biases
Requirements:
Weights & Biases: `pip install wandb`
Usage:
Make sure you are logged into Weights & Biases (use the `wandb` command).
Run the example:
```bash
python mnist_with_wandb_logger.py
```
Go to https://wandb.com and explore your experiment.
"""
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from ignite.contrib.handlers.wandb_logger import global_step_from_engine, WandBLogger
from ignite.engine import create_supervised_evaluator, create_supervised_trainer, Events
from ignite.handlers import ModelCheckpoint
from ignite.metrics import Accuracy, Loss
from ignite.utils import setup_logger
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True
)
val_loader = DataLoader(
MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False
)
return train_loader, val_loader
def run(train_batch_size, val_batch_size, epochs, lr, momentum):
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
model.to(device) # Move model before creating optimizer
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
criterion = nn.CrossEntropyLoss()
trainer = create_supervised_trainer(model, optimizer, criterion, device=device)
trainer.logger = setup_logger("Trainer")
metrics = {"accuracy": Accuracy(), "loss": Loss(criterion)}
train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
train_evaluator.logger = setup_logger("Train Evaluator")
validation_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
validation_evaluator.logger = setup_logger("Val Evaluator")
@trainer.on(Events.EPOCH_COMPLETED)
def compute_metrics(engine):
train_evaluator.run(train_loader)
validation_evaluator.run(val_loader)
wandb_logger = WandBLogger(
project="pytorch-ignite-integration",
name="ignite-mnist-example",
config={
"train_batch_size": train_batch_size,
"val_batch_size": val_batch_size,
"epochs": epochs,
"lr": lr,
"momentum": momentum,
},
)
wandb_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED(every=100),
tag="training",
output_transform=lambda loss: {"batchloss": loss},
)
for tag, evaluator in [("training", train_evaluator), ("validation", validation_evaluator)]:
wandb_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag=tag,
metric_names=["loss", "accuracy"],
global_step_transform=lambda *_: trainer.state.iteration,
)
wandb_logger.attach_opt_params_handler(
trainer, event_name=Events.ITERATION_COMPLETED(every=100), optimizer=optimizer
)
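    # log the model's gradients and parameters to W&B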
wandb_logger.watch(model, log="all")
def score_function(engine):
return engine.state.metrics["accuracy"]
model_checkpoint = ModelCheckpoint(
wandb_logger.run.dir,
n_saved=2,
filename_prefix="best",
score_function=score_function,
score_name="validation_accuracy",
global_step_transform=global_step_from_engine(trainer),
)
validation_evaluator.add_event_handler(Events.COMPLETED, model_checkpoint, {"model": model})
# kick everything off
trainer.run(train_loader, max_epochs=epochs)
wandb_logger.close()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)")
parser.add_argument(
"--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)"
)
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)")
parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)")
parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)")
args = parser.parse_args()
run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum)
|
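"""
MNIST example with an ignite ProgressBar attached to the trainer and optional
GPU usage monitoring via GpuInfo.
"""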
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from ignite.contrib.handlers import ProgressBar
from ignite.engine import create_supervised_evaluator, create_supervised_trainer, Events
from ignite.metrics import Accuracy, Loss, RunningAverage
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True
)
val_loader = DataLoader(
MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False
)
return train_loader, val_loader
def run(train_batch_size, val_batch_size, epochs, lr, momentum, display_gpu_info):
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
model.to(device) # Move model before creating optimizer
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
trainer = create_supervised_trainer(model, optimizer, F.nll_loss, device=device)
evaluator = create_supervised_evaluator(
model, metrics={"accuracy": Accuracy(), "nll": Loss(F.nll_loss)}, device=device
)
RunningAverage(output_transform=lambda x: x).attach(trainer, "loss")
if display_gpu_info:
from ignite.contrib.metrics import GpuInfo
GpuInfo().attach(trainer, name="gpu")
pbar = ProgressBar(persist=True)
pbar.attach(trainer, metric_names="all")
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
evaluator.run(train_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
pbar.log_message(
f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
evaluator.run(val_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
pbar.log_message(
f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
pbar.n = pbar.last_print_n = 0
trainer.run(train_loader, max_epochs=epochs)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)")
parser.add_argument(
"--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)"
)
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)")
parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)")
parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)")
parser.add_argument(
"--display_gpu_info",
action="store_true",
help="Display gpu usage info. This needs python 3.X and pynvml package",
)
args = parser.parse_args()
run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum, args.display_gpu_info)
|
"""
MNIST example with training and validation monitoring using Visdom.
Requirements:
Visdom (https://github.com/facebookresearch/visdom.git):
`pip install git+https://github.com/facebookresearch/visdom.git`
Usage:
Start visdom server:
```bash
visdom -logging_level 30
```
Run the example:
```bash
python mnist_with_visdom_logger.py
```
"""
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from ignite.contrib.handlers.visdom_logger import (
global_step_from_engine,
GradsScalarHandler,
VisdomLogger,
WeightsScalarHandler,
)
from ignite.engine import create_supervised_evaluator, create_supervised_trainer, Events
from ignite.handlers import ModelCheckpoint
from ignite.metrics import Accuracy, Loss
from ignite.utils import setup_logger
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True
)
val_loader = DataLoader(
MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False
)
return train_loader, val_loader
def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_dir):
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
model.to(device) # Move model before creating optimizer
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
criterion = nn.CrossEntropyLoss()
trainer = create_supervised_trainer(model, optimizer, criterion, device=device)
trainer.logger = setup_logger("Trainer")
metrics = {"accuracy": Accuracy(), "loss": Loss(criterion)}
train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
train_evaluator.logger = setup_logger("Train Evaluator")
validation_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
validation_evaluator.logger = setup_logger("Val Evaluator")
@trainer.on(Events.EPOCH_COMPLETED)
def compute_metrics(engine):
train_evaluator.run(train_loader)
validation_evaluator.run(val_loader)
vd_logger = VisdomLogger(env="mnist_training")
vd_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED(every=100),
tag="training",
output_transform=lambda loss: {"batchloss": loss},
)
for tag, evaluator in [("training", train_evaluator), ("validation", validation_evaluator)]:
vd_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag=tag,
metric_names=["loss", "accuracy"],
global_step_transform=global_step_from_engine(trainer),
)
vd_logger.attach_opt_params_handler(trainer, event_name=Events.ITERATION_COMPLETED(every=100), optimizer=optimizer)
vd_logger.attach(trainer, log_handler=WeightsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100))
vd_logger.attach(trainer, log_handler=GradsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100))
def score_function(engine):
return engine.state.metrics["accuracy"]
model_checkpoint = ModelCheckpoint(
log_dir,
n_saved=2,
filename_prefix="best",
score_function=score_function,
score_name="validation_accuracy",
global_step_transform=global_step_from_engine(trainer),
)
validation_evaluator.add_event_handler(Events.COMPLETED, model_checkpoint, {"model": model})
# kick everything off
trainer.run(train_loader, max_epochs=epochs)
vd_logger.close()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)")
parser.add_argument(
"--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)"
)
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)")
parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)")
parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)")
parser.add_argument("--log_dir", type=str, default="mnist_visdom_logs", help="log directory for training output")
args = parser.parse_args()
run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum, args.log_dir)
|
"""
MNIST example with training and validation monitoring using Tensorboard.
Requirements:
TensorboardX (https://github.com/lanpa/tensorboard-pytorch): `pip install tensorboardX`
or PyTorch >= 1.2 which supports Tensorboard
Tensorboard: `pip install tensorflow` (or just install tensorboard without the rest of tensorflow)
Usage:
Start tensorboard:
```bash
tensorboard --logdir=/tmp/tensorboard_logs/
```
Run the example:
```bash
python mnist_with_tensorboard.py --log_dir=/tmp/tensorboard_logs
```
"""
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from ignite.engine import create_supervised_evaluator, create_supervised_trainer, Events
from ignite.metrics import Accuracy, Loss
try:
from tensorboardX import SummaryWriter
except ImportError:
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
raise ModuleNotFoundError(
"This module requires either tensorboardX or torch >= 1.2.0. "
"You may install tensorboardX with command: \n pip install tensorboardX \n"
"or upgrade PyTorch using your package manager of choice (pip or conda)."
)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True
)
val_loader = DataLoader(
MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False
)
return train_loader, val_loader
def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval, log_dir):
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
writer = SummaryWriter(log_dir=log_dir)
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
model.to(device) # Move model before creating optimizer
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
criterion = nn.NLLLoss()
trainer = create_supervised_trainer(model, optimizer, criterion, device=device)
val_metrics = {"accuracy": Accuracy(), "nll": Loss(criterion)}
evaluator = create_supervised_evaluator(model, metrics=val_metrics, device=device)
@trainer.on(Events.ITERATION_COMPLETED(every=log_interval))
def log_training_loss(engine):
print(
f"Epoch[{engine.state.epoch}] Iteration[{engine.state.iteration}/{len(train_loader)}] "
f"Loss: {engine.state.output:.2f}"
)
writer.add_scalar("training/loss", engine.state.output, engine.state.iteration)
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
evaluator.run(train_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
print(
f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
writer.add_scalar("training/avg_loss", avg_nll, engine.state.epoch)
writer.add_scalar("training/avg_accuracy", avg_accuracy, engine.state.epoch)
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
evaluator.run(val_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
print(
f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
writer.add_scalar("valdation/avg_loss", avg_nll, engine.state.epoch)
writer.add_scalar("valdation/avg_accuracy", avg_accuracy, engine.state.epoch)
# kick everything off
trainer.run(train_loader, max_epochs=epochs)
writer.close()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)")
parser.add_argument(
"--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)"
)
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)")
parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)")
parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)")
parser.add_argument(
"--log_interval", type=int, default=10, help="how many batches to wait before logging training status"
)
parser.add_argument(
"--log_dir", type=str, default="tensorboard_logs", help="log directory for Tensorboard log output"
)
args = parser.parse_args()
run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum, args.log_interval, args.log_dir)
|
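"""
MNIST example with a tqdm progress bar and per-event timing printed at the end
of each epoch.
"""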
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from tqdm import tqdm
from ignite.engine import create_supervised_evaluator, create_supervised_trainer, Events
from ignite.metrics import Accuracy, Loss
from ignite.utils import setup_logger
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True
)
val_loader = DataLoader(
MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False
)
return train_loader, val_loader
def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval):
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
model.to(device) # Move model before creating optimizer
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
criterion = nn.NLLLoss()
trainer = create_supervised_trainer(model, optimizer, criterion, device=device)
trainer.logger = setup_logger("trainer")
val_metrics = {"accuracy": Accuracy(), "nll": Loss(criterion)}
evaluator = create_supervised_evaluator(model, metrics=val_metrics, device=device)
evaluator.logger = setup_logger("evaluator")
pbar = tqdm(initial=0, leave=False, total=len(train_loader), desc=f"ITERATION - loss: {0:.2f}")
@trainer.on(Events.ITERATION_COMPLETED(every=log_interval))
def log_training_loss(engine):
pbar.desc = f"ITERATION - loss: {engine.state.output:.2f}"
pbar.update(log_interval)
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
pbar.refresh()
evaluator.run(train_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
tqdm.write(
f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
evaluator.run(val_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
tqdm.write(
f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
pbar.n = pbar.last_print_n = 0
@trainer.on(Events.EPOCH_COMPLETED | Events.COMPLETED)
def log_time(engine):
tqdm.write(f"{trainer.last_event_name.name} took { trainer.state.times[trainer.last_event_name.name]} seconds")
trainer.run(train_loader, max_epochs=epochs)
pbar.close()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)")
parser.add_argument(
"--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)"
)
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)")
parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)")
parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)")
parser.add_argument(
"--log_interval", type=int, default=10, help="how many batches to wait before logging training status"
)
args = parser.parse_args()
run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum, args.log_interval)
|
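"""
DCGAN example on several torchvision datasets (CIFAR10, LSUN, MNIST, ImageFolder,
LFW, fake data) using an ignite Engine with checkpointing, running-average metrics,
a progress bar and loss plots.
"""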
import argparse
import os
import random
import warnings
from pathlib import Path
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
from ignite.contrib.handlers import ProgressBar
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint, Timer
from ignite.metrics import RunningAverage
try:
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
except ImportError:
raise ModuleNotFoundError(
"Please install torchvision to run this example, for example "
"via conda by running 'conda install -c pytorch torchvision'. "
)
PRINT_FREQ = 100
FAKE_IMG_FNAME = "fake_sample_epoch_{:04d}.png"
REAL_IMG_FNAME = "real_sample_epoch_{:04d}.png"
LOGS_FNAME = "logs.tsv"
PLOT_FNAME = "plot.svg"
SAMPLES_FNAME = "samples.svg"
CKPT_PREFIX = "networks"
class Net(nn.Module):
"""A base class for both generator and the discriminator.
Provides a common weight initialization scheme.
"""
def weights_init(self):
for m in self.modules():
classname = m.__class__.__name__
if "Conv" in classname:
m.weight.data.normal_(0.0, 0.02)
elif "BatchNorm" in classname:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def forward(self, x):
return x
class Generator(Net):
"""Generator network.
Args:
        z_dim (int): Size of the latent z vector.
        nf (int): Number of filters in the second-to-last deconv layer.
        nc (int): Number of output channels.
"""
def __init__(self, z_dim, nf, nc):
super(Generator, self).__init__()
self.net = nn.Sequential(
# input is Z, going into a convolution
nn.ConvTranspose2d(in_channels=z_dim, out_channels=nf * 8, kernel_size=4, stride=1, padding=0, bias=False),
nn.BatchNorm2d(nf * 8),
nn.ReLU(inplace=True),
# state size. (nf*8) x 4 x 4
nn.ConvTranspose2d(in_channels=nf * 8, out_channels=nf * 4, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(nf * 4),
nn.ReLU(inplace=True),
# state size. (nf*4) x 8 x 8
nn.ConvTranspose2d(in_channels=nf * 4, out_channels=nf * 2, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(nf * 2),
nn.ReLU(inplace=True),
# state size. (nf*2) x 16 x 16
nn.ConvTranspose2d(in_channels=nf * 2, out_channels=nf, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(nf),
nn.ReLU(inplace=True),
# state size. (nf) x 32 x 32
nn.ConvTranspose2d(in_channels=nf, out_channels=nc, kernel_size=4, stride=2, padding=1, bias=False),
nn.Tanh()
# state size. (nc) x 64 x 64
)
self.weights_init()
def forward(self, x):
return self.net(x)
class Discriminator(Net):
"""Discriminator network.
Args:
        nc (int): Number of input channels.
        nf (int): Number of filters in the first conv layer.
"""
def __init__(self, nc, nf):
super(Discriminator, self).__init__()
self.net = nn.Sequential(
# input is (nc) x 64 x 64
nn.Conv2d(in_channels=nc, out_channels=nf, kernel_size=4, stride=2, padding=1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nf) x 32 x 32
nn.Conv2d(in_channels=nf, out_channels=nf * 2, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(nf * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nf*2) x 16 x 16
nn.Conv2d(in_channels=nf * 2, out_channels=nf * 4, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(nf * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nf*4) x 8 x 8
nn.Conv2d(in_channels=nf * 4, out_channels=nf * 8, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(nf * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nf*8) x 4 x 4
nn.Conv2d(in_channels=nf * 8, out_channels=1, kernel_size=4, stride=1, padding=0, bias=False),
nn.Sigmoid(),
)
self.weights_init()
def forward(self, x):
output = self.net(x)
return output.view(-1, 1).squeeze(1)
def check_manual_seed(seed):
"""If manual seed is not specified, choose a random one and communicate it to the user."""
seed = seed or random.randint(1, 10000)
random.seed(seed)
torch.manual_seed(seed)
print(f"Using manual seed: {seed}")
def check_dataset(dataset, dataroot):
"""
Args:
dataset (str): Name of the dataset to use. See CLI help for details
dataroot (str): root directory where the dataset will be stored.
Returns:
dataset (data.Dataset): torchvision Dataset object
"""
resize = transforms.Resize(64)
crop = transforms.CenterCrop(64)
to_tensor = transforms.ToTensor()
normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
if dataset in {"imagenet", "folder", "lfw"}:
dataset = dset.ImageFolder(root=dataroot, transform=transforms.Compose([resize, crop, to_tensor, normalize]))
nc = 3
elif dataset == "lsun":
dataset = dset.LSUN(
root=dataroot, classes=["bedroom_train"], transform=transforms.Compose([resize, crop, to_tensor, normalize])
)
nc = 3
elif dataset == "cifar10":
dataset = dset.CIFAR10(
root=dataroot, download=True, transform=transforms.Compose([resize, to_tensor, normalize])
)
nc = 3
elif dataset == "mnist":
dataset = dset.MNIST(root=dataroot, download=True, transform=transforms.Compose([resize, to_tensor, normalize]))
nc = 1
elif dataset == "fake":
dataset = dset.FakeData(size=256, image_size=(3, 64, 64), transform=to_tensor)
nc = 3
else:
raise RuntimeError(f"Invalid dataset name: {dataset}")
return dataset, nc
def main(
dataset,
dataroot,
z_dim,
g_filters,
d_filters,
batch_size,
epochs,
learning_rate,
beta_1,
saved_G,
saved_D,
seed,
n_workers,
device,
alpha,
output_dir,
):
# seed
check_manual_seed(seed)
# data
dataset, num_channels = check_dataset(dataset, dataroot)
loader = data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=n_workers, drop_last=True)
    # networks
netG = Generator(z_dim, g_filters, num_channels).to(device)
netD = Discriminator(num_channels, d_filters).to(device)
# criterion
bce = nn.BCELoss()
# optimizers
optimizerG = optim.Adam(netG.parameters(), lr=learning_rate, betas=(beta_1, 0.999))
optimizerD = optim.Adam(netD.parameters(), lr=learning_rate, betas=(beta_1, 0.999))
# load pre-trained models
if saved_G:
netG.load_state_dict(torch.load(saved_G))
if saved_D:
netD.load_state_dict(torch.load(saved_D))
# misc
real_labels = torch.ones(batch_size, device=device)
fake_labels = torch.zeros(batch_size, device=device)
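    # fixed noise is reused every epoch so generated samples can be compared across epochs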
fixed_noise = torch.randn(batch_size, z_dim, 1, 1, device=device)
def get_noise():
return torch.randn(batch_size, z_dim, 1, 1, device=device)
# The main function, processing a batch of examples
def step(engine, batch):
# unpack the batch. It comes from a dataset, so we have <images, labels> pairs. Discard labels.
real, _ = batch
real = real.to(device)
# -----------------------------------------------------------
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
netD.zero_grad()
# train with real
output = netD(real)
errD_real = bce(output, real_labels)
D_x = output.mean().item()
errD_real.backward()
# get fake image from generator
noise = get_noise()
fake = netG(noise)
# train with fake
output = netD(fake.detach())
errD_fake = bce(output, fake_labels)
D_G_z1 = output.mean().item()
errD_fake.backward()
# gradient update
errD = errD_real + errD_fake
optimizerD.step()
# -----------------------------------------------------------
# (2) Update G network: maximize log(D(G(z)))
netG.zero_grad()
# Update generator. We want to make a step that will make it more likely that discriminator outputs "real"
output = netD(fake)
errG = bce(output, real_labels)
D_G_z2 = output.mean().item()
errG.backward()
# gradient update
optimizerG.step()
return {"errD": errD.item(), "errG": errG.item(), "D_x": D_x, "D_G_z1": D_G_z1, "D_G_z2": D_G_z2}
# ignite objects
trainer = Engine(step)
checkpoint_handler = ModelCheckpoint(output_dir, CKPT_PREFIX, n_saved=10, require_empty=False)
timer = Timer(average=True)
# attach running average metrics
monitoring_metrics = ["errD", "errG", "D_x", "D_G_z1", "D_G_z2"]
RunningAverage(alpha=alpha, output_transform=lambda x: x["errD"]).attach(trainer, "errD")
RunningAverage(alpha=alpha, output_transform=lambda x: x["errG"]).attach(trainer, "errG")
RunningAverage(alpha=alpha, output_transform=lambda x: x["D_x"]).attach(trainer, "D_x")
RunningAverage(alpha=alpha, output_transform=lambda x: x["D_G_z1"]).attach(trainer, "D_G_z1")
RunningAverage(alpha=alpha, output_transform=lambda x: x["D_G_z2"]).attach(trainer, "D_G_z2")
# attach progress bar
pbar = ProgressBar()
pbar.attach(trainer, metric_names=monitoring_metrics)
@trainer.on(Events.ITERATION_COMPLETED(every=PRINT_FREQ))
def print_logs(engine):
fname = output_dir / LOGS_FNAME
columns = ["iteration"] + list(engine.state.metrics.keys())
values = [str(engine.state.iteration)] + [str(round(value, 5)) for value in engine.state.metrics.values()]
with open(fname, "a") as f:
if f.tell() == 0:
print("\t".join(columns), file=f)
print("\t".join(values), file=f)
message = f"[{engine.state.epoch}/{epochs}][{engine.state.iteration % len(loader)}/{len(loader)}]"
for name, value in zip(columns, values):
message += f" | {name}: {value}"
pbar.log_message(message)
# adding handlers using `trainer.on` decorator API
@trainer.on(Events.EPOCH_COMPLETED)
def save_fake_example(engine):
fake = netG(fixed_noise)
path = output_dir / FAKE_IMG_FNAME.format(engine.state.epoch)
vutils.save_image(fake.detach(), path, normalize=True)
# adding handlers using `trainer.on` decorator API
@trainer.on(Events.EPOCH_COMPLETED)
def save_real_example(engine):
img, y = engine.state.batch
path = output_dir / REAL_IMG_FNAME.format(engine.state.epoch)
vutils.save_image(img, path, normalize=True)
# adding handlers using `trainer.add_event_handler` method API
trainer.add_event_handler(
event_name=Events.EPOCH_COMPLETED, handler=checkpoint_handler, to_save={"netG": netG, "netD": netD}
)
# automatically adding handlers via a special `attach` method of `Timer` handler
timer.attach(
trainer,
start=Events.EPOCH_STARTED,
resume=Events.ITERATION_STARTED,
pause=Events.ITERATION_COMPLETED,
step=Events.ITERATION_COMPLETED,
)
# adding handlers using `trainer.on` decorator API
@trainer.on(Events.EPOCH_COMPLETED)
def print_times(engine):
pbar.log_message(f"Epoch {engine.state.epoch} done. Time per batch: {timer.value():.3f}[s]")
timer.reset()
# adding handlers using `trainer.on` decorator API
@trainer.on(Events.EPOCH_COMPLETED)
def create_plots(engine):
try:
import matplotlib as mpl
mpl.use("agg")
import matplotlib.pyplot as plt
import pandas as pd
except ImportError:
warnings.warn("Loss plots will not be generated -- pandas or matplotlib not found")
else:
df = pd.read_csv(output_dir / LOGS_FNAME, delimiter="\t", index_col="iteration")
_ = df.plot(subplots=True, figsize=(20, 20))
_ = plt.xlabel("Iteration number")
fig = plt.gcf()
path = output_dir / PLOT_FNAME
fig.savefig(path)
# adding handlers using `trainer.on` decorator API
@trainer.on(Events.EXCEPTION_RAISED)
def handle_exception(engine, e):
if isinstance(e, KeyboardInterrupt) and (engine.state.iteration > 1):
engine.terminate()
warnings.warn("KeyboardInterrupt caught. Exiting gracefully.")
create_plots(engine)
checkpoint_handler(engine, {"netG_exception": netG, "netD_exception": netD})
else:
raise e
# Setup is done. Now let's run the training
trainer.run(loader, epochs)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--dataset",
required=True,
choices={"cifar10", "lsun", "imagenet", "folder", "lfw", "fake", "mnist"},
help="Type of the dataset to be used.",
)
parser.add_argument("--dataroot", required=True, help="path to dataset")
parser.add_argument("--workers", type=int, default=2, help="number of data loading workers")
parser.add_argument("--batch-size", type=int, default=64, help="input batch size")
parser.add_argument("--z-dim", type=int, default=100, help="size of the latent z vector")
parser.add_argument(
"--g-filters", type=int, default=64, help="Number of filters in the second-to-last generator deconv layer"
)
parser.add_argument("--d-filters", type=int, default=64, help="Number of filters in first discriminator conv layer")
parser.add_argument("--epochs", type=int, default=25, help="number of epochs to train for")
parser.add_argument("--lr", type=float, default=0.0002, help="learning rate")
parser.add_argument("--beta-1", type=float, default=0.5, help="beta_1 for adam")
parser.add_argument("--no-cuda", action="store_true", help="disables cuda")
parser.add_argument("--saved-G", default="", help="path to pickled generator (to continue training)")
parser.add_argument("--saved-D", default="", help="path to pickled discriminator (to continue training)")
parser.add_argument("--output-dir", default=".", help="directory to output images and model checkpoints")
parser.add_argument("--seed", type=int, help="manual seed")
parser.add_argument("--alpha", type=float, default=0.98, help="smoothing constant for exponential moving averages")
args = parser.parse_args()
dev = "cpu" if (not torch.cuda.is_available() or args.no_cuda) else "cuda:0"
args.output_dir = Path(args.output_dir)
try:
args.output_dir.mkdir(parents=True)
except FileExistsError:
if (not args.output_dir.is_dir()) or (len(os.listdir(args.output_dir)) > 0):
raise FileExistsError("Please provide a path to a non-existing or empty directory.")
main(
dataset=args.dataset,
dataroot=args.dataroot,
z_dim=args.z_dim,
g_filters=args.g_filters,
d_filters=args.d_filters,
batch_size=args.batch_size,
epochs=args.epochs,
learning_rate=args.lr,
beta_1=args.beta_1,
saved_D=args.saved_D,
saved_G=args.saved_G,
seed=args.seed,
device=dev,
n_workers=args.workers,
alpha=args.alpha,
output_dir=args.output_dir,
)
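    # Example invocation (hypothetical script name and paths, for illustration only):
    #   python dcgan.py --dataset cifar10 --dataroot /path/to/data --output-dir /path/to/empty_dir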
|
import fire
import torch
from torch.cuda.amp import autocast, GradScaler
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from torchvision.models import wide_resnet50_2
from utils import get_train_eval_loaders
from ignite.contrib.handlers import ProgressBar
from ignite.engine import convert_tensor, create_supervised_evaluator, Engine, Events
from ignite.handlers import Timer
from ignite.metrics import Accuracy, Loss
def main(dataset_path, batch_size=256, max_epochs=10):
assert torch.cuda.is_available()
    assert torch.backends.cudnn.enabled, "torch.cuda.amp requires cudnn backend to be enabled."
torch.backends.cudnn.benchmark = True
device = "cuda"
train_loader, test_loader, eval_train_loader = get_train_eval_loaders(dataset_path, batch_size=batch_size)
model = wide_resnet50_2(num_classes=100).to(device)
optimizer = SGD(model.parameters(), lr=0.01)
criterion = CrossEntropyLoss().to(device)
scaler = GradScaler()
def train_step(engine, batch):
x = convert_tensor(batch[0], device, non_blocking=True)
y = convert_tensor(batch[1], device, non_blocking=True)
optimizer.zero_grad()
# Runs the forward pass with autocasting.
with autocast():
y_pred = model(x)
loss = criterion(y_pred, y)
# Scales loss. Calls backward() on scaled loss to create scaled gradients.
# Backward passes under autocast are not recommended.
# Backward ops run in the same precision that autocast used for corresponding forward ops.
scaler.scale(loss).backward()
# scaler.step() first unscales the gradients of the optimizer's assigned params.
# If these gradients do not contain infs or NaNs, optimizer.step() is then called,
# otherwise, optimizer.step() is skipped.
scaler.step(optimizer)
# Updates the scale for next iteration.
scaler.update()
return loss.item()
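    # Note: if gradient clipping were needed here, the gradients would have to be unscaled
    # first. A minimal sketch (not part of this benchmark):
    #   scaler.scale(loss).backward()
    #   scaler.unscale_(optimizer)
    #   torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
    #   scaler.step(optimizer)
    #   scaler.update()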
trainer = Engine(train_step)
timer = Timer(average=True)
timer.attach(trainer, step=Events.EPOCH_COMPLETED)
ProgressBar(persist=True).attach(trainer, output_transform=lambda out: {"batch loss": out})
metrics = {"Accuracy": Accuracy(), "Loss": Loss(criterion)}
evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)
def log_metrics(engine, title):
for name in metrics:
print(f"\t{title} {name}: {engine.state.metrics[name]:.2f}")
@trainer.on(Events.COMPLETED)
def run_validation(_):
print(f"- Mean elapsed time for 1 epoch: {timer.value()}")
print("- Metrics:")
with evaluator.add_event_handler(Events.COMPLETED, log_metrics, "Train"):
evaluator.run(eval_train_loader)
with evaluator.add_event_handler(Events.COMPLETED, log_metrics, "Test"):
evaluator.run(test_loader)
trainer.run(train_loader, max_epochs=max_epochs)
if __name__ == "__main__":
fire.Fire(main)
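    # Example invocation via python-fire (hypothetical file name and dataset path):
    #   python benchmark_torch_cuda_amp.py /path/to/cifar100 --batch_size=256 --max_epochs=10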
|
import fire
import torch
from apex import amp
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from torchvision.models import wide_resnet50_2
from utils import get_train_eval_loaders
from ignite.contrib.handlers import ProgressBar
from ignite.engine import convert_tensor, create_supervised_evaluator, Engine, Events
from ignite.handlers import Timer
from ignite.metrics import Accuracy, Loss
def main(dataset_path, batch_size=256, max_epochs=10, opt="O1"):
assert torch.cuda.is_available()
assert torch.backends.cudnn.enabled, "NVIDIA/Apex:Amp requires cudnn backend to be enabled."
torch.backends.cudnn.benchmark = True
device = "cuda"
train_loader, test_loader, eval_train_loader = get_train_eval_loaders(dataset_path, batch_size=batch_size)
model = wide_resnet50_2(num_classes=100).to(device)
optimizer = SGD(model.parameters(), lr=0.01)
criterion = CrossEntropyLoss().to(device)
model, optimizer = amp.initialize(model, optimizer, opt_level=opt)
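    # opt_level selects the Apex mixed-precision recipe: "O0" is pure FP32, "O1" patches
    # selected ops to run in FP16 (the commonly recommended default), "O2" casts the model
    # to FP16 while keeping FP32 master weights and batch norm, and "O3" is pure FP16.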
def train_step(engine, batch):
x = convert_tensor(batch[0], device, non_blocking=True)
y = convert_tensor(batch[1], device, non_blocking=True)
optimizer.zero_grad()
y_pred = model(x)
loss = criterion(y_pred, y)
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
return loss.item()
trainer = Engine(train_step)
timer = Timer(average=True)
timer.attach(trainer, step=Events.EPOCH_COMPLETED)
ProgressBar(persist=True).attach(trainer, output_transform=lambda out: {"batch loss": out})
metrics = {"Accuracy": Accuracy(), "Loss": Loss(criterion)}
evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)
def log_metrics(engine, title):
for name in metrics:
print(f"\t{title} {name}: {engine.state.metrics[name]:.2f}")
@trainer.on(Events.COMPLETED)
def run_validation(_):
print(f"- Mean elapsed time for 1 epoch: {timer.value()}")
print("- Metrics:")
with evaluator.add_event_handler(Events.COMPLETED, log_metrics, "Train"):
evaluator.run(eval_train_loader)
with evaluator.add_event_handler(Events.COMPLETED, log_metrics, "Test"):
evaluator.run(test_loader)
trainer.run(train_loader, max_epochs=max_epochs)
if __name__ == "__main__":
fire.Fire(main)
|
import random
from torch.utils.data import DataLoader, Subset
from torchvision.datasets.cifar import CIFAR100
from torchvision.transforms import Compose, Normalize, Pad, RandomCrop, RandomErasing, RandomHorizontalFlip, ToTensor
def get_train_eval_loaders(path, batch_size=256):
"""Setup the dataflow:
- load CIFAR100 train and test datasets
- setup train/test image transforms
- horizontally flipped randomly and augmented using cutout.
- each mini-batch contained 256 examples
- setup train/test data loaders
Returns:
train_loader, test_loader, eval_train_loader
"""
train_transform = Compose(
[
Pad(4),
RandomCrop(32),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
RandomErasing(),
]
)
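    # RandomErasing is applied after ToTensor because it operates on tensor images; it serves
    # as the "cutout" augmentation mentioned in the docstring above.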
test_transform = Compose([ToTensor(), Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
train_dataset = CIFAR100(root=path, train=True, transform=train_transform, download=True)
test_dataset = CIFAR100(root=path, train=False, transform=test_transform, download=False)
    train_eval_indices = [random.randint(0, len(train_dataset) - 1) for _ in range(len(test_dataset))]
train_eval_dataset = Subset(train_dataset, train_eval_indices)
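    # Training metrics are evaluated on a random subset of the train set of the same size as
    # the test set, presumably to keep the extra evaluation pass cheap while still tracking
    # over-fitting.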
train_loader = DataLoader(
train_dataset, batch_size=batch_size, num_workers=12, shuffle=True, drop_last=True, pin_memory=True
)
test_loader = DataLoader(
test_dataset, batch_size=batch_size, num_workers=12, shuffle=False, drop_last=False, pin_memory=True
)
eval_train_loader = DataLoader(
train_eval_dataset, batch_size=batch_size, num_workers=12, shuffle=False, drop_last=False, pin_memory=True
)
return train_loader, test_loader, eval_train_loader
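# Example usage (hypothetical path; CIFAR100 is downloaded to it on first use):
#   train_loader, test_loader, eval_train_loader = get_train_eval_loaders("/path/to/cifar100", batch_size=128)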
|