from ignite.contrib.engines.tbptt import create_supervised_tbptt_trainer, Tbptt_Events
|
import numbers
import warnings
from functools import partial
from typing import Any, Callable, cast, Dict, Iterable, Mapping, Optional, Sequence, Union
import torch
import torch.nn as nn
from torch.optim.optimizer import Optimizer
from torch.utils.data.distributed import DistributedSampler
# https://github.com/pytorch/ignite/issues/2773
try:
from torch.optim.lr_scheduler import LRScheduler as PyTorchLRScheduler
except ImportError:
from torch.optim.lr_scheduler import _LRScheduler as PyTorchLRScheduler
import ignite.distributed as idist
from ignite.contrib.handlers import (
ClearMLLogger,
global_step_from_engine,
MLflowLogger,
NeptuneLogger,
PolyaxonLogger,
ProgressBar,
TensorboardLogger,
VisdomLogger,
WandBLogger,
)
from ignite.contrib.handlers.base_logger import BaseLogger
from ignite.contrib.metrics import GpuInfo
from ignite.engine import Engine, Events
from ignite.handlers import Checkpoint, DiskSaver, EarlyStopping, TerminateOnNan
from ignite.handlers.checkpoint import BaseSaveHandler
from ignite.handlers.param_scheduler import ParamScheduler
from ignite.metrics import RunningAverage
from ignite.metrics.metric import RunningBatchWise
from ignite.utils import deprecated
def setup_common_training_handlers(
trainer: Engine,
train_sampler: Optional[DistributedSampler] = None,
to_save: Optional[Mapping] = None,
save_every_iters: int = 1000,
output_path: Optional[str] = None,
lr_scheduler: Optional[Union[ParamScheduler, PyTorchLRScheduler]] = None,
with_gpu_stats: bool = False,
output_names: Optional[Iterable[str]] = None,
with_pbars: bool = True,
with_pbar_on_iters: bool = True,
log_every_iters: int = 100,
stop_on_nan: bool = True,
clear_cuda_cache: bool = True,
save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
**kwargs: Any,
) -> None:
"""Helper method to setup trainer with common handlers (it also supports distributed configuration):
- :class:`~ignite.handlers.terminate_on_nan.TerminateOnNan`
- handler to setup learning rate scheduling
- :class:`~ignite.handlers.checkpoint.ModelCheckpoint`
- :class:`~ignite.metrics.RunningAverage` on `update_function` output
- Two progress bars on epochs and optionally on iterations
Args:
trainer: trainer engine. Output of trainer's `update_function` should be a dictionary
or sequence or a single tensor.
train_sampler: Optional distributed sampler used to call
`set_epoch` method on epoch started event.
to_save: dictionary with objects to save in the checkpoint. This argument is passed to
:class:`~ignite.handlers.checkpoint.Checkpoint` instance.
        save_every_iters: saving interval. By default, `to_save` objects are stored
            every 1000 iterations.
output_path: output path to indicate where `to_save` objects are stored. Argument is mutually
exclusive with ``save_handler``.
lr_scheduler: learning rate scheduler
as native torch LRScheduler or ignite's parameter scheduler.
with_gpu_stats: if True, :class:`~ignite.contrib.metrics.GpuInfo` is attached to the
trainer. This requires `pynvml` package to be installed.
output_names: list of names associated with `update_function` output dictionary.
with_pbars: if True, two progress bars on epochs and optionally on iterations are attached.
Default, True.
with_pbar_on_iters: if True, a progress bar on iterations is attached to the trainer.
Default, True.
log_every_iters: logging interval for :class:`~ignite.contrib.metrics.GpuInfo` and for
epoch-wise progress bar. Default, 100.
stop_on_nan: if True, :class:`~ignite.handlers.terminate_on_nan.TerminateOnNan` handler is added to the trainer.
Default, True.
clear_cuda_cache: if True, `torch.cuda.empty_cache()` is called every end of epoch.
Default, True.
save_handler: Method or callable
class to use to store ``to_save``. See :class:`~ignite.handlers.checkpoint.Checkpoint` for more details.
Argument is mutually exclusive with ``output_path``.
kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.checkpoint.Checkpoint`.
"""
if idist.get_world_size() > 1:
_setup_common_distrib_training_handlers(
trainer,
train_sampler=train_sampler,
to_save=to_save,
save_every_iters=save_every_iters,
output_path=output_path,
lr_scheduler=lr_scheduler,
with_gpu_stats=with_gpu_stats,
output_names=output_names,
with_pbars=with_pbars,
with_pbar_on_iters=with_pbar_on_iters,
log_every_iters=log_every_iters,
stop_on_nan=stop_on_nan,
clear_cuda_cache=clear_cuda_cache,
save_handler=save_handler,
**kwargs,
)
else:
if train_sampler is not None and isinstance(train_sampler, DistributedSampler):
warnings.warn(
"Argument train_sampler is a distributed sampler,"
" but either there is no distributed setting or world size is < 2. "
"Train sampler argument will be ignored",
UserWarning,
)
_setup_common_training_handlers(
trainer,
to_save=to_save,
save_every_iters=save_every_iters,
output_path=output_path,
lr_scheduler=lr_scheduler,
with_gpu_stats=with_gpu_stats,
output_names=output_names,
with_pbars=with_pbars,
with_pbar_on_iters=with_pbar_on_iters,
log_every_iters=log_every_iters,
stop_on_nan=stop_on_nan,
clear_cuda_cache=clear_cuda_cache,
save_handler=save_handler,
**kwargs,
)
setup_common_distrib_training_handlers = setup_common_training_handlers
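# Illustrative usage sketch (not part of the module; names such as `model`, `optimizer` and the
# "batch loss" output key are placeholder assumptions): a trainer whose update function returns
# {"batch loss": loss} could be wired up with checkpointing, LR scheduling and progress bars
# roughly as follows.
def _example_setup_common_handlers(
    trainer: Engine,
    model: nn.Module,
    optimizer: Optimizer,
    lr_scheduler: PyTorchLRScheduler,
) -> None:
    setup_common_training_handlers(
        trainer,
        to_save={"model": model, "optimizer": optimizer},
        save_every_iters=500,
        output_path="/tmp/checkpoints",
        lr_scheduler=lr_scheduler,
        output_names=["batch loss"],  # must match the keys returned by the trainer's update function
        with_pbars=True,
    )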
def _setup_common_training_handlers(
trainer: Engine,
to_save: Optional[Mapping] = None,
save_every_iters: int = 1000,
output_path: Optional[str] = None,
lr_scheduler: Optional[Union[ParamScheduler, PyTorchLRScheduler]] = None,
with_gpu_stats: bool = False,
output_names: Optional[Iterable[str]] = None,
with_pbars: bool = True,
with_pbar_on_iters: bool = True,
log_every_iters: int = 100,
stop_on_nan: bool = True,
clear_cuda_cache: bool = True,
save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
**kwargs: Any,
) -> None:
if output_path is not None and save_handler is not None:
raise ValueError(
"Arguments output_path and save_handler are mutually exclusive. Please, define only one of them"
)
if stop_on_nan:
trainer.add_event_handler(Events.ITERATION_COMPLETED, TerminateOnNan())
if lr_scheduler is not None:
if isinstance(lr_scheduler, PyTorchLRScheduler):
trainer.add_event_handler(
Events.ITERATION_COMPLETED, lambda engine: cast(PyTorchLRScheduler, lr_scheduler).step()
)
else:
trainer.add_event_handler(Events.ITERATION_STARTED, lr_scheduler)
if torch.cuda.is_available() and clear_cuda_cache:
trainer.add_event_handler(Events.EPOCH_COMPLETED, empty_cuda_cache)
if to_save is not None:
if output_path is None and save_handler is None:
raise ValueError(
"If to_save argument is provided then output_path or save_handler arguments should be also defined"
)
if output_path is not None:
save_handler = DiskSaver(dirname=output_path, require_empty=False)
checkpoint_handler = Checkpoint(
to_save, cast(Union[Callable, BaseSaveHandler], save_handler), filename_prefix="training", **kwargs
)
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=save_every_iters), checkpoint_handler)
if with_gpu_stats:
GpuInfo().attach(
trainer, name="gpu", event_name=Events.ITERATION_COMPLETED(every=log_every_iters) # type: ignore[arg-type]
)
if output_names is not None:
def output_transform(x: Any, index: int, name: str) -> Any:
if isinstance(x, Mapping):
return x[name]
elif isinstance(x, Sequence):
return x[index]
elif isinstance(x, (torch.Tensor, numbers.Number)):
return x
else:
raise TypeError(
"Unhandled type of update_function's output. "
f"It should either mapping or sequence, but given {type(x)}"
)
for i, n in enumerate(output_names):
RunningAverage(output_transform=partial(output_transform, index=i, name=n)).attach(
trainer, n, usage=RunningBatchWise()
)
if with_pbars:
if with_pbar_on_iters:
ProgressBar(persist=False).attach(
trainer, metric_names="all", event_name=Events.ITERATION_COMPLETED(every=log_every_iters)
)
ProgressBar(persist=True, bar_format="").attach(
trainer, event_name=Events.EPOCH_STARTED, closing_event_name=Events.COMPLETED
)
def _setup_common_distrib_training_handlers(
trainer: Engine,
train_sampler: Optional[DistributedSampler] = None,
to_save: Optional[Mapping] = None,
save_every_iters: int = 1000,
output_path: Optional[str] = None,
lr_scheduler: Optional[Union[ParamScheduler, PyTorchLRScheduler]] = None,
with_gpu_stats: bool = False,
output_names: Optional[Iterable[str]] = None,
with_pbars: bool = True,
with_pbar_on_iters: bool = True,
log_every_iters: int = 100,
stop_on_nan: bool = True,
clear_cuda_cache: bool = True,
save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
**kwargs: Any,
) -> None:
_setup_common_training_handlers(
trainer,
to_save=to_save,
output_path=output_path,
save_every_iters=save_every_iters,
lr_scheduler=lr_scheduler,
with_gpu_stats=with_gpu_stats,
output_names=output_names,
with_pbars=(idist.get_rank() == 0) and with_pbars,
with_pbar_on_iters=with_pbar_on_iters,
log_every_iters=log_every_iters,
stop_on_nan=stop_on_nan,
clear_cuda_cache=clear_cuda_cache,
save_handler=save_handler,
**kwargs,
)
if train_sampler is not None:
if not isinstance(train_sampler, DistributedSampler):
raise TypeError("Train sampler should be torch DistributedSampler and have `set_epoch` method")
@trainer.on(Events.EPOCH_STARTED)
def distrib_set_epoch(engine: Engine) -> None:
train_sampler.set_epoch(engine.state.epoch - 1)
def empty_cuda_cache(_: Engine) -> None:
torch.cuda.empty_cache()
import gc
gc.collect()
@deprecated(
"0.4.0",
"0.6.0",
("Please use instead: setup_tb_logging, setup_visdom_logging or setup_mlflow_logging etc.",),
raise_exception=True,
)
def setup_any_logging(
logger: BaseLogger,
logger_module: Any,
trainer: Engine,
optimizers: Optional[Union[Optimizer, Dict[str, Optimizer], Dict[None, Optimizer]]],
evaluators: Optional[Union[Engine, Dict[str, Engine]]],
log_every_iters: int,
) -> None:
pass
def _setup_logging(
logger: BaseLogger,
trainer: Engine,
optimizers: Optional[Union[Optimizer, Dict[str, Optimizer], Dict[None, Optimizer]]],
evaluators: Optional[Union[Engine, Dict[str, Engine]]],
log_every_iters: int,
) -> None:
if optimizers is not None:
if not isinstance(optimizers, (Optimizer, Mapping)):
raise TypeError("Argument optimizers should be either a single optimizer or a dictionary or optimizers")
if evaluators is not None:
if not isinstance(evaluators, (Engine, Mapping)):
raise TypeError("Argument evaluators should be either a single engine or a dictionary or engines")
if log_every_iters is None:
log_every_iters = 1
logger.attach_output_handler(
trainer, event_name=Events.ITERATION_COMPLETED(every=log_every_iters), tag="training", metric_names="all"
)
if optimizers is not None:
# Log optimizer parameters
if isinstance(optimizers, Optimizer):
optimizers = {None: optimizers}
for k, optimizer in optimizers.items():
logger.attach_opt_params_handler(
trainer, Events.ITERATION_STARTED(every=log_every_iters), optimizer, param_name="lr", tag=k
)
if evaluators is not None:
# Log evaluation metrics
if isinstance(evaluators, Engine):
evaluators = {"validation": evaluators}
event_name = Events.ITERATION_COMPLETED if isinstance(logger, WandBLogger) else None
gst = global_step_from_engine(trainer, custom_event_name=event_name)
for k, evaluator in evaluators.items():
logger.attach_output_handler(
evaluator, event_name=Events.COMPLETED, tag=k, metric_names="all", global_step_transform=gst
)
def setup_tb_logging(
output_path: str,
trainer: Engine,
optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
log_every_iters: int = 100,
**kwargs: Any,
) -> TensorboardLogger:
"""Method to setup TensorBoard logging on trainer and a list of evaluators. Logged metrics are:
- Training metrics, e.g. running average loss values
- Learning rate(s)
- Evaluation metrics
Args:
output_path: logging directory path
trainer: trainer engine
optimizers: single or dictionary of
torch optimizers. If a dictionary, keys are used as tags arguments for logging.
evaluators: single or dictionary of evaluators. If a dictionary,
keys are used as tags arguments for logging.
log_every_iters: interval for loggers attached to iteration events. To log every iteration,
value can be set to 1 or None.
kwargs: optional keyword args to be passed to construct the logger.
Returns:
:class:`~ignite.contrib.handlers.tensorboard_logger.TensorboardLogger`
"""
logger = TensorboardLogger(log_dir=output_path, **kwargs)
_setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
return logger
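# Illustrative usage sketch (placeholder `trainer`, `evaluator`, `optimizer`; the output path is
# an assumption): logs running training metrics and the learning rate from the trainer, and
# validation metrics from the evaluator under the "validation" tag.
def _example_setup_tb_logging(trainer: Engine, evaluator: Engine, optimizer: Optimizer) -> TensorboardLogger:
    tb_logger = setup_tb_logging(
        output_path="/tmp/tb_logs",
        trainer=trainer,
        optimizers=optimizer,
        evaluators={"validation": evaluator},
        log_every_iters=100,
    )
    return tb_logger  # remember to call tb_logger.close() when training is done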
def setup_visdom_logging(
trainer: Engine,
optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
log_every_iters: int = 100,
**kwargs: Any,
) -> VisdomLogger:
"""Method to setup Visdom logging on trainer and a list of evaluators. Logged metrics are:
- Training metrics, e.g. running average loss values
- Learning rate(s)
- Evaluation metrics
Args:
trainer: trainer engine
optimizers: single or dictionary of
torch optimizers. If a dictionary, keys are used as tags arguments for logging.
evaluators: single or dictionary of evaluators. If a dictionary,
keys are used as tags arguments for logging.
log_every_iters: interval for loggers attached to iteration events. To log every iteration,
value can be set to 1 or None.
kwargs: optional keyword args to be passed to construct the logger.
Returns:
:class:`~ignite.contrib.handlers.visdom_logger.VisdomLogger`
"""
logger = VisdomLogger(**kwargs)
_setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
return logger
def setup_mlflow_logging(
trainer: Engine,
optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
log_every_iters: int = 100,
**kwargs: Any,
) -> MLflowLogger:
"""Method to setup MLflow logging on trainer and a list of evaluators. Logged metrics are:
- Training metrics, e.g. running average loss values
- Learning rate(s)
- Evaluation metrics
Args:
trainer: trainer engine
optimizers: single or dictionary of
torch optimizers. If a dictionary, keys are used as tags arguments for logging.
evaluators: single or dictionary of evaluators. If a dictionary,
keys are used as tags arguments for logging.
log_every_iters: interval for loggers attached to iteration events. To log every iteration,
value can be set to 1 or None.
kwargs: optional keyword args to be passed to construct the logger.
Returns:
:class:`~ignite.contrib.handlers.mlflow_logger.MLflowLogger`
"""
logger = MLflowLogger(**kwargs)
_setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
return logger
def setup_neptune_logging(
trainer: Engine,
optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
log_every_iters: int = 100,
**kwargs: Any,
) -> NeptuneLogger:
"""Method to setup Neptune logging on trainer and a list of evaluators. Logged metrics are:
- Training metrics, e.g. running average loss values
- Learning rate(s)
- Evaluation metrics
Args:
trainer: trainer engine
optimizers: single or dictionary of
torch optimizers. If a dictionary, keys are used as tags arguments for logging.
evaluators: single or dictionary of evaluators. If a dictionary,
keys are used as tags arguments for logging.
log_every_iters: interval for loggers attached to iteration events. To log every iteration,
value can be set to 1 or None.
kwargs: optional keyword args to be passed to construct the logger.
Returns:
:class:`~ignite.contrib.handlers.neptune_logger.NeptuneLogger`
"""
logger = NeptuneLogger(**kwargs)
_setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
return logger
def setup_wandb_logging(
trainer: Engine,
optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
log_every_iters: int = 100,
**kwargs: Any,
) -> WandBLogger:
"""Method to setup WandB logging on trainer and a list of evaluators. Logged metrics are:
- Training metrics, e.g. running average loss values
- Learning rate(s)
- Evaluation metrics
Args:
trainer: trainer engine
optimizers: single or dictionary of
torch optimizers. If a dictionary, keys are used as tags arguments for logging.
evaluators: single or dictionary of evaluators. If a dictionary,
keys are used as tags arguments for logging.
log_every_iters: interval for loggers attached to iteration events. To log every iteration,
value can be set to 1 or None.
kwargs: optional keyword args to be passed to construct the logger.
Returns:
:class:`~ignite.contrib.handlers.wandb_logger.WandBLogger`
"""
logger = WandBLogger(**kwargs)
_setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
return logger
def setup_plx_logging(
trainer: Engine,
optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
log_every_iters: int = 100,
**kwargs: Any,
) -> PolyaxonLogger:
"""Method to setup Polyaxon logging on trainer and a list of evaluators. Logged metrics are:
- Training metrics, e.g. running average loss values
- Learning rate(s)
- Evaluation metrics
Args:
trainer: trainer engine
optimizers: single or dictionary of
torch optimizers. If a dictionary, keys are used as tags arguments for logging.
evaluators: single or dictionary of evaluators. If a dictionary,
keys are used as tags arguments for logging.
log_every_iters: interval for loggers attached to iteration events. To log every iteration,
value can be set to 1 or None.
kwargs: optional keyword args to be passed to construct the logger.
Returns:
:class:`~ignite.contrib.handlers.polyaxon_logger.PolyaxonLogger`
"""
logger = PolyaxonLogger(**kwargs)
_setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
return logger
def setup_clearml_logging(
trainer: Engine,
optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
log_every_iters: int = 100,
**kwargs: Any,
) -> ClearMLLogger:
"""Method to setup ClearML logging on trainer and a list of evaluators. Logged metrics are:
- Training metrics, e.g. running average loss values
- Learning rate(s)
- Evaluation metrics
Args:
trainer: trainer engine
optimizers: single or dictionary of
torch optimizers. If a dictionary, keys are used as tags arguments for logging.
evaluators: single or dictionary of evaluators. If a dictionary,
keys are used as tags arguments for logging.
log_every_iters: interval for loggers attached to iteration events. To log every iteration,
value can be set to 1 or None.
kwargs: optional keyword args to be passed to construct the logger.
Returns:
:class:`~ignite.contrib.handlers.clearml_logger.ClearMLLogger`
"""
logger = ClearMLLogger(**kwargs)
_setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
return logger
def setup_trains_logging(
trainer: Engine,
optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
log_every_iters: int = 100,
**kwargs: Any,
) -> ClearMLLogger:
"""``setup_trains_logging`` was renamed to :func:`~ignite.contrib.engines.common.setup_clearml_logging`."""
warnings.warn("setup_trains_logging was renamed to setup_clearml_logging.")
return setup_clearml_logging(trainer, optimizers, evaluators, log_every_iters, **kwargs)
get_default_score_fn = Checkpoint.get_default_score_fn
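# Illustrative note (usage assumption; "loss" is a placeholder metric name): the default score
# function returned here reads `engine.state.metrics[metric_name]` and multiplies it by
# `score_sign`, so error-like metrics can be ranked with a negative sign.
def _example_neg_loss_score(evaluator: Engine) -> float:
    score_fn = get_default_score_fn("loss", score_sign=-1.0)
    return score_fn(evaluator)  # equal to -evaluator.state.metrics["loss"]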
def gen_save_best_models_by_val_score(
save_handler: Union[Callable, BaseSaveHandler],
evaluator: Engine,
models: Union[torch.nn.Module, Dict[str, torch.nn.Module]],
metric_name: str,
n_saved: int = 3,
trainer: Optional[Engine] = None,
tag: str = "val",
score_sign: float = 1.0,
**kwargs: Any,
) -> Checkpoint:
"""Method adds a handler to ``evaluator`` to save ``n_saved`` of best models based on the metric
(named by ``metric_name``) provided by ``evaluator`` (i.e. ``evaluator.state.metrics[metric_name]``).
Models with highest metric value will be retained. The logic of how to store objects is delegated to
``save_handler``.
Args:
save_handler: Method or callable class to
use to save engine and other provided objects. Function receives two objects: checkpoint as a dictionary
            and filename. If ``save_handler`` is a callable class, it can
            inherit from :class:`~ignite.handlers.checkpoint.BaseSaveHandler` and optionally implement a ``remove`` method
            to keep a fixed number of saved checkpoints. In case the user needs to save the engine's checkpoint to disk,
``save_handler`` can be defined with :class:`~ignite.handlers.DiskSaver`.
evaluator: evaluation engine used to provide the score
models: model or dictionary with the object to save. Objects should have
implemented ``state_dict`` and ``load_state_dict`` methods.
metric_name: metric name to use for score evaluation. This metric should be present in
`evaluator.state.metrics`.
n_saved: number of best models to store
trainer: trainer engine to fetch the epoch when saving the best model.
tag: score name prefix: `{tag}_{metric_name}`. By default, tag is "val".
score_sign: sign of the score: 1.0 or -1.0. For error-like metrics, e.g. smaller is better,
a negative score sign should be used (objects with larger score are retained). Default, 1.0.
kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.checkpoint.Checkpoint`.
Returns:
A :class:`~ignite.handlers.checkpoint.Checkpoint` handler.
"""
global_step_transform = None
if trainer is not None:
global_step_transform = global_step_from_engine(trainer)
if isinstance(models, nn.Module):
to_save: Dict[str, nn.Module] = {"model": models}
else:
to_save = models
best_model_handler = Checkpoint(
to_save,
save_handler,
filename_prefix="best",
n_saved=n_saved,
global_step_transform=global_step_transform,
score_name=f"{tag}_{metric_name.lower()}",
score_function=get_default_score_fn(metric_name, score_sign=score_sign),
**kwargs,
)
evaluator.add_event_handler(Events.COMPLETED, best_model_handler)
return best_model_handler
def save_best_model_by_val_score(
output_path: str,
evaluator: Engine,
model: torch.nn.Module,
metric_name: str,
n_saved: int = 3,
trainer: Optional[Engine] = None,
tag: str = "val",
score_sign: float = 1.0,
**kwargs: Any,
) -> Checkpoint:
"""Method adds a handler to ``evaluator`` to save on a disk ``n_saved`` of best models based on the metric
(named by ``metric_name``) provided by ``evaluator`` (i.e. ``evaluator.state.metrics[metric_name]``).
Models with highest metric value will be retained.
Args:
output_path: output path to indicate where to save best models
evaluator: evaluation engine used to provide the score
model: model to store
metric_name: metric name to use for score evaluation. This metric should be present in
`evaluator.state.metrics`.
n_saved: number of best models to store
trainer: trainer engine to fetch the epoch when saving the best model.
tag: score name prefix: `{tag}_{metric_name}`. By default, tag is "val".
score_sign: sign of the score: 1.0 or -1.0. For error-like metrics, e.g. smaller is better,
a negative score sign should be used (objects with larger score are retained). Default, 1.0.
kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.checkpoint.Checkpoint`.
Returns:
A :class:`~ignite.handlers.checkpoint.Checkpoint` handler.
"""
return gen_save_best_models_by_val_score(
save_handler=DiskSaver(dirname=output_path, require_empty=False),
evaluator=evaluator,
models=model,
metric_name=metric_name,
n_saved=n_saved,
trainer=trainer,
tag=tag,
score_sign=score_sign,
**kwargs,
)
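# Illustrative usage sketch (placeholder names and output path): keep the two best checkpoints
# ranked by the validation accuracy computed by `evaluator`, with the trainer's epoch embedded
# in the checkpoint filename.
def _example_save_best_by_accuracy(trainer: Engine, evaluator: Engine, model: nn.Module) -> Checkpoint:
    return save_best_model_by_val_score(
        output_path="/tmp/best_models",
        evaluator=evaluator,
        model=model,
        metric_name="accuracy",
        n_saved=2,
        trainer=trainer,
        tag="val",
    )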
def add_early_stopping_by_val_score(
patience: int,
evaluator: Engine,
trainer: Engine,
metric_name: str,
score_sign: float = 1.0,
) -> EarlyStopping:
"""Method setups early stopping handler based on the score (named by `metric_name`) provided by `evaluator`.
Metric value should increase in order to keep training and not early stop.
Args:
patience: number of events to wait if no improvement and then stop the training.
evaluator: evaluation engine used to provide the score
trainer: trainer engine to stop the run if no improvement.
metric_name: metric name to use for score evaluation. This metric should be present in
`evaluator.state.metrics`.
score_sign: sign of the score: 1.0 or -1.0. For error-like metrics, e.g. smaller is better,
a negative score sign should be used (objects with larger score are retained). Default, 1.0.
Returns:
A :class:`~ignite.handlers.early_stopping.EarlyStopping` handler.
"""
es_handler = EarlyStopping(
patience=patience, score_function=get_default_score_fn(metric_name, score_sign=score_sign), trainer=trainer
)
evaluator.add_event_handler(Events.COMPLETED, es_handler)
return es_handler
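# Illustrative usage sketch (placeholder metric name): stop the run if validation accuracy has
# not improved over 5 consecutive evaluator runs; for a loss-like metric a negative `score_sign`
# would be used instead.
def _example_add_early_stopping(trainer: Engine, evaluator: Engine) -> EarlyStopping:
    return add_early_stopping_by_val_score(
        patience=5,
        evaluator=evaluator,
        trainer=trainer,
        metric_name="accuracy",
        score_sign=1.0,
    )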
|
# coding: utf-8
import collections.abc as collections
from typing import Callable, Mapping, Optional, Sequence, Union
import torch
import torch.nn as nn
from torch.optim.optimizer import Optimizer
from ignite.engine import _prepare_batch, Engine, EventEnum
from ignite.utils import apply_to_tensor
class Tbptt_Events(EventEnum):
"""Aditional tbptt events.
Additional events for truncated backpropagation throught time dedicated
trainer.
"""
TIME_ITERATION_STARTED = "time_iteration_started"
TIME_ITERATION_COMPLETED = "time_iteration_completed"
def _detach_hidden(
hidden: Union[torch.Tensor, Sequence, Mapping, str, bytes]
) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:
"""Cut backpropagation graph.
    Auxiliary function to cut the backpropagation graph by detaching the hidden
vector.
"""
return apply_to_tensor(hidden, torch.Tensor.detach)
def create_supervised_tbptt_trainer(
model: nn.Module,
optimizer: Optimizer,
loss_fn: nn.Module,
tbtt_step: int,
dim: int = 0,
device: Optional[str] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
) -> Engine:
"""Create a trainer for truncated backprop through time supervised models.
    Training a recurrent model on long sequences is computationally intensive as
    it requires processing the whole sequence before getting a gradient.
However, when the training loss is computed over many outputs
(`X to many <https://karpathy.github.io/2015/05/21/rnn-effectiveness/>`_),
there is an opportunity to compute a gradient over a subsequence. This is
known as
`truncated backpropagation through time <https://machinelearningmastery.com/
gentle-introduction-backpropagation-time/>`_.
    This supervised trainer applies a gradient optimization step every `tbtt_step`
time steps of the sequence, while backpropagating through the same
`tbtt_step` time steps.
Args:
model: the model to train.
optimizer: the optimizer to use.
loss_fn: the loss function to use.
tbtt_step: the length of time chunks (last one may be smaller).
dim: axis representing the time dimension.
device: device type specification (default: None).
Applies to batches.
non_blocking: if True and this copy is between CPU and GPU,
the copy may occur asynchronously with respect to the host. For other cases,
this argument has no effect.
prepare_batch: function that receives `batch`, `device`,
`non_blocking` and outputs tuple of tensors `(batch_x, batch_y)`.
Returns:
a trainer engine with supervised update function.
.. warning::
The internal use of `device` has changed.
`device` will now *only* be used to move the input data to the correct device.
The `model` should be moved by the user before creating an optimizer.
For more information see:
* `PyTorch Documentation <https://pytorch.org/docs/stable/optim.html#constructing-it>`_
* `PyTorch's Explanation <https://github.com/pytorch/pytorch/issues/7844#issuecomment-503713840>`_
"""
def _update(engine: Engine, batch: Sequence[torch.Tensor]) -> float:
loss_list = []
hidden = None
x, y = batch
for batch_t in zip(x.split(tbtt_step, dim=dim), y.split(tbtt_step, dim=dim)):
x_t, y_t = prepare_batch(batch_t, device=device, non_blocking=non_blocking)
# Fire event for start of iteration
engine.fire_event(Tbptt_Events.TIME_ITERATION_STARTED)
            # Forward, backward and optimize
model.train()
optimizer.zero_grad()
if hidden is None:
y_pred_t, hidden = model(x_t)
else:
hidden = _detach_hidden(hidden)
y_pred_t, hidden = model(x_t, hidden)
loss_t = loss_fn(y_pred_t, y_t)
loss_t.backward()
optimizer.step()
# Setting state of engine for consistent behaviour
engine.state.output = loss_t.item()
loss_list.append(loss_t.item())
# Fire event for end of iteration
engine.fire_event(Tbptt_Events.TIME_ITERATION_COMPLETED)
# return average loss over the time splits
return sum(loss_list) / len(loss_list)
engine = Engine(_update)
engine.register_events(*Tbptt_Events)
return engine
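# Illustrative usage sketch (placeholder model and optimizer; MSE loss and a 10-step chunk are
# assumptions): build a TBPTT trainer that splits sequences along dim 0 and reacts to the extra
# per-chunk events registered above.
def _example_tbptt_trainer(model: nn.Module, optimizer: Optimizer) -> Engine:
    trainer = create_supervised_tbptt_trainer(
        model=model,
        optimizer=optimizer,
        loss_fn=nn.MSELoss(),
        tbtt_step=10,
        dim=0,
    )

    @trainer.on(Tbptt_Events.TIME_ITERATION_COMPLETED)
    def log_chunk_loss(engine: Engine) -> None:
        # engine.state.output holds the loss of the time chunk that was just processed
        print(f"chunk loss: {engine.state.output:.4f}")

    return trainer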
|
""" ``ignite.contrib.handlers.param_scheduler`` was moved to ``ignite.handlers.param_scheduler``.
Note:
``ignite.contrib.handlers.param_scheduler`` was moved to ``ignite.handlers.param_scheduler``.
Please refer to :mod:`~ignite.handlers.param_scheduler`.
"""
import warnings
removed_in = "0.6.0"
deprecation_warning = (
f"{__file__} has been moved to /ignite/handlers/param_scheduler.py"
+ (f" and will be removed in version {removed_in}" if removed_in else "")
+ ".\n Please refer to the documentation for more details."
)
warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
from ignite.handlers.param_scheduler import (
ConcatScheduler,
CosineAnnealingScheduler,
create_lr_scheduler_with_warmup,
CyclicalScheduler,
LinearCyclicalScheduler,
LRScheduler,
ParamGroupScheduler,
ParamScheduler,
PiecewiseLinear,
)
__all__ = [
"ConcatScheduler",
"CosineAnnealingScheduler",
"LinearCyclicalScheduler",
"LRScheduler",
"ParamGroupScheduler",
"ParamScheduler",
"PiecewiseLinear",
"CyclicalScheduler",
"create_lr_scheduler_with_warmup",
]
ConcatScheduler = ConcatScheduler
CosineAnnealingScheduler = CosineAnnealingScheduler
LinearCyclicalScheduler = LinearCyclicalScheduler
LRScheduler = LRScheduler
ParamGroupScheduler = ParamGroupScheduler
ParamScheduler = ParamScheduler
PiecewiseLinear = PiecewiseLinear
CyclicalScheduler = CyclicalScheduler
create_lr_scheduler_with_warmup = create_lr_scheduler_with_warmup
|
"""MLflow logger and its helper handlers."""
import warnings
from typing import Any, Callable, List, Optional, Union
from torch.optim import Optimizer
from ignite.contrib.handlers.base_logger import BaseLogger, BaseOptimizerParamsHandler, BaseOutputHandler
from ignite.engine import Engine, Events
from ignite.handlers import global_step_from_engine
__all__ = ["MLflowLogger", "OutputHandler", "OptimizerParamsHandler", "global_step_from_engine"]
class MLflowLogger(BaseLogger):
"""
`MLflow <https://mlflow.org>`_ tracking client handler to log parameters and metrics during the training
and validation.
This class requires `mlflow package <https://github.com/mlflow/mlflow/>`_ to be installed:
.. code-block:: bash
pip install mlflow
Args:
tracking_uri: MLflow tracking uri. See MLflow docs for more details
Examples:
.. code-block:: python
from ignite.contrib.handlers.mlflow_logger import *
# Create a logger
mlflow_logger = MLflowLogger()
# Log experiment parameters:
mlflow_logger.log_params({
"seed": seed,
"batch_size": batch_size,
"model": model.__class__.__name__,
"pytorch version": torch.__version__,
"ignite version": ignite.__version__,
"cuda version": torch.version.cuda,
"device name": torch.cuda.get_device_name(0)
})
# Attach the logger to the trainer to log training loss at each iteration
mlflow_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
output_transform=lambda loss: {'loss': loss}
)
# Attach the logger to the evaluator on the training dataset and log NLL, Accuracy metrics after each epoch
# We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer` instead of `train_evaluator`.
mlflow_logger.attach_output_handler(
train_evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="training",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer),
)
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch of the
# `trainer` instead of `evaluator`.
mlflow_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
                    global_step_transform=global_step_from_engine(trainer),
)
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
mlflow_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer,
param_name='lr' # optional
)
"""
def __init__(self, tracking_uri: Optional[str] = None):
try:
import mlflow
except ImportError:
raise ModuleNotFoundError(
"This contrib module requires mlflow to be installed. "
"Please install it with command: \n pip install mlflow"
)
if tracking_uri is not None:
mlflow.set_tracking_uri(tracking_uri)
self.active_run = mlflow.active_run()
if self.active_run is None:
self.active_run = mlflow.start_run()
def __getattr__(self, attr: Any) -> Any:
import mlflow
return getattr(mlflow, attr)
def close(self) -> None:
import mlflow
mlflow.end_run()
def _create_output_handler(self, *args: Any, **kwargs: Any) -> "OutputHandler":
return OutputHandler(*args, **kwargs)
def _create_opt_params_handler(self, *args: Any, **kwargs: Any) -> "OptimizerParamsHandler":
return OptimizerParamsHandler(*args, **kwargs)
class OutputHandler(BaseOutputHandler):
"""Helper handler to log engine's output and/or metrics.
Args:
tag: common title for all produced plots. For example, 'training'
metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
output_transform: output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{'loss': loss1, 'another_loss': loss2}` to label the plot
with corresponding keys.
global_step_transform: global step transform function to output a desired global step.
Input of the function is `(engine, event_name)`. Output of function should be an integer.
Default is None, global_step based on attached engine. If provided,
uses function output as global_step. To setup global step from another engine, please use
:meth:`~ignite.contrib.handlers.mlflow_logger.global_step_from_engine`.
state_attributes: list of attributes of the ``trainer.state`` to plot.
Examples:
.. code-block:: python
from ignite.contrib.handlers.mlflow_logger import *
# Create a logger
mlflow_logger = MLflowLogger()
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer`:
mlflow_logger.attach(
evaluator,
log_handler=OutputHandler(
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
),
event_name=Events.EPOCH_COMPLETED
)
# or equivalently
mlflow_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
)
Another example, where model is evaluated every 500 iterations:
.. code-block:: python
from ignite.contrib.handlers.mlflow_logger import *
@trainer.on(Events.ITERATION_COMPLETED(every=500))
def evaluate(engine):
evaluator.run(validation_set, max_epochs=1)
mlflow_logger = MLflowLogger()
def global_step_transform(*args, **kwargs):
return trainer.state.iteration
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# every 500 iterations. Since evaluator engine does not have access to the training iteration, we
# provide a global_step_transform to return the trainer.state.iteration for the global_step, each time
# evaluator metrics are plotted on MLflow.
mlflow_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metrics=["nll", "accuracy"],
global_step_transform=global_step_transform
)
Another example where the State Attributes ``trainer.state.alpha`` and ``trainer.state.beta``
are also logged along with the NLL and Accuracy after each iteration:
.. code-block:: python
mlflow_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
metrics=["nll", "accuracy"],
state_attributes=["alpha", "beta"],
)
Example of `global_step_transform`:
.. code-block:: python
def global_step_transform(engine, event_name):
return engine.state.get_event_attrib_value(event_name)
.. versionchanged:: 0.4.7
accepts an optional list of `state_attributes`
"""
def __init__(
self,
tag: str,
metric_names: Optional[Union[str, List[str]]] = None,
output_transform: Optional[Callable] = None,
global_step_transform: Optional[Callable[[Engine, Union[str, Events]], int]] = None,
state_attributes: Optional[List[str]] = None,
) -> None:
super(OutputHandler, self).__init__(
tag, metric_names, output_transform, global_step_transform, state_attributes
)
def __call__(self, engine: Engine, logger: MLflowLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, MLflowLogger):
raise TypeError("Handler 'OutputHandler' works only with MLflowLogger")
rendered_metrics = self._setup_output_metrics_state_attrs(engine)
global_step = self.global_step_transform(engine, event_name)
if not isinstance(global_step, int):
raise TypeError(
f"global_step must be int, got {type(global_step)}."
" Please check the output of global_step_transform."
)
# Additionally recheck metric names as MLflow rejects non-valid names with MLflowException
from mlflow.utils.validation import _VALID_PARAM_AND_METRIC_NAMES
metrics = {}
for keys, value in rendered_metrics.items():
key = " ".join(keys)
metrics[key] = value
for key in list(metrics.keys()):
if not _VALID_PARAM_AND_METRIC_NAMES.match(key):
warnings.warn(
f"MLflowLogger output_handler encountered an invalid metric name '{key}' that "
"will be ignored and not logged to MLflow"
)
del metrics[key]
logger.log_metrics(metrics, step=global_step)
class OptimizerParamsHandler(BaseOptimizerParamsHandler):
"""Helper handler to log optimizer parameters
Args:
optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
param_name: parameter name
tag: common title for all produced plots. For example, 'generator'
Examples:
.. code-block:: python
from ignite.contrib.handlers.mlflow_logger import *
# Create a logger
mlflow_logger = MLflowLogger()
            # Optionally, user can specify tracking_uri which corresponds to MLFLOW_TRACKING_URI
# mlflow_logger = MLflowLogger(tracking_uri="uri")
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
mlflow_logger.attach(
trainer,
log_handler=OptimizerParamsHandler(optimizer),
event_name=Events.ITERATION_STARTED
)
# or equivalently
mlflow_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer
)
"""
def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None):
super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
def __call__(self, engine: Engine, logger: MLflowLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, MLflowLogger):
raise TypeError("Handler OptimizerParamsHandler works only with MLflowLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag} " if self.tag else ""
params = {
f"{tag_prefix}{self.param_name} group_{i}": float(param_group[self.param_name])
for i, param_group in enumerate(self.optimizer.param_groups)
}
logger.log_metrics(params, step=global_step)
|
"""Polyaxon logger and its helper handlers."""
from typing import Any, Callable, List, Optional, Union
from torch.optim import Optimizer
from ignite.contrib.handlers.base_logger import BaseLogger, BaseOptimizerParamsHandler, BaseOutputHandler
from ignite.engine import Engine, Events
from ignite.handlers import global_step_from_engine
__all__ = ["PolyaxonLogger", "OutputHandler", "OptimizerParamsHandler", "global_step_from_engine"]
class PolyaxonLogger(BaseLogger):
"""
`Polyaxon tracking client <https://polyaxon.com/>`_ handler to log parameters and metrics during the training
and validation.
This class requires `polyaxon <https://github.com/polyaxon/polyaxon/>`_ package to be installed:
.. code-block:: bash
pip install polyaxon
        # If you are using polyaxon v0.x
pip install polyaxon-client
Args:
args: Positional arguments accepted from
`Experiment <https://polyaxon.com/docs/experimentation/tracking/client/>`_.
kwargs: Keyword arguments accepted from
`Experiment <https://polyaxon.com/docs/experimentation/tracking/client/>`_.
Examples:
.. code-block:: python
from ignite.contrib.handlers.polyaxon_logger import *
# Create a logger
plx_logger = PolyaxonLogger()
# Log experiment parameters:
plx_logger.log_inputs(**{
"seed": seed,
"batch_size": batch_size,
"model": model.__class__.__name__,
"pytorch version": torch.__version__,
"ignite version": ignite.__version__,
"cuda version": torch.version.cuda,
"device name": torch.cuda.get_device_name(0)
})
# Attach the logger to the trainer to log training loss at each iteration
plx_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
output_transform=lambda loss: {"loss": loss}
)
# Attach the logger to the evaluator on the training dataset and log NLL, Accuracy metrics after each epoch
# We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer` instead of `train_evaluator`.
plx_logger.attach_output_handler(
train_evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="training",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer),
)
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch of the
# `trainer` instead of `evaluator`.
plx_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
                global_step_transform=global_step_from_engine(trainer),
)
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
plx_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer,
param_name='lr' # optional
)
# to manually end a run
plx_logger.close()
"""
def __init__(self, *args: Any, **kwargs: Any):
try:
from polyaxon.tracking import Run
self.experiment = Run(*args, **kwargs)
except ImportError:
try:
from polyaxon_client.tracking import Experiment
self.experiment = Experiment(*args, **kwargs)
except ImportError:
raise ModuleNotFoundError(
"This contrib module requires polyaxon to be installed.\n"
"For Polyaxon v1.x please install it with command: \n pip install polyaxon\n"
"For Polyaxon v0.x please install it with command: \n pip install polyaxon-client"
)
def close(self) -> None:
try:
self.experiment.end()
except:
pass
def __getattr__(self, attr: Any) -> Any:
return getattr(self.experiment, attr)
def _create_output_handler(self, *args: Any, **kwargs: Any) -> "OutputHandler":
return OutputHandler(*args, **kwargs)
def _create_opt_params_handler(self, *args: Any, **kwargs: Any) -> "OptimizerParamsHandler":
return OptimizerParamsHandler(*args, **kwargs)
class OutputHandler(BaseOutputHandler):
"""Helper handler to log engine's output and/or metrics.
Args:
tag: common title for all produced plots. For example, "training"
metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
output_transform: output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{"loss": loss1, "another_loss": loss2}` to label the plot
with corresponding keys.
global_step_transform: global step transform function to output a desired global step.
Input of the function is `(engine, event_name)`. Output of function should be an integer.
Default is None, global_step based on attached engine. If provided,
uses function output as global_step. To setup global step from another engine, please use
:meth:`~ignite.contrib.handlers.polyaxon_logger.global_step_from_engine`.
state_attributes: list of attributes of the ``trainer.state`` to plot.
Examples:
.. code-block:: python
from ignite.contrib.handlers.polyaxon_logger import *
# Create a logger
plx_logger = PolyaxonLogger()
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer`:
plx_logger.attach(
evaluator,
log_handler=OutputHandler(
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
),
event_name=Events.EPOCH_COMPLETED
)
# or equivalently
plx_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
)
Another example, where model is evaluated every 500 iterations:
.. code-block:: python
from ignite.contrib.handlers.polyaxon_logger import *
@trainer.on(Events.ITERATION_COMPLETED(every=500))
def evaluate(engine):
evaluator.run(validation_set, max_epochs=1)
plx_logger = PolyaxonLogger()
def global_step_transform(*args, **kwargs):
return trainer.state.iteration
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# every 500 iterations. Since evaluator engine does not have access to the training iteration, we
# provide a global_step_transform to return the trainer.state.iteration for the global_step, each time
# evaluator metrics are plotted on Polyaxon.
plx_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metrics=["nll", "accuracy"],
global_step_transform=global_step_transform
)
Another example where the State Attributes ``trainer.state.alpha`` and ``trainer.state.beta``
are also logged along with the NLL and Accuracy after each iteration:
.. code-block:: python
plx_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
metrics=["nll", "accuracy"],
state_attributes=["alpha", "beta"],
)
Example of `global_step_transform`:
.. code-block:: python
def global_step_transform(engine, event_name):
return engine.state.get_event_attrib_value(event_name)
.. versionchanged:: 0.4.7
accepts an optional list of `state_attributes`
"""
def __init__(
self,
tag: str,
metric_names: Optional[List[str]] = None,
output_transform: Optional[Callable] = None,
global_step_transform: Optional[Callable[[Engine, Union[str, Events]], int]] = None,
state_attributes: Optional[List[str]] = None,
):
super(OutputHandler, self).__init__(
tag, metric_names, output_transform, global_step_transform, state_attributes
)
def __call__(self, engine: Engine, logger: PolyaxonLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, PolyaxonLogger):
raise RuntimeError("Handler 'OutputHandler' works only with PolyaxonLogger")
metrics = self._setup_output_metrics_state_attrs(engine, key_tuple=False)
global_step = self.global_step_transform(engine, event_name)
if not isinstance(global_step, int):
raise TypeError(
f"global_step must be int, got {type(global_step)}."
" Please check the output of global_step_transform."
)
metrics.update({"step": global_step})
logger.log_metrics(**metrics)
class OptimizerParamsHandler(BaseOptimizerParamsHandler):
"""Helper handler to log optimizer parameters
Args:
optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
param_name: parameter name
tag: common title for all produced plots. For example, "generator"
Examples:
.. code-block:: python
from ignite.contrib.handlers.polyaxon_logger import *
# Create a logger
plx_logger = PolyaxonLogger()
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
plx_logger.attach(
trainer,
log_handler=OptimizerParamsHandler(optimizer),
event_name=Events.ITERATION_STARTED
)
# or equivalently
plx_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer
)
"""
def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None):
super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
def __call__(self, engine: Engine, logger: PolyaxonLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, PolyaxonLogger):
raise RuntimeError("Handler OptimizerParamsHandler works only with PolyaxonLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
params = {
f"{tag_prefix}{self.param_name}/group_{i}": float(param_group[self.param_name])
for i, param_group in enumerate(self.optimizer.param_groups)
}
params["step"] = global_step
logger.log_metrics(**params)
|
"""TensorBoard logger and its helper handlers."""
from typing import Any, Callable, List, Optional, Union
from torch.optim import Optimizer
from ignite.contrib.handlers.base_logger import (
BaseLogger,
BaseOptimizerParamsHandler,
BaseOutputHandler,
BaseWeightsHandler,
BaseWeightsScalarHandler,
)
from ignite.engine import Engine, Events
from ignite.handlers import global_step_from_engine
__all__ = [
"TensorboardLogger",
"OptimizerParamsHandler",
"OutputHandler",
"WeightsScalarHandler",
"WeightsHistHandler",
"GradsScalarHandler",
"GradsHistHandler",
"global_step_from_engine",
]
class TensorboardLogger(BaseLogger):
"""
TensorBoard handler to log metrics, model/optimizer parameters, gradients during the training and validation.
By default, this class favors `tensorboardX <https://github.com/lanpa/tensorboardX>`_ package if installed:
.. code-block:: bash
pip install tensorboardX
otherwise, it falls back to using
`PyTorch's SummaryWriter
<https://pytorch.org/docs/stable/tensorboard.html>`_
(>=v1.2.0).
Args:
args: Positional arguments accepted from
`SummaryWriter
<https://pytorch.org/docs/stable/tensorboard.html>`_.
kwargs: Keyword arguments accepted from
`SummaryWriter
<https://pytorch.org/docs/stable/tensorboard.html>`_.
For example, `log_dir` to setup path to the directory where to log.
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Attach the logger to the trainer to log training loss at each iteration
tb_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
output_transform=lambda loss: {"loss": loss}
)
# Attach the logger to the evaluator on the training dataset and log NLL, Accuracy metrics after each epoch
# We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer` instead of `train_evaluator`.
tb_logger.attach_output_handler(
train_evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="training",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer),
)
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch of the
# `trainer` instead of `evaluator`.
tb_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
                global_step_transform=global_step_from_engine(trainer),
)
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
tb_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer,
param_name='lr' # optional
)
# Attach the logger to the trainer to log model's weights norm after each iteration
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(model)
)
# Attach the logger to the trainer to log model's weights as a histogram after each epoch
tb_logger.attach(
trainer,
event_name=Events.EPOCH_COMPLETED,
log_handler=WeightsHistHandler(model)
)
# Attach the logger to the trainer to log model's gradients norm after each iteration
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(model)
)
# Attach the logger to the trainer to log model's gradients as a histogram after each epoch
tb_logger.attach(
trainer,
event_name=Events.EPOCH_COMPLETED,
log_handler=GradsHistHandler(model)
)
# We need to close the logger when we are done
tb_logger.close()
It is also possible to use the logger as context manager:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
with TensorboardLogger(log_dir="experiments/tb_logs") as tb_logger:
trainer = Engine(update_fn)
# Attach the logger to the trainer to log training loss at each iteration
tb_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
output_transform=lambda loss: {"loss": loss}
)
"""
def __init__(self, *args: Any, **kwargs: Any):
try:
from tensorboardX import SummaryWriter
except ImportError:
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
raise ModuleNotFoundError(
"This contrib module requires either tensorboardX or torch >= 1.2.0. "
"You may install tensorboardX with command: \n pip install tensorboardX \n"
"or upgrade PyTorch using your package manager of choice (pip or conda)."
)
self.writer = SummaryWriter(*args, **kwargs)
def __getattr__(self, attr: Any) -> Any:
return getattr(self.writer, attr)
def close(self) -> None:
self.writer.close()
def _create_output_handler(self, *args: Any, **kwargs: Any) -> "OutputHandler":
return OutputHandler(*args, **kwargs)
def _create_opt_params_handler(self, *args: Any, **kwargs: Any) -> "OptimizerParamsHandler":
return OptimizerParamsHandler(*args, **kwargs)
class OutputHandler(BaseOutputHandler):
"""Helper handler to log engine's output, engine's state attributes and/or metrics
Args:
tag: common title for all produced plots. For example, "training"
metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
output_transform: output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{"loss": loss1, "another_loss": loss2}` to label the plot
with corresponding keys.
global_step_transform: global step transform function to output a desired global step.
Input of the function is `(engine, event_name)`. Output of function should be an integer.
Default is None, global_step based on attached engine. If provided,
uses function output as global_step. To setup global step from another engine, please use
:meth:`~ignite.contrib.handlers.tensorboard_logger.global_step_from_engine`.
state_attributes: list of attributes of the ``trainer.state`` to plot.
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer`:
tb_logger.attach(
evaluator,
log_handler=OutputHandler(
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
),
event_name=Events.EPOCH_COMPLETED
)
# or equivalently
tb_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
)
Another example, where model is evaluated every 500 iterations:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
@trainer.on(Events.ITERATION_COMPLETED(every=500))
def evaluate(engine):
evaluator.run(validation_set, max_epochs=1)
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
def global_step_transform(*args, **kwargs):
return trainer.state.iteration
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# every 500 iterations. Since evaluator engine does not have access to the training iteration, we
# provide a global_step_transform to return the trainer.state.iteration for the global_step, each time
# evaluator metrics are plotted on Tensorboard.
tb_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metrics=["nll", "accuracy"],
global_step_transform=global_step_transform
)
Another example where the State Attributes ``trainer.state.alpha`` and ``trainer.state.beta``
are also logged along with the NLL and Accuracy after each iteration:
.. code-block:: python
tb_logger.attach(
trainer,
log_handler=OutputHandler(
tag="training",
metric_names=["nll", "accuracy"],
state_attributes=["alpha", "beta"],
),
event_name=Events.ITERATION_COMPLETED
)
Example of `global_step_transform`:
.. code-block:: python
def global_step_transform(engine, event_name):
return engine.state.get_event_attrib_value(event_name)
.. versionchanged:: 0.4.7
accepts an optional list of `state_attributes`
"""
def __init__(
self,
tag: str,
metric_names: Optional[List[str]] = None,
output_transform: Optional[Callable] = None,
global_step_transform: Optional[Callable[[Engine, Union[str, Events]], int]] = None,
state_attributes: Optional[List[str]] = None,
):
super(OutputHandler, self).__init__(
tag, metric_names, output_transform, global_step_transform, state_attributes
)
def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, TensorboardLogger):
raise RuntimeError("Handler 'OutputHandler' works only with TensorboardLogger")
metrics = self._setup_output_metrics_state_attrs(engine, key_tuple=False)
global_step = self.global_step_transform(engine, event_name)
if not isinstance(global_step, int):
raise TypeError(
f"global_step must be int, got {type(global_step)}."
" Please check the output of global_step_transform."
)
for key, value in metrics.items():
logger.writer.add_scalar(key, value, global_step)
class OptimizerParamsHandler(BaseOptimizerParamsHandler):
"""Helper handler to log optimizer parameters
Args:
optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
param_name: parameter name
tag: common title for all produced plots. For example, "generator"
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
tb_logger.attach(
trainer,
log_handler=OptimizerParamsHandler(optimizer),
event_name=Events.ITERATION_STARTED
)
# or equivalently
tb_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer
)
"""
def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None):
super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, TensorboardLogger):
raise RuntimeError("Handler OptimizerParamsHandler works only with TensorboardLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
params = {
f"{tag_prefix}{self.param_name}/group_{i}": float(param_group[self.param_name])
for i, param_group in enumerate(self.optimizer.param_groups)
}
for k, v in params.items():
logger.writer.add_scalar(k, v, global_step)
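# --- Illustrative sketch (not part of the library) ---------------------------------
# The handler above writes one scalar per optimizer param group, named
# "<tag>/<param_name>/group_<i>". A minimal, self-contained example of the resulting
# tag layout; the function name and the tiny model are hypothetical.
def _example_optimizer_params_tags():
    import torch

    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    handler = OptimizerParamsHandler(optimizer, param_name="lr", tag="generator")
    tag_prefix = f"{handler.tag}/" if handler.tag else ""
    # With a single param group this yields ["generator/lr/group_0"], the scalar name
    # under which the current learning rate would appear in TensorBoard.
    return [
        f"{tag_prefix}{handler.param_name}/group_{i}"
        for i, _ in enumerate(handler.optimizer.param_groups)
    ]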
class WeightsScalarHandler(BaseWeightsScalarHandler):
"""Helper handler to log model's weights as scalars.
Upon construction, the handler iterates over the named parameters of the model and keeps
a reference to the ones permitted by `whitelist`. Then at every call, it applies the
reduction function to each parameter, produces a scalar and logs it.
Args:
model: model to log weights
reduction: function to reduce parameters into scalar
tag: common title for all produced plots. For example, "generator"
whitelist: specific weights to log. Should be a list of model's submodules
or parameter names, or a callable which gets a weight along with its name
and determines if it should be logged. Names should be fully-qualified.
For more information please refer to `PyTorch docs
<https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.
If not given, all of model's weights are logged.
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Attach the logger to the trainer to log model's weights norm after each iteration
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(model, reduction=torch.norm)
)
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Log only `fc` weights
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(
model,
whitelist=['fc']
)
)
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Log weights which have `bias` in their names
def has_bias_in_name(n, p):
return 'bias' in n
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(model, whitelist=has_bias_in_name)
)
.. versionchanged:: 0.4.9
optional argument `whitelist` added.
"""
def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, TensorboardLogger):
raise RuntimeError("Handler 'WeightsScalarHandler' works only with TensorboardLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
for name, p in self.weights:
name = name.replace(".", "/")
logger.writer.add_scalar(
f"{tag_prefix}weights_{self.reduction.__name__}/{name}",
self.reduction(p.data),
global_step,
)
class WeightsHistHandler(BaseWeightsHandler):
"""Helper handler to log model's weights as histograms.
Args:
model: model to log weights
tag: common title for all produced plots. For example, "generator"
whitelist: specific weights to log. Should be a list of model's submodules
or parameter names, or a callable which gets a weight along with its name
and determines if it should be logged. Names should be fully-qualified.
For more information please refer to `PyTorch docs
<https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.
If not given, all of model's weights are logged.
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Attach the logger to the trainer to log model's weights as histograms after each iteration
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsHistHandler(model)
)
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Log weights of `fc` layer
weights = ['fc']
# Attach the logger to the trainer to log weight histograms after each iteration
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsHistHandler(model, whitelist=weights)
)
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Log weights which name include 'conv'.
weight_selector = lambda name, p: 'conv' in name
# Attach the logger to the trainer to log weight histograms after each iteration
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsHistHandler(model, whitelist=weight_selector)
)
.. versionchanged:: 0.4.9
optional argument `whitelist` added.
"""
def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, TensorboardLogger):
raise RuntimeError("Handler 'WeightsHistHandler' works only with TensorboardLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
for name, p in self.weights:
name = name.replace(".", "/")
logger.writer.add_histogram(
tag=f"{tag_prefix}weights/{name}", values=p.data.cpu().numpy(), global_step=global_step
)
class GradsScalarHandler(BaseWeightsScalarHandler):
"""Helper handler to log model's gradients as scalars.
Upon construction, the handler iterates over the named parameters of the model and keeps
a reference to the ones permitted by the `whitelist`. Then at every call, it applies the
reduction function to each parameter's gradient, produces a scalar and logs it.
Args:
model: model to log weights
reduction: function to reduce parameters into scalar
tag: common title for all produced plots. For example, "generator"
whitelist: specific gradients to log. Should be a list of model's submodules
or parameter names, or a callable which gets a weight along with its name
and determines if its gradient should be logged. Names should be
fully-qualified. For more information please refer to `PyTorch docs
<https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.
If not given, all of model's gradients are logged.
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Attach the logger to the trainer to log model's gradients norm after each iteration
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(model, reduction=torch.norm)
)
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Log gradient of `base`
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(
model,
reduction=torch.norm,
whitelist=['base']
)
)
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Log gradient of weights which belong to a `fc` layer
def is_in_fc_layer(n, p):
return 'fc' in n
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(model, whitelist=is_in_fc_layer)
)
.. versionchanged:: 0.4.9
optional argument `whitelist` added.
"""
def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, TensorboardLogger):
raise RuntimeError("Handler 'GradsScalarHandler' works only with TensorboardLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
for name, p in self.weights:
if p.grad is None:
continue
name = name.replace(".", "/")
logger.writer.add_scalar(
f"{tag_prefix}grads_{self.reduction.__name__}/{name}", self.reduction(p.grad), global_step
)
class GradsHistHandler(BaseWeightsHandler):
"""Helper handler to log model's gradients as histograms.
Args:
model: model to log weights
tag: common title for all produced plots. For example, "generator"
whitelist: specific gradients to log. Should be a list of model's submodules
or parameter names, or a callable which gets a weight along with its name
and determines if its gradient should be logged. Names should be
fully-qualified. For more information please refer to `PyTorch docs
<https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.
If not given, all of model's gradients are logged.
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Attach the logger to the trainer to log model's gradients as histograms after each iteration
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsHistHandler(model)
)
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Log gradient of `fc.bias`
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsHistHandler(model, whitelist=['fc.bias'])
)
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Log gradient of weights which have shape (2, 1)
def has_shape_2_1(n, p):
return p.shape == (2,1)
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsHistHandler(model, whitelist=has_shape_2_1)
)
.. versionchanged:: 0.4.9
optional argument `whitelist` added.
"""
def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, TensorboardLogger):
raise RuntimeError("Handler 'GradsHistHandler' works only with TensorboardLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
for name, p in self.weights:
if p.grad is None:
continue
name = name.replace(".", "/")
logger.writer.add_histogram(
tag=f"{tag_prefix}grads/{name}", values=p.grad.cpu().numpy(), global_step=global_step
)
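# --- Illustrative sketch (not part of the library) ---------------------------------
# The handlers above are typically combined on a single TensorboardLogger. Since the
# base logger implements __enter__/__exit__, it can also be used as a context manager
# so that the underlying SummaryWriter is closed automatically. The `trainer` engine,
# `model` and `optimizer` arguments are assumed to exist; the log directory is a
# placeholder and the function name is hypothetical.
def _example_tensorboard_wiring(trainer, model, optimizer):
    from ignite.engine import Events

    with TensorboardLogger(log_dir="experiments/tb_logs") as tb_logger:
        # Loss taken from the trainer output at every iteration
        tb_logger.attach_output_handler(
            trainer,
            event_name=Events.ITERATION_COMPLETED,
            tag="training",
            output_transform=lambda loss: {"loss": loss},
        )
        # Learning rate, weight norms and gradient norms at every iteration
        tb_logger.attach_opt_params_handler(trainer, event_name=Events.ITERATION_STARTED, optimizer=optimizer)
        tb_logger.attach(trainer, event_name=Events.ITERATION_COMPLETED, log_handler=WeightsScalarHandler(model))
        tb_logger.attach(trainer, event_name=Events.ITERATION_COMPLETED, log_handler=GradsScalarHandler(model))
        # trainer.run(train_loader, max_epochs=...) would be started inside this block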
|
"""WandB logger and its helper handlers."""
from typing import Any, Callable, List, Optional, Union
from torch.optim import Optimizer
from ignite.contrib.handlers.base_logger import BaseLogger, BaseOptimizerParamsHandler, BaseOutputHandler
from ignite.engine import Engine, Events
from ignite.handlers import global_step_from_engine
__all__ = ["WandBLogger", "OutputHandler", "OptimizerParamsHandler", "global_step_from_engine"]
class WandBLogger(BaseLogger):
"""`Weights & Biases <https://wandb.ai/site>`_ handler to log metrics, model/optimizer parameters, gradients
during training and validation. It can also be used to log model checkpoints to the Weights & Biases cloud.
.. code-block:: bash
pip install wandb
This class is also a wrapper for the wandb module. This means that you can call any wandb function using
this wrapper. See examples on how to save model parameters and gradients.
Args:
args: Positional arguments accepted by `wandb.init`.
kwargs: Keyword arguments accepted by `wandb.init`.
Please see `wandb.init <https://docs.wandb.ai/library/init>`_ for documentation of possible parameters.
Examples:
.. code-block:: python
from ignite.contrib.handlers.wandb_logger import *
# Create a logger. All parameters are optional. See documentation
# on wandb.init for details.
wandb_logger = WandBLogger(
entity="shared",
project="pytorch-ignite-integration",
name="cnn-mnist",
config={"max_epochs": 10},
tags=["pytorch-ignite", "minst"]
)
# Attach the logger to the trainer to log training loss at each iteration
wandb_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
output_transform=lambda loss: {"loss": loss}
)
# Attach the logger to the evaluator on the training dataset and log NLL, Accuracy metrics after each epoch
# We setup `global_step_transform=lambda *_: trainer.state.iteration` to take iteration value
# of the `trainer`:
wandb_logger.attach_output_handler(
train_evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="training",
metric_names=["nll", "accuracy"],
global_step_transform=lambda *_: trainer.state.iteration,
)
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=lambda *_: trainer.state.iteration` to take iteration value
# of the `trainer` instead of `evaluator`.
wandb_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=lambda *_: trainer.state.iteration,
)
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
wandb_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer,
param_name='lr' # optional
)
# We need to close the logger when we are done
wandb_logger.close()
If you want to log model gradients, the model call graph, etc., use the logger as a wrapper of wandb. Refer
to the documentation of wandb.watch for details:
.. code-block:: python
wandb_logger = WandBLogger(
entity="shared",
project="pytorch-ignite-integration",
name="cnn-mnist",
config={"max_epochs": 10},
tags=["pytorch-ignite", "minst"]
)
model = torch.nn.Sequential(...)
wandb_logger.watch(model)
For model checkpointing, Weights & Biases creates a local run dir, and automatically synchronizes all
files saved there at the end of the run. You can just use the `wandb_logger.run.dir` as the path for the
`ModelCheckpoint`:
.. code-block:: python
from ignite.handlers import ModelCheckpoint
def score_function(engine):
return engine.state.metrics['accuracy']
model_checkpoint = ModelCheckpoint(
wandb_logger.run.dir, n_saved=2, filename_prefix='best',
require_empty=False, score_function=score_function,
score_name="validation_accuracy",
global_step_transform=global_step_from_engine(trainer)
)
evaluator.add_event_handler(Events.COMPLETED, model_checkpoint, {'model': model})
"""
def __init__(self, *args: Any, **kwargs: Any):
try:
import wandb
self._wandb = wandb
except ImportError:
raise ModuleNotFoundError(
"This contrib module requires wandb to be installed. "
"You man install wandb with the command:\n pip install wandb\n"
)
if kwargs.get("init", True):
wandb.init(*args, **kwargs)
def __getattr__(self, attr: Any) -> Any:
return getattr(self._wandb, attr)
def close(self) -> None:
self._wandb.finish()
def _create_output_handler(self, *args: Any, **kwargs: Any) -> "OutputHandler":
return OutputHandler(*args, **kwargs)
def _create_opt_params_handler(self, *args: Any, **kwargs: Any) -> "OptimizerParamsHandler":
return OptimizerParamsHandler(*args, **kwargs)
class OutputHandler(BaseOutputHandler):
"""Helper handler to log engine's output and/or metrics
Args:
tag: common title for all produced plots. For example, "training"
metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
output_transform: output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{"loss": loss1, "another_loss": loss2}` to label the plot
with corresponding keys.
global_step_transform: global step transform function to output a desired global step.
Input of the function is `(engine, event_name)`. Output of function should be an integer.
Default is None, global_step based on attached engine. If provided,
uses function output as global_step. To setup global step from another engine, please use
:meth:`~ignite.contrib.handlers.wandb_logger.global_step_from_engine`.
sync: If set to False, calls to log are processed in a separate thread. Default (None) uses
the default behaviour of ``wandb.log``.
Examples:
.. code-block:: python
from ignite.contrib.handlers.wandb_logger import *
# Create a logger. All parameters are optional. See documentation
# on wandb.init for details.
wandb_logger = WandBLogger(
entity="shared",
project="pytorch-ignite-integration",
name="cnn-mnist",
config={"max_epochs": 10},
tags=["pytorch-ignite", "minst"]
)
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=lambda *_: trainer.state.iteration,` to take iteration value
# of the `trainer`:
wandb_logger.attach(
evaluator,
log_handler=OutputHandler(
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=lambda *_: trainer.state.iteration,
),
event_name=Events.EPOCH_COMPLETED
)
# or equivalently
wandb_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=lambda *_: trainer.state.iteration,
)
Another example, where the model is evaluated every 500 iterations:
.. code-block:: python
from ignite.contrib.handlers.wandb_logger import *
@trainer.on(Events.ITERATION_COMPLETED(every=500))
def evaluate(engine):
evaluator.run(validation_set, max_epochs=1)
# Create a logger. All parameters are optional. See documentation
# on wandb.init for details.
wandb_logger = WandBLogger(
entity="shared",
project="pytorch-ignite-integration",
name="cnn-mnist",
config={"max_epochs": 10},
tags=["pytorch-ignite", "minst"]
)
def global_step_transform(*args, **kwargs):
return trainer.state.iteration
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# every 500 iterations. Since evaluator engine does not have access to the training iteration, we
# provide a global_step_transform to return the trainer.state.iteration for the global_step, each time
# evaluator metrics are plotted on Weights & Biases.
wandb_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metrics=["nll", "accuracy"],
global_step_transform=global_step_transform
)
Another example where the State Attributes ``trainer.state.alpha`` and ``trainer.state.beta``
are also logged along with the NLL and Accuracy after each iteration:
.. code-block:: python
wandb_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
metrics=["nll", "accuracy"],
state_attributes=["alpha", "beta"],
)
Example of `global_step_transform`:
.. code-block:: python
def global_step_transform(engine, event_name):
return engine.state.get_event_attrib_value(event_name)
.. versionchanged:: 0.4.7
accepts an optional list of `state_attributes`
"""
def __init__(
self,
tag: str,
metric_names: Optional[List[str]] = None,
output_transform: Optional[Callable] = None,
global_step_transform: Optional[Callable[[Engine, Union[str, Events]], int]] = None,
sync: Optional[bool] = None,
state_attributes: Optional[List[str]] = None,
):
super().__init__(tag, metric_names, output_transform, global_step_transform, state_attributes)
self.sync = sync
def __call__(self, engine: Engine, logger: WandBLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, WandBLogger):
raise RuntimeError(f"Handler '{self.__class__.__name__}' works only with WandBLogger.")
global_step = self.global_step_transform(engine, event_name)
if not isinstance(global_step, int):
raise TypeError(
f"global_step must be int, got {type(global_step)}."
" Please check the output of global_step_transform."
)
metrics = self._setup_output_metrics_state_attrs(engine, log_text=True, key_tuple=False)
logger.log(metrics, step=global_step, sync=self.sync)
class OptimizerParamsHandler(BaseOptimizerParamsHandler):
"""Helper handler to log optimizer parameters
Args:
optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
param_name: parameter name
tag: common title for all produced plots. For example, "generator"
sync: If set to False, calls to log are processed in a separate thread. Default (None) uses
the default behaviour of ``wandb.log``.
Examples:
.. code-block:: python
from ignite.contrib.handlers.wandb_logger import *
# Create a logger. All parameters are optional. See documentation
# on wandb.init for details.
wandb_logger = WandBLogger(
entity="shared",
project="pytorch-ignite-integration",
name="cnn-mnist",
config={"max_epochs": 10},
tags=["pytorch-ignite", "minst"]
)
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
wandb_logger.attach(
trainer,
log_handler=OptimizerParamsHandler(optimizer),
event_name=Events.ITERATION_STARTED
)
# or equivalently
wandb_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer
)
"""
def __init__(
self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None, sync: Optional[bool] = None
):
super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
self.sync = sync
def __call__(self, engine: Engine, logger: WandBLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, WandBLogger):
raise RuntimeError("Handler OptimizerParamsHandler works only with WandBLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
params = {
f"{tag_prefix}{self.param_name}/group_{i}": float(param_group[self.param_name])
for i, param_group in enumerate(self.optimizer.param_groups)
}
logger.log(params, step=global_step, sync=self.sync)
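# --- Illustrative sketch (not part of the library) ---------------------------------
# As the constructor above shows, passing ``init=False`` skips the ``wandb.init`` call,
# so the logger can reuse a run that was started elsewhere. The run settings are
# placeholders, the `trainer` engine is assumed to exist and the function name is
# hypothetical.
def _example_wandb_reuse_existing_run(trainer):
    import wandb

    from ignite.engine import Events

    wandb.init(project="pytorch-ignite-integration", name="cnn-mnist")
    wandb_logger = WandBLogger(init=False)  # reuse the run created above
    wandb_logger.attach_output_handler(
        trainer,
        event_name=Events.ITERATION_COMPLETED,
        tag="training",
        output_transform=lambda loss: {"loss": loss},
        sync=False,  # hand calls to wandb.log off to a separate thread (see OutputHandler)
    )
    return wandb_logger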
|
"""Visdom logger and its helper handlers."""
import os
from typing import Any, Callable, cast, Dict, List, Optional, Union
import torch
import torch.nn as nn
from torch.optim import Optimizer
from ignite.contrib.handlers.base_logger import (
BaseLogger,
BaseOptimizerParamsHandler,
BaseOutputHandler,
BaseWeightsScalarHandler,
)
from ignite.engine import Engine, Events
from ignite.handlers import global_step_from_engine
__all__ = [
"VisdomLogger",
"OptimizerParamsHandler",
"OutputHandler",
"WeightsScalarHandler",
"GradsScalarHandler",
"global_step_from_engine",
]
class VisdomLogger(BaseLogger):
"""
VisdomLogger handler to log metrics, model/optimizer parameters, gradients during the training and validation.
This class requires `visdom <https://github.com/fossasia/visdom/>`_ package to be installed:
.. code-block:: bash
pip install git+https://github.com/fossasia/visdom.git
Args:
server: visdom server URL. It can be also specified by environment variable `VISDOM_SERVER_URL`
port: visdom server's port. It can be also specified by environment variable `VISDOM_PORT`
num_workers: number of workers to use in `concurrent.futures.ThreadPoolExecutor` to post data to
visdom server. Default, `num_workers=1`. If `num_workers=0`, the logger uses the main thread. If using
Python 2.7 and `num_workers>0`, the package `futures` should be installed: `pip install futures`
kwargs: kwargs to pass into
`visdom.Visdom <https://github.com/fossasia/visdom#visdom-arguments-python-only>`_.
Note:
We can also specify username/password using environment variables: VISDOM_USERNAME, VISDOM_PASSWORD
.. warning::
Frequent logging, e.g. when logger is attached to `Events.ITERATION_COMPLETED`, can slow down the run if the
main thread is used to send the data to visdom server (`num_workers=0`). To avoid this situation we can either
log less frequently or set `num_workers=1`.
Examples:
.. code-block:: python
from ignite.contrib.handlers.visdom_logger import *
# Create a logger
vd_logger = VisdomLogger()
# Attach the logger to the trainer to log training loss at each iteration
vd_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
output_transform=lambda loss: {"loss": loss}
)
# Attach the logger to the evaluator on the training dataset and log NLL, Accuracy metrics after each epoch
# We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer` instead of `train_evaluator`.
vd_logger.attach_output_handler(
train_evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="training",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer),
)
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch of the
# `trainer` instead of `evaluator`.
vd_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer),
)
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
vd_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer,
param_name='lr' # optional
)
# Attach the logger to the trainer to log model's weights norm after each iteration
vd_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(model)
)
# Attach the logger to the trainer to log model's gradients norm after each iteration
vd_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(model)
)
# We need to close the logger when we are done
vd_logger.close()
It is also possible to use the logger as context manager:
.. code-block:: python
from ignite.contrib.handlers.visdom_logger import *
with VisdomLogger() as vd_logger:
trainer = Engine(update_fn)
# Attach the logger to the trainer to log training loss at each iteration
vd_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
output_transform=lambda loss: {"loss": loss}
)
.. versionchanged:: 0.4.7
accepts an optional list of `state_attributes`
"""
def __init__(
self,
server: Optional[str] = None,
port: Optional[int] = None,
num_workers: int = 1,
raise_exceptions: bool = True,
**kwargs: Any,
):
try:
import visdom
except ImportError:
raise ModuleNotFoundError(
"This contrib module requires visdom package. "
"Please install it with command:\n"
"pip install git+https://github.com/fossasia/visdom.git"
)
if num_workers > 0:
# If visdom is installed, one of its dependencies `tornado`
# also requires `futures` to be installed.
# Let's check anyway if we can import it.
try:
from concurrent.futures import ThreadPoolExecutor
except ImportError:
raise ModuleNotFoundError(
"This contrib module requires concurrent.futures module"
"Please install it with command:\n"
"pip install futures"
)
if server is None:
server = cast(str, os.environ.get("VISDOM_SERVER_URL", "localhost"))
if port is None:
port = int(os.environ.get("VISDOM_PORT", 8097))
if "username" not in kwargs:
username = os.environ.get("VISDOM_USERNAME", None)
kwargs["username"] = username
if "password" not in kwargs:
password = os.environ.get("VISDOM_PASSWORD", None)
kwargs["password"] = password
self.vis = visdom.Visdom(server=server, port=port, raise_exceptions=raise_exceptions, **kwargs)
if not self.vis.offline and not self.vis.check_connection(): # type: ignore[attr-defined]
raise RuntimeError(f"Failed to connect to Visdom server at {server}. Did you run python -m visdom.server ?")
self.executor: Union[_DummyExecutor, "ThreadPoolExecutor"] = _DummyExecutor()
if num_workers > 0:
self.executor = ThreadPoolExecutor(max_workers=num_workers)
def _save(self) -> None:
self.vis.save([self.vis.env]) # type: ignore[attr-defined]
def close(self) -> None:
self.executor.shutdown()
self.vis.close()
def _create_output_handler(self, *args: Any, **kwargs: Any) -> "OutputHandler":
return OutputHandler(*args, **kwargs)
def _create_opt_params_handler(self, *args: Any, **kwargs: Any) -> "OptimizerParamsHandler":
return OptimizerParamsHandler(*args, **kwargs)
class _BaseVisDrawer:
def __init__(self, show_legend: bool = False):
self.windows: Dict[str, Any] = {}
self.show_legend = show_legend
def add_scalar(
self, logger: VisdomLogger, k: str, v: Union[str, float, torch.Tensor], event_name: Any, global_step: int
) -> None:
"""
Helper method to log a scalar with VisdomLogger.
Args:
logger: visdom logger
k: scalar name which is used to set window title and y-axis label
v: scalar value, y-axis value
event_name: Event name which is used to setup x-axis label. Valid events are from
:class:`~ignite.engine.events.Events` or any `event_name` added by
:meth:`~ignite.engine.engine.Engine.register_events`.
global_step: global step, x-axis value
"""
if k not in self.windows:
self.windows[k] = {
"win": None,
"opts": {"title": k, "xlabel": str(event_name), "ylabel": k, "showlegend": self.show_legend},
}
update = None if self.windows[k]["win"] is None else "append"
kwargs = {
"X": [global_step],
"Y": [v],
"env": logger.vis.env, # type: ignore[attr-defined]
"win": self.windows[k]["win"],
"update": update,
"opts": self.windows[k]["opts"],
"name": k,
}
future = logger.executor.submit(logger.vis.line, **kwargs)
if self.windows[k]["win"] is None:
self.windows[k]["win"] = future.result()
class OutputHandler(BaseOutputHandler, _BaseVisDrawer):
"""Helper handler to log engine's output and/or metrics
Args:
tag: common title for all produced plots. For example, "training"
metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
output_transform: output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{"loss": loss1, "another_loss": loss2}` to label the plot
with corresponding keys.
global_step_transform: global step transform function to output a desired global step.
Input of the function is `(engine, event_name)`. Output of function should be an integer.
Default is None, global_step based on attached engine. If provided,
uses function output as global_step. To setup global step from another engine, please use
:meth:`~ignite.contrib.handlers.visdom_logger.global_step_from_engine`.
show_legend: flag to show legend in the window
state_attributes: list of attributes of the ``trainer.state`` to plot.
Examples:
.. code-block:: python
from ignite.contrib.handlers.visdom_logger import *
# Create a logger
vd_logger = VisdomLogger()
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer`:
vd_logger.attach(
evaluator,
log_handler=OutputHandler(
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
),
event_name=Events.EPOCH_COMPLETED
)
# or equivalently
vd_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
)
Another example, where the model is evaluated every 500 iterations:
.. code-block:: python
from ignite.contrib.handlers.visdom_logger import *
@trainer.on(Events.ITERATION_COMPLETED(every=500))
def evaluate(engine):
evaluator.run(validation_set, max_epochs=1)
vd_logger = VisdomLogger()
def global_step_transform(*args, **kwargs):
return trainer.state.iteration
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# every 500 iterations. Since evaluator engine does not have access to the training iteration, we
# provide a global_step_transform to return the trainer.state.iteration for the global_step, each time
# evaluator metrics are plotted on Visdom.
vd_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metrics=["nll", "accuracy"],
global_step_transform=global_step_transform
)
Another example where the State Attributes ``trainer.state.alpha`` and ``trainer.state.beta``
are also logged along with the NLL and Accuracy after each iteration:
.. code-block:: python
vd_logger.attach(
trainer,
log_handler=OutputHandler(
tag="training",
metric_names=["nll", "accuracy"],
state_attributes=["alpha", "beta"],
),
event_name=Events.ITERATION_COMPLETED
)
Example of `global_step_transform`:
.. code-block:: python
def global_step_transform(engine, event_name):
return engine.state.get_event_attrib_value(event_name)
"""
def __init__(
self,
tag: str,
metric_names: Optional[List[str]] = None,
output_transform: Optional[Callable] = None,
global_step_transform: Optional[Callable[[Engine, Union[str, Events]], int]] = None,
show_legend: bool = False,
state_attributes: Optional[List[str]] = None,
):
super(OutputHandler, self).__init__(
tag, metric_names, output_transform, global_step_transform, state_attributes
)
_BaseVisDrawer.__init__(self, show_legend=show_legend)
def __call__(self, engine: Engine, logger: VisdomLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, VisdomLogger):
raise RuntimeError("Handler 'OutputHandler' works only with VisdomLogger")
metrics = self._setup_output_metrics_state_attrs(engine, key_tuple=False)
global_step = self.global_step_transform(engine, event_name)
if not isinstance(global_step, int):
raise TypeError(
f"global_step must be int, got {type(global_step)}."
" Please check the output of global_step_transform."
)
for key, value in metrics.items():
self.add_scalar(logger, key, value, event_name, global_step)
logger._save()
class OptimizerParamsHandler(BaseOptimizerParamsHandler, _BaseVisDrawer):
"""Helper handler to log optimizer parameters
Args:
optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
param_name: parameter name
tag: common title for all produced plots. For example, "generator"
show_legend: flag to show legend in the window
Examples:
.. code-block:: python
from ignite.contrib.handlers.visdom_logger import *
# Create a logger
vd_logger = VisdomLogger()
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
vd_logger.attach(
trainer,
log_handler=OptimizerParamsHandler(optimizer),
event_name=Events.ITERATION_STARTED
)
# or equivalently
vd_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer
)
"""
def __init__(
self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None, show_legend: bool = False
):
super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
_BaseVisDrawer.__init__(self, show_legend=show_legend)
def __call__(self, engine: Engine, logger: VisdomLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, VisdomLogger):
raise RuntimeError("Handler OptimizerParamsHandler works only with VisdomLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
params = {
f"{tag_prefix}{self.param_name}/group_{i}": float(param_group[self.param_name])
for i, param_group in enumerate(self.optimizer.param_groups)
}
for k, v in params.items():
self.add_scalar(logger, k, v, event_name, global_step)
logger._save()
class WeightsScalarHandler(BaseWeightsScalarHandler, _BaseVisDrawer):
"""Helper handler to log model's weights as scalars.
The handler iterates over the named parameters of the model, applies the reduction function to each parameter
to produce a scalar and then logs the scalar.
Args:
model: model to log weights
reduction: function to reduce parameters into scalar
tag: common title for all produced plots. For example, "generator"
show_legend: flag to show legend in the window
Examples:
.. code-block:: python
from ignite.contrib.handlers.visdom_logger import *
# Create a logger
vd_logger = VisdomLogger()
# Attach the logger to the trainer to log model's weights norm after each iteration
vd_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(model, reduction=torch.norm)
)
"""
def __init__(
self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None, show_legend: bool = False
):
super(WeightsScalarHandler, self).__init__(model, reduction, tag=tag)
_BaseVisDrawer.__init__(self, show_legend=show_legend)
def __call__(self, engine: Engine, logger: VisdomLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, VisdomLogger):
raise RuntimeError("Handler 'WeightsScalarHandler' works only with VisdomLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
for name, p in self.model.named_parameters():
name = name.replace(".", "/")
k = f"{tag_prefix}weights_{self.reduction.__name__}/{name}"
v = self.reduction(p.data)
self.add_scalar(logger, k, v, event_name, global_step)
logger._save()
class GradsScalarHandler(BaseWeightsScalarHandler, _BaseVisDrawer):
"""Helper handler to log model's gradients as scalars.
The handler iterates over the gradients of the named parameters of the model, applies the reduction function to
each gradient to produce a scalar and then logs the scalar.
Args:
model: model to log weights
reduction: function to reduce parameters into scalar
tag: common title for all produced plots. For example, "generator"
show_legend: flag to show legend in the window
Examples:
.. code-block:: python
from ignite.contrib.handlers.visdom_logger import *
# Create a logger
vd_logger = VisdomLogger()
# Attach the logger to the trainer to log model's gradients norm after each iteration
vd_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(model, reduction=torch.norm)
)
"""
def __init__(
self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None, show_legend: bool = False
):
super(GradsScalarHandler, self).__init__(model, reduction, tag)
_BaseVisDrawer.__init__(self, show_legend=show_legend)
def __call__(self, engine: Engine, logger: VisdomLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, VisdomLogger):
raise RuntimeError("Handler 'GradsScalarHandler' works only with VisdomLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
for name, p in self.model.named_parameters():
if p.grad is None:
continue
name = name.replace(".", "/")
k = f"{tag_prefix}grads_{self.reduction.__name__}/{name}"
v = self.reduction(p.grad)
self.add_scalar(logger, k, v, event_name, global_step)
logger._save()
class _DummyExecutor:
class _DummyFuture:
def __init__(self, result: Any) -> None:
self._output = result
def result(self) -> Any:
return self._output
def __init__(self, *args: Any, **kwargs: Any) -> None:
pass
def submit(self, fn: Callable, **kwargs: Any) -> "_DummyFuture":
return _DummyExecutor._DummyFuture(fn(**kwargs))
def shutdown(self, *args: Any, **kwargs: Any) -> None:
pass
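# --- Illustrative sketch (not part of the library) ---------------------------------
# As the constructor above shows, the server address, port and credentials can be
# supplied through environment variables instead of arguments. The values below are
# placeholders; a running visdom server is assumed and the function name is
# hypothetical.
def _example_visdom_from_environment():
    os.environ["VISDOM_SERVER_URL"] = "localhost"
    os.environ["VISDOM_PORT"] = "8097"
    # num_workers=0 sends data from the main thread (slower when logging every
    # iteration); num_workers>0 posts data through a ThreadPoolExecutor.
    return VisdomLogger(num_workers=1)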
|
""" ``ignite.contrib.handlers.lr_finder`` was moved to ``ignite.handlers.lr_finder``.
Note:
``ignite.contrib.handlers.lr_finder`` was moved to ``ignite.handlers.lr_finder``.
Please refer to :mod:`~ignite.handlers.lr_finder`.
"""
import warnings
removed_in = "0.6.0"
deprecation_warning = (
f"{__file__} has been moved to /ignite/handlers/lr_finder.py"
+ (f" and will be removed in version {removed_in}" if removed_in else "")
+ ".\n Please refer to the documentation for more details."
)
warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
from ignite.handlers.lr_finder import FastaiLRFinder
__all__ = [
"FastaiLRFinder",
]
FastaiLRFinder = FastaiLRFinder
|
from ignite.contrib.handlers.clearml_logger import ClearMLLogger
from ignite.contrib.handlers.mlflow_logger import MLflowLogger
from ignite.contrib.handlers.neptune_logger import NeptuneLogger
from ignite.contrib.handlers.polyaxon_logger import PolyaxonLogger
from ignite.contrib.handlers.tensorboard_logger import TensorboardLogger
from ignite.contrib.handlers.tqdm_logger import ProgressBar
from ignite.contrib.handlers.visdom_logger import VisdomLogger
from ignite.contrib.handlers.wandb_logger import WandBLogger
from ignite.handlers import EpochOutputStore, global_step_from_engine
from ignite.handlers.lr_finder import FastaiLRFinder
from ignite.handlers.param_scheduler import (
ConcatScheduler,
CosineAnnealingScheduler,
create_lr_scheduler_with_warmup,
LinearCyclicalScheduler,
LRScheduler,
ParamGroupScheduler,
PiecewiseLinear,
)
from ignite.handlers.time_profilers import BasicTimeProfiler, HandlersTimeProfiler
|
"""Base logger and its helper handlers."""
import numbers
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import torch
import torch.nn as nn
from torch.optim import Optimizer
from ignite.engine import Engine, Events, EventsList, State
from ignite.engine.events import CallableEventWithFilter, RemovableEventHandle
class BaseHandler(metaclass=ABCMeta):
"""Base handler for defining various useful handlers."""
@abstractmethod
def __call__(self, engine: Engine, logger: Any, event_name: Union[str, Events]) -> None:
pass
class BaseWeightsHandler(BaseHandler):
"""
Base handler for logging weights or their gradients.
"""
def __init__(
self,
model: nn.Module,
tag: Optional[str] = None,
whitelist: Optional[Union[List[str], Callable[[str, nn.Parameter], bool]]] = None,
):
if not isinstance(model, torch.nn.Module):
raise TypeError(f"Argument model should be of type torch.nn.Module, but given {type(model)}")
self.model = model
self.tag = tag
weights = {}
if whitelist is None:
weights = dict(model.named_parameters())
elif callable(whitelist):
for n, p in model.named_parameters():
if whitelist(n, p):
weights[n] = p
else:
for n, p in model.named_parameters():
for item in whitelist:
if n.startswith(item):
weights[n] = p
self.weights = weights.items()
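# --- Illustrative sketch (not part of the library) ---------------------------------
# The whitelist filtering above accepts either a list of fully-qualified name prefixes
# (matched with ``str.startswith``) or a callable ``(name, parameter) -> bool``. A tiny
# self-contained illustration; the no-op subclass, helper name and model are hypothetical.
def _example_whitelist_filtering():
    class _NoOpWeightsHandler(BaseWeightsHandler):
        # concrete subclass only to exercise the whitelist filtering in __init__
        def __call__(self, engine: Engine, logger: Any, event_name: Union[str, Events]) -> None:
            pass

    model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
    by_prefix = _NoOpWeightsHandler(model, whitelist=["2."])  # parameters of the last Linear
    by_callable = _NoOpWeightsHandler(model, whitelist=lambda name, p: name.endswith("bias"))
    # e.g. ['2.weight', '2.bias'] and ['0.bias', '2.bias']
    return [n for n, _ in by_prefix.weights], [n for n, _ in by_callable.weights]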
class BaseOptimizerParamsHandler(BaseHandler):
"""
Base handler for logging optimizer parameters
"""
def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None):
if not (
isinstance(optimizer, Optimizer)
or (hasattr(optimizer, "param_groups") and isinstance(optimizer.param_groups, Sequence))
):
raise TypeError(
"Argument optimizer should be torch.optim.Optimizer or has attribute 'param_groups' as list/tuple, "
f"but given {type(optimizer)}"
)
self.optimizer = optimizer
self.param_name = param_name
self.tag = tag
class BaseOutputHandler(BaseHandler):
"""
Helper handler to log engine's output and/or metrics
"""
def __init__(
self,
tag: str,
metric_names: Optional[Union[str, List[str]]] = None,
output_transform: Optional[Callable] = None,
global_step_transform: Optional[Callable[[Engine, Union[str, Events]], int]] = None,
state_attributes: Optional[List[str]] = None,
):
if metric_names is not None:
if not (isinstance(metric_names, list) or (isinstance(metric_names, str) and metric_names == "all")):
raise TypeError(
f"metric_names should be either a list or equal 'all', got {type(metric_names)} instead."
)
if output_transform is not None and not callable(output_transform):
raise TypeError(f"output_transform should be a function, got {type(output_transform)} instead.")
if output_transform is None and metric_names is None and state_attributes is None:
raise ValueError("Either metric_names, output_transform or state_attributes should be defined")
if global_step_transform is not None and not callable(global_step_transform):
raise TypeError(f"global_step_transform should be a function, got {type(global_step_transform)} instead.")
if global_step_transform is None:
def global_step_transform(engine: Engine, event_name: Union[str, Events]) -> int:
return engine.state.get_event_attrib_value(event_name)
self.tag = tag
self.metric_names = metric_names
self.output_transform = output_transform
self.global_step_transform = global_step_transform
self.state_attributes = state_attributes
def _setup_output_metrics_state_attrs(
self, engine: Engine, log_text: Optional[bool] = False, key_tuple: Optional[bool] = True
) -> Dict[Any, Any]:
"""Helper method to setup metrics and state attributes to log"""
metrics_state_attrs = OrderedDict()
if self.metric_names is not None:
if isinstance(self.metric_names, str) and self.metric_names == "all":
metrics_state_attrs = OrderedDict(engine.state.metrics)
else:
for name in self.metric_names:
if name not in engine.state.metrics:
warnings.warn(
f"Provided metric name '{name}' is missing "
f"in engine's state metrics: {list(engine.state.metrics.keys())}"
)
continue
metrics_state_attrs[name] = engine.state.metrics[name]
if self.output_transform is not None:
output_dict = self.output_transform(engine.state.output)
if not isinstance(output_dict, dict):
output_dict = {"output": output_dict}
metrics_state_attrs.update(output_dict)
if self.state_attributes is not None:
metrics_state_attrs.update({name: getattr(engine.state, name, None) for name in self.state_attributes})
metrics_state_attrs_dict: Dict[Any, Union[str, float, numbers.Number]] = OrderedDict()
def key_tuple_tf(tag: str, name: str, *args: str) -> Tuple[str, ...]:
return (tag, name) + args
def key_str_tf(tag: str, name: str, *args: str) -> str:
return "/".join((tag, name) + args)
key_tf = key_tuple_tf if key_tuple else key_str_tf
for name, value in metrics_state_attrs.items():
if isinstance(value, numbers.Number):
metrics_state_attrs_dict[key_tf(self.tag, name)] = value
elif isinstance(value, torch.Tensor) and value.ndimension() == 0:
metrics_state_attrs_dict[key_tf(self.tag, name)] = value.item()
elif isinstance(value, torch.Tensor) and value.ndimension() == 1:
for i, v in enumerate(value):
metrics_state_attrs_dict[key_tf(self.tag, name, str(i))] = v.item()
else:
if isinstance(value, str) and log_text:
metrics_state_attrs_dict[key_tf(self.tag, name)] = value
else:
warnings.warn(f"Logger output_handler can not log metrics value type {type(value)}")
return metrics_state_attrs_dict
class BaseWeightsScalarHandler(BaseWeightsHandler):
"""
Helper handler to log model's weights or gradients as scalars.
"""
def __init__(
self,
model: nn.Module,
reduction: Callable[[torch.Tensor], Union[float, torch.Tensor]] = torch.norm,
tag: Optional[str] = None,
whitelist: Optional[Union[List[str], Callable[[str, nn.Parameter], bool]]] = None,
):
super(BaseWeightsScalarHandler, self).__init__(model, tag=tag, whitelist=whitelist)
if not callable(reduction):
raise TypeError(f"Argument reduction should be callable, but given {type(reduction)}")
def _is_0D_tensor(t: Any) -> bool:
return isinstance(t, torch.Tensor) and t.ndimension() == 0
# Test reduction function on a tensor
o = reduction(torch.ones(4, 2))
if not (isinstance(o, numbers.Number) or _is_0D_tensor(o)):
raise TypeError(f"Output of the reduction function should be a scalar, but got {type(o)}")
self.reduction = reduction
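# --- Illustrative sketch (not part of the library) ---------------------------------
# The check above requires ``reduction`` to map a tensor to a Python number or a 0-D
# tensor; ``torch.norm`` satisfies this, and so does a custom callable such as the mean
# of absolute values below. The no-op subclass and helper names are hypothetical.
def _example_custom_reduction():
    def mean_abs(t: torch.Tensor) -> torch.Tensor:
        # returns a 0-D tensor, so it passes the scalar-output check above
        return t.abs().mean()

    class _NoOpScalarHandler(BaseWeightsScalarHandler):
        def __call__(self, engine: Engine, logger: Any, event_name: Union[str, Events]) -> None:
            pass

    return _NoOpScalarHandler(nn.Linear(4, 2), reduction=mean_abs, tag="debug")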
class BaseLogger(metaclass=ABCMeta):
"""
Base logger handler. See implementations: TensorboardLogger, VisdomLogger, PolyaxonLogger, MLflowLogger, ...
"""
def attach(
self,
engine: Engine,
log_handler: Callable,
event_name: Union[str, Events, CallableEventWithFilter, EventsList],
*args: Any,
**kwargs: Any,
) -> RemovableEventHandle:
"""Attach the logger to the engine and execute `log_handler` function at `event_name` events.
Args:
engine: engine object.
log_handler: a logging handler to execute
event_name: event to attach the logging handler to. Valid events are from
:class:`~ignite.engine.events.Events` or :class:`~ignite.engine.events.EventsList` or any `event_name`
added by :meth:`~ignite.engine.engine.Engine.register_events`.
args: args forwarded to the `log_handler` method
kwargs: kwargs forwarded to the `log_handler` method
Returns:
:class:`~ignite.engine.events.RemovableEventHandle`, which can be used to remove the handler.
"""
if isinstance(event_name, EventsList):
for name in event_name:
if name not in State.event_to_attr:
raise RuntimeError(f"Unknown event name '{name}'")
engine.add_event_handler(name, log_handler, self, name)
return RemovableEventHandle(event_name, log_handler, engine)
else:
if event_name not in State.event_to_attr:
raise RuntimeError(f"Unknown event name '{event_name}'")
return engine.add_event_handler(event_name, log_handler, self, event_name, *args, **kwargs)
def attach_output_handler(self, engine: Engine, event_name: Any, *args: Any, **kwargs: Any) -> RemovableEventHandle:
"""Shortcut method to attach `OutputHandler` to the logger.
Args:
engine: engine object.
event_name: event to attach the logging handler to. Valid events are from
:class:`~ignite.engine.events.Events` or any `event_name` added by
:meth:`~ignite.engine.engine.Engine.register_events`.
args: args to initialize `OutputHandler`
kwargs: kwargs to initialize `OutputHandler`
Returns:
:class:`~ignite.engine.events.RemovableEventHandle`, which can be used to remove the handler.
"""
return self.attach(engine, self._create_output_handler(*args, **kwargs), event_name=event_name)
def attach_opt_params_handler(
self, engine: Engine, event_name: Any, *args: Any, **kwargs: Any
) -> RemovableEventHandle:
"""Shortcut method to attach `OptimizerParamsHandler` to the logger.
Args:
engine: engine object.
event_name: event to attach the logging handler to. Valid events are from
:class:`~ignite.engine.events.Events` or any `event_name` added by
:meth:`~ignite.engine.engine.Engine.register_events`.
args: args to initialize `OptimizerParamsHandler`
kwargs: kwargs to initialize `OptimizerParamsHandler`
Returns:
:class:`~ignite.engine.events.RemovableEventHandle`, which can be used to remove the handler.
.. versionchanged:: 0.4.3
Added missing return statement.
"""
return self.attach(engine, self._create_opt_params_handler(*args, **kwargs), event_name=event_name)
@abstractmethod
def _create_output_handler(self, engine: Engine, *args: Any, **kwargs: Any) -> Callable:
pass
@abstractmethod
def _create_opt_params_handler(self, *args: Any, **kwargs: Any) -> Callable:
pass
def __enter__(self) -> "BaseLogger":
return self
def __exit__(self, type: Any, value: Any, traceback: Any) -> None:
self.close()
def close(self) -> None:
pass
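# --- Illustrative sketch (not part of the library) ---------------------------------
# A concrete logger only has to provide the two factory methods used by
# attach_output_handler / attach_opt_params_handler above. A minimal stdout logger
# built from the base classes in this module; all names below are hypothetical.
class _PrintOutputHandler(BaseOutputHandler):
    def __call__(self, engine: Engine, logger: "_PrintLogger", event_name: Union[str, Events]) -> None:
        global_step = self.global_step_transform(engine, event_name)
        metrics = self._setup_output_metrics_state_attrs(engine, key_tuple=False)
        for key, value in metrics.items():
            print(f"step={global_step} {key}={value}")


class _PrintOptimizerParamsHandler(BaseOptimizerParamsHandler):
    def __call__(self, engine: Engine, logger: "_PrintLogger", event_name: Union[str, Events]) -> None:
        global_step = engine.state.get_event_attrib_value(event_name)
        for i, group in enumerate(self.optimizer.param_groups):
            print(f"step={global_step} {self.param_name}/group_{i}={group[self.param_name]}")


class _PrintLogger(BaseLogger):
    def _create_output_handler(self, *args: Any, **kwargs: Any) -> Callable:
        return _PrintOutputHandler(*args, **kwargs)

    def _create_opt_params_handler(self, *args: Any, **kwargs: Any) -> Callable:
        return _PrintOptimizerParamsHandler(*args, **kwargs)


# Usage, assuming an existing `trainer` engine whose update function returns the loss:
#     _PrintLogger().attach_output_handler(
#         trainer,
#         event_name=Events.ITERATION_COMPLETED,
#         tag="training",
#         output_transform=lambda loss: {"loss": loss},
#     )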
|
""" ``ignite.contrib.handlers.time_profilers.py`` was moved to ``ignite.handlers.time_profilers``.
Note:
``ignite.contrib.handlers.time_profilers`` was moved to ``ignite.handlers.time_profilers``.
Please refer to :mod:`~ignite.handlers.time_profilers`.
"""
import warnings
removed_in = "0.6.0"
deprecation_warning = (
f"{__file__} has been moved to /ignite/handlers/time_profilers.py"
+ (f" and will be removed in version {removed_in}" if removed_in else "")
+ ".\n Please refer to the documentation for more details."
)
warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
from ignite.handlers.time_profilers import BasicTimeProfiler, HandlersTimeProfiler
__all__ = [
"BasicTimeProfiler",
"HandlersTimeProfiler",
]
BasicTimeProfiler = BasicTimeProfiler
HandlersTimeProfiler = HandlersTimeProfiler
|
"""ClearML logger and its helper handlers."""
import os
import tempfile
import warnings
from collections import defaultdict
from datetime import datetime
from enum import Enum
from typing import Any, Callable, DefaultDict, List, Mapping, Optional, Tuple, Type, Union
from torch.optim import Optimizer
import ignite.distributed as idist
from ignite.contrib.handlers.base_logger import (
BaseLogger,
BaseOptimizerParamsHandler,
BaseOutputHandler,
BaseWeightsHandler,
BaseWeightsScalarHandler,
)
from ignite.engine import Engine, Events
from ignite.handlers import global_step_from_engine
from ignite.handlers.checkpoint import DiskSaver
__all__ = [
"ClearMLLogger",
"ClearMLSaver",
"OptimizerParamsHandler",
"OutputHandler",
"WeightsScalarHandler",
"WeightsHistHandler",
"GradsScalarHandler",
"GradsHistHandler",
"global_step_from_engine",
]
class ClearMLLogger(BaseLogger):
"""
`ClearML <https://github.com/allegroai/clearml>`_ handler to log metrics, text, model/optimizer parameters,
plots during training and validation.
Also supports model checkpoint logging and upload to the storage solution of your choice (e.g. ClearML File server,
S3 bucket etc.).
.. code-block:: bash
pip install clearml
clearml-init
Args:
kwargs: Keyword arguments accepted from ``Task.init`` method.
All arguments are optional. If a ClearML Task has already been created,
kwargs will be ignored and the current ClearML Task will be used.
Examples:
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
# Create a logger
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Attach the logger to the trainer to log training loss at each iteration
clearml_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
output_transform=lambda loss: {"loss": loss}
)
# Attach the logger to the evaluator on the training dataset and log NLL, Accuracy metrics after each epoch
# We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer` instead of `train_evaluator`.
clearml_logger.attach_output_handler(
train_evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="training",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer),
)
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch of the
# `trainer` instead of `evaluator`.
clearml_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer),
)
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
clearml_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer,
param_name='lr' # optional
)
# Attach the logger to the trainer to log model's weights norm after each iteration
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(model)
)
"""
def __init__(self, **kwargs: Any):
try:
from clearml import Task
from clearml.binding.frameworks.tensorflow_bind import WeightsGradientHistHelper
except ImportError:
raise ModuleNotFoundError(
"This contrib module requires clearml to be installed. "
"You may install clearml using: \n pip install clearml \n"
)
experiment_kwargs = {k: v for k, v in kwargs.items() if k not in ("project_name", "task_name", "task_type")}
if self.bypass_mode():
warnings.warn("ClearMLSaver: running in bypass mode")
# Try to retrieve the current ClearML Task before trying to create a new one
self._task = Task.current_task()
if self._task is None:
self._task = Task.init(
project_name=kwargs.get("project_name"),
task_name=kwargs.get("task_name"),
task_type=kwargs.get("task_type", Task.TaskTypes.training),
**experiment_kwargs,
)
self.clearml_logger = self._task.get_logger()
self.grad_helper = WeightsGradientHistHelper(logger=self.clearml_logger, report_freq=1)
@classmethod
def set_bypass_mode(cls, bypass: bool) -> None:
"""
Set ``clearml.Task`` to offline mode.
Will bypass all outside communication, and will save all data and logs to a local session folder.
Should only be used in "standalone mode", when there is no access to the *clearml-server*.
Args:
bypass: If ``True``, all outside communication is skipped.
Data and logs will be stored in a local session folder.
For more information, please refer to `ClearML docs
<https://clear.ml/docs/latest/docs/clearml_sdk/task_sdk/#offline-mode>`_.
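Examples:
A minimal sketch (assuming standalone use without access to a *clearml-server*); call it before creating the logger:
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import ClearMLLogger
ClearMLLogger.set_bypass_mode(True)  # keep all data and logs in a local session folder
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)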
"""
from clearml import Task
setattr(cls, "_bypass", bypass)
Task.set_offline(offline_mode=bypass)
@classmethod
def bypass_mode(cls) -> bool:
"""
Returns the bypass mode state.
Note:
In CI environments (e.g. GitHub Actions, where the ``CI`` env variable is set), bypass_mode automatically
defaults to ``True`` unless overridden specifically with ``ClearMLLogger.set_bypass_mode(False)``.
For more information, please refer to `ClearML docs
<https://clear.ml/docs/latest/docs/clearml_sdk/task_sdk/#offline-mode>`_.
Returns:
If True, ``clearml.Task`` is in offline mode and all outside communication is skipped.
"""
return getattr(cls, "_bypass", bool(os.environ.get("CI")))
def __getattr__(self, attr: Any) -> Any:
"""
Calls the corresponding method of ``clearml.Logger``.
Args:
attr: methods of the ``clearml.Logger`` class.
"""
return getattr(self.clearml_logger, attr)
def get_task(self) -> Any:
"""
Returns the task context that the logger is reporting.
Returns:
Returns the current task, equivalent to ``clearml.Task.current_task()``.
"""
return self._task
def close(self) -> None:
self.clearml_logger.flush()
def _create_output_handler(self, *args: Any, **kwargs: Any) -> "OutputHandler":
return OutputHandler(*args, **kwargs)
def _create_opt_params_handler(self, *args: Any, **kwargs: Any) -> "OptimizerParamsHandler":
return OptimizerParamsHandler(*args, **kwargs)
class OutputHandler(BaseOutputHandler):
"""Helper handler to log engine's output and/or metrics
Args:
tag: common title for all produced plots. For example, "training"
metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
output_transform: output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{"loss": loss1, "another_loss": loss2}` to label the plot
with corresponding keys.
global_step_transform: global step transform function to output a desired global step.
Input of the function is `(engine, event_name)`. Output of function should be an integer.
Default is None, global_step based on attached engine. If provided,
uses function output as global_step. To setup global step from another engine, please use
:meth:`~ignite.contrib.handlers.clearml_logger.global_step_from_engine`.
state_attributes: list of attributes of the ``trainer.state`` to plot.
Examples:
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
# Create a logger
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer`:
clearml_logger.attach(
evaluator,
log_handler=OutputHandler(
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
),
event_name=Events.EPOCH_COMPLETED
)
# or equivalently
clearml_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
)
Another example, where model is evaluated every 500 iterations:
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
@trainer.on(Events.ITERATION_COMPLETED(every=500))
def evaluate(engine):
evaluator.run(validation_set, max_epochs=1)
# Create a logger
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
def global_step_transform(*args, **kwargs):
return trainer.state.iteration
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# every 500 iterations. Since evaluator engine does not have access to the training iteration, we
# provide a global_step_transform to return the trainer.state.iteration for the global_step, each time
# evaluator metrics are plotted on ClearML.
clearml_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metrics=["nll", "accuracy"],
global_step_transform=global_step_transform
)
Another example where the State Attributes ``trainer.state.alpha`` and ``trainer.state.beta``
are also logged along with the NLL and Accuracy after each iteration:
.. code-block:: python
clearml_logger.attach(
trainer,
log_handler=OutputHandler(
tag="training",
metric_names=["nll", "accuracy"],
state_attributes=["alpha", "beta"],
),
event_name=Events.ITERATION_COMPLETED
)
Example of `global_step_transform`
.. code-block:: python
def global_step_transform(engine, event_name):
return engine.state.get_event_attrib_value(event_name)
.. versionchanged:: 0.4.7
accepts an optional list of `state_attributes`
"""
def __init__(
self,
tag: str,
metric_names: Optional[List[str]] = None,
output_transform: Optional[Callable] = None,
global_step_transform: Optional[Callable[[Engine, Union[str, Events]], int]] = None,
state_attributes: Optional[List[str]] = None,
):
super(OutputHandler, self).__init__(
tag, metric_names, output_transform, global_step_transform, state_attributes
)
def __call__(self, engine: Engine, logger: ClearMLLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, ClearMLLogger):
raise RuntimeError("Handler OutputHandler works only with ClearMLLogger")
metrics = self._setup_output_metrics_state_attrs(engine)
global_step = self.global_step_transform(engine, event_name)
if not isinstance(global_step, int):
raise TypeError(
f"global_step must be int, got {type(global_step)}."
" Please check the output of global_step_transform."
)
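# Keys produced by `_setup_output_metrics_state_attrs` are tuples, typically (tag, metric_name) or,
# for composite values, (tag, metric_name, component); they are mapped to ClearML title/series below.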
for key, value in metrics.items():
if len(key) == 2:
logger.clearml_logger.report_scalar(title=key[0], series=key[1], iteration=global_step, value=value)
elif len(key) == 3:
logger.clearml_logger.report_scalar(
title=f"{key[0]}/{key[1]}", series=key[2], iteration=global_step, value=value
)
class OptimizerParamsHandler(BaseOptimizerParamsHandler):
"""Helper handler to log optimizer parameters
Args:
optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
param_name: parameter name
tag: common title for all produced plots. For example, "generator"
Examples:
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
# Create a logger
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
clearml_logger.attach(
trainer,
log_handler=OptimizerParamsHandler(optimizer),
event_name=Events.ITERATION_STARTED
)
# or equivalently
clearml_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer
)
"""
def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None):
super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
def __call__(self, engine: Engine, logger: ClearMLLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, ClearMLLogger):
raise RuntimeError("Handler OptimizerParamsHandler works only with ClearMLLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
params = {
str(i): float(param_group[self.param_name]) for i, param_group in enumerate(self.optimizer.param_groups)
}
for k, v in params.items():
logger.clearml_logger.report_scalar(
title=f"{tag_prefix}{self.param_name}", series=k, value=v, iteration=global_step
)
class WeightsScalarHandler(BaseWeightsScalarHandler):
"""Helper handler to log model's weights as scalars.
Upon construction, the handler iterates over the named parameters of the model and keeps
references to those permitted by `whitelist`. Then, at every call, it applies the
reduction function to each parameter, produces a scalar and logs it.
Args:
model: model to log weights
reduction: function to reduce parameters into scalar
tag: common title for all produced plots. For example, "generator"
whitelist: specific weights to log. Should be a list of the model's submodules
or parameter names, or a callable which takes a weight along with its name
and determines if it should be logged. Names should be fully-qualified.
For more information please refer to `PyTorch docs
<https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.
If not given, all of model's weights are logged.
Examples:
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
# Create a logger
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Attach the logger to the trainer to log model's weights norm after each iteration
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(model, reduction=torch.norm)
)
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Log only `fc` weights
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(
model,
whitelist=['fc']
)
)
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Log weights which have `bias` in their names
def has_bias_in_name(n, p):
return 'bias' in n
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(model, whitelist=has_bias_in_name)
)
.. versionchanged:: 0.4.9
optional argument `whitelist` added.
"""
def __call__(self, engine: Engine, logger: ClearMLLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, ClearMLLogger):
raise RuntimeError("Handler WeightsScalarHandler works only with ClearMLLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
for name, p in self.weights:
title_name, _, series_name = name.partition(".")
logger.clearml_logger.report_scalar(
title=f"{tag_prefix}weights_{self.reduction.__name__}/{title_name}",
series=series_name,
value=self.reduction(p.data),
iteration=global_step,
)
class WeightsHistHandler(BaseWeightsHandler):
"""Helper handler to log model's weights as histograms.
Args:
model: model to log weights
tag: common title for all produced plots. For example, 'generator'
whitelist: specific weights to log. Should be a list of the model's submodules
or parameter names, or a callable which takes a weight along with its name
and determines if it should be logged. Names should be fully-qualified.
For more information please refer to `PyTorch docs
<https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.
If not given, all of model's weights are logged.
Examples:
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
# Create a logger
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Attach the logger to the trainer to log model's weights norm after each iteration
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsHistHandler(model)
)
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Log weights of `fc` layer
weights = ['fc']
# Attach the logger to the trainer to log weights norm after each iteration
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsHistHandler(model, whitelist=weights)
)
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Log weights whose names include 'conv'.
weight_selector = lambda name, p: 'conv' in name
# Attach the logger to the trainer to log weights norm after each iteration
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsHistHandler(model, whitelist=weight_selector)
)
.. versionchanged:: 0.4.9
optional argument `whitelist` added.
"""
def __call__(self, engine: Engine, logger: ClearMLLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, ClearMLLogger):
raise RuntimeError("Handler 'WeightsHistHandler' works only with ClearMLLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
for name, p in self.weights:
title_name, _, series_name = name.partition(".")
logger.grad_helper.add_histogram(
title=f"{tag_prefix}weights_{title_name}",
series=series_name,
step=global_step,
hist_data=p.data.cpu().numpy(),
)
class GradsScalarHandler(BaseWeightsScalarHandler):
"""Helper handler to log model's gradients as scalars.
Upon construction, the handler iterates over the named parameters of the model and keeps
references to those permitted by the `whitelist`. Then, at every call, it applies the
reduction function to each parameter's gradient, produces a scalar and logs it.
Args:
model: model to log weights
reduction: function to reduce parameters into scalar
tag: common title for all produced plots. For example, "generator"
whitelist: specific gradients to log. Should be a list of the model's submodules
or parameter names, or a callable which takes a weight along with its name
and determines if its gradient should be logged. Names should be
fully-qualified. For more information please refer to `PyTorch docs
<https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.
If not given, all of model's gradients are logged.
Examples:
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
# Create a logger
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Attach the logger to the trainer to log model's weights norm after each iteration
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(model, reduction=torch.norm)
)
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Log gradient of `base`
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(
model,
reduction=torch.norm,
whitelist=['base']
)
)
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Log gradient of weights which belong to a `fc` layer
def is_in_fc_layer(n, p):
return 'fc' in n
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(model, whitelist=is_in_fc_layer)
)
.. versionchanged:: 0.4.9
optional argument `whitelist` added.
"""
def __call__(self, engine: Engine, logger: ClearMLLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, ClearMLLogger):
raise RuntimeError("Handler GradsScalarHandler works only with ClearMLLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
for name, p in self.weights:
if p.grad is None:
continue
title_name, _, series_name = name.partition(".")
logger.clearml_logger.report_scalar(
title=f"{tag_prefix}grads_{self.reduction.__name__}/{title_name}",
series=series_name,
value=self.reduction(p.grad),
iteration=global_step,
)
class GradsHistHandler(BaseWeightsHandler):
"""Helper handler to log model's gradients as histograms.
Args:
model: model to log weights
tag: common title for all produced plots. For example, 'generator'
whitelist: specific gradients to log. Should be a list of the model's submodules
or parameter names, or a callable which takes a weight along with its name
and determines if its gradient should be logged. Names should be
fully-qualified. For more information please refer to `PyTorch docs
<https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.
If not given, all of model's gradients are logged.
Examples:
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
# Create a logger
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Attach the logger to the trainer to log model's weights norm after each iteration
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsHistHandler(model)
)
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Log gradient of `fc.bias`
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsHistHandler(model, whitelist=['fc.bias'])
)
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Log gradient of weights which have shape (2, 1)
def has_shape_2_1(n, p):
return p.shape == (2,1)
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsHistHandler(model, whitelist=has_shape_2_1)
)
.. versionchanged:: 0.4.9
optional argument `whitelist` added.
"""
def __call__(self, engine: Engine, logger: ClearMLLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, ClearMLLogger):
raise RuntimeError("Handler 'GradsHistHandler' works only with ClearMLLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
for name, p in self.weights:
if p.grad is None:
continue
title_name, _, series_name = name.partition(".")
logger.grad_helper.add_histogram(
title=f"{tag_prefix}grads_{title_name}",
series=series_name,
step=global_step,
hist_data=p.grad.cpu().numpy(),
)
class ClearMLSaver(DiskSaver):
"""
Handler that saves input checkpoint as ClearML artifacts
Args:
logger: An instance of :class:`~ignite.contrib.handlers.clearml_logger.ClearMLLogger`,
ensuring a valid ClearML ``Task`` has been initialized. If not provided, and a ClearML Task
has not been manually initialized, a runtime error will be raised.
output_uri: The default location for output models and other artifacts uploaded by ClearML. For
more information, see ``clearml.Task.init``.
dirname: Directory path where the checkpoint will be saved. If not provided, a temporary
directory will be created.
Examples:
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
from ignite.handlers import Checkpoint
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
to_save = {"model": model}
handler = Checkpoint(
to_save,
ClearMLSaver(),
n_saved=1,
score_function=lambda e: 123,
score_name="acc",
filename_prefix="best",
global_step_transform=global_step_from_engine(trainer)
)
validation_evaluator.add_event_handler(Events.COMPLETED, handler)
"""
def __init__(
self,
logger: Optional[ClearMLLogger] = None,
output_uri: Optional[str] = None,
dirname: Optional[str] = None,
*args: Any,
**kwargs: Any,
):
self._setup_check_clearml(logger, output_uri)
if not dirname:
dirname = ""
if idist.get_rank() == 0:
dirname = tempfile.mkdtemp(prefix=f"ignite_checkpoints_{datetime.now().strftime('%Y_%m_%d_%H_%M_%S_')}")
if idist.get_world_size() > 1:
dirname = idist.all_gather(dirname)[0] # type: ignore[index, assignment]
warnings.warn(f"ClearMLSaver created a temporary checkpoints directory: {dirname}")
idist.barrier()
# Let's set non-atomic tmp dir saving behaviour
if "atomic" not in kwargs:
kwargs["atomic"] = False
self._checkpoint_slots: DefaultDict[Union[str, Tuple[str, str]], List[Any]] = defaultdict(list)
super(ClearMLSaver, self).__init__(dirname=dirname, *args, **kwargs) # type: ignore[misc]
@idist.one_rank_only()
def _setup_check_clearml(self, logger: ClearMLLogger, output_uri: str) -> None:
try:
from clearml import Task
except ImportError:
try:
# Backwards-compatibility for legacy Trains SDK
from trains import Task
except ImportError:
raise ModuleNotFoundError(
"This contrib module requires clearml to be installed. "
"You may install clearml using: \n pip install clearml \n"
)
if logger and not isinstance(logger, ClearMLLogger):
raise TypeError("logger must be an instance of ClearMLLogger")
self._task = Task.current_task()
if not self._task:
raise RuntimeError(
"ClearMLSaver requires a ClearML Task to be initialized. "
"Please use the `logger` argument or call `clearml.Task.init()`."
)
if output_uri:
self._task.output_uri = output_uri
class _CallbacksContext:
def __init__(
self,
callback_type: Type[Enum],
slots: List,
checkpoint_key: str,
filename: str,
basename: str,
metadata: Optional[Mapping] = None,
) -> None:
self._callback_type = callback_type
self._slots = slots
self._checkpoint_key = str(checkpoint_key)
self._filename = filename
self._basename = basename
self._metadata = metadata
def pre_callback(self, action: str, model_info: Any) -> Any:
if action != self._callback_type.save: # type: ignore[attr-defined]
return model_info
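# Reuse a freed slot (left behind by a removed checkpoint) if one exists, otherwise append a new one,
# so every kept checkpoint is uploaded under a stable name of the form "<basename>_<slot><ext>".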
try:
slot = self._slots.index(None)
self._slots[slot] = model_info.upload_filename
except ValueError:
self._slots.append(model_info.upload_filename)
slot = len(self._slots) - 1
model_info.upload_filename = f"{self._basename}_{slot}{os.path.splitext(self._filename)[1]}"
model_info.local_model_id = f"{self._checkpoint_key}:{model_info.upload_filename}"
return model_info
def post_callback(self, action: str, model_info: Any) -> Any:
if action != self._callback_type.save: # type: ignore[attr-defined]
return model_info
model_info.model.name = f"{model_info.task.name}: {self._filename}"
prefix = "Checkpoint Metadata: "
metadata_items = ", ".join(f"{k}={v}" for k, v in self._metadata.items()) if self._metadata else "none"
metadata = f"{prefix}{metadata_items}"
comment = "\n".join(
metadata if line.startswith(prefix) else line for line in (model_info.model.comment or "").split("\n")
)
if prefix not in comment:
comment += "\n" + metadata
model_info.model.comment = comment
return model_info
def __call__(self, checkpoint: Mapping, filename: str, metadata: Optional[Mapping] = None) -> None:
try:
from clearml.binding.frameworks import WeightsFileHandler
except ImportError:
try:
# Backwards-compatibility for legacy Trains SDK
from trains.binding.frameworks import WeightsFileHandler
except ImportError:
raise ModuleNotFoundError(
"This contrib module requires clearml to be installed. "
"You may install clearml using: \n pip install clearml \n"
)
try:
basename = metadata["basename"] # type: ignore[index]
except (TypeError, KeyError):
warnings.warn("Checkpoint metadata missing or basename cannot be found")
basename = "checkpoint"
checkpoint_key = (str(self.dirname), basename)
cb_context = self._CallbacksContext(
callback_type=WeightsFileHandler.CallbackType,
slots=self._checkpoint_slots[checkpoint_key],
checkpoint_key=str(checkpoint_key),
filename=filename,
basename=basename,
metadata=metadata,
)
pre_cb_id = WeightsFileHandler.add_pre_callback(cb_context.pre_callback)
post_cb_id = WeightsFileHandler.add_post_callback(cb_context.post_callback)
try:
super(ClearMLSaver, self).__call__(checkpoint, filename, metadata)
finally:
WeightsFileHandler.remove_pre_callback(pre_cb_id)
WeightsFileHandler.remove_post_callback(post_cb_id)
@idist.one_rank_only()
def get_local_copy(self, filename: str) -> Optional[str]:
"""Get artifact local copy.
.. warning::
In a distributed configuration, this method should be called on the rank 0 process.
Args:
filename: artifact name.
Returns:
a local path to a downloaded copy of the artifact
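Examples:
A rough usage sketch (assuming a ClearML Task is already initialized; the checkpoint filename below is purely illustrative):
.. code-block:: python
clearml_saver = ClearMLSaver()
# ... after Checkpoint has saved e.g. "best_model_5_acc=0.97.pt" through this saver ...
local_path = clearml_saver.get_local_copy("best_model_5_acc=0.97.pt")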
"""
artifact = self._task.artifacts.get(filename)
if artifact:
return artifact.get_local_copy()
self._task.get_logger().report_text(f"Can not find artifact {filename}")
return None
@idist.one_rank_only()
def remove(self, filename: str) -> None:
super(ClearMLSaver, self).remove(filename)
for slots in self._checkpoint_slots.values():
try:
slots[slots.index(filename)] = None
except ValueError:
pass
else:
break
|
"""Neptune logger and its helper handlers."""
import tempfile
import warnings
from typing import Any, Callable, List, Mapping, Optional, Union
import torch
from torch.optim import Optimizer
import ignite.distributed as idist
from ignite import __version__
from ignite.contrib.handlers.base_logger import (
BaseLogger,
BaseOptimizerParamsHandler,
BaseOutputHandler,
BaseWeightsScalarHandler,
)
from ignite.engine import Engine, Events
from ignite.handlers import global_step_from_engine
from ignite.handlers.checkpoint import BaseSaveHandler
__all__ = [
"NeptuneLogger",
"NeptuneSaver",
"OptimizerParamsHandler",
"OutputHandler",
"WeightsScalarHandler",
"GradsScalarHandler",
"global_step_from_engine",
]
_INTEGRATION_VERSION_KEY = "source_code/integrations/neptune-pytorch-ignite"
class NeptuneLogger(BaseLogger):
"""
`Neptune <https://neptune.ai/>`_ handler to log metrics, model/optimizer parameters and gradients during training
and validation. It can also log model checkpoints to Neptune.
.. code-block:: bash
pip install neptune
Args:
api_token: Neptune API token, found on https://neptune.ai -> User menu -> "Get your API token".
If None, the value of the NEPTUNE_API_TOKEN environment variable is used. To keep your token
secure, you should set it to the environment variable rather than including it in your code.
project: Name of a Neptune project, in the form "workspace-name/project-name".
For example "tom/mnist-classification".
If None, the value of the NEPTUNE_PROJECT environment variable is used.
**kwargs: Other arguments to be passed to the `init_run()` function.
Examples:
.. code-block:: python
from ignite.contrib.handlers.neptune_logger import *
# Create a logger
# Note: We are using the API token for anonymous logging. You can pass your own token, or save it as an
# environment variable and leave out the api_token argument.
npt_logger = NeptuneLogger(
api_token="ANONYMOUS",
project="common/pytorch-ignite-integration",
name="cnn-mnist", # Optional,
tags=["pytorch-ignite", "minst"], # Optional
)
# Attach the logger to the trainer to log training loss at each iteration.
npt_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
output_transform=lambda loss: {"loss": loss},
)
# Attach the logger to the evaluator on the training dataset and log NLL
# and accuracy metrics after each epoch.
# We set up `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer` instead of `train_evaluator`.
npt_logger.attach_output_handler(
train_evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="training",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer),
)
# Attach the logger to the evaluator on the validation dataset and log NLL and accuracy metrics after
# each epoch. We set up `global_step_transform=global_step_from_engine(trainer)` to take the epoch of the
# `trainer` instead of `evaluator`.
npt_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer),
)
# Attach the logger to the trainer to log optimizer parameters, such as learning rate at each iteration.
npt_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer,
param_name="lr", # optional
)
# Attach the logger to the trainer to log model's weights norm after each iteration.
npt_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(model),
)
Explore runs with Neptune tracking here:
https://app.neptune.ai/o/common/org/pytorch-ignite-integration/
You can also save model checkpoints to Neptune:
.. code-block:: python
from ignite.handlers import Checkpoint
def score_function(engine):
return engine.state.metrics["accuracy"]
to_save = {"model": model}
handler = Checkpoint(
to_save,
NeptuneSaver(npt_logger), n_saved=2,
filename_prefix="best",
score_function=score_function,
score_name="validation_accuracy",
global_step_transform=global_step_from_engine(trainer),
)
validation_evaluator.add_event_handler(Events.COMPLETED, handler)
It is also possible to use the logger as a context manager:
.. code-block:: python
from ignite.contrib.handlers.neptune_logger import *
with NeptuneLogger() as npt_logger:
trainer = Engine(update_fn)
# Attach the logger to the trainer to log training loss at each iteration
npt_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
output_transform=lambda loss: {"loss": loss},
)
"""
def __getattr__(self, attr: Any) -> Any:
return getattr(self.experiment, attr)
def __getitem__(self, key: str) -> Any:
return self.experiment[key]
def __setitem__(self, key: str, val: Any) -> Any:
self.experiment[key] = val
def __init__(self, api_token: Optional[str] = None, project: Optional[str] = None, **kwargs: Any) -> None:
try:
try:
# neptune-client<1.0.0 package structure
with warnings.catch_warnings():
# ignore the deprecation warnings
warnings.simplefilter("ignore")
import neptune.new as neptune
except ImportError:
# neptune>=1.0.0 package structure
import neptune
except ImportError:
raise ModuleNotFoundError(
"This contrib module requires the Neptune client library to be installed. "
"Install neptune with the command: \n pip install neptune \n"
)
run = neptune.init_run(
api_token=api_token,
project=project,
**kwargs,
)
run[_INTEGRATION_VERSION_KEY] = __version__
self.experiment = run
def close(self) -> None:
self.experiment.stop()
def _create_output_handler(self, *args: Any, **kwargs: Any) -> "OutputHandler":
return OutputHandler(*args, **kwargs)
def _create_opt_params_handler(self, *args: Any, **kwargs: Any) -> "OptimizerParamsHandler":
return OptimizerParamsHandler(*args, **kwargs)
class OutputHandler(BaseOutputHandler):
"""Helper handler to log engine's output and/or metrics.
Args:
tag: common title for all produced plots. For example, "training"
metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
output_transform: output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{"loss": loss1, "another_loss": loss2}` to label the plot
with corresponding keys.
global_step_transform: global step transform function to output a desired global step.
Input of the function is `(engine, event_name)`. Output of function should be an integer.
Default is None, global_step based on attached engine. If provided,
uses function output as global_step. To setup global step from another engine, please use
:meth:`~ignite.contrib.handlers.neptune_logger.global_step_from_engine`.
state_attributes: list of attributes of the ``trainer.state`` to plot.
Examples:
.. code-block:: python
from ignite.contrib.handlers.neptune_logger import *
# Create a logger
# We are using the api_token for the anonymous user neptuner but you can use your own.
npt_logger = NeptuneLogger(
api_token="ANONYMOUS",
project_name="shared/pytorch-ignite-integration",
experiment_name="cnn-mnist", # Optional,
params={"max_epochs": 10}, # Optional,
tags=["pytorch-ignite","minst"] # Optional
)
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer`:
npt_logger.attach(
evaluator,
log_handler=OutputHandler(
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
),
event_name=Events.EPOCH_COMPLETED
)
# or equivalently
npt_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
)
Another example, where model is evaluated every 500 iterations:
.. code-block:: python
from ignite.contrib.handlers.neptune_logger import *
@trainer.on(Events.ITERATION_COMPLETED(every=500))
def evaluate(engine):
evaluator.run(validation_set, max_epochs=1)
# We are using the api_token for the anonymous user neptuner but you can use your own.
npt_logger = NeptuneLogger(
api_token="ANONYMOUS",
project_name="shared/pytorch-ignite-integration",
experiment_name="cnn-mnist", # Optional,
params={"max_epochs": 10}, # Optional,
tags=["pytorch-ignite", "minst"] # Optional
)
def global_step_transform(*args, **kwargs):
return trainer.state.iteration
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# every 500 iterations. Since evaluator engine does not have access to the training iteration, we
# provide a global_step_transform to return the trainer.state.iteration for the global_step, each time
# evaluator metrics are plotted on Neptune.
npt_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metrics=["nll", "accuracy"],
global_step_transform=global_step_transform
)
Another example where the State Attributes ``trainer.state.alpha`` and ``trainer.state.beta``
are also logged along with the NLL and Accuracy after each iteration:
.. code-block:: python
npt_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
metrics=["nll", "accuracy"],
state_attributes=["alpha", "beta"],
)
Example of `global_step_transform`:
.. code-block:: python
def global_step_transform(engine, event_name):
return engine.state.get_event_attrib_value(event_name)
.. versionchanged:: 0.4.7
accepts an optional list of `state_attributes`
"""
def __init__(
self,
tag: str,
metric_names: Optional[Union[str, List[str]]] = None,
output_transform: Optional[Callable] = None,
global_step_transform: Optional[Callable[[Engine, Union[str, Events]], int]] = None,
state_attributes: Optional[List[str]] = None,
):
super(OutputHandler, self).__init__(
tag, metric_names, output_transform, global_step_transform, state_attributes
)
def __call__(self, engine: Engine, logger: NeptuneLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, NeptuneLogger):
raise TypeError("Handler OutputHandler works only with NeptuneLogger")
metrics = self._setup_output_metrics_state_attrs(engine, key_tuple=False)
global_step = self.global_step_transform(engine, event_name)
if not isinstance(global_step, int):
raise TypeError(
f"global_step must be int, got {type(global_step)}."
" Please check the output of global_step_transform."
)
for key, value in metrics.items():
logger[key].append(value, step=global_step)
class OptimizerParamsHandler(BaseOptimizerParamsHandler):
"""Helper handler to log optimizer parameters
Args:
optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
param_name: parameter name
tag: common title for all produced plots. For example, "generator"
Examples:
.. code-block:: python
from ignite.contrib.handlers.neptune_logger import *
# Create a logger
# We are using the api_token for the anonymous user neptuner but you can use your own.
npt_logger = NeptuneLogger(
api_token="ANONYMOUS",
project_name="shared/pytorch-ignite-integration",
experiment_name="cnn-mnist", # Optional,
params={"max_epochs": 10}, # Optional,
tags=["pytorch-ignite","minst"] # Optional
)
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
npt_logger.attach(
trainer,
log_handler=OptimizerParamsHandler(optimizer),
event_name=Events.ITERATION_STARTED
)
# or equivalently
npt_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer
)
"""
def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None):
super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
def __call__(self, engine: Engine, logger: NeptuneLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, NeptuneLogger):
raise TypeError("Handler OptimizerParamsHandler works only with NeptuneLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
params = {
f"{tag_prefix}{self.param_name}/group_{i}": float(param_group[self.param_name])
for i, param_group in enumerate(self.optimizer.param_groups)
}
for k, v in params.items():
logger[k].append(v, step=global_step)
class WeightsScalarHandler(BaseWeightsScalarHandler):
"""Helper handler to log model's weights as scalars.
Upon construction, the handler iterates over the named parameters of the model and keeps
references to those permitted by `whitelist`. Then, at every call, it applies the
reduction function to each parameter, produces a scalar and logs it.
Args:
model: model to log weights
reduction: function to reduce parameters into scalar
tag: common title for all produced plots. For example, "generator"
whitelist: specific weights to log. Should be a list of the model's submodules
or parameter names, or a callable which takes a weight along with its name
and determines if it should be logged. Names should be fully-qualified.
For more information please refer to `PyTorch docs
<https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.
If not given, all of model's weights are logged.
Examples:
.. code-block:: python
from ignite.contrib.handlers.neptune_logger import *
# Create a logger
# We are using the api_token for the anonymous user neptuner but you can use your own.
npt_logger = NeptuneLogger(
api_token="ANONYMOUS",
project_name="shared/pytorch-ignite-integration",
experiment_name="cnn-mnist", # Optional,
params={"max_epochs": 10}, # Optional,
tags=["pytorch-ignite","minst"] # Optional
)
# Attach the logger to the trainer to log model's weights norm after each iteration
npt_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(model, reduction=torch.norm)
)
.. code-block:: python
from ignite.contrib.handlers.neptune_logger import *
npt_logger = NeptuneLogger(
api_token="ANONYMOUS",
project_name="shared/pytorch-ignite-integration",
experiment_name="cnn-mnist", # Optional,
params={"max_epochs": 10}, # Optional,
tags=["pytorch-ignite","minst"] # Optional
)
# Log only `fc` weights
npt_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(
model,
whitelist=['fc']
)
)
.. code-block:: python
from ignite.contrib.handlers.neptune_logger import *
npt_logger = NeptuneLogger(
api_token="ANONYMOUS",
project_name="shared/pytorch-ignite-integration",
experiment_name="cnn-mnist", # Optional,
params={"max_epochs": 10}, # Optional,
tags=["pytorch-ignite","minst"] # Optional
)
# Log weights which have `bias` in their names
def has_bias_in_name(n, p):
return 'bias' in n
npt_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(model, whitelist=has_bias_in_name)
)
.. versionchanged:: 0.4.9
optional argument `whitelist` added.
"""
def __call__(self, engine: Engine, logger: NeptuneLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, NeptuneLogger):
raise TypeError("Handler WeightsScalarHandler works only with NeptuneLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
for name, p in self.weights:
if p.grad is None:
continue
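# Replace "." with "/" so that Neptune nests the scalar under per-layer namespaces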
name = name.replace(".", "/")
key = f"{tag_prefix}weights_{self.reduction.__name__}/{name}"
logger[key].append(self.reduction(p.data), step=global_step)
class GradsScalarHandler(BaseWeightsScalarHandler):
"""Helper handler to log model's gradients as scalars.
Upon construction, the handler iterates over the named parameters of the model and keeps
references to those permitted by the `whitelist`. Then, at every call, it applies the
reduction function to each parameter's gradient, produces a scalar and logs it.
Args:
model: model to log weights
reduction: function to reduce parameters into scalar
tag: common title for all produced plots. For example, "generator"
whitelist: specific gradients to log. Should be a list of the model's submodules
or parameter names, or a callable which takes a weight along with its name
and determines if its gradient should be logged. Names should be
fully-qualified. For more information please refer to `PyTorch docs
<https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.
If not given, all of model's gradients are logged.
Examples:
.. code-block:: python
from ignite.contrib.handlers.neptune_logger import *
# Create a logger
# We are using the api_token for the anonymous user neptuner but you can use your own.
npt_logger = NeptuneLogger(
api_token="ANONYMOUS",
project_name="shared/pytorch-ignite-integration",
experiment_name="cnn-mnist", # Optional,
params={"max_epochs": 10}, # Optional,
tags=["pytorch-ignite","minst"] # Optional
)
# Attach the logger to the trainer to log model's weights norm after each iteration
npt_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(model, reduction=torch.norm)
)
.. code-block:: python
from ignite.contrib.handlers.neptune_logger import *
npt_logger = NeptuneLogger(
api_token="ANONYMOUS",
project_name="shared/pytorch-ignite-integration",
experiment_name="cnn-mnist", # Optional,
params={"max_epochs": 10}, # Optional,
tags=["pytorch-ignite","minst"] # Optional
)
# Log gradient of `base`
npt_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(
model,
reduction=torch.norm,
whitelist=['base']
)
)
.. code-block:: python
from ignite.contrib.handlers.neptune_logger import *
npt_logger = NeptuneLogger(
api_token="ANONYMOUS",
project_name="shared/pytorch-ignite-integration",
experiment_name="cnn-mnist", # Optional,
params={"max_epochs": 10}, # Optional,
tags=["pytorch-ignite","minst"] # Optional
)
# Log gradient of weights which belong to a `fc` layer
def is_in_fc_layer(n, p):
return 'fc' in n
npt_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(model, whitelist=is_in_fc_layer)
)
.. versionchanged:: 0.4.9
optional argument `whitelist` added.
"""
def __call__(self, engine: Engine, logger: NeptuneLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, NeptuneLogger):
raise TypeError("Handler GradsScalarHandler works only with NeptuneLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
for name, p in self.weights:
if p.grad is None:
continue
name = name.replace(".", "/")
key = f"{tag_prefix}grads_{self.reduction.__name__}/{name}"
logger[key].append(self.reduction(p.grad), step=global_step)
class NeptuneSaver(BaseSaveHandler):
"""Handler that saves input checkpoint to the Neptune server.
Args:
neptune_logger: an instance of the NeptuneLogger class.
.. note::
NeptuneSaver is currently not supported on Windows.
Examples:
.. code-block:: python
from ignite.contrib.handlers.neptune_logger import *
# Create a logger
# We are using the api_token for the anonymous user neptuner but you can use your own.
npt_logger = NeptuneLogger(
api_token="ANONYMOUS",
project_name="shared/pytorch-ignite-integration",
experiment_name="cnn-mnist", # Optional,
params={"max_epochs": 10}, # Optional,
tags=["pytorch-ignite","minst"] # Optional
)
...
evaluator = create_supervised_evaluator(model, metrics=metrics, ...)
...
from ignite.handlers import Checkpoint
def score_function(engine):
return engine.state.metrics["accuracy"]
to_save = {"model": model}
# pass the Neptune logger to NeptuneSaver
handler = Checkpoint(
to_save,
NeptuneSaver(npt_logger), n_saved=2,
filename_prefix="best", score_function=score_function,
score_name="validation_accuracy",
global_step_transform=global_step_from_engine(trainer)
)
evaluator.add_event_handler(Events.COMPLETED, handler)
# We need to close the logger when we are done
npt_logger.close()
For example, you can access model checkpoints and download them from here:
https://ui.neptune.ai/o/shared/org/pytorch-ignite-integration/e/PYTOR1-18/charts
"""
@idist.one_rank_only()
def __init__(self, neptune_logger: NeptuneLogger):
self._logger = neptune_logger
@idist.one_rank_only()
def __call__(self, checkpoint: Mapping, filename: str, metadata: Optional[Mapping] = None) -> None:
# won't work on XLA
# Imports for BC compatibility
try:
# neptune-client<1.0.0 package structure
with warnings.catch_warnings():
# ignore the deprecation warnings
warnings.simplefilter("ignore")
from neptune.new.types import File
except ImportError:
# neptune>=1.0.0 package structure
from neptune.types import File
with tempfile.NamedTemporaryFile() as tmp:
# we cannot use tmp.name to open the file a second time on Win32
# https://docs.python.org/3/library/tempfile.html#tempfile.NamedTemporaryFile
torch.save(checkpoint, tmp.file)
# rewind the buffer
tmp.file.seek(0)
# hold onto the file stream for uploading.
# NOTE: This won't load the whole file in memory and upload
# the stream in smaller chunks.
self._logger[filename].upload(File.from_stream(tmp.file))
@idist.one_rank_only(with_barrier=True)
def remove(self, filename: str) -> None:
del self._logger.experiment[filename]
|
# -*- coding: utf-8 -*-
"""TQDM logger."""
from collections import OrderedDict
from typing import Any, Callable, List, Optional, Union
from ignite.contrib.handlers.base_logger import BaseLogger, BaseOutputHandler
from ignite.engine import Engine, Events
from ignite.engine.events import CallableEventWithFilter, RemovableEventHandle
class ProgressBar(BaseLogger):
"""
TQDM progress bar handler to log training progress and computed metrics.
Args:
persist: set to ``True`` to persist the progress bar after completion (default = ``False``)
bar_format: specify a custom bar string format. May impact performance.
[default: '{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]'].
Set to ``None`` to use ``tqdm`` default bar formatting: '{l_bar}{bar}{r_bar}', where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'. For more details on the
formatting, see `tqdm docs <https://tqdm.github.io/docs/tqdm/>`_.
tqdm_kwargs: kwargs passed to tqdm progress bar.
By default, the progress bar description displays "Epoch [5/10]", where 5 is the current epoch and 10 is the
number of epochs; however, if ``max_epochs`` is set to 1, the progress bar instead displays
"Iteration: [5/10]". If tqdm_kwargs defines `desc`, e.g. "Predictions", then the description is
"Predictions [5/10]" if the number of epochs is more than one; otherwise it is simply "Predictions".
Examples:
Simple progress bar
.. code-block:: python
trainer = create_supervised_trainer(model, optimizer, loss)
pbar = ProgressBar()
pbar.attach(trainer)
# The progress bar will look like
# Epoch [2/50]: [64/128] 50%|█████ [06:17<12:34]
Log output to a file instead of stderr (tqdm's default output)
.. code-block:: python
trainer = create_supervised_trainer(model, optimizer, loss)
log_file = open("output.log", "w")
pbar = ProgressBar(file=log_file)
pbar.attach(trainer)
Attach metrics that already have been computed at :attr:`~ignite.engine.events.Events.ITERATION_COMPLETED`
(such as :class:`~ignite.metrics.RunningAverage`)
.. code-block:: python
trainer = create_supervised_trainer(model, optimizer, loss)
RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')
pbar = ProgressBar()
pbar.attach(trainer, ['loss'])
# The progress bar will look like
# Epoch [2/50]: [64/128] 50%|█████ , loss=0.123 [06:17<12:34]
Directly attach the engine's output
.. code-block:: python
trainer = create_supervised_trainer(model, optimizer, loss)
pbar = ProgressBar()
pbar.attach(trainer, output_transform=lambda x: {'loss': x})
# The progress bar will look like
# Epoch [2/50]: [64/128] 50%|█████ , loss=0.123 [06:17<12:34]
Example where the State Attributes ``trainer.state.alpha`` and ``trainer.state.beta``
are also logged along with the NLL and Accuracy after each iteration:
.. code-block:: python
pbar.attach(
trainer,
metric_names=["nll", "accuracy"],
state_attributes=["alpha", "beta"],
)
Note:
When attaching the progress bar to an engine, it is recommended that you replace
every per-iteration print operation in the engine's handlers with
``pbar.log_message`` to keep the stdout output correctly formatted.
Note:
When used inside a Jupyter notebook, `ProgressBar` automatically uses `tqdm_notebook`. For correct rendering,
please install `ipywidgets <https://ipywidgets.readthedocs.io/en/stable/user_install.html#installation>`_.
Due to `tqdm notebook bugs <https://github.com/tqdm/tqdm/issues/594>`_, the bar format may need to be set
to an empty string.
.. versionchanged:: 0.4.7
`attach` now accepts an optional list of `state_attributes`
"""
_events_order: List[Union[Events, CallableEventWithFilter]] = [
Events.STARTED,
Events.EPOCH_STARTED,
Events.ITERATION_STARTED,
Events.ITERATION_COMPLETED,
Events.EPOCH_COMPLETED,
Events.COMPLETED,
]
def __init__(
self,
persist: bool = False,
bar_format: Union[
str, None
] = "{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]",
**tqdm_kwargs: Any,
):
try:
from tqdm.autonotebook import tqdm
except ImportError:
raise ModuleNotFoundError(
"This contrib module requires tqdm to be installed. "
"Please install it with command: \n pip install tqdm"
)
self.pbar_cls = tqdm
self.pbar = None
self.persist = persist
self.bar_format = bar_format
self.tqdm_kwargs = tqdm_kwargs
def _reset(self, pbar_total: Optional[int]) -> None:
self.pbar = self.pbar_cls(
total=pbar_total, leave=self.persist, bar_format=self.bar_format, initial=1, **self.tqdm_kwargs
)
def _close(self, engine: Engine) -> None:
if self.pbar is not None:
# https://github.com/tqdm/notebook.py#L240-L250
# issue #1115 : notebook backend of tqdm checks if n < total (error or KeyboardInterrupt)
# and the bar persists in 'danger' mode
if self.pbar.total is not None:
self.pbar.n = self.pbar.total
self.pbar.close()
self.pbar = None
@staticmethod
def _compare_lt(
event1: Union[Events, CallableEventWithFilter], event2: Union[Events, CallableEventWithFilter]
) -> bool:
i1 = ProgressBar._events_order.index(event1)
i2 = ProgressBar._events_order.index(event2)
return i1 < i2
def log_message(self, message: str) -> None:
"""
Logs a message, preserving the progress bar correct output format.
Args:
message: string you wish to log.
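Examples:
A short usage sketch:
.. code-block:: python
pbar = ProgressBar()
pbar.attach(trainer)
# e.g. inside an event handler, instead of print(...):
pbar.log_message("Validation started")  # printed without breaking the bar rendering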
"""
from tqdm import tqdm
tqdm.write(message, file=self.tqdm_kwargs.get("file", None))
def attach( # type: ignore[override]
self,
engine: Engine,
metric_names: Optional[Union[str, List[str]]] = None,
output_transform: Optional[Callable] = None,
event_name: Union[Events, CallableEventWithFilter] = Events.ITERATION_COMPLETED,
closing_event_name: Union[Events, CallableEventWithFilter] = Events.EPOCH_COMPLETED,
state_attributes: Optional[List[str]] = None,
) -> None:
"""
Attaches the progress bar to an engine object.
Args:
engine: engine object.
metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
output_transform: a function to select what you want to print from the engine's
output. This function may return either a dictionary with entries in the format of ``{name: value}``,
or a single scalar, which will be displayed with the default name `output`.
event_name: event's name on which the progress bar advances. Valid events are from
:class:`~ignite.engine.events.Events`.
closing_event_name: event's name on which the progress bar is closed. Valid events are from
:class:`~ignite.engine.events.Events`.
state_attributes: list of attributes of the ``trainer.state`` to plot.
Note:
Accepted output value types are numbers, 0d and 1d torch tensors and strings.
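Examples:
A sketch of attaching with non-default events (a bar that advances once per epoch and closes when the run completes):
.. code-block:: python
pbar = ProgressBar(persist=True)
pbar.attach(trainer, event_name=Events.EPOCH_COMPLETED, closing_event_name=Events.COMPLETED)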
"""
desc = self.tqdm_kwargs.get("desc", None)
if event_name not in engine._allowed_events:
raise ValueError(f"Logging event {event_name.name} is not in allowed events for this engine")
if isinstance(closing_event_name, CallableEventWithFilter):
if closing_event_name.filter is not None:
raise ValueError("Closing Event should not be a filtered event")
if not self._compare_lt(event_name, closing_event_name):
raise ValueError(f"Logging event {event_name} should be called before closing event {closing_event_name}")
log_handler = _OutputHandler(
desc,
metric_names,
output_transform,
closing_event_name=closing_event_name,
state_attributes=state_attributes,
)
super(ProgressBar, self).attach(engine, log_handler, event_name)
engine.add_event_handler(closing_event_name, self._close)
def attach_opt_params_handler( # type: ignore[empty-body]
self, engine: Engine, event_name: Union[str, Events], *args: Any, **kwargs: Any
) -> RemovableEventHandle:
"""Intentionally empty"""
pass
def _create_output_handler(self, *args: Any, **kwargs: Any) -> "_OutputHandler":
return _OutputHandler(*args, **kwargs)
def _create_opt_params_handler(self, *args: Any, **kwargs: Any) -> Callable: # type: ignore[empty-body]
"""Intentionally empty"""
pass
class _OutputHandler(BaseOutputHandler):
"""Helper handler to log engine's output and/or metrics
Args:
description: progress bar description.
metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
output_transform: output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{'loss': loss1, 'another_loss': loss2}` to label the plot
with corresponding keys.
closing_event_name: event's name on which the progress bar is closed. Valid events are from
:class:`~ignite.engine.events.Events` or any `event_name` added by
:meth:`~ignite.engine.engine.Engine.register_events`.
state_attributes: list of attributes of the ``trainer.state`` to plot.
"""
def __init__(
self,
description: str,
metric_names: Optional[Union[str, List[str]]] = None,
output_transform: Optional[Callable] = None,
closing_event_name: Union[Events, CallableEventWithFilter] = Events.EPOCH_COMPLETED,
state_attributes: Optional[List[str]] = None,
):
if metric_names is None and output_transform is None:
# Avoid BaseOutputHandler's "Either metric_names or output_transform should be defined" error
metric_names = []
super(_OutputHandler, self).__init__(
description, metric_names, output_transform, global_step_transform=None, state_attributes=state_attributes
)
self.closing_event_name = closing_event_name
@staticmethod
def get_max_number_events(event_name: Union[str, Events, CallableEventWithFilter], engine: Engine) -> Optional[int]:
if event_name in (Events.ITERATION_STARTED, Events.ITERATION_COMPLETED):
return engine.state.epoch_length
if event_name in (Events.EPOCH_STARTED, Events.EPOCH_COMPLETED):
return engine.state.max_epochs
return 1
def __call__(self, engine: Engine, logger: ProgressBar, event_name: Union[str, Events]) -> None:
pbar_total = self.get_max_number_events(event_name, engine)
if logger.pbar is None:
logger._reset(pbar_total=pbar_total)
max_epochs = engine.state.max_epochs
default_desc = "Iteration" if max_epochs == 1 else "Epoch"
desc = self.tag or default_desc
max_num_of_closing_events = self.get_max_number_events(self.closing_event_name, engine)
if max_num_of_closing_events and max_num_of_closing_events > 1:
global_step = engine.state.get_event_attrib_value(self.closing_event_name)
desc += f" [{global_step}/{max_num_of_closing_events}]"
logger.pbar.set_description(desc) # type: ignore[attr-defined]
rendered_metrics = self._setup_output_metrics_state_attrs(engine, log_text=True)
metrics = OrderedDict()
for key, value in rendered_metrics.items():
key = "_".join(key[1:]) # tqdm has tag as description
metrics[key] = value
if metrics:
logger.pbar.set_postfix(metrics) # type: ignore[attr-defined]
global_step = engine.state.get_event_attrib_value(event_name)
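# Wrap the absolute event count into [1, pbar_total] so the bar restarts for each closing-event period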
if pbar_total is not None:
global_step = (global_step - 1) % pbar_total + 1
logger.pbar.update(global_step - logger.pbar.n) # type: ignore[attr-defined]
|
import random
import warnings
from collections import OrderedDict
from functools import wraps
from typing import Any, Callable, Generator, Iterator, List, Optional
import torch
from torch.utils.data import DataLoader
from torch.utils.data.sampler import BatchSampler
from ignite.engine.engine import Engine
from ignite.engine.events import Events
from ignite.utils import manual_seed
__all__ = ["update_dataloader", "keep_random_state", "ReproducibleBatchSampler", "DeterministicEngine"]
def update_dataloader(dataloader: DataLoader, new_batch_sampler: BatchSampler) -> DataLoader:
"""Helper function to replace current batch sampler of the dataloader by a new batch sampler. Function returns new
dataloader with new batch sampler.
Args:
dataloader: input dataloader
new_batch_sampler: new batch sampler to use
Returns:
DataLoader
"""
params_keys = [k for k in dataloader.__dict__.keys() if not k.startswith("_")]
for k in ["batch_size", "sampler", "drop_last", "batch_sampler", "dataset_kind"]:
if k in params_keys:
params_keys.remove(k)
params = {k: getattr(dataloader, k) for k in params_keys}
params["batch_sampler"] = new_batch_sampler
return type(dataloader)(**params)
class ReproducibleBatchSampler(BatchSampler):
"""Reproducible batch sampler. This class internally iterates and stores indices of the input batch sampler.
This helps to start providing data batches from an iteration in a deterministic way.
Args:
batch_sampler: batch sampler same as used with `torch.utils.data.DataLoader`.
start_iteration: optional start iteration.
Examples:
Setup dataloader with `ReproducibleBatchSampler` and start providing data batches from an iteration
.. code-block:: python
from ignite.engine.deterministic import update_dataloader
dataloader = update_dataloader(dataloader, ReproducibleBatchSampler(dataloader.batch_sampler))
# rewind dataloader to a specific iteration:
dataloader.batch_sampler.start_iteration = start_iteration
"""
def __init__(self, batch_sampler: BatchSampler, start_iteration: Optional[int] = None):
if not isinstance(batch_sampler, BatchSampler):
raise TypeError("Argument batch_sampler should be torch.utils.data.sampler.BatchSampler")
self.batch_indices: List = []
self.batch_sampler = batch_sampler
self.start_iteration = start_iteration
self.sampler = self.batch_sampler.sampler
def setup_batch_indices(self) -> None:
"""Setup batch indices."""
self.batch_indices = []
for batch in self.batch_sampler:
self.batch_indices.append(batch)
if self.start_iteration is not None:
self.batch_indices = self.batch_indices[self.start_iteration :]
self.start_iteration = None
def __iter__(self) -> Generator:
self.setup_batch_indices()
for batch in self.batch_indices:
yield batch
def __len__(self) -> int:
return len(self.batch_sampler)
def _get_rng_states() -> List[Any]:
output = [random.getstate(), torch.get_rng_state()]
try:
import numpy as np
output.append(np.random.get_state())
except ImportError:
pass
return output
def _set_rng_states(rng_states: List[Any]) -> None:
random.setstate(rng_states[0])
if "cpu" not in rng_states[1].device.type:
rng_states[1] = rng_states[1].cpu()
torch.set_rng_state(rng_states[1])
try:
import numpy as np
np.random.set_state(rng_states[2])
except ImportError:
pass
def _repr_rng_state(rng_states: List[Any]) -> str:
from hashlib import md5
out = " ".join([md5(str(list(s)).encode("utf-8")).hexdigest() for s in rng_states])
return out
def keep_random_state(func: Callable) -> Callable:
"""Helper decorator to keep random state of torch, numpy and random intact
while executing a function. For more details on usage, please see :ref:`Dataflow synchronization`.
Args:
func: function to decorate
"""
@wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> None:
rng_states = _get_rng_states()
func(*args, **kwargs)
_set_rng_states(rng_states)
return wrapper
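# --- Hedged usage sketch (editorial addition, not part of the original file) ---
# `keep_random_state` restores the RNG states captured before the call; note that the
# wrapper above discards the wrapped function's return value, so it suits
# side-effect-only helpers. The helper name below is hypothetical.
@keep_random_state
def _log_random_preview_example() -> None:
    sample = torch.rand(3)  # consumes the torch RNG inside the wrapper only
    print(f"random preview: {sample.tolist()}")  # caller's RNG state is restored afterwards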
class DeterministicEngine(Engine):
"""Deterministic engine derived from :class:`~ignite.engine.engine.Engine`.
"Deterministic" run is done by adding additional handlers to synchronize the dataflow and overriding some methods of
:class:`~ignite.engine.engine.Engine`:
.. code-block:: python
for e in range(num_epochs):
set_seed(seed_offset + e)
if resume:
setup_saved_rng_states()
do_single_epoch_iterations(dataloader)
If input data provider is `DataLoader`, its batch sampler is replaced by
:class:`~ignite.engine.deterministic.ReproducibleBatchSampler`.
.. code-block:: python
for e in range(num_epochs):
set_seed(seed_offset + e)
setup_sampling(dataloader)
if resume:
setup_saved_rng_states()
do_single_epoch_iterations(dataloader)
Internally, `torch.backends.cudnn.deterministic = True` and `torch.backends.cudnn.benchmark = False` are also
applied.
For more details about dataflow synchronization, please see :ref:`Dataflow synchronization`.
.. Note ::
This class can produce exactly the same dataflow when resuming the run from an epoch (or more precisely from
dataflow restart) and using torch `DataLoader` with `num_workers > 1` as data provider.
Args:
process_function: A function receiving a handle to the engine and the current batch
in each iteration, and returns data to be stored in the engine's state.
"""
def __init__(self, process_function: Callable[[Engine, Any], Any]):
super(DeterministicEngine, self).__init__(process_function)
self.state_dict_user_keys.append("rng_states")
if not hasattr(self.state, "rng_states"):
setattr(self.state, "rng_states", None)
self.add_event_handler(Events.STARTED, self._init_run)
self.add_event_handler(Events.DATALOADER_STOP_ITERATION | Events.TERMINATE_SINGLE_EPOCH, self._setup_seed)
def state_dict(self) -> OrderedDict:
state_dict = super(DeterministicEngine, self).state_dict()
state_dict["rng_states"] = _get_rng_states()
return state_dict
def _init_run(self) -> None:
self.state.seed = int(torch.randint(0, int(1e9), (1,)).item())
if torch.cuda.is_available():
if hasattr(torch, "use_deterministic_algorithms"):
torch.use_deterministic_algorithms(True, warn_only=True)
else:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def _setup_engine(self) -> None:
if self.state.dataloader is None:
raise ValueError(
"Deterministic engine does not support the option of data=None. Please, provide data as iterable"
)
self._dataloader_len = self._get_data_length(self.state.dataloader)
# if input data is torch dataloader we replace batch sampler by a batch sampler
# such that its random sampling indices are reproducible by prefetching them before data iteration
if isinstance(self.state.dataloader, DataLoader):
            # attribute _dataset_kind was introduced in 1.3.0 => before 1.3.0 all datasets are map-like
can_patch_dataloader = True
if hasattr(self.state.dataloader, "_dataset_kind"):
from torch.utils.data.dataloader import _DatasetKind
_dataloader_kind = self.state.dataloader._dataset_kind
can_patch_dataloader = _dataloader_kind == _DatasetKind.Map
if can_patch_dataloader:
if self._dataloader_len is not None and hasattr(self.state.dataloader.sampler, "epoch"):
if self._dataloader_len != self.state.epoch_length:
warnings.warn(
"When defined engine's epoch length is different of input dataloader length, "
"distributed sampler indices can not be setup in a reproducible manner"
)
batch_sampler = self.state.dataloader.batch_sampler
if not (batch_sampler is None or isinstance(batch_sampler, ReproducibleBatchSampler)):
self.state.dataloader = update_dataloader(
self.state.dataloader, ReproducibleBatchSampler(batch_sampler) # type: ignore[arg-type]
)
iteration = self.state.iteration
self._dataloader_iter = self._from_iteration(iteration)
# Below we define initial counter value for _run_once_on_dataset to measure a single epoch
if self.state.epoch_length is not None:
iteration %= self.state.epoch_length
self._init_iter = iteration
# restore rng state if in the middle
in_the_middle = self.state.iteration % self._dataloader_len > 0 if self._dataloader_len is not None else False
rng_states = getattr(self.state, "rng_states", None)
if rng_states is not None and in_the_middle:
_set_rng_states(rng_states)
setattr(self.state, "rng_states", None)
def _from_iteration(self, iteration: int) -> Iterator:
if self.state.dataloader is None:
raise RuntimeError(
"Internal error, self.state.dataloader is None. Please, file an issue if you encounter this error."
)
data = self.state.dataloader
if isinstance(data, DataLoader):
try:
# following is unsafe for IterableDatasets
iteration %= len(data.batch_sampler) # type: ignore[arg-type]
# Synchronize dataflow according to state.iteration
self._setup_seed()
if iteration > 0:
# batch sampler is ReproducibleBatchSampler
data.batch_sampler.start_iteration = iteration # type: ignore[union-attr]
return iter(data)
except TypeError as e:
# Probably we can do nothing with DataLoader built upon IterableDatasets
pass
self.logger.info("Resuming from iteration for provided data will fetch data until required iteration ...")
if hasattr(data, "__len__"):
iteration %= len(data) # type: ignore[arg-type]
        # Synchronize dataflow from the beginning
self._setup_seed(iteration=0)
data_iter = iter(data)
counter = 0
while counter < iteration:
try:
next(data_iter)
counter += 1
except StopIteration:
data_iter = iter(data)
return data_iter
def _setup_seed(self, _: Any = None, iter_counter: Optional[int] = None, iteration: Optional[int] = None) -> None:
if iter_counter is None:
le = self._dataloader_len if self._dataloader_len is not None else 1
elif not iter_counter > 0:
raise ValueError("iter_counter should be positive value")
else:
le = iter_counter
if iteration is None:
iteration = self.state.iteration
manual_seed(self.state.seed + iteration // le) # type: ignore[operator]
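# --- Hedged end-of-file sketch (editorial addition, not part of the original file) ---
# A minimal resume flow with DeterministicEngine: the "rng_states" saved by
# state_dict() are restored by _setup_engine when the run restarts, so the run can
# be continued with a synchronized dataflow. Names `step` and `data` are placeholders.
def _deterministic_resume_example(data: Any) -> None:
    def step(engine: Engine, batch: Any) -> Any:
        return batch  # stand-in update function
    engine = DeterministicEngine(step)
    engine.run(data, max_epochs=1)
    saved = engine.state_dict()  # includes "rng_states" via state_dict_user_keys
    resumed = DeterministicEngine(step)
    resumed.load_state_dict(saved)
    resumed.run(data, max_epochs=2)  # continues the run with a synchronized dataflow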
|
import numbers
import warnings
import weakref
from collections.abc import Sequence
from enum import Enum
from types import DynamicClassAttribute
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, TYPE_CHECKING, Union
from torch.utils.data import DataLoader
from ignite.engine.utils import _check_signature
if TYPE_CHECKING:
from ignite.engine.engine import Engine
__all__ = ["CallableEventWithFilter", "EventEnum", "Events", "State", "EventsList", "RemovableEventHandle"]
class CallableEventWithFilter:
"""Single Event containing a filter, specifying whether the event should
be run at the current event (if the event type is correct)
Args:
value: The actual enum value. Only needed for internal use. Do not touch!
event_filter: A function taking the engine and the current event value as input and returning a
            boolean to indicate whether this event should be executed. Defaults to None, which results in a
            function that always returns `True`.
name: The enum-name of the current object. Only needed for internal use. Do not touch!
"""
def __init__(self, value: str, event_filter: Optional[Callable] = None, name: Optional[str] = None) -> None:
self.filter = event_filter
if not hasattr(self, "_value_"):
self._value_ = value
if not hasattr(self, "_name_") and name is not None:
self._name_ = name
# copied to be compatible to enum
@DynamicClassAttribute
def name(self) -> str:
"""The name of the Enum member."""
return self._name_
@DynamicClassAttribute
def value(self) -> str:
"""The value of the Enum member."""
return self._value_
def __call__(
self,
event_filter: Optional[Callable] = None,
every: Optional[int] = None,
once: Optional[Union[int, List]] = None,
before: Optional[int] = None,
after: Optional[int] = None,
) -> "CallableEventWithFilter":
"""
Makes the event class callable and accepts either an arbitrary callable as filter
(which must take in the engine and current event value and return a boolean) or an every or once value
Args:
event_filter: a filter function to check if the event should be executed when
the event type was fired
every: a value specifying how often the event should be fired
once: a value or list of values specifying when the event should be fired (if only once)
            before: a value specifying the occurrence number before which the event should be fired
            after: a value specifying the occurrence number after which the event should be fired
Returns:
CallableEventWithFilter: A new event having the same value but a different filter function
"""
if (
sum(
(
event_filter is not None,
once is not None,
(every is not None or before is not None or after is not None),
)
)
!= 1
):
raise ValueError("Only one of the input arguments should be specified, except before, after and every")
if (event_filter is not None) and not callable(event_filter):
raise TypeError("Argument event_filter should be a callable")
if (every is not None) and not (isinstance(every, numbers.Integral) and every > 0):
raise ValueError("Argument every should be integer and greater than zero")
if once is not None:
c1 = isinstance(once, numbers.Integral) and once > 0
c2 = isinstance(once, Sequence) and len(once) > 0 and all(isinstance(e, int) and e > 0 for e in once)
if not (c1 or c2):
raise ValueError(
f"Argument once should either be a positive integer or a list of positive integers, got {once}"
)
if (before is not None) and not (isinstance(before, numbers.Integral) and before >= 0):
raise ValueError("Argument before should be integer and greater or equal to zero")
if (after is not None) and not (isinstance(after, numbers.Integral) and after >= 0):
raise ValueError("Argument after should be integer and greater or equal to zero")
if every is not None:
if every == 1:
# Just return the event itself
event_filter = None
else:
event_filter = self.every_event_filter(every)
if once is not None:
event_filter = self.once_event_filter([once] if isinstance(once, int) else once)
if before is not None or after is not None:
if every is not None:
event_filter = self.every_before_and_after_event_filter(every, before, after)
else:
event_filter = self.before_and_after_event_filter(before, after)
# check signature:
if event_filter is not None:
_check_signature(event_filter, "event_filter", "engine", "event")
return CallableEventWithFilter(self.value, event_filter, self.name)
@staticmethod
def every_event_filter(every: int) -> Callable:
"""A wrapper for every event filter."""
def wrapper(engine: "Engine", event: int) -> bool:
if event % every == 0:
return True
return False
return wrapper
@staticmethod
def once_event_filter(once: List) -> Callable:
"""A wrapper for once event filter."""
def wrapper(engine: "Engine", event: int) -> bool:
if event in once:
return True
return False
return wrapper
@staticmethod
def before_and_after_event_filter(before: Optional[int] = None, after: Optional[int] = None) -> Callable:
"""A wrapper for before and after event filter."""
before_: Union[int, float] = float("inf") if before is None else before
after_: int = 0 if after is None else after
def wrapper(engine: "Engine", event: int) -> bool:
if event > after_ and event < before_:
return True
return False
return wrapper
@staticmethod
def every_before_and_after_event_filter(
every: int, before: Optional[int] = None, after: Optional[int] = None
) -> Callable:
"""A wrapper which triggers for every `every` iterations after `after` and before `before`."""
before_: Union[int, float] = float("inf") if before is None else before
after_: int = 0 if after is None else after
def wrapper(engine: "Engine", event: int) -> bool:
if after_ < event < before_ and (event - after_ - 1) % every == 0:
return True
return False
return wrapper
@staticmethod
def default_event_filter(engine: "Engine", event: int) -> bool:
"""Default event filter. This method is is deprecated and will be removed. Please, use None instead"""
warnings.warn("Events.default_event_filter is deprecated and will be removed. Please, use None instead")
return True
def __repr__(self) -> str:
out = f"Events.{self.name}"
if self.filter is not None:
out += f"(filter={self.filter})"
return out
def __eq__(self, other: Any) -> bool:
if isinstance(other, CallableEventWithFilter):
return self.name == other.name
elif isinstance(other, str):
return self.name == other
else:
return NotImplemented
def __hash__(self) -> int:
return hash(self._name_)
def __or__(self, other: Any) -> "EventsList":
return EventsList() | self | other
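# --- Hedged illustration (editorial addition, not part of the original file) ---
# Which event counters pass the combined filter produced by
# Events.ITERATION_COMPLETED(every=5, after=8, before=25): counting restarts right
# after `after`, so the accepted events are 9, 14, 19 and 24.
def _combined_filter_example() -> List[int]:
    every, after, before = 5, 8, 25
    return [e for e in range(1, 30) if after < e < before and (e - after - 1) % every == 0]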
class EventEnum(CallableEventWithFilter, Enum):
"""Base class for all :class:`~ignite.engine.events.Events`. User defined custom events should also inherit
this class.
Examples:
Custom events based on the loss calculation and backward pass can be created as follows:
.. code-block:: python
from ignite.engine import EventEnum
class BackpropEvents(EventEnum):
BACKWARD_STARTED = 'backward_started'
BACKWARD_COMPLETED = 'backward_completed'
OPTIM_STEP_COMPLETED = 'optim_step_completed'
def update(engine, batch):
# ...
loss = criterion(y_pred, y)
engine.fire_event(BackpropEvents.BACKWARD_STARTED)
loss.backward()
engine.fire_event(BackpropEvents.BACKWARD_COMPLETED)
optimizer.step()
engine.fire_event(BackpropEvents.OPTIM_STEP_COMPLETED)
# ...
trainer = Engine(update)
trainer.register_events(*BackpropEvents)
@trainer.on(BackpropEvents.BACKWARD_STARTED)
def function_before_backprop(engine):
# ...
"""
def __new__(cls, value: str) -> "EventEnum":
obj = CallableEventWithFilter.__new__(cls)
obj._value_ = value
return obj
class Events(EventEnum):
"""Events that are fired by the :class:`~ignite.engine.engine.Engine` during execution. Built-in events:
- STARTED : triggered when engine's run is started
- EPOCH_STARTED : triggered when the epoch is started
- GET_BATCH_STARTED : triggered before next batch is fetched
- GET_BATCH_COMPLETED : triggered after the batch is fetched
- ITERATION_STARTED : triggered when an iteration is started
- ITERATION_COMPLETED : triggered when the iteration is ended
- DATALOADER_STOP_ITERATION : engine's specific event triggered when dataloader has no more data to provide
- EXCEPTION_RAISED : triggered when an exception is encountered
- TERMINATE_SINGLE_EPOCH : triggered when the run is about to end the current epoch,
after receiving a :meth:`~ignite.engine.engine.Engine.terminate_epoch()` or
:meth:`~ignite.engine.engine.Engine.terminate()` call.
- TERMINATE : triggered when the run is about to end completely,
after receiving :meth:`~ignite.engine.engine.Engine.terminate()` call.
- EPOCH_COMPLETED : triggered when the epoch is ended. Note that this is triggered even
when :meth:`~ignite.engine.engine.Engine.terminate_epoch()` is called.
- COMPLETED : triggered when engine's run is completed
The table below illustrates which events are triggered when various termination methods are called.
.. list-table::
:widths: 24 25 33 18
:header-rows: 1
* - Method
         - EPOCH_COMPLETED
- TERMINATE_SINGLE_EPOCH
- TERMINATE
* - no termination
- ✔
- ✗
- ✗
* - :meth:`~ignite.engine.engine.Engine.terminate_epoch()`
- ✔
- ✔
- ✗
* - :meth:`~ignite.engine.engine.Engine.terminate()`
- ✗
- ✔
- ✔
    Since v0.3.0, Events became more flexible and allow passing an event filter to the Engine:
.. code-block:: python
engine = Engine()
# a) custom event filter
def custom_event_filter(engine, event):
if event in [1, 2, 5, 10, 50, 100]:
return True
return False
@engine.on(Events.ITERATION_STARTED(event_filter=custom_event_filter))
def call_on_special_event(engine):
# do something on 1, 2, 5, 10, 50, 100 iterations
# b) "every" event filter
@engine.on(Events.ITERATION_STARTED(every=10))
def call_every(engine):
# do something every 10th iteration
# c) "once" event filter
@engine.on(Events.ITERATION_STARTED(once=50))
def call_once(engine):
# do something on 50th iteration
# d) "before" and "after" event filter
@engine.on(Events.EPOCH_STARTED(before=30, after=10))
def call_before(engine):
            # do something in epochs 11 to 29
# e) Mixing "every" and "before" / "after" event filters
@engine.on(Events.EPOCH_STARTED(every=5, before=25, after=8))
def call_every_itr_before_after(engine):
# do something on 9, 14, 19, 24 epochs
Event filter function `event_filter` accepts as input `engine` and `event` and should return True/False.
Argument `event` is the value of iteration or epoch, depending on which type of Events the function is passed.
Since v0.4.0, user can also combine events with `|`-operator:
.. code-block:: python
events = Events.STARTED | Events.COMPLETED | Events.ITERATION_STARTED(every=3)
engine = ...
@engine.on(events)
def call_on_events(engine):
# do something
Since v0.4.0, custom events defined by user should inherit from :class:`~ignite.engine.events.EventEnum` :
.. code-block:: python
class CustomEvents(EventEnum):
FOO_EVENT = "foo_event"
BAR_EVENT = "bar_event"
"""
EPOCH_STARTED = "epoch_started"
"""triggered when the epoch is started."""
EPOCH_COMPLETED = "epoch_completed"
"""Event attribute indicating epoch is ended."""
STARTED = "started"
"""triggered when engine's run is started."""
COMPLETED = "completed"
"""triggered when engine's run is completed"""
ITERATION_STARTED = "iteration_started"
"""triggered when an iteration is started."""
ITERATION_COMPLETED = "iteration_completed"
"""triggered when the iteration is ended."""
EXCEPTION_RAISED = "exception_raised"
"""triggered when an exception is encountered."""
GET_BATCH_STARTED = "get_batch_started"
"""triggered before next batch is fetched."""
GET_BATCH_COMPLETED = "get_batch_completed"
"""triggered after the batch is fetched."""
DATALOADER_STOP_ITERATION = "dataloader_stop_iteration"
"""engine's specific event triggered when dataloader has no more data to provide"""
TERMINATE = "terminate"
"""triggered when the run is about to end completely, after receiving terminate() call."""
TERMINATE_SINGLE_EPOCH = "terminate_single_epoch"
"""triggered when the run is about to end the current epoch,
after receiving a terminate_epoch() call."""
INTERRUPT = "interrupt"
"""triggered when the run is interrupted, after receiving interrupt() call."""
def __or__(self, other: Any) -> "EventsList":
return EventsList() | self | other
class EventsList:
"""Collection of events stacked by operator `__or__`.
.. code-block:: python
events = Events.STARTED | Events.COMPLETED
events |= Events.ITERATION_STARTED(every=3)
engine = ...
@engine.on(events)
def call_on_events(engine):
# do something
or
.. code-block:: python
@engine.on(Events.STARTED | Events.COMPLETED | Events.ITERATION_STARTED(every=3))
def call_on_events(engine):
# do something
"""
def __init__(self) -> None:
self._events: List[Union[Events, CallableEventWithFilter]] = []
def _append(self, event: Union[Events, CallableEventWithFilter]) -> None:
if not isinstance(event, (Events, CallableEventWithFilter)):
raise TypeError(f"Argument event should be Events or CallableEventWithFilter, got: {type(event)}")
self._events.append(event)
def __getitem__(self, item: int) -> Union[Events, CallableEventWithFilter]:
return self._events[item]
def __iter__(self) -> Iterator[Union[Events, CallableEventWithFilter]]:
return iter(self._events)
def __len__(self) -> int:
return len(self._events)
def __or__(self, other: Union[Events, CallableEventWithFilter]) -> "EventsList":
self._append(event=other)
return self
class State:
"""An object that is used to pass internal and user-defined state between event handlers. By default, state
contains the following attributes:
.. code-block:: python
state.iteration # 1-based, the first iteration is 1
state.epoch # 1-based, the first epoch is 1
state.seed # seed to set at each epoch
state.dataloader # data passed to engine
state.epoch_length # optional length of an epoch
state.max_epochs # number of epochs to run
state.max_iters # number of iterations to run
state.batch # batch passed to `process_function`
state.output # output of `process_function` after a single iteration
state.metrics # dictionary with defined metrics if any
state.times # dictionary with total and per-epoch times fetched on
# keys: Events.EPOCH_COMPLETED.name and Events.COMPLETED.name
Args:
kwargs: keyword arguments to be defined as State attributes.
"""
event_to_attr: Dict[Union[str, "Events", "CallableEventWithFilter"], str] = {
Events.GET_BATCH_STARTED: "iteration",
Events.GET_BATCH_COMPLETED: "iteration",
Events.ITERATION_STARTED: "iteration",
Events.ITERATION_COMPLETED: "iteration",
Events.EPOCH_STARTED: "epoch",
Events.EPOCH_COMPLETED: "epoch",
Events.STARTED: "epoch",
Events.COMPLETED: "epoch",
}
def __init__(self, **kwargs: Any) -> None:
self.iteration = 0
self.epoch = 0
self.epoch_length: Optional[int] = None
self.max_epochs: Optional[int] = None
self.max_iters: Optional[int] = None
self.output: Optional[int] = None
self.batch: Optional[int] = None
self.metrics: Dict[str, Any] = {}
self.dataloader: Optional[Union[DataLoader, Iterable[Any]]] = None
self.seed: Optional[int] = None
self.times: Dict[str, Optional[float]] = {
Events.EPOCH_COMPLETED.name: None,
Events.COMPLETED.name: None,
}
for k, v in kwargs.items():
setattr(self, k, v)
self._update_attrs()
def _update_attrs(self) -> None:
for value in self.event_to_attr.values():
if not hasattr(self, value):
setattr(self, value, 0)
def get_event_attrib_value(self, event_name: Union[str, Events, CallableEventWithFilter]) -> int:
"""Get the value of Event attribute with given `event_name`."""
if event_name not in State.event_to_attr:
raise RuntimeError(f"Unknown event name '{event_name}'")
return getattr(self, State.event_to_attr[event_name])
def __repr__(self) -> str:
s = "State:\n"
for attr, value in self.__dict__.items():
if not isinstance(value, (numbers.Number, str)):
value = type(value)
s += f"\t{attr}: {value}\n"
return s
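# --- Hedged illustration (editorial addition, not part of the original file) ---
# How a handler typically reads progress from State: get_event_attrib_value maps an
# event to its counter via State.event_to_attr ("iteration" or "epoch").
def _state_progress_example(engine: "Engine") -> str:
    it = engine.state.get_event_attrib_value(Events.ITERATION_COMPLETED)
    ep = engine.state.get_event_attrib_value(Events.EPOCH_COMPLETED)
    return f"epoch {ep}, iteration {it}"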
class RemovableEventHandle:
"""A weakref handle to remove a registered event.
A handle that may be used to remove a registered event handler via the
remove method, with-statement, or context manager protocol. Returned from
:meth:`~ignite.engine.engine.Engine.add_event_handler`.
Args:
event_name: Registered event name.
handler: Registered event handler, stored as weakref.
engine: Target engine, stored as weakref.
Examples:
.. code-block:: python
engine = Engine()
def print_epoch(engine):
print(f"Epoch: {engine.state.epoch}")
with engine.add_event_handler(Events.EPOCH_COMPLETED, print_epoch):
# print_epoch handler registered for a single run
engine.run(data)
# print_epoch handler is now unregistered
"""
def __init__(
self, event_name: Union[CallableEventWithFilter, Enum, EventsList, Events], handler: Callable, engine: "Engine"
) -> None:
self.event_name = event_name
self.handler = weakref.ref(handler)
self.engine = weakref.ref(engine)
def remove(self) -> None:
"""Remove handler from engine."""
handler = self.handler()
engine = self.engine()
if handler is None or engine is None:
return
if hasattr(handler, "_parent"):
handler = handler._parent()
if handler is None:
raise RuntimeError(
"Internal error! Please fill an issue on https://github.com/pytorch/ignite/issues "
"if encounter this error. Thank you!"
)
if isinstance(self.event_name, EventsList):
for e in self.event_name:
if engine.has_event_handler(handler, e):
engine.remove_event_handler(handler, e)
else:
if engine.has_event_handler(handler, self.event_name):
engine.remove_event_handler(handler, self.event_name)
def __enter__(self) -> "RemovableEventHandle":
return self
def __exit__(self, *args: Any, **kwargs: Any) -> None:
self.remove()
|
from collections.abc import Mapping
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union
import torch
import ignite.distributed as idist
from ignite.engine.deterministic import DeterministicEngine
from ignite.engine.engine import Engine
from ignite.engine.events import CallableEventWithFilter, EventEnum, Events, EventsList, RemovableEventHandle, State
from ignite.metrics import Metric
from ignite.utils import convert_tensor
__all__ = [
"State",
"create_supervised_trainer",
"create_supervised_evaluator",
"Engine",
"DeterministicEngine",
"Events",
"EventsList",
"EventEnum",
"CallableEventWithFilter",
"RemovableEventHandle",
"supervised_training_step",
"supervised_training_step_amp",
"supervised_training_step_apex",
"supervised_training_step_tpu",
"supervised_evaluation_step",
"supervised_evaluation_step_amp",
]
def _prepare_batch(
batch: Sequence[torch.Tensor], device: Optional[Union[str, torch.device]] = None, non_blocking: bool = False
) -> Tuple[Union[torch.Tensor, Sequence, Mapping, str, bytes], ...]:
"""Prepare batch for training or evaluation: pass to a device with options."""
x, y = batch
return (
convert_tensor(x, device=device, non_blocking=non_blocking),
convert_tensor(y, device=device, non_blocking=non_blocking),
)
def supervised_training_step(
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
loss_fn: Union[Callable, torch.nn.Module],
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
model_transform: Callable[[Any], Any] = lambda output: output,
output_transform: Callable[[Any, Any, Any, torch.Tensor], Any] = lambda x, y, y_pred, loss: loss.item(),
gradient_accumulation_steps: int = 1,
) -> Callable:
"""Factory function for supervised training.
Args:
model: the model to train.
optimizer: the optimizer to use.
loss_fn: the loss function to use.
device: device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
Device can be CPU, GPU.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
        model_transform: function that receives the output from the model and converts it into the form required
by the loss function
output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
gradient_accumulation_steps: Number of steps the gradients should be accumulated across.
(default: 1 (means no gradient accumulation))
Returns:
Callable: update function.
Examples:
.. code-block:: python
from ignite.engine import Engine, supervised_training_step
model = ...
optimizer = ...
loss_fn = ...
update_fn = supervised_training_step(model, optimizer, loss_fn, 'cuda')
trainer = Engine(update_fn)
.. versionadded:: 0.4.5
.. versionchanged:: 0.4.7
Added Gradient Accumulation.
.. versionchanged:: 0.4.11
Added `model_transform` to transform model's output
"""
if gradient_accumulation_steps <= 0:
raise ValueError(
"Gradient_accumulation_steps must be strictly positive. "
"No gradient accumulation if the value set to one (default)."
)
def update(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
if (engine.state.iteration - 1) % gradient_accumulation_steps == 0:
optimizer.zero_grad()
model.train()
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
output = model(x)
y_pred = model_transform(output)
loss = loss_fn(y_pred, y)
if gradient_accumulation_steps > 1:
loss = loss / gradient_accumulation_steps
loss.backward()
if engine.state.iteration % gradient_accumulation_steps == 0:
optimizer.step()
return output_transform(x, y, y_pred, loss * gradient_accumulation_steps)
return update
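# --- Hedged usage sketch (editorial addition, not part of the original file) ---
# Gradient accumulation with the factory above: with gradient_accumulation_steps=4 the
# loss is divided by 4, backward() runs on every iteration and optimizer.step() /
# zero_grad() only on every 4th one, while the reported output is the un-scaled loss.
# The device string is an assumption.
def _accumulating_trainer_example(
    model: torch.nn.Module, optimizer: torch.optim.Optimizer, loss_fn: Callable
) -> Engine:
    update_fn = supervised_training_step(
        model, optimizer, loss_fn, device="cuda", gradient_accumulation_steps=4
    )
    return Engine(update_fn)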
def supervised_training_step_amp(
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
loss_fn: Union[Callable, torch.nn.Module],
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
model_transform: Callable[[Any], Any] = lambda output: output,
output_transform: Callable[[Any, Any, Any, torch.Tensor], Any] = lambda x, y, y_pred, loss: loss.item(),
scaler: Optional["torch.cuda.amp.GradScaler"] = None,
gradient_accumulation_steps: int = 1,
) -> Callable:
"""Factory function for supervised training using ``torch.cuda.amp``.
Args:
model: the model to train.
optimizer: the optimizer to use.
loss_fn: the loss function to use.
device: device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
Device can be CPU, GPU.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
        model_transform: function that receives the output from the model and converts it into the form required
by the loss function
output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
scaler: GradScaler instance for gradient scaling. (default: None)
gradient_accumulation_steps: Number of steps the gradients should be accumulated across.
(default: 1 (means no gradient accumulation))
Returns:
Callable: update function
Examples:
.. code-block:: python
from ignite.engine import Engine, supervised_training_step_amp
model = ...
optimizer = ...
loss_fn = ...
scaler = torch.cuda.amp.GradScaler(2**10)
update_fn = supervised_training_step_amp(model, optimizer, loss_fn, 'cuda', scaler=scaler)
trainer = Engine(update_fn)
.. versionadded:: 0.4.5
.. versionchanged:: 0.4.7
Added Gradient Accumulation.
.. versionchanged:: 0.4.11
Added `model_transform` to transform model's output
"""
try:
from torch.cuda.amp import autocast
except ImportError:
raise ImportError("Please install torch>=1.6.0 to use amp_mode='amp'.")
if gradient_accumulation_steps <= 0:
raise ValueError(
"Gradient_accumulation_steps must be strictly positive. "
"No gradient accumulation if the value set to one (default)."
)
def update(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
if (engine.state.iteration - 1) % gradient_accumulation_steps == 0:
optimizer.zero_grad()
model.train()
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
with autocast(enabled=True):
output = model(x)
y_pred = model_transform(output)
loss = loss_fn(y_pred, y)
if gradient_accumulation_steps > 1:
loss = loss / gradient_accumulation_steps
if scaler:
scaler.scale(loss).backward()
if engine.state.iteration % gradient_accumulation_steps == 0:
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
if engine.state.iteration % gradient_accumulation_steps == 0:
optimizer.step()
return output_transform(x, y, y_pred, loss * gradient_accumulation_steps)
return update
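# --- Hedged usage sketch (editorial addition, not part of the original file) ---
# The amp step above only calls scaler.scale()/step()/update() when a GradScaler is
# passed; otherwise it falls back to plain backward()/step() under autocast. A CUDA
# device and torch>=1.6.0 are assumed here.
def _amp_step_example(
    model: torch.nn.Module, optimizer: torch.optim.Optimizer, loss_fn: Callable
) -> Callable:
    scaler = torch.cuda.amp.GradScaler()
    return supervised_training_step_amp(model, optimizer, loss_fn, device="cuda", scaler=scaler)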
def supervised_training_step_apex(
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
loss_fn: Union[Callable, torch.nn.Module],
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
model_transform: Callable[[Any], Any] = lambda output: output,
output_transform: Callable[[Any, Any, Any, torch.Tensor], Any] = lambda x, y, y_pred, loss: loss.item(),
gradient_accumulation_steps: int = 1,
) -> Callable:
"""Factory function for supervised training using apex.
Args:
model: the model to train.
optimizer: the optimizer to use.
loss_fn: the loss function to use.
device: device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
Device can be CPU, GPU.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
        model_transform: function that receives the output from the model and converts it into the form required
by the loss function
output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
gradient_accumulation_steps: Number of steps the gradients should be accumulated across.
(default: 1 (means no gradient accumulation))
Returns:
Callable: update function.
Examples:
.. code-block:: python
from ignite.engine import Engine, supervised_training_step_apex
model = ...
optimizer = ...
loss_fn = ...
update_fn = supervised_training_step_apex(model, optimizer, loss_fn, 'cuda')
trainer = Engine(update_fn)
.. versionadded:: 0.4.5
.. versionchanged:: 0.4.7
Added Gradient Accumulation.
.. versionchanged:: 0.4.11
Added `model_transform` to transform model's output
"""
try:
from apex import amp as apex_amp
except ModuleNotFoundError:
raise ModuleNotFoundError("Please install apex from https://github.com/nvidia/apex to use amp_mode='apex'.")
if gradient_accumulation_steps <= 0:
raise ValueError(
"Gradient_accumulation_steps must be strictly positive. "
"No gradient accumulation if the value set to one (default)."
)
def update(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
if (engine.state.iteration - 1) % gradient_accumulation_steps == 0:
optimizer.zero_grad()
model.train()
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
output = model(x)
y_pred = model_transform(output)
loss = loss_fn(y_pred, y)
if gradient_accumulation_steps > 1:
loss = loss / gradient_accumulation_steps
with apex_amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
if engine.state.iteration % gradient_accumulation_steps == 0:
optimizer.step()
return output_transform(x, y, y_pred, loss * gradient_accumulation_steps)
return update
def supervised_training_step_tpu(
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
loss_fn: Union[Callable, torch.nn.Module],
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
model_transform: Callable[[Any], Any] = lambda output: output,
output_transform: Callable[[Any, Any, Any, torch.Tensor], Any] = lambda x, y, y_pred, loss: loss.item(),
gradient_accumulation_steps: int = 1,
) -> Callable:
"""Factory function for supervised training using ``torch_xla``.
Args:
model: the model to train.
optimizer: the optimizer to use.
loss_fn: the loss function to use.
device: device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
Device can be CPU, TPU.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
        model_transform: function that receives the output from the model and converts it into the form required
by the loss function
output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
gradient_accumulation_steps: Number of steps the gradients should be accumulated across.
(default: 1 (means no gradient accumulation))
Returns:
Callable: update function.
Examples:
.. code-block:: python
from ignite.engine import Engine, supervised_training_step_tpu
model = ...
optimizer = ...
loss_fn = ...
update_fn = supervised_training_step_tpu(model, optimizer, loss_fn, 'xla')
trainer = Engine(update_fn)
.. versionadded:: 0.4.5
.. versionchanged:: 0.4.7
Added Gradient Accumulation argument for all supervised training methods.
.. versionchanged:: 0.4.11
Added `model_transform` to transform model's output
"""
try:
import torch_xla.core.xla_model as xm
except ModuleNotFoundError:
raise ModuleNotFoundError("torch_xla cannot be imported, please install PyTorch XLA.")
if gradient_accumulation_steps <= 0:
raise ValueError(
"Gradient_accumulation_steps must be strictly positive. "
"No gradient accumulation if the value set to one (default)."
)
def update(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
if (engine.state.iteration - 1) % gradient_accumulation_steps == 0:
optimizer.zero_grad()
model.train()
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
output = model(x)
y_pred = model_transform(output)
loss = loss_fn(y_pred, y)
if gradient_accumulation_steps > 1:
loss = loss / gradient_accumulation_steps
loss.backward()
if engine.state.iteration % gradient_accumulation_steps == 0:
xm.optimizer_step(optimizer, barrier=True)
return output_transform(x, y, y_pred, loss * gradient_accumulation_steps)
return update
def _check_arg(
on_tpu: bool, amp_mode: Optional[str], scaler: Optional[Union[bool, "torch.cuda.amp.GradScaler"]]
) -> Tuple[Optional[str], Optional["torch.cuda.amp.GradScaler"]]:
"""Checking tpu, amp and GradScaler instance combinations."""
if on_tpu and not idist.has_xla_support:
raise RuntimeError("In order to run on TPU, please install PyTorch XLA")
if amp_mode and on_tpu:
raise ValueError("amp_mode cannot be used with xla device. Consider using amp_mode=None or device='cuda'.")
if scaler:
if amp_mode != "amp":
raise ValueError(f"scaler argument is {scaler}, but amp_mode is {amp_mode}. Consider using amp_mode='amp'.")
elif amp_mode == "amp" and isinstance(scaler, bool):
try:
from torch.cuda.amp import GradScaler
except ImportError:
raise ImportError("Please install torch>=1.6.0 to use scaler argument.")
scaler = GradScaler(enabled=True)
if on_tpu:
return "tpu", None
elif scaler and amp_mode == "amp":
return amp_mode, scaler # type: ignore[return-value]
else:
return amp_mode, None
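# --- Hedged illustration (editorial addition, not part of the original file) ---
# Expected _check_arg outcomes, assuming torch>=1.6.0 so that GradScaler is importable:
# scaler=True together with amp_mode="amp" yields a freshly created default GradScaler,
# while plain fp32 settings pass through unchanged.
def _check_arg_examples() -> None:
    mode, scaler = _check_arg(False, "amp", True)
    assert mode == "amp" and scaler is not None  # default GradScaler was created
    mode, scaler = _check_arg(False, None, False)
    assert mode is None and scaler is None  # plain fp32 path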
def create_supervised_trainer(
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
loss_fn: Union[Callable, torch.nn.Module],
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
model_transform: Callable[[Any], Any] = lambda output: output,
output_transform: Callable[[Any, Any, Any, torch.Tensor], Any] = lambda x, y, y_pred, loss: loss.item(),
deterministic: bool = False,
amp_mode: Optional[str] = None,
scaler: Union[bool, "torch.cuda.amp.GradScaler"] = False,
gradient_accumulation_steps: int = 1,
) -> Engine:
"""Factory function for creating a trainer for supervised models.
Args:
model: the model to train.
optimizer: the optimizer to use.
loss_fn: the loss function to use.
device: device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
Device can be CPU, GPU or TPU.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
        model_transform: function that receives the output from the model and converts it into the form required
by the loss function
output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
deterministic: if True, returns deterministic engine of type
:class:`~ignite.engine.deterministic.DeterministicEngine`, otherwise :class:`~ignite.engine.engine.Engine`
(default: False).
amp_mode: can be ``amp`` or ``apex``, model and optimizer will be casted to float16 using
`torch.cuda.amp <https://pytorch.org/docs/stable/amp.html>`_ for ``amp`` and
using `apex <https://nvidia.github.io/apex>`_ for ``apex``. (default: None)
scaler: GradScaler instance for gradient scaling if `torch>=1.6.0`
and ``amp_mode`` is ``amp``. If ``amp_mode`` is ``apex``, this argument will be ignored.
If True, will create default GradScaler. If GradScaler instance is passed, it will be used instead.
(default: False)
gradient_accumulation_steps: Number of steps the gradients should be accumulated across.
(default: 1 (means no gradient accumulation))
Returns:
a trainer engine with supervised update function.
Examples:
Create a trainer
.. code-block:: python
from ignite.engine import create_supervised_trainer
from ignite.utils import convert_tensor
from ignite.contrib.handlers.tqdm_logger import ProgressBar
model = ...
loss = ...
optimizer = ...
dataloader = ...
def prepare_batch_fn(batch, device, non_blocking):
x = ... # get x from batch
y = ... # get y from batch
                # return a tuple of (x, y) that can be directly run as
# `loss_fn(model(x), y)`
return (
convert_tensor(x, device, non_blocking),
convert_tensor(y, device, non_blocking)
)
def output_transform_fn(x, y, y_pred, loss):
                # returning only the loss is actually the default behavior for
                # a trainer engine, but you can return anything you want
return loss.item()
trainer = create_supervised_trainer(
model,
optimizer,
loss,
prepare_batch=prepare_batch_fn,
output_transform=output_transform_fn
)
pbar = ProgressBar()
pbar.attach(trainer, output_transform=lambda x: {"loss": x})
trainer.run(dataloader, max_epochs=5)
Note:
        If ``scaler`` is True, a GradScaler instance will be created internally; the trainer state will then have
        an attribute named ``scaler`` holding that instance, which can be used for saving and loading.
Note:
`engine.state.output` for this engine is defined by `output_transform` parameter and is the loss
of the processed batch by default.
.. warning::
The internal use of `device` has changed.
`device` will now *only* be used to move the input data to the correct device.
The `model` should be moved by the user before creating an optimizer.
For more information see:
- `PyTorch Documentation <https://pytorch.org/docs/stable/optim.html#constructing-it>`_
- `PyTorch's Explanation <https://github.com/pytorch/pytorch/issues/7844#issuecomment-503713840>`_
.. warning::
If ``amp_mode='apex'`` , the model(s) and optimizer(s) must be initialized beforehand
since ``amp.initialize`` should be called after you have finished constructing your model(s)
and optimizer(s), but before you send your model through any DistributedDataParallel wrapper.
See more: https://nvidia.github.io/apex/amp.html#module-apex.amp
.. versionchanged:: 0.4.5
- Added ``amp_mode`` argument for automatic mixed precision.
- Added ``scaler`` argument for gradient scaling.
.. versionchanged:: 0.4.7
Added Gradient Accumulation argument for all supervised training methods.
.. versionchanged:: 0.4.11
Added ``model_transform`` to transform model's output
"""
device_type = device.type if isinstance(device, torch.device) else device
on_tpu = "xla" in device_type if device_type is not None else False
mode, _scaler = _check_arg(on_tpu, amp_mode, scaler)
if mode == "amp":
_update = supervised_training_step_amp(
model,
optimizer,
loss_fn,
device,
non_blocking,
prepare_batch,
model_transform,
output_transform,
_scaler,
gradient_accumulation_steps,
)
elif mode == "apex":
_update = supervised_training_step_apex(
model,
optimizer,
loss_fn,
device,
non_blocking,
prepare_batch,
model_transform,
output_transform,
gradient_accumulation_steps,
)
elif mode == "tpu":
_update = supervised_training_step_tpu(
model,
optimizer,
loss_fn,
device,
non_blocking,
prepare_batch,
model_transform,
output_transform,
gradient_accumulation_steps,
)
else:
_update = supervised_training_step(
model,
optimizer,
loss_fn,
device,
non_blocking,
prepare_batch,
model_transform,
output_transform,
gradient_accumulation_steps,
)
trainer = Engine(_update) if not deterministic else DeterministicEngine(_update)
if _scaler and scaler and isinstance(scaler, bool):
trainer.state.scaler = _scaler # type: ignore[attr-defined]
return trainer
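# --- Hedged usage sketch (editorial addition, not part of the original file) ---
# amp_mode="amp" together with scaler=True makes the trainer create a default GradScaler
# internally and expose it as trainer.state.scaler, so it can be added to a Checkpoint's
# to_save mapping. The device string is an assumption.
def _amp_trainer_example(
    model: torch.nn.Module, optimizer: torch.optim.Optimizer, loss_fn: Callable
) -> Engine:
    return create_supervised_trainer(
        model, optimizer, loss_fn, device="cuda", amp_mode="amp", scaler=True
    )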
def supervised_evaluation_step(
model: torch.nn.Module,
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
model_transform: Callable[[Any], Any] = lambda output: output,
output_transform: Callable[[Any, Any, Any], Any] = lambda x, y, y_pred: (y_pred, y),
) -> Callable:
"""
Factory function for supervised evaluation.
Args:
        model: the model to evaluate.
device: device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
        model_transform: function that receives the output from the model and converts it into the predictions:
``y_pred = model_transform(model(x))``.
output_transform: function that receives 'x', 'y', 'y_pred' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)` which fits
output expected by metrics. If you change it you should use `output_transform` in metrics.
Returns:
Inference function.
Note:
`engine.state.output` for this engine is defined by `output_transform` parameter and is
a tuple of `(batch_pred, batch_y)` by default.
.. warning::
The internal use of `device` has changed.
`device` will now *only* be used to move the input data to the correct device.
The `model` should be moved by the user before creating an optimizer.
.. versionadded:: 0.4.5
.. versionchanged:: 0.4.12
Added ``model_transform`` to transform model's output
"""
def evaluate_step(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
model.eval()
with torch.no_grad():
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
output = model(x)
y_pred = model_transform(output)
return output_transform(x, y, y_pred)
return evaluate_step
def supervised_evaluation_step_amp(
model: torch.nn.Module,
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
model_transform: Callable[[Any], Any] = lambda output: output,
output_transform: Callable[[Any, Any, Any], Any] = lambda x, y, y_pred: (y_pred, y),
) -> Callable:
"""
Factory function for supervised evaluation using ``torch.cuda.amp``.
Args:
        model: the model to evaluate.
device: device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
        model_transform: function that receives the output from the model and converts it into the predictions:
``y_pred = model_transform(model(x))``.
output_transform: function that receives 'x', 'y', 'y_pred' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)` which fits
output expected by metrics. If you change it you should use `output_transform` in metrics.
Returns:
Inference function.
Note:
`engine.state.output` for this engine is defined by `output_transform` parameter and is
a tuple of `(batch_pred, batch_y)` by default.
.. warning::
The internal use of `device` has changed.
`device` will now *only* be used to move the input data to the correct device.
The `model` should be moved by the user before creating an optimizer.
.. versionadded:: 0.4.5
.. versionchanged:: 0.4.12
Added ``model_transform`` to transform model's output
"""
try:
from torch.cuda.amp import autocast
except ImportError:
raise ImportError("Please install torch>=1.6.0 to use amp_mode='amp'.")
def evaluate_step(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
model.eval()
with torch.no_grad():
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
with autocast(enabled=True):
output = model(x)
y_pred = model_transform(output)
return output_transform(x, y, y_pred)
return evaluate_step
def create_supervised_evaluator(
model: torch.nn.Module,
metrics: Optional[Dict[str, Metric]] = None,
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
model_transform: Callable[[Any], Any] = lambda output: output,
output_transform: Callable[[Any, Any, Any], Any] = lambda x, y, y_pred: (y_pred, y),
amp_mode: Optional[str] = None,
) -> Engine:
"""
Factory function for creating an evaluator for supervised models.
Args:
        model: the model to evaluate.
metrics: a map of metric names to Metrics.
device: device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
        model_transform: function that receives the output from the model and converts it into the predictions:
``y_pred = model_transform(model(x))``.
output_transform: function that receives 'x', 'y', 'y_pred' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)` which fits
output expected by metrics. If you change it you should use `output_transform` in metrics.
amp_mode: can be ``amp``, model will be casted to float16 using
`torch.cuda.amp <https://pytorch.org/docs/stable/amp.html>`_
Returns:
an evaluator engine with supervised inference function.
Note:
`engine.state.output` for this engine is defined by `output_transform` parameter and is
a tuple of `(batch_pred, batch_y)` by default.
.. warning::
The internal use of `device` has changed.
`device` will now *only* be used to move the input data to the correct device.
The `model` should be moved by the user before creating an optimizer.
For more information see:
- `PyTorch Documentation <https://pytorch.org/docs/stable/optim.html#constructing-it>`_
- `PyTorch's Explanation <https://github.com/pytorch/pytorch/issues/7844#issuecomment-503713840>`_
.. versionchanged:: 0.4.5
Added ``amp_mode`` argument for automatic mixed precision.
.. versionchanged:: 0.4.12
Added ``model_transform`` to transform model's output
"""
device_type = device.type if isinstance(device, torch.device) else device
on_tpu = "xla" in device_type if device_type is not None else False
mode, _ = _check_arg(on_tpu, amp_mode, None)
metrics = metrics or {}
if mode == "amp":
evaluate_step = supervised_evaluation_step_amp(
model,
device,
non_blocking=non_blocking,
prepare_batch=prepare_batch,
model_transform=model_transform,
output_transform=output_transform,
)
else:
evaluate_step = supervised_evaluation_step(
model,
device,
non_blocking=non_blocking,
prepare_batch=prepare_batch,
model_transform=model_transform,
output_transform=output_transform,
)
evaluator = Engine(evaluate_step)
for name, metric in metrics.items():
metric.attach(evaluator, name)
return evaluator
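# --- Hedged usage sketch (editorial addition, not part of the original file) ---
# The evaluator's default output (y_pred, y) is exactly what ignite metrics such as
# Accuracy expect, so no extra output_transform is needed here. `val_loader` and the
# device string are assumptions.
def _evaluator_example(model: torch.nn.Module, val_loader: Any) -> Dict[str, Any]:
    from ignite.metrics import Accuracy
    evaluator = create_supervised_evaluator(model, metrics={"accuracy": Accuracy()}, device="cuda")
    state = evaluator.run(val_loader)
    return state.metrics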
|
import functools
import logging
import math
import time
import warnings
import weakref
from collections import defaultdict, OrderedDict
from collections.abc import Mapping
from typing import Any, Callable, Dict, Generator, Iterable, Iterator, List, Optional, Tuple, Union
from torch.utils.data import DataLoader
from ignite.base import Serializable
from ignite.engine.events import CallableEventWithFilter, EventEnum, Events, EventsList, RemovableEventHandle, State
from ignite.engine.utils import _check_signature, _to_hours_mins_secs
__all__ = ["Engine"]
class Engine(Serializable):
"""Runs a given ``process_function`` over each batch of a dataset, emitting events as it goes.
Args:
process_function: A function receiving a handle to the engine and the current batch
in each iteration, and returns data to be stored in the engine's state.
Attributes:
state: object that is used to pass internal and user-defined state between event handlers.
It is created with the engine and its attributes (e.g. ``state.iteration``, ``state.epoch`` etc) are reset
on every :meth:`~ignite.engine.engine.Engine.run`.
last_event_name: last event name triggered by the engine.
Note:
:class:`~ignite.engine.engine.Engine` implementation has changed in v0.4.10 with "interrupt/resume" feature.
Engine may behave differently on certain corner cases compared to the one from v0.4.9 and before.
In such case, you can set ``Engine.interrupt_resume_enabled = False`` to restore previous behaviour.
Examples:
Create a basic trainer
.. code-block:: python
model = ...
model = model.cuda()
            optimizer = ...
criterion = ...
def train_step(engine, batch):
model.train()
inputs, targets = batch[0].cuda(), batch[1].cuda()
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
return loss.item()
            trainer = Engine(train_step)
@trainer.on(Events.ITERATION_COMPLETED(every=100))
def log_training(engine):
batch_loss = engine.state.output
lr = optimizer.param_groups[0]['lr']
e = engine.state.epoch
n = engine.state.max_epochs
i = engine.state.iteration
print(f"Epoch {e}/{n} : {i} - batch loss: {batch_loss}, lr: {lr}")
trainer.run(data_loader, max_epochs=5)
> Epoch 1/5 : 100 - batch loss: 0.10874069479016124, lr: 0.01
> ...
> Epoch 2/5 : 1700 - batch loss: 0.4217900575859437, lr: 0.01
Create a basic evaluator to compute metrics
.. code-block:: python
from ignite.metrics import Accuracy
            def predict_on_batch(engine, batch):
model.eval()
with torch.no_grad():
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
y_pred = model(x)
return y_pred, y
evaluator = Engine(predict_on_batch)
Accuracy().attach(evaluator, "val_acc")
evaluator.run(val_dataloader)
Compute image mean/std on training dataset
.. code-block:: python
from ignite.metrics import Average
def compute_mean_std(engine, batch):
b, c, *_ = batch['image'].shape
data = batch['image'].reshape(b, c, -1).to(dtype=torch.float64)
mean = torch.mean(data, dim=-1).sum(dim=0)
mean2 = torch.mean(data ** 2, dim=-1).sum(dim=0)
return {"mean": mean, "mean^2": mean2}
compute_engine = Engine(compute_mean_std)
img_mean = Average(output_transform=lambda output: output['mean'])
img_mean.attach(compute_engine, 'mean')
img_mean2 = Average(output_transform=lambda output: output['mean^2'])
img_mean2.attach(compute_engine, 'mean2')
state = compute_engine.run(train_loader)
state.metrics['std'] = torch.sqrt(state.metrics['mean2'] - state.metrics['mean'] ** 2)
mean = state.metrics['mean'].tolist()
std = state.metrics['std'].tolist()
Resume engine's run from a state. User can load a `state_dict` and run the engine starting from the loaded state:
.. code-block:: python
# Restore from an epoch
state_dict = {"epoch": 3, "max_epochs": 100, "epoch_length": len(data_loader)}
# or an iteration
# state_dict = {"iteration": 500, "max_epochs": 100, "epoch_length": len(data_loader)}
trainer = Engine(...)
trainer.load_state_dict(state_dict)
trainer.run(data)
"""
_state_dict_all_req_keys = ("epoch_length", "max_epochs")
_state_dict_one_of_opt_keys = ("iteration", "epoch")
# Flag to disable engine._internal_run as generator feature for BC
interrupt_resume_enabled = True
def __init__(self, process_function: Callable[["Engine", Any], Any]):
self._event_handlers: Dict[Any, List] = defaultdict(list)
self.logger = logging.getLogger(__name__ + "." + self.__class__.__name__)
self._process_function = process_function
self.last_event_name: Optional[Events] = None
self.should_terminate = False
self.should_terminate_single_epoch = False
self.should_interrupt = False
self.state = State()
self._state_dict_user_keys: List[str] = []
self._allowed_events: List[EventEnum] = []
self._dataloader_iter: Optional[Iterator[Any]] = None
self._init_iter: Optional[int] = None
self.register_events(*Events)
if self._process_function is None:
raise ValueError("Engine must be given a processing function in order to run.")
_check_signature(process_function, "process_function", self, None)
# generator provided by self._internal_run_as_gen
self._internal_run_generator: Optional[Generator] = None
def register_events(
self, *event_names: Union[List[str], List[EventEnum]], event_to_attr: Optional[dict] = None
) -> None:
"""Add events that can be fired.
Registering an event will let the user trigger these events at any point.
This opens the door to make the :meth:`~ignite.engine.engine.Engine.run` loop even more
configurable.
By default, the events from :class:`~ignite.engine.events.Events` are registered.
Args:
event_names: Defines the name of the event being supported. New events can be a str
or an object derived from :class:`~ignite.engine.events.EventEnum`. See example below.
event_to_attr: A dictionary to map an event to a state attribute.
Examples:
.. code-block:: python
from ignite.engine import Engine, Events, EventEnum
class CustomEvents(EventEnum):
FOO_EVENT = "foo_event"
BAR_EVENT = "bar_event"
def process_function(e, batch):
# ...
trainer.fire_event("bwd_event")
loss.backward()
# ...
trainer.fire_event("opt_event")
optimizer.step()
trainer = Engine(process_function)
trainer.register_events(*CustomEvents)
trainer.register_events("bwd_event", "opt_event")
@trainer.on(Events.EPOCH_COMPLETED)
def trigger_custom_event():
if required(...):
trainer.fire_event(CustomEvents.FOO_EVENT)
else:
trainer.fire_event(CustomEvents.BAR_EVENT)
@trainer.on(CustomEvents.FOO_EVENT)
def do_foo_op():
# ...
@trainer.on(CustomEvents.BAR_EVENT)
def do_bar_op():
# ...
Example with State Attribute:
.. code-block:: python
from enum import Enum
from ignite.engine import Engine, EventEnum
class TBPTT_Events(EventEnum):
TIME_ITERATION_STARTED = "time_iteration_started"
TIME_ITERATION_COMPLETED = "time_iteration_completed"
TBPTT_event_to_attr = {
TBPTT_Events.TIME_ITERATION_STARTED: 'time_iteration',
TBPTT_Events.TIME_ITERATION_COMPLETED: 'time_iteration'
}
engine = Engine(process_function)
engine.register_events(*TBPTT_Events, event_to_attr=TBPTT_event_to_attr)
engine.run(data)
# engine.state contains an attribute time_iteration, which can be accessed
# using engine.state.time_iteration
"""
if not (event_to_attr is None or isinstance(event_to_attr, dict)):
raise ValueError(f"Expected event_to_attr to be dictionary. Got {type(event_to_attr)}.")
for index, e in enumerate(event_names):
if not isinstance(e, (str, EventEnum)):
raise TypeError(f"Value at {index} of event_names should be a str or EventEnum, but given {e}")
self._allowed_events.append(e)
if event_to_attr and e in event_to_attr:
State.event_to_attr[e] = event_to_attr[e]
# we need to update state attributes associated with new custom events
self.state._update_attrs()
def _handler_wrapper(self, handler: Callable, event_name: Any, event_filter: Callable) -> Callable:
# signature of the following wrapper will be inspected during registering to check if engine is necessary
# we have to build a wrapper with a relevant signature: the solution is functools.wraps
@functools.wraps(handler)
def wrapper(*args: Any, **kwargs: Any) -> Any:
event = self.state.get_event_attrib_value(event_name)
if event_filter(self, event):
return handler(*args, **kwargs)
# setup input handler as parent to make has_event_handler work
setattr(wrapper, "_parent", weakref.ref(handler))
return wrapper
def _assert_allowed_event(self, event_name: Any) -> None:
if event_name not in self._allowed_events:
self.logger.error(f"attempt to add event handler to an invalid event {event_name}")
raise ValueError(f"Event {event_name} is not a valid event for this {self.__class__.__name__}.")
def add_event_handler(self, event_name: Any, handler: Callable, *args: Any, **kwargs: Any) -> RemovableEventHandle:
"""Add an event handler to be executed when the specified event is fired.
Args:
event_name: An event or a list of events to attach the handler. Valid events are
from :class:`~ignite.engine.events.Events` or any ``event_name`` added by
:meth:`~ignite.engine.engine.Engine.register_events`.
handler: the callable event handler that should be invoked. No restrictions on its signature.
The first argument can optionally be `engine`, the :class:`~ignite.engine.engine.Engine` object
that the handler is bound to.
args: optional args to be passed to ``handler``.
kwargs: optional keyword args to be passed to ``handler``.
Returns:
:class:`~ignite.engine.events.RemovableEventHandle`, which can be used to remove the handler.
Note:
Note that other arguments can be passed to the handler in addition to the `*args` and `**kwargs`
passed here, for example during :attr:`~ignite.engine.events.Events.EXCEPTION_RAISED`.
Examples:
.. code-block:: python
engine = Engine(process_function)
def print_epoch(engine):
print(f"Epoch: {engine.state.epoch}")
engine.add_event_handler(Events.EPOCH_COMPLETED, print_epoch)
events_list = Events.EPOCH_COMPLETED | Events.COMPLETED
def execute_something():
# do some thing not related to engine
pass
engine.add_event_handler(events_list, execute_something)
Note:
Since v0.3.0, Events have become more flexible and allow passing an event filter to the Engine.
See :class:`~ignite.engine.events.Events` for more details.
"""
if isinstance(event_name, EventsList):
for e in event_name:
self.add_event_handler(e, handler, *args, **kwargs)
return RemovableEventHandle(event_name, handler, self)
if isinstance(event_name, CallableEventWithFilter) and event_name.filter is not None:
event_filter = event_name.filter
handler = self._handler_wrapper(handler, event_name, event_filter)
self._assert_allowed_event(event_name)
event_args: Tuple[Any, ...] = ()
if event_name == Events.EXCEPTION_RAISED:
event_args += (Exception(),)
elif event_name == Events.TERMINATE_SINGLE_EPOCH:
event_args += (0,)
try:
_check_signature(handler, "handler", self, *(event_args + args), **kwargs)
self._event_handlers[event_name].append((handler, (self,) + args, kwargs))
except ValueError:
_check_signature(handler, "handler", *(event_args + args), **kwargs)
self._event_handlers[event_name].append((handler, args, kwargs))
self.logger.debug(f"Added handler for event {event_name}")
return RemovableEventHandle(event_name, handler, self)
def has_event_handler(self, handler: Callable, event_name: Optional[Any] = None) -> bool:
"""Check if the specified event has the specified handler.
Args:
handler: the callable event handler.
event_name: The event the handler attached to. Set this
to ``None`` to search all events.
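Examples:
A minimal illustrative sketch; ``process_function`` and ``print_epoch`` below are placeholder names used only for demonstration:
.. code-block:: python
    engine = Engine(process_function)
    def print_epoch(engine):
        print(engine.state.epoch)
    engine.add_event_handler(Events.EPOCH_COMPLETED, print_epoch)
    assert engine.has_event_handler(print_epoch, Events.EPOCH_COMPLETED)
    assert engine.has_event_handler(print_epoch)  # search across all events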
"""
if event_name is not None:
if event_name not in self._event_handlers:
return False
events: Union[List[Any], Dict[Any, List]] = [event_name]
else:
events = self._event_handlers
for e in events:
for h, _, _ in self._event_handlers[e]:
if self._compare_handlers(handler, h):
return True
return False
@staticmethod
def _compare_handlers(user_handler: Callable, registered_handler: Callable) -> bool:
if hasattr(registered_handler, "_parent"):
registered_handler = registered_handler._parent()
return registered_handler == user_handler
def remove_event_handler(self, handler: Callable, event_name: Any) -> None:
"""Remove event handler `handler` from registered handlers of the engine
Args:
handler: the callable event handler that should be removed
event_name: The event the handler attached to.
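Examples:
A minimal illustrative sketch; ``process_function`` and ``log_epoch`` below are placeholder names used only for demonstration:
.. code-block:: python
    engine = Engine(process_function)
    def log_epoch(engine):
        print(engine.state.epoch)
    engine.add_event_handler(Events.EPOCH_COMPLETED, log_epoch)
    # ... later, detach the handler again
    engine.remove_event_handler(log_epoch, Events.EPOCH_COMPLETED)
    assert not engine.has_event_handler(log_epoch)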
"""
if event_name not in self._event_handlers:
raise ValueError(f"Input event name '{event_name}' does not exist")
new_event_handlers = [
(h, args, kwargs)
for h, args, kwargs in self._event_handlers[event_name]
if not self._compare_handlers(handler, h)
]
if len(new_event_handlers) == len(self._event_handlers[event_name]):
raise ValueError(f"Input handler '{handler}' is not found among registered event handlers")
self._event_handlers[event_name] = new_event_handlers
def on(self, event_name: Any, *args: Any, **kwargs: Any) -> Callable:
"""Decorator shortcut for :meth:`~ignite.engine.engine.Engine.add_event_handler`.
Args:
event_name: An event to attach the handler to. Valid events are from :class:`~ignite.engine.events.Events`
or any ``event_name`` added by :meth:`~ignite.engine.engine.Engine.register_events`.
args: optional args to be passed to `handler`.
kwargs: optional keyword args to be passed to `handler`.
Examples:
.. code-block:: python
engine = Engine(process_function)
@engine.on(Events.EPOCH_COMPLETED)
def print_epoch():
print(f"Epoch: {engine.state.epoch}")
@engine.on(Events.EPOCH_COMPLETED | Events.COMPLETED)
def execute_something():
# do some thing not related to engine
pass
"""
def decorator(f: Callable) -> Callable:
self.add_event_handler(event_name, f, *args, **kwargs)
return f
return decorator
def _fire_event(self, event_name: Any, *event_args: Any, **event_kwargs: Any) -> None:
"""Execute all the handlers associated with given event.
This method executes all handlers associated with the event
`event_name`. Optional positional and keyword arguments can be used to
pass arguments to **all** handlers added with this event. These
arguments update the arguments passed via :meth:`~ignite.engine.engine.Engine.add_event_handler`.
Args:
event_name: event for which the handlers should be executed. Valid
events are from :class:`~ignite.engine.events.Events` or any `event_name` added by
:meth:`~ignite.engine.engine.Engine.register_events`.
*event_args: optional args to be passed to all handlers.
**event_kwargs: optional keyword args to be passed to all handlers.
"""
self.logger.debug(f"{self.state.epoch} | {self.state.iteration}, Firing handlers for event {event_name}")
self.last_event_name = event_name
for func, args, kwargs in self._event_handlers[event_name]:
kwargs.update(event_kwargs)
first, others = ((args[0],), args[1:]) if (args and args[0] == self) else ((), args)
func(*first, *(event_args + others), **kwargs)
def fire_event(self, event_name: Any) -> None:
"""Execute all the handlers associated with given event.
This method executes all handlers associated with the event
`event_name`. This is the method used in :meth:`~ignite.engine.engine.Engine.run` to call the
core events found in :class:`~ignite.engine.events.Events`.
Custom events can be fired if they have been registered before with
:meth:`~ignite.engine.engine.Engine.register_events`. The engine `state` attribute should be used
to exchange "dynamic" data among `process_function` and handlers.
This method is called automatically for core events. If no custom
events are used in the engine, there is no need for the user to call
the method.
Args:
event_name: event for which the handlers should be executed. Valid
events are from :class:`~ignite.engine.events.Events` or any `event_name` added by
:meth:`~ignite.engine.engine.Engine.register_events`.
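Examples:
A minimal illustrative sketch of firing a previously registered custom event; ``process_function`` and the event name ``"my_event"`` are arbitrary names chosen only for this example:
.. code-block:: python
    engine = Engine(process_function)
    engine.register_events("my_event")
    @engine.on("my_event")
    def on_my_event():
        print("my_event fired")
    @engine.on(Events.ITERATION_COMPLETED(every=10))
    def trigger_my_event():
        engine.fire_event("my_event")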
"""
self._assert_allowed_event(event_name)
return self._fire_event(event_name)
def interrupt(self) -> None:
"""Sends interrupt signal to the engine, so that it interrupts the run after
the current iteration. The run can be resumed by calling
:meth:`~ignite.engine.engine.Engine.run`. Data iteration will continue from the interrupted state.
Examples:
.. testcode::
from ignite.engine import Engine, Events
data = range(10)
max_epochs = 3
def check_input_data(e, b):
print(f"Epoch {engine.state.epoch}, Iter {engine.state.iteration} | data={b}")
i = (e.state.iteration - 1) % len(data)
assert b == data[i]
engine = Engine(check_input_data)
@engine.on(Events.ITERATION_COMPLETED(every=11))
def call_interrupt():
engine.interrupt()
print("Start engine run with interruptions:")
state = engine.run(data, max_epochs=max_epochs)
print("1 Engine run is interrupted at ", state.epoch, state.iteration)
state = engine.run(data, max_epochs=max_epochs)
print("2 Engine run is interrupted at ", state.epoch, state.iteration)
state = engine.run(data, max_epochs=max_epochs)
print("3 Engine ended the run at ", state.epoch, state.iteration)
.. dropdown:: Output
.. testoutput::
Start engine run with interruptions:
Epoch 1, Iter 1 | data=0
Epoch 1, Iter 2 | data=1
Epoch 1, Iter 3 | data=2
Epoch 1, Iter 4 | data=3
Epoch 1, Iter 5 | data=4
Epoch 1, Iter 6 | data=5
Epoch 1, Iter 7 | data=6
Epoch 1, Iter 8 | data=7
Epoch 1, Iter 9 | data=8
Epoch 1, Iter 10 | data=9
Epoch 2, Iter 11 | data=0
1 Engine run is interrupted at 2 11
Epoch 2, Iter 12 | data=1
Epoch 2, Iter 13 | data=2
Epoch 2, Iter 14 | data=3
Epoch 2, Iter 15 | data=4
Epoch 2, Iter 16 | data=5
Epoch 2, Iter 17 | data=6
Epoch 2, Iter 18 | data=7
Epoch 2, Iter 19 | data=8
Epoch 2, Iter 20 | data=9
Epoch 3, Iter 21 | data=0
Epoch 3, Iter 22 | data=1
2 Engine run is interrupted at 3 22
Epoch 3, Iter 23 | data=2
Epoch 3, Iter 24 | data=3
Epoch 3, Iter 25 | data=4
Epoch 3, Iter 26 | data=5
Epoch 3, Iter 27 | data=6
Epoch 3, Iter 28 | data=7
Epoch 3, Iter 29 | data=8
Epoch 3, Iter 30 | data=9
3 Engine ended the run at 3 30
.. versionadded:: 0.4.10
"""
if not self.interrupt_resume_enabled:
raise RuntimeError(
"Engine 'interrupt/resume' feature is disabled. "
"Please, set Engine.interrupt_resume_enabled=True to enable it"
)
self.logger.info("interrupt signaled. Engine will interrupt the run after current iteration is finished.")
self.should_interrupt = True
def terminate(self) -> None:
"""Sends terminate signal to the engine, so that it terminates completely the run. The run is
terminated after the event on which ``terminate`` method was called. The following events are triggered:
- ...
- Terminating event
- :attr:`~ignite.engine.events.Events.TERMINATE`
- :attr:`~ignite.engine.events.Events.COMPLETED`
Examples:
.. testcode::
from ignite.engine import Engine, Events
def func(engine, batch):
print(engine.state.epoch, engine.state.iteration, " | ", batch)
max_epochs = 4
data = range(10)
engine = Engine(func)
@engine.on(Events.ITERATION_COMPLETED(once=14))
def terminate():
print(f"-> terminate at iteration: {engine.state.iteration}")
engine.terminate()
print("Start engine run:")
state = engine.run(data, max_epochs=max_epochs)
print("1 Engine run is terminated at ", state.epoch, state.iteration)
state = engine.run(data, max_epochs=max_epochs)
print("2 Engine ended the run at ", state.epoch, state.iteration)
.. dropdown:: Output
.. testoutput::
Start engine run:
1 1 | 0
1 2 | 1
1 3 | 2
1 4 | 3
1 5 | 4
1 6 | 5
1 7 | 6
1 8 | 7
1 9 | 8
1 10 | 9
2 11 | 0
2 12 | 1
2 13 | 2
2 14 | 3
-> terminate at iteration: 14
1 Engine run is terminated at 2 14
3 15 | 0
3 16 | 1
3 17 | 2
3 18 | 3
3 19 | 4
3 20 | 5
3 21 | 6
3 22 | 7
3 23 | 8
3 24 | 9
4 25 | 0
4 26 | 1
4 27 | 2
4 28 | 3
4 29 | 4
4 30 | 5
4 31 | 6
4 32 | 7
4 33 | 8
4 34 | 9
2 Engine ended the run at 4 34
.. versionchanged:: 0.4.10
Behaviour changed, for details see https://github.com/pytorch/ignite/issues/2669
"""
self.logger.info("Terminate signaled. Engine will stop after current iteration is finished.")
self.should_terminate = True
def terminate_epoch(self) -> None:
"""Sends terminate signal to the engine, so that it terminates the current epoch. The run
continues from the next epoch. The following events are triggered:
- ...
- Event on which ``terminate_epoch`` method is called
- :attr:`~ignite.engine.events.Events.TERMINATE_SINGLE_EPOCH`
- :attr:`~ignite.engine.events.Events.EPOCH_COMPLETED`
- :attr:`~ignite.engine.events.Events.EPOCH_STARTED`
- ...
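Examples:
A minimal illustrative sketch; ``process`` and ``data`` below are placeholder names used only for demonstration:
.. code-block:: python
    engine = Engine(process)
    @engine.on(Events.ITERATION_COMPLETED)
    def maybe_stop_epoch(engine):
        # skip the rest of the current epoch every 100 iterations
        if engine.state.iteration % 100 == 0:
            engine.terminate_epoch()
    engine.run(data, max_epochs=5)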
"""
self.logger.info(
"Terminate current epoch is signaled. "
"Current epoch iteration will stop after current iteration is finished."
)
self.should_terminate_single_epoch = True
def _handle_exception(self, e: BaseException) -> None:
if Events.EXCEPTION_RAISED in self._event_handlers:
self._fire_event(Events.EXCEPTION_RAISED, e)
else:
raise e
@property
def state_dict_user_keys(self) -> List:
return self._state_dict_user_keys
def state_dict(self) -> OrderedDict:
"""Returns a dictionary containing engine's state: "epoch_length", "max_epochs" and "iteration" and
other state values defined by `engine.state_dict_user_keys`
.. code-block:: python
engine = Engine(...)
engine.state_dict_user_keys.append("alpha")
engine.state_dict_user_keys.append("beta")
...
@engine.on(Events.STARTED)
def init_user_value(_):
engine.state.alpha = 0.1
engine.state.beta = 1.0
@engine.on(Events.COMPLETED)
def save_engine(_):
state_dict = engine.state_dict()
assert "alpha" in state_dict and "beta" in state_dict
torch.save(state_dict, "/tmp/engine.pt")
Returns:
OrderedDict:
a dictionary containing engine's state
"""
keys: Tuple[str, ...] = self._state_dict_all_req_keys + (self._state_dict_one_of_opt_keys[0],)
keys += tuple(self._state_dict_user_keys)
return OrderedDict([(k, getattr(self.state, k)) for k in keys])
def load_state_dict(self, state_dict: Mapping) -> None:
"""Setups engine from `state_dict`.
State dictionary should contain keys: `iteration` or `epoch`, `max_epochs` and `epoch_length`.
If `engine.state_dict_user_keys` contains keys, they should be also present in the state dictionary.
Iteration and epoch values are 0-based: the first iteration or epoch is zero.
This method does not remove any custom attributes added by user.
Args:
state_dict: a dict with parameters
.. code-block:: python
# Restore from the 4th epoch
state_dict = {"epoch": 3, "max_epochs": 100, "epoch_length": len(data_loader)}
# or 500th iteration
# state_dict = {"iteration": 499, "max_epochs": 100, "epoch_length": len(data_loader)}
trainer = Engine(...)
trainer.load_state_dict(state_dict)
trainer.run(data)
"""
super(Engine, self).load_state_dict(state_dict)
for k in self._state_dict_user_keys:
if k not in state_dict:
raise ValueError(
f"Required user state attribute '{k}' is absent in provided state_dict '{state_dict.keys()}'"
)
self.state.max_epochs = state_dict["max_epochs"]
self.state.epoch_length = state_dict["epoch_length"]
for k in self._state_dict_user_keys:
setattr(self.state, k, state_dict[k])
if "iteration" in state_dict:
self.state.iteration = state_dict["iteration"]
self.state.epoch = 0
if self.state.epoch_length is not None:
self.state.epoch = self.state.iteration // self.state.epoch_length
elif "epoch" in state_dict:
self.state.epoch = state_dict["epoch"]
if self.state.epoch_length is None:
raise ValueError(
"If epoch is provided in the state dict, epoch_length should not be None. "
f"Input state_dict: {state_dict}"
)
self.state.iteration = self.state.epoch_length * self.state.epoch
@staticmethod
def _is_done(state: State) -> bool:
is_done_iters = state.max_iters is not None and state.iteration >= state.max_iters
is_done_count = (
state.epoch_length is not None
and state.max_epochs is not None
and state.iteration >= state.epoch_length * state.max_epochs
)
is_done_epochs = state.max_epochs is not None and state.epoch >= state.max_epochs
return is_done_iters or is_done_count or is_done_epochs
def set_data(self, data: Union[Iterable, DataLoader]) -> None:
"""Method to set data. After calling the method the next batch passed to `processing_function` is
from newly provided data. Please, note that epoch length is not modified.
Args:
data: Collection of batches allowing repeated iteration (e.g., list or `DataLoader`).
Examples:
User can switch data provider during the training:
.. code-block:: python
data1 = ...
data2 = ...
switch_iteration = 5000
def train_step(e, batch):
# when iteration <= switch_iteration
# batch is from data1
# when iteration > switch_iteration
# batch is from data2
...
trainer = Engine(train_step)
@trainer.on(Events.ITERATION_COMPLETED(once=switch_iteration))
def switch_dataloader():
trainer.set_data(data2)
trainer.run(data1, max_epochs=100)
"""
self.state.dataloader = data
self._dataloader_iter = iter(self.state.dataloader)
def run(
self,
data: Optional[Iterable] = None,
max_epochs: Optional[int] = None,
max_iters: Optional[int] = None,
epoch_length: Optional[int] = None,
) -> State:
"""Runs the ``process_function`` over the passed data.
Engine has a state and the following logic is applied in this function:
- At the first call, new state is defined by `max_epochs`, `max_iters`, `epoch_length`, if provided.
A timer for total and per-epoch time is initialized when Events.STARTED is handled.
- If state is already defined such that there are iterations to run until `max_epochs` and no input arguments
provided, state is kept and used in the function.
- If state is defined and engine is "done" (no iterations to run until `max_epochs`), a new state is defined.
- If state is defined, engine is NOT "done", then input arguments if provided override defined state.
Args:
data: Collection of batches allowing repeated iteration (e.g., list or `DataLoader`). If not provided, then
``epoch_length`` is required and ``batch`` argument of ``process_function`` will be ``None``.
max_epochs: Max epochs to run for (default: None).
If a new state should be created (first run or run again from an ended engine), its default value is 1.
If run is resuming from a state, provided `max_epochs` will be taken into account and should be larger
than `engine.state.max_epochs`.
epoch_length: Number of iterations to count as one epoch. By default, it can be set as
`len(data)`. If `data` is an iterator and `epoch_length` is not set, then it will be automatically
determined as the iteration on which data iterator raises `StopIteration`.
This argument should not change if run is resuming from a state.
max_iters: Number of iterations to run for.
`max_iters` and `max_epochs` are mutually exclusive; only one of the two arguments should be provided.
Returns:
State: output state.
Note:
User can dynamically preprocess input batch at :attr:`~ignite.engine.events.Events.ITERATION_STARTED` and
store the output batch in `engine.state.batch`. The latter is then passed as usual to `process_function` as an argument:
.. code-block:: python
trainer = ...
@trainer.on(Events.ITERATION_STARTED)
def switch_batch(engine):
engine.state.batch = preprocess_batch(engine.state.batch)
Restart the training from the beginning. User can reset `max_epochs = None`:
.. code-block:: python
# ...
trainer.run(train_loader, max_epochs=5)
# Reset model weights etc. and restart the training
trainer.state.max_epochs = None
trainer.run(train_loader, max_epochs=2)
"""
if data is not None and not isinstance(data, Iterable):
raise TypeError("Argument data should be iterable")
if self.state.max_epochs is not None:
# Check and apply overridden parameters
if max_epochs is not None:
if max_epochs < self.state.epoch:
raise ValueError(
"Argument max_epochs should be greater than or equal to the start "
f"epoch defined in the state: {max_epochs} vs {self.state.epoch}. "
"Please, set engine.state.max_epochs = None "
"before calling engine.run() in order to restart the training from the beginning."
)
self.state.max_epochs = max_epochs
if epoch_length is not None:
if epoch_length != self.state.epoch_length:
raise ValueError(
"Argument epoch_length should be same as in the state, "
f"but given {epoch_length} vs {self.state.epoch_length}"
)
if self.state.max_epochs is None or (self._is_done(self.state) and self._internal_run_generator is None):
# Create new state
if epoch_length is None:
if data is None:
raise ValueError("epoch_length should be provided if data is None")
epoch_length = self._get_data_length(data)
if epoch_length is not None and epoch_length < 1:
raise ValueError("Input data has zero size. Please provide non-empty data")
if max_iters is None:
if max_epochs is None:
max_epochs = 1
else:
if max_epochs is not None:
raise ValueError(
"Arguments max_iters and max_epochs are mutually exclusive."
"Please provide only max_epochs or max_iters."
)
if epoch_length is not None:
max_epochs = math.ceil(max_iters / epoch_length)
self.state.iteration = 0
self.state.epoch = 0
self.state.max_epochs = max_epochs
self.state.max_iters = max_iters
self.state.epoch_length = epoch_length
# Reset generator if previously used
self._internal_run_generator = None
self.logger.info(f"Engine run starting with max_epochs={max_epochs}.")
else:
self.logger.info(
f"Engine run resuming from iteration {self.state.iteration}, "
f"epoch {self.state.epoch} until {self.state.max_epochs} epochs"
)
if self.state.epoch_length is None and data is None:
raise ValueError("epoch_length should be provided if data is None")
if self.should_terminate:
# If engine was terminated and now is resuming from terminated state
# we need to initialize iter_counter as 0
self._init_iter = 0
if self._dataloader_iter is None:
self.state.dataloader = data
if self.interrupt_resume_enabled:
return self._internal_run()
else:
return self._internal_run_legacy()
@staticmethod
def _init_timers(state: State) -> None:
state.times[Events.EPOCH_COMPLETED.name] = 0.0
state.times[Events.COMPLETED.name] = 0.0
def _get_data_length(self, data: Iterable) -> Optional[int]:
try:
if hasattr(data, "__len__"):
return len(data) # type: ignore[arg-type]
except TypeError:
# _InfiniteConstantSampler can raise a TypeError on DataLoader length of an IterableDataset
pass
return None
def _setup_dataloader_iter(self) -> None:
if self.state.dataloader is None:
if self.state.epoch_length is None:
raise RuntimeError(
"Internal error, self.state.epoch_length is None. "
"Please, file an issue if you encounter this error."
)
self._dataloader_iter = _get_none_data_iter(self.state.epoch_length)
else:
self._dataloader_iter = iter(self.state.dataloader)
def _setup_engine(self) -> None:
self._setup_dataloader_iter()
if self._init_iter is None:
iteration = self.state.iteration
# Below we define initial counter value for _run_once_on_dataset to measure a single epoch
if self.state.epoch_length is not None:
iteration %= self.state.epoch_length
self._init_iter = iteration
def _internal_run(self) -> State:
if self._internal_run_generator is None:
self._internal_run_generator = self._internal_run_as_gen()
try:
return next(self._internal_run_generator)
except StopIteration as out:
self._internal_run_generator = None
return out.value
def _internal_run_as_gen(self) -> Generator:
self.should_terminate = self.should_terminate_single_epoch = self.should_interrupt = False
self._init_timers(self.state)
try:
try:
start_time = time.time()
self._fire_event(Events.STARTED)
yield from self._maybe_terminate_or_interrupt()
while not self._is_done(self.state) and not self.should_terminate:
self.state.epoch += 1
handlers_start_time = time.time()
self._fire_event(Events.EPOCH_STARTED)
epoch_time_taken = time.time() - handlers_start_time
yield from self._maybe_terminate_or_interrupt()
if self._dataloader_iter is None:
self._setup_engine()
epoch_time_taken += yield from self._run_once_on_dataset_as_gen()
# time is available for handlers but must be updated after fire
self.state.times[Events.EPOCH_COMPLETED.name] = epoch_time_taken
handlers_start_time = time.time()
self._fire_event(Events.EPOCH_COMPLETED)
epoch_time_taken += time.time() - handlers_start_time
# update time wrt handlers
self.state.times[Events.EPOCH_COMPLETED.name] = epoch_time_taken
yield from self._maybe_terminate_or_interrupt()
hours, mins, secs = _to_hours_mins_secs(epoch_time_taken)
self.logger.info(
f"Epoch[{self.state.epoch}] Complete. Time taken: {hours:02d}:{mins:02d}:{secs:06.3f}"
)
except _EngineTerminateException:
self._fire_event(Events.TERMINATE)
time_taken = time.time() - start_time
# time is available for handlers but must be updated after fire
self.state.times[Events.COMPLETED.name] = time_taken
handlers_start_time = time.time()
self._fire_event(Events.COMPLETED)
time_taken += time.time() - handlers_start_time
# update time wrt handlers
self.state.times[Events.COMPLETED.name] = time_taken
hours, mins, secs = _to_hours_mins_secs(time_taken)
self.logger.info(f"Engine run complete. Time taken: {hours:02d}:{mins:02d}:{secs:06.3f}")
except BaseException as e:
self._dataloader_iter = None
self.logger.error(f"Engine run is terminating due to exception: {e}")
self._handle_exception(e)
self._dataloader_iter = None
return self.state
def _maybe_terminate_or_interrupt(self) -> Generator:
if self.should_terminate:
raise _EngineTerminateException()
if self.should_terminate_single_epoch:
raise _EngineTerminateSingleEpochException()
if self.should_interrupt:
self._fire_event(Events.INTERRUPT)
self.should_interrupt = False
yield self.state
def _run_once_on_dataset_as_gen(self) -> Generator[State, None, float]:
start_time = time.time()
# We need to setup iter_counter > 0 if we resume from an iteration
iter_counter = 0 if self._init_iter is None else self._init_iter
self._init_iter = None
should_exit = False
try:
if self._dataloader_iter is None:
raise RuntimeError(
"Internal error, self._dataloader_iter is None. "
"Please, file an issue if you encounter this error."
)
while True:
self.state.batch = self.state.output = None
try:
# Avoid Events.GET_BATCH_STARTED triggered twice when data iter is restarted
if self.last_event_name != Events.DATALOADER_STOP_ITERATION:
self._fire_event(Events.GET_BATCH_STARTED)
yield from self._maybe_terminate_or_interrupt()
self.state.batch = next(self._dataloader_iter)
self._fire_event(Events.GET_BATCH_COMPLETED)
yield from self._maybe_terminate_or_interrupt()
iter_counter += 1
should_exit = False
except StopIteration:
# Define self.state.epoch_length if it is not yet set
if self.state.epoch_length is None:
# Define epoch length and stop the epoch
self.state.epoch_length = iter_counter
if self.state.max_iters is not None:
self.state.max_epochs = math.ceil(self.state.max_iters / self.state.epoch_length)
break
# Should exit while loop if we can not iterate
if should_exit:
if not self._is_done(self.state):
total_iters = (
self.state.epoch_length * self.state.max_epochs
if self.state.max_epochs is not None
else self.state.max_iters
)
warnings.warn(
"Data iterator can not provide data anymore but required total number of "
"iterations to run is not reached. "
f"Current iteration: {self.state.iteration} vs Total iterations to run : {total_iters}"
)
break
self._fire_event(Events.DATALOADER_STOP_ITERATION)
yield from self._maybe_terminate_or_interrupt()
self._setup_dataloader_iter()
should_exit = True
continue
self.state.iteration += 1
self._fire_event(Events.ITERATION_STARTED)
yield from self._maybe_terminate_or_interrupt()
self.state.output = self._process_function(self, self.state.batch)
self._fire_event(Events.ITERATION_COMPLETED)
yield from self._maybe_terminate_or_interrupt()
if self.state.epoch_length is not None and iter_counter == self.state.epoch_length:
break
if self.state.max_iters is not None and self.state.iteration == self.state.max_iters:
self.should_terminate = True
raise _EngineTerminateException()
except _EngineTerminateSingleEpochException:
self._fire_event(Events.TERMINATE_SINGLE_EPOCH, iter_counter=iter_counter)
self.should_terminate_single_epoch = False
self._setup_dataloader_iter()
except _EngineTerminateException as e:
# we need to reraise this exception such that it is not handled
# as a general exception by the code below
raise e
except Exception as e:
self.logger.error(f"Current run is terminating due to exception: {e}")
self._handle_exception(e)
return time.time() - start_time
def _maybe_terminate_legacy(self) -> None:
if self.should_terminate:
raise _EngineTerminateException()
if self.should_terminate_single_epoch:
raise _EngineTerminateSingleEpochException()
def _internal_run_legacy(self) -> State:
# internal_run without generator for BC
self.should_terminate = self.should_terminate_single_epoch = self.should_interrupt = False
self._init_timers(self.state)
try:
try:
start_time = time.time()
self._fire_event(Events.STARTED)
self._maybe_terminate_legacy()
while not self._is_done(self.state) and not self.should_terminate:
self.state.epoch += 1
handlers_start_time = time.time()
self._fire_event(Events.EPOCH_STARTED)
epoch_time_taken = time.time() - handlers_start_time
self._maybe_terminate_legacy()
if self._dataloader_iter is None:
self._setup_engine()
epoch_time_taken += self._run_once_on_dataset_legacy()
# time is available for handlers but must be updated after fire
self.state.times[Events.EPOCH_COMPLETED.name] = epoch_time_taken
handlers_start_time = time.time()
self._fire_event(Events.EPOCH_COMPLETED)
epoch_time_taken += time.time() - handlers_start_time
# update time wrt handlers
self.state.times[Events.EPOCH_COMPLETED.name] = epoch_time_taken
self._maybe_terminate_legacy()
hours, mins, secs = _to_hours_mins_secs(epoch_time_taken)
self.logger.info(
f"Epoch[{self.state.epoch}] Complete. Time taken: {hours:02d}:{mins:02d}:{secs:06.3f}"
)
except _EngineTerminateException:
self._fire_event(Events.TERMINATE)
time_taken = time.time() - start_time
# time is available for handlers but must be updated after fire
self.state.times[Events.COMPLETED.name] = time_taken
handlers_start_time = time.time()
self._fire_event(Events.COMPLETED)
time_taken += time.time() - handlers_start_time
# update time wrt handlers
self.state.times[Events.COMPLETED.name] = time_taken
hours, mins, secs = _to_hours_mins_secs(time_taken)
self.logger.info(f"Engine run complete. Time taken: {hours:02d}:{mins:02d}:{secs:06.3f}")
except BaseException as e:
self._dataloader_iter = None
self.logger.error(f"Engine run is terminating due to exception: {e}")
self._handle_exception(e)
self._dataloader_iter = None
return self.state
def _run_once_on_dataset_legacy(self) -> float:
start_time = time.time()
# We need to setup iter_counter > 0 if we resume from an iteration
iter_counter = 0 if self._init_iter is None else self._init_iter
self._init_iter = None
should_exit = False
try:
if self._dataloader_iter is None:
raise RuntimeError(
"Internal error, self._dataloader_iter is None. "
"Please, file an issue if you encounter this error."
)
while True:
self.state.batch = self.state.output = None
try:
# Avoid Events.GET_BATCH_STARTED triggered twice when data iter is restarted
if self.last_event_name != Events.DATALOADER_STOP_ITERATION:
self._fire_event(Events.GET_BATCH_STARTED)
self._maybe_terminate_legacy()
self.state.batch = next(self._dataloader_iter)
self._fire_event(Events.GET_BATCH_COMPLETED)
self._maybe_terminate_legacy()
iter_counter += 1
should_exit = False
except StopIteration:
# Define self.state.epoch_length if it is not yet set
if self.state.epoch_length is None:
# Define epoch length and stop the epoch
self.state.epoch_length = iter_counter
if self.state.max_iters is not None:
self.state.max_epochs = math.ceil(self.state.max_iters / self.state.epoch_length)
break
# Should exit while loop if we can not iterate
if should_exit:
if not self._is_done(self.state):
total_iters = (
self.state.epoch_length * self.state.max_epochs
if self.state.max_epochs is not None
else self.state.max_iters
)
warnings.warn(
"Data iterator can not provide data anymore but required total number of "
"iterations to run is not reached. "
f"Current iteration: {self.state.iteration} vs Total iterations to run : {total_iters}"
)
break
self._fire_event(Events.DATALOADER_STOP_ITERATION)
self._maybe_terminate_legacy()
self._setup_dataloader_iter()
should_exit = True
continue
self.state.iteration += 1
self._fire_event(Events.ITERATION_STARTED)
self._maybe_terminate_legacy()
self.state.output = self._process_function(self, self.state.batch)
self._fire_event(Events.ITERATION_COMPLETED)
self._maybe_terminate_legacy()
if self.state.epoch_length is not None and iter_counter == self.state.epoch_length:
break
if self.state.max_iters is not None and self.state.iteration == self.state.max_iters:
self.should_terminate = True
raise _EngineTerminateException()
except _EngineTerminateSingleEpochException:
self._fire_event(Events.TERMINATE_SINGLE_EPOCH, iter_counter=iter_counter)
self.should_terminate_single_epoch = False
self._setup_dataloader_iter()
except _EngineTerminateException as e:
# we need to reraise this exception such that it is not handled
# as a general exception by the code below
raise e
except Exception as e:
self.logger.error(f"Current run is terminating due to exception: {e}")
self._handle_exception(e)
return time.time() - start_time
def _get_none_data_iter(size: int) -> Iterator:
# Sized iterator for data as None
for _ in range(size):
yield None
class _EngineTerminateSingleEpochException(Exception):
"""
Exception associated with Terminate Single Epoch event
"""
pass
class _EngineTerminateException(Exception):
"""
Exception associated with Terminate event
"""
pass
|
import inspect
from typing import Any, Callable, Tuple, Union
def _check_signature(fn: Callable, fn_description: str, *args: Any, **kwargs: Any) -> None:
# if handler with filter, check the handler rather than the decorator
if hasattr(fn, "_parent"):
signature = inspect.signature(fn._parent())
else:
signature = inspect.signature(fn)
try: # try without engine
signature.bind(*args, **kwargs)
except TypeError as exc:
fn_params = list(signature.parameters)
exception_msg = str(exc)
passed_params = list(args) + list(kwargs)
raise ValueError(
f"Error adding {fn} '{fn_description}': "
f"takes parameters {fn_params} but will be called with {passed_params}"
f"({exception_msg})."
)
def _to_hours_mins_secs(time_taken: Union[float, int]) -> Tuple[int, int, float]:
"""Convert seconds to hours, mins, seconds and milliseconds."""
mins, secs = divmod(time_taken, 60)
hours, mins = divmod(mins, 60)
return round(hours), round(mins), secs
|
import warnings
from copy import deepcopy
from typing import Optional, Union
import torch.nn as nn
from ignite.engine import CallableEventWithFilter, Engine, Events, EventsList
from ignite.handlers.param_scheduler import BaseParamScheduler
from ignite.handlers.state_param_scheduler import LambdaStateScheduler
__all__ = ["EMAHandler"]
class EMAWarmUp:
def __init__(self, momentum_warmup: float, warmup_iters: int, momentum: float) -> None:
self.momentum_warmup = momentum_warmup
self.warmup_iters = warmup_iters
self.momentum = momentum
def __call__(self, event_index: int) -> float:
denominator = max(1, self.warmup_iters - 1)
curr_momentum = self.momentum_warmup + (self.momentum - self.momentum_warmup) * (event_index - 1) / denominator
if self.momentum >= self.momentum_warmup:
return min(self.momentum, curr_momentum)
else:
return max(self.momentum, curr_momentum)
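# Illustrative sketch (not part of the library source): EMAWarmUp linearly
# interpolates the momentum from `momentum_warmup` to `momentum` over
# `warmup_iters` events and clamps it afterwards. The numbers below are
# example values only:
#
#     warmup = EMAWarmUp(momentum_warmup=0.2, warmup_iters=100, momentum=0.002)
#     warmup(1)    # -> 0.2   (start of warmup)
#     warmup(100)  # -> 0.002 (warmup finished)
#     warmup(500)  # -> 0.002 (clamped to the final momentum)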
class EMAHandler:
r"""Exponential moving average (EMA) handler can be used to compute a smoothed version of model.
The EMA model is updated as follows:
.. math:: \theta_{\text{EMA}, t+1} = (1 - \lambda) \cdot \theta_{\text{EMA}, t} + \lambda \cdot \theta_{t}
where :math:`\theta_{\text{EMA}, t}` and :math:`\theta_{t}` are the EMA weights and online model weights at
:math:`t`-th iteration, respectively; :math:`\lambda` is the update momentum. Current momentum can be retrieved
from ``Engine.state.ema_momentum``.
Args:
model: the online model for which an EMA model will be computed. If ``model`` is ``DataParallel`` or
``DistributedDataParallel``, the EMA smoothing will be applied to ``model.module`` .
momentum: the update momentum after warmup phase, should be float in range :math:`\left(0, 1 \right)`.
momentum_warmup: the initial update momentum during warmup phase.
warmup_iters: iterations of warmup.
handle_buffers: how to handle model buffers during training. There are three options: 1. "copy" means
copying the buffers of the online model; 2. "update" means applying EMA to the buffers of the online
model; 3. "ema_train" means set the EMA model to ``train`` mode and skip copying or updating the buffers.
Attributes:
ema_model: the exponential moving averaged model.
model: the online model that is tracked by EMAHandler. It is ``model.module`` if ``model`` in
the initialization method is an instance of ``DistributedDataParallel``.
momentum: the update momentum.
handle_buffers: how to handle model buffers during training.
Note:
The EMA model is already in ``eval`` mode if ``handle_buffers`` is "copy" or "update". If model in the
arguments is an ``nn.Module`` or ``DistributedDataParallel``, the EMA model is an ``nn.Module`` and it is on
the same device as the online model. If the model is an ``nn.DataParallel``, then the EMA model is an
``nn.DataParallel``.
Note:
It is recommended to initialize and use an EMA handler in the following steps:
1. Initialize ``model`` (``nn.Module`` or ``DistributedDataParallel``) and ``ema_handler`` (``EMAHandler``).
2. Build ``trainer`` (``ignite.engine.Engine``).
3. Resume from checkpoint for ``model`` and ``ema_handler.ema_model``.
4. Attach ``ema_handler`` to ``trainer``.
Examples:
.. code-block:: python
device = torch.device("cuda:0")
model = nn.Linear(2, 1).to(device)
# create the EMA handler; the update interval is set later when attaching it
ema_handler = EMAHandler(model, momentum=0.0002)
# get the ema model, which is an instance of nn.Module
ema_model = ema_handler.ema_model
trainer = Engine(train_step_fn)
to_load = {"model": model, "ema_model", ema_model, "trainer", trainer}
if resume_from is not None:
Checkpoint.load_objects(to_load, checkpoint=resume_from)
# update the EMA model every 5 iterations
ema_handler.attach(trainer, name="ema_momentum", event=Events.ITERATION_COMPLETED(every=5))
# add other handlers
to_save = to_load
ckpt_handler = Checkpoint(to_save, DiskSaver(...), ...)
trainer.add_event_handler(Events.EPOCH_COMPLETED, ckpt_handler)
# current momentum can be retrieved from engine.state,
# the attribute name is the `name` parameter used in the attach function
@trainer.on(Events.ITERATION_COMPLETED)
def print_ema_momentum(engine):
    print(f"current momentum: {engine.state.ema_momentum}")
# use ema model for validation
val_step_fn = get_val_step_fn(ema_model)
evaluator = Engine(val_step_fn)
@trainer.on(Events.EPOCH_COMPLETED)
def run_validation(engine):
evaluator.run(val_data_loader)
trainer.run(...)
The following example shows how to perform warm-up to the EMA momentum:
.. code-block:: python
device = torch.device("cuda:0")
model = nn.Linear(2, 1).to(device)
# linearly change the EMA momentum from 0.2 to 0.002 in the first 100 iterations,
# then keep a constant EMA momentum of 0.002 afterwards
ema_handler = EMAHandler(model, momentum=0.002, momentum_warmup=0.2, warmup_iters=100)
engine = Engine(step_fn)
ema_handler.attach(engine, name="ema_momentum")
engine.run(...)
The following example shows how to attach two handlers to the same trainer:
.. code-block:: python
generator = build_generator(...)
discriminator = build_discriminator(...)
gen_handler = EMAHandler(generator)
disc_handler = EMAHandler(discriminator)
step_fn = get_step_fn(...)
engine = Engine(step_fn)
# update EMA model of generator every 1 iteration
gen_handler.attach(engine, "gen_ema_momentum", event=Events.ITERATION_COMPLETED)
# update EMA model of discriminator every 2 iteration
disc_handler.attach(engine, "dis_ema_momentum", event=Events.ITERATION_COMPLETED(every=2))
@engine.on(Events.ITERATION_COMPLETED)
def print_ema_momentum(engine):
print(f"current momentum for generator: {engine.state.gen_ema_momentum}")
print(f"current momentum for discriminator: {engine.state.disc_ema_momentum}")
engine.run(...)
.. versionadded:: 0.4.6
"""
def __init__(
self,
model: nn.Module,
momentum: float = 0.0002,
momentum_warmup: Optional[float] = None,
warmup_iters: Optional[int] = None,
handle_buffers: str = "copy",
) -> None:
if not 0 < momentum < 1:
raise ValueError(f"Invalid momentum: {momentum}")
self.momentum = momentum
self._momentum_lambda_obj: Optional[EMAWarmUp] = None
if momentum_warmup is not None and warmup_iters is not None:
self.momentum_scheduler: Optional[BaseParamScheduler] = None
self._momentum_lambda_obj = EMAWarmUp(momentum_warmup, warmup_iters, momentum)
if not isinstance(model, nn.Module):
raise ValueError(
f"model should be an instance of nn.Module or its subclasses, but got"
f"model: {model.__class__.__name__}"
)
if isinstance(model, nn.parallel.DistributedDataParallel):
model = model.module
self.model = model
self.ema_model = deepcopy(self.model)
for param in self.ema_model.parameters():
param.detach_()
if handle_buffers not in ("copy", "update", "ema_train"):
raise ValueError(
f"handle_buffers can only be one of 'copy', 'update', 'ema_train', " f"but got {handle_buffers}"
)
self.handle_buffers = handle_buffers
if self.handle_buffers == "ema_train":
self.ema_model.train()
else:
self.ema_model.eval()
def _update_ema_model(self, engine: Engine, name: str) -> None:
"""Update weights of ema model"""
momentum = getattr(engine.state, name)
for ema_p, model_p in zip(self.ema_model.parameters(), self.model.parameters()):
ema_p.mul_(1.0 - momentum).add_(model_p.data, alpha=momentum)
if self.handle_buffers == "update":
for ema_b, model_b in zip(self.ema_model.buffers(), self.model.buffers()):
try:
ema_b.mul_(1.0 - momentum).add_(model_b.data, alpha=momentum)
except RuntimeError:
# Handle the case where ema_b is torch.int64, torch.int32 etc.,
# where a runtime error will be thrown when performing the in-place operations with floats.
# In this case, just copy the data
ema_b.data = model_b.data
elif self.handle_buffers == "copy":
# assign the buffers
for ema_b, model_b in zip(self.ema_model.buffers(), self.model.buffers()):
ema_b.data = model_b.data
else:
pass
def attach(
self,
engine: Engine,
name: str = "ema_momentum",
warn_if_exists: bool = True,
event: Union[str, Events, CallableEventWithFilter, EventsList] = Events.ITERATION_COMPLETED,
) -> None:
"""Attach the handler to engine. After the handler is attached, the ``Engine.state`` will add an new attribute
with name ``name`` if the attribute does not exist. Then, the current momentum can be retrieved from
``Engine.state`` when the engine runs.
Note:
There are two cases where a momentum with name ``name`` already exists: 1. the engine has loaded its
state dict after resuming. In this case, there is no need to initialize the momentum again, and users
can set ``warn_if_exists`` to False to suppress the warning message; 2. another handler has created
a state attribute with the same name. In this case, users should choose another name for the ema momentum.
Args:
engine: trainer to which the handler will be attached.
name: attribute name for retrieving EMA momentum from ``Engine.state``. It should be a unique name since a
trainer can have multiple EMA handlers.
warn_if_exists: if True, a warning will be thrown if the momentum with name ``name`` already exists.
event: event when the EMA momentum and EMA model are updated.
"""
if hasattr(engine.state, name):
if warn_if_exists:
warnings.warn(
f"Attribute '{name}' already exists in Engine.state. It might because 1. the engine has loaded its "
f"state dict or 2. {name} is already created by other handlers. Turn off this warning by setting"
f"warn_if_exists to False.",
category=UserWarning,
)
else:
setattr(engine.state, name, self.momentum)
if self._momentum_lambda_obj is not None:
self.momentum_scheduler = LambdaStateScheduler(self._momentum_lambda_obj, param_name="ema_momentum")
# first update the momentum and then update the EMA model
self.momentum_scheduler.attach(engine, event)
engine.add_event_handler(event, self._update_ema_model, name)
|
import itertools
import math
import numbers
import tempfile
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from copy import copy
from pathlib import Path
from typing import Any, cast, Dict, List, Mapping, Optional, Sequence, Tuple, Type, Union
import torch
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, ReduceLROnPlateau
from torch.optim.optimizer import Optimizer
# https://github.com/pytorch/ignite/issues/2773
try:
from torch.optim.lr_scheduler import LRScheduler as PyTorchLRScheduler
except ImportError:
from torch.optim.lr_scheduler import _LRScheduler as PyTorchLRScheduler
from ignite.engine import Engine
class BaseParamScheduler(metaclass=ABCMeta):
r"""An abstract class for updating an engine state or optimizer's parameter value during
training.
Args:
param_name: name of engine state or optimizer's parameter to update.
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
.. versionadded:: 0.4.7
"""
def __init__(self, param_name: str, save_history: bool = False):
self.param_name = param_name
self.event_index = 0
self._save_history = save_history
self._state_attrs = ["event_index", "param_name", "save_history"]
@property
def save_history(self) -> bool:
return self._save_history
@save_history.setter
def save_history(self, value: bool) -> None:
self._save_history = value
def state_dict(self) -> Dict[str, Any]:
"""Returns a dictionary containing a whole state of BaseParamScheduler.
Returns:
dict:
a dictionary containing a whole state of BaseParamScheduler
"""
destination = OrderedDict()
for name in self._state_attrs:
if hasattr(self, name):
val = getattr(self, name)
if hasattr(val, "state_dict"):
val = val.state_dict()
destination[name] = copy(val)
return destination
def load_state_dict(self, state_dict: Mapping) -> None:
"""Copies parameters from :attr:`state_dict` into this BaseParamScheduler.
Args:
state_dict: a dict containing parameters.
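A minimal illustrative sketch, assuming a concrete subclass such as ``LinearCyclicalScheduler`` and an existing ``optimizer``:
.. code-block:: python
    scheduler = LinearCyclicalScheduler(optimizer, "lr", 1e-3, 1e-1, cycle_size=10)
    sd = scheduler.state_dict()
    # ... later, e.g. after re-creating the scheduler
    scheduler.load_state_dict(sd)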
"""
if not isinstance(state_dict, Mapping):
raise TypeError(f"Argument state_dict should be a dictionary, but given {type(state_dict)}")
for name in self._state_attrs:
if name not in state_dict:
raise ValueError(
f"Required state attribute '{name}' is absent in provided state_dict '{state_dict.keys()}'"
)
val = state_dict[name]
obj = getattr(self, name)
if isinstance(val, Mapping) and hasattr(obj, "load_state_dict"):
obj.load_state_dict(val)
else:
setattr(self, name, val)
@abstractmethod
def get_param(self) -> Union[List[float], float]:
"""Method to get current parameter values
Returns:
list of params, or scalar param
"""
pass
@classmethod
@abstractmethod
def simulate_values(cls, num_events: int, **scheduler_kwargs: Any) -> List[List[int]]:
"""Method to simulate scheduled values during `num_events` events.
Args:
num_events: number of events during the simulation.
scheduler_kwargs: parameter scheduler configuration kwargs.
Returns:
event_index, value
"""
pass
@classmethod
def plot_values(cls, num_events: int, **scheduler_kwargs: Mapping) -> Any:
"""Method to plot simulated scheduled values during `num_events` events.
This class requires `matplotlib package <https://matplotlib.org/>`_ to be installed:
.. code-block:: bash
pip install matplotlib
Args:
num_events: number of events during the simulation.
scheduler_kwargs: parameter scheduler configuration kwargs.
Returns:
matplotlib.lines.Line2D
Examples:
.. code-block:: python
import matplotlib.pylab as plt
plt.figure(figsize=(10, 7))
LinearCyclicalScheduler.plot_values(num_events=50, param_name='lr',
start_value=1e-1, end_value=1e-3, cycle_size=10)
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ModuleNotFoundError(
"This method requires matplotlib to be installed. "
"Please install it with command: \n pip install matplotlib"
)
values = cls.simulate_values(num_events=num_events, **scheduler_kwargs)
label = scheduler_kwargs.get("param_name", "learning rate")
ax = plt.plot([e for e, _ in values], [v for _, v in values], label=label)
plt.legend()
plt.grid(which="both")
return ax
class ParamScheduler(BaseParamScheduler):
"""An abstract class for updating an optimizer's parameter value during
training.
Args:
optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
param_name: name of optimizer's parameter to update.
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
param_group_index: optimizer's parameters group to use
Note:
Parameter scheduler works independently of the internal state of the attached optimizer.
More precisely, whatever the state of the optimizer (newly created or used by another scheduler) the scheduler
sets defined absolute values.
"""
def __init__(
self,
optimizer: Optimizer,
param_name: str,
save_history: bool = False,
param_group_index: Optional[int] = None,
):
super(ParamScheduler, self).__init__(param_name, save_history)
if not (
isinstance(optimizer, Optimizer)
or (hasattr(optimizer, "param_groups") and isinstance(optimizer.param_groups, Sequence))
):
raise TypeError(
"Argument optimizer should be torch.optim.Optimizer or has attribute 'param_groups' as list/tuple, "
f"but given {type(optimizer)}"
)
self.optimizer = optimizer
self.param_group_index = param_group_index
self._state_attrs += ["param_group_index"]
def __call__(self, engine: Optional[Engine], name: Optional[str] = None) -> None:
value = self.get_param()
if isinstance(value, list):
if len(value) != len(self.optimizer_param_groups):
raise ValueError(
"size of value is different than optimizer_param_groups "
f"{len(value)} != {len(self.optimizer_param_groups)}"
)
for i, param_group in enumerate(self.optimizer_param_groups):
param_group[self.param_name] = value[i]
else:
for i, param_group in enumerate(self.optimizer_param_groups):
param_group[self.param_name] = value
if name is None:
name = self.param_name
if self.save_history and engine:
if not hasattr(engine.state, "param_history") or engine.state.param_history is None:
setattr(engine.state, "param_history", {})
engine.state.param_history.setdefault(name, []) # type: ignore[attr-defined]
values = [pg[self.param_name] for pg in self.optimizer_param_groups]
engine.state.param_history[name].append(values) # type: ignore[attr-defined]
self.event_index += 1
@property
def optimizer_param_groups(self) -> List[Dict[str, Any]]:
if self.param_group_index is None:
return self.optimizer.param_groups
return [self.optimizer.param_groups[self.param_group_index]]
@classmethod
def simulate_values(cls, num_events: int, **scheduler_kwargs: Any) -> List[List[int]]:
"""Method to simulate scheduled values during `num_events` events.
Args:
num_events: number of events during the simulation.
scheduler_kwargs: parameter scheduler configuration kwargs.
Returns:
event_index, value
Examples:
.. code-block:: python
lr_values = np.array(LinearCyclicalScheduler.simulate_values(num_events=50, param_name='lr',
start_value=1e-1, end_value=1e-3,
cycle_size=10))
plt.plot(lr_values[:, 0], lr_values[:, 1], label="learning rate")
plt.xlabel("events")
plt.ylabel("values")
plt.legend()
"""
keys_to_remove = ["optimizer", "save_history"]
for key in keys_to_remove:
if key in scheduler_kwargs:
del scheduler_kwargs[key]
values = []
scheduler = cls(optimizer=_get_fake_optimizer(), save_history=False, **scheduler_kwargs)
for i in range(num_events):
scheduler(engine=None)
values.append([i, scheduler.optimizer_param_groups[0][scheduler.param_name]])
return values
class CyclicalScheduler(ParamScheduler):
"""An abstract class for updating an optimizer's parameter value over a
cycle of some size.
Args:
optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
param_name: name of optimizer's parameter to update.
start_value: value at start of cycle.
end_value: value at the middle of the cycle.
cycle_size: length of cycle, value should be larger than 1.
cycle_mult: ratio by which to change the cycle_size
at the end of each cycle (default=1.0).
start_value_mult: ratio by which to change the start value at the
end of each cycle (default=1.0).
end_value_mult: ratio by which to change the end value at the
end of each cycle (default=1.0).
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
param_group_index: optimizer's parameters group to use.
Note:
If the scheduler is bound to an 'ITERATION_*' event, 'cycle_size' should
usually be the number of batches in an epoch.
.. versionadded:: 0.4.5
"""
def __init__(
self,
optimizer: Optimizer,
param_name: str,
start_value: float,
end_value: float,
cycle_size: int,
cycle_mult: float = 1.0,
start_value_mult: float = 1.0,
end_value_mult: float = 1.0,
save_history: bool = False,
param_group_index: Optional[int] = None,
):
super(CyclicalScheduler, self).__init__(
optimizer, param_name, save_history=save_history, param_group_index=param_group_index
)
self.start_value = start_value
self.end_value = end_value
self.cycle_size = int(cycle_size) # Ensure cycle_size is integer
self.cycle_mult = cycle_mult
self.cycle = 0
self.start_value_mult = start_value_mult
self.end_value_mult = end_value_mult
if self.cycle_size < 2:
raise ValueError(f"Argument cycle_size should be positive and larger than 1, but given {cycle_size}")
self._state_attrs += [
"start_value",
"end_value",
"cycle_size",
"cycle_mult",
"cycle",
"start_value_mult",
"end_value_mult",
]
def __call__(self, engine: Optional[Engine], name: Optional[str] = None) -> None:
if self.event_index != 0 and self.event_index % self.cycle_size == 0:
self.event_index = 0
self.cycle_size = int(self.cycle_size * self.cycle_mult)
self.cycle += 1
self.start_value *= self.start_value_mult
self.end_value *= self.end_value_mult
return super(CyclicalScheduler, self).__call__(engine, name)
class LinearCyclicalScheduler(CyclicalScheduler):
"""Linearly adjusts param value to 'end_value' for a half-cycle, then linearly
adjusts it back to 'start_value' for a half-cycle.
Args:
optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
param_name: name of optimizer's parameter to update.
start_value: value at start of cycle.
end_value: value at the middle of the cycle.
cycle_size: length of cycle.
cycle_mult: ratio by which to change the cycle_size
at the end of each cycle (default=1).
start_value_mult: ratio by which to change the start value at the
end of each cycle (default=1.0).
end_value_mult: ratio by which to change the end value at the
end of each cycle (default=1.0).
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
param_group_index: optimizer's parameters group to use.
Note:
If the scheduler is bound to an 'ITERATION_*' event, 'cycle_size' should
usually be the number of batches in an epoch.
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode:: 1
default_trainer = get_default_trainer()
# Linearly increases the learning rate from 0.0 to 1.0 and back to 0.0
# over a cycle of 4 iterations
scheduler = LinearCyclicalScheduler(default_optimizer, "lr", 0.0, 1.0, 4)
default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
@default_trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(default_optimizer.param_groups[0]["lr"])
default_trainer.run([0] * 9, max_epochs=1)
.. testoutput:: 1
0.0
0.5
1.0
0.5
...
.. testcode:: 2
default_trainer = get_default_trainer()
optimizer = torch.optim.SGD(
[
{"params": default_model.base.parameters(), "lr": 0.001},
{"params": default_model.fc.parameters(), "lr": 0.01},
]
)
# Linearly increases the learning rate from 0.0 to 1.0 and back to 0.0
# over a cycle of 4 iterations
scheduler1 = LinearCyclicalScheduler(optimizer, "lr (base)", 0.0, 1.0, 4, param_group_index=0)
# Linearly increases the learning rate from 0.0 to 0.1 and back to 0.0
# over a cycle of 4 iterations
scheduler2 = LinearCyclicalScheduler(optimizer, "lr (fc)", 0.0, 0.1, 4, param_group_index=1)
default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler1)
default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler2)
@default_trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(optimizer.param_groups[0]["lr (base)"],
optimizer.param_groups[1]["lr (fc)"])
default_trainer.run([0] * 9, max_epochs=1)
.. testoutput:: 2
0.0 0.0
0.5 0.05
1.0 0.1
0.5 0.05
...
.. versionadded:: 0.4.5
"""
    def get_param(self) -> float:
        """Method to get current optimizer's parameter value"""
        cycle_progress = self.event_index / self.cycle_size
        return self.end_value + (self.start_value - self.end_value) * abs(cycle_progress - 0.5) * 2
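# --- Illustrative sketch (editor's addition, not part of ignite) --------------
# The triangular wave produced by ``get_param`` above can be previewed without
# an engine through ``simulate_values``; the helper name ``_demo_linear_cyclical``
# is hypothetical.
def _demo_linear_cyclical():
    values = LinearCyclicalScheduler.simulate_values(
        num_events=6, param_name="lr", start_value=0.0, end_value=1.0, cycle_size=4
    )
    # expected (matching the docstring above): [[0, 0.0], [1, 0.5], [2, 1.0], [3, 0.5], [4, 0.0], [5, 0.5]]
    return values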
class CosineAnnealingScheduler(CyclicalScheduler):
"""Anneals 'start_value' to 'end_value' over each cycle.
The annealing takes the form of the first half of a cosine
wave (as suggested in [Smith17]_).
Args:
optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
param_name: name of optimizer's parameter to update.
start_value: value at start of cycle.
end_value: value at the end of the cycle.
cycle_size: length of cycle.
cycle_mult: ratio by which to change the cycle_size
at the end of each cycle (default=1).
start_value_mult: ratio by which to change the start value at the
end of each cycle (default=1.0).
end_value_mult: ratio by which to change the end value at the
end of each cycle (default=1.0).
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
param_group_index: optimizer's parameters group to use.
Note:
If the scheduler is bound to an 'ITERATION_*' event, 'cycle_size' should
usually be the number of batches in an epoch.
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode:: 1
default_trainer = get_default_trainer()
# CosineAnnealing increases the learning rate from 0.0 to 1.0
# over a cycle of 4 iterations
scheduler = CosineAnnealingScheduler(default_optimizer, "lr", 0.0, 1.0, 4)
default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
@default_trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(default_optimizer.param_groups[0]["lr"])
default_trainer.run([0] * 9, max_epochs=1)
.. testoutput:: 1
0.0
0.1464...
0.4999...
0.8535...
...
.. testcode:: 2
default_trainer = get_default_trainer()
optimizer = torch.optim.SGD(
[
{"params": default_model.base.parameters(), "lr": 0.001},
{"params": default_model.fc.parameters(), "lr": 0.01},
]
)
# CosineAnnealing increases the learning rate from 0.0 to 1.0
# over a cycle of 4 iterations
scheduler_1 = CosineAnnealingScheduler(optimizer, "lr (base)", 0.0, 1.0, 4, param_group_index=0)
# CosineAnnealing increases the learning rate from 0.0 to 0.1
# over a cycle of 4 iterations
scheduler_2 = CosineAnnealingScheduler(optimizer, "lr (fc)", 0.0, 0.1, 4, param_group_index=1)
default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler_1)
default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler_2)
@default_trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(optimizer.param_groups[0]["lr (base)"],
optimizer.param_groups[1]["lr (fc)"])
default_trainer.run([0] * 9, max_epochs=1)
.. testoutput:: 2
0.0 0.0
0.1464... 0.01464...
0.4999... 0.04999...
0.8535... 0.08535...
...
.. [Smith17] Smith, Leslie N. "Cyclical learning rates for training neural networks."
Applications of Computer Vision (WACV), 2017 IEEE Winter Conference on. IEEE, 2017
.. versionadded:: 0.4.5
"""
def get_param(self) -> float:
"""Method to get current optimizer's parameter value"""
cycle_progress = self.event_index / self.cycle_size
return self.start_value + ((self.end_value - self.start_value) / 2) * (1 - math.cos(math.pi * cycle_progress))
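# --- Illustrative sketch (editor's addition, not part of ignite) --------------
# Numeric cross-check of the half-cosine formula used in ``get_param`` above for
# start_value=0.0, end_value=1.0 and cycle_size=4, reproducing the
# 0.0 / 0.1464... / 0.5 / 0.8535... sequence shown in the docstring.
# The helper name ``_demo_cosine_annealing_values`` is hypothetical.
def _demo_cosine_annealing_values():
    import math
    start_value, end_value, cycle_size = 0.0, 1.0, 4
    return [
        start_value + ((end_value - start_value) / 2) * (1 - math.cos(math.pi * t / cycle_size))
        for t in range(cycle_size)
    ]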
class ConcatScheduler(ParamScheduler):
"""Concat a list of parameter schedulers.
The `ConcatScheduler` goes through a list of schedulers given by `schedulers`. Duration of each
scheduler is defined by `durations` list of integers.
Args:
schedulers: list of parameter schedulers.
durations: list of number of events that lasts a parameter scheduler from schedulers.
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
default_trainer = get_default_trainer()
scheduler_1 = LinearCyclicalScheduler(default_optimizer, "lr", 0.0, 1.0, 8)
scheduler_2 = CosineAnnealingScheduler(default_optimizer, "lr", 1.0, 0.2, 4)
            # Sets the learning rate linearly from 0.0 to 1.0 over 4 iterations. Then
# starts an annealing schedule from 1.0 to 0.2 over the next 4 iterations.
# The annealing cycles are repeated indefinitely.
combined_scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=[4, ])
default_trainer.add_event_handler(Events.ITERATION_STARTED, combined_scheduler)
@default_trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(default_optimizer.param_groups[0]["lr"])
default_trainer.run([0] * 8, max_epochs=1)
.. testoutput::
0.0
0.25
0.5
0.75
1.0
0.8828...
0.6000...
0.3171...
.. versionadded:: 0.4.5
"""
def __init__(self, schedulers: List[ParamScheduler], durations: List[int], save_history: bool = False):
if not isinstance(schedulers, Sequence):
raise TypeError(f"Argument schedulers should be a sequence, but given {schedulers}")
if len(schedulers) < 2:
raise ValueError(
f"Argument schedulers should be of more than one parameter schedulers, but given {schedulers}"
)
if not isinstance(durations, (list, tuple)):
raise TypeError(f"Argument durations should be list/tuple, but given {durations}")
if not all([isinstance(t, numbers.Integral) for t in durations]):
raise ValueError(f"Argument durations should be list/tuple of integers, but given {durations}")
if len(schedulers) != len(durations) + 1:
raise ValueError(
"Incorrect number schedulers or duration values, " f"given {len(schedulers)} and {len(durations)}"
)
for i, scheduler in enumerate(schedulers):
if not isinstance(scheduler, ParamScheduler) and not isinstance(scheduler, ParamGroupScheduler):
raise TypeError(
f"Value at index {i} of schedulers should be a parameter scheduler, but given {type(scheduler)}"
)
self.schedulers = schedulers
self.durations = durations
tmp_optimizers = [s.optimizer for s in self.schedulers]
tmp_list_optimizers = [s if isinstance(s, list) else [s] for s in tmp_optimizers]
param_optimizers = list(itertools.chain(*tmp_list_optimizers))
optimizer = list(set(param_optimizers))
if len(optimizer) != 1:
raise ValueError("schedulers should be related to same optimizer")
tmp_param_names = [s.param_name for s in self.schedulers]
tmp_list_param_names = [s if isinstance(s, list) else [s] for s in tmp_param_names]
param_names = list(itertools.chain(*tmp_list_param_names))
param_name = list(set(param_names))
if len(param_name) != 1:
raise ValueError("schedulers should be related to same param_name")
# schedulers should have save_history sync with ParamGroupScheduler
for s in schedulers:
s.save_history = save_history
super(ConcatScheduler, self).__init__(
optimizer=optimizer[0], param_name=param_name[0], save_history=save_history
)
self._scheduler_index = 0
self._setup_scheduler()
self._state_attrs += ["_current_duration", "durations", "_scheduler_index"]
def state_dict(self) -> Dict[str, Any]:
"""Returns a dictionary containing a whole state of ConcatScheduler.
Returns:
dict:
a dictionary containing a whole state of ConcatScheduler
"""
state_dict = super(ConcatScheduler, self).state_dict()
state_dict["schedulers"] = []
for s in self.schedulers:
state_dict["schedulers"].append(s.state_dict())
return state_dict
def load_state_dict(self, state_dict: Mapping) -> None:
"""Copies parameters from :attr:`state_dict` into this ConcatScheduler.
Args:
state_dict: a dict containing parameters.
"""
if not isinstance(state_dict, Mapping):
raise TypeError(f"Argument state_dict should be a dictionary, but given {type(state_dict)}")
if "schedulers" not in state_dict:
raise ValueError(
f"Required state attribute 'schedulers' is absent in provided state_dict '{state_dict.keys()}'"
)
sds = state_dict["schedulers"]
if len(sds) != len(self.schedulers):
raise ValueError(
f"Input state_dict contains {len(sds)} state_dicts of concatenated schedulers, "
f"but {len(self.schedulers)} needed"
)
for s, sd in zip(self.schedulers, sds):
s.load_state_dict(sd)
super(ConcatScheduler, self).load_state_dict(state_dict)
self._setup_scheduler()
def _setup_scheduler(self) -> None:
self._current_scheduler = self.schedulers[self._scheduler_index]
self._current_duration = (
self.durations[self._scheduler_index] if self._scheduler_index < len(self.durations) else -1
)
def __call__(self, engine: Optional[Engine], name: Optional[str] = None) -> None:
if self._current_duration == 0:
self._scheduler_index += 1
self._setup_scheduler()
self._current_scheduler(engine, name)
self._current_duration -= 1
@property
def optimizer_param_groups(self) -> List[Dict[str, Any]]:
# We need to setup optimizer_param_groups as property
        # to synchronize with the latest _current_scheduler and its internal optimizer_param_groups
return self._current_scheduler.optimizer_param_groups
@property
def save_history(self) -> bool:
return self._current_scheduler.save_history
@save_history.setter
def save_history(self, value: bool) -> None:
for s in self.schedulers:
s.save_history = value
def get_param(self) -> Union[List[float], float]:
return self._current_scheduler.get_param()
@classmethod
def simulate_values( # type: ignore[override]
cls,
num_events: int,
schedulers: List[ParamScheduler],
durations: List[int],
param_names: Optional[Union[List[str], Tuple[str]]] = None,
) -> List[List[int]]:
"""Method to simulate scheduled values during num_events events.
Args:
num_events: number of events during the simulation.
schedulers: list of parameter schedulers.
durations: list of number of events that lasts a parameter scheduler from schedulers.
param_names: parameter name or list of parameter names to simulate values.
By default, the first scheduler's parameter name is taken.
Returns:
list:
list of [event_index, value_0, value_1, ...], where values correspond to `param_names`.
"""
if param_names is not None:
if not isinstance(param_names, (list, tuple)):
raise TypeError(f"Argument param_names should be list or tuple, but given {type(param_names)}")
if not all(isinstance(item, str) for item in param_names):
raise ValueError(f"Argument param_names should be list or tuple of strings, but given {param_names}")
tmp_param_optimizers = [s.optimizer for s in schedulers]
tmp_list_param_optimizers = [s if isinstance(s, list) else [s] for s in tmp_param_optimizers]
param_optimizers = list(itertools.chain(*tmp_list_param_optimizers))
tmp_optimizer = list(set(param_optimizers))
if len(tmp_optimizer) != 1:
raise ValueError("schedulers should be related to same optimizer")
optimizer = tmp_optimizer[0]
# This scheduler uses `ParamScheduler` which
# should be replicated in order to simulate LR values and
# not perturb original scheduler.
with tempfile.TemporaryDirectory() as tmpdirname:
cache_filepath = Path(tmpdirname) / "ignite_lr_scheduler_cache.pt"
objs = {f"lr_scheduler_{i}": s.state_dict() for i, s in enumerate(schedulers)}
# all schedulers should be related to the same optimizer
objs["optimizer"] = optimizer.state_dict()
torch.save(objs, cache_filepath.as_posix())
# do not save_history
for s in schedulers:
s.save_history = False
output = []
scheduler = cls(schedulers=schedulers, save_history=False, durations=durations)
if param_names is None:
param_names = [scheduler.param_name]
for i in range(num_events):
scheduler(engine=None)
values = [i]
for param_name in param_names:
params = [p[param_name] for p in scheduler.optimizer_param_groups]
values = values + params
output.append(values)
objs = torch.load(cache_filepath.as_posix())
for i, s in enumerate(schedulers):
s.load_state_dict(objs[f"lr_scheduler_{i}"])
optimizer.load_state_dict(objs["optimizer"])
return output
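# --- Illustrative sketch (editor's addition, not part of ignite) --------------
# Previewing a "linear warm-up, then cosine annealing" combination with
# ``ConcatScheduler.simulate_values``, using the schedulers defined above.
# The helper name ``_demo_concat_preview`` and the chosen numbers are hypothetical.
def _demo_concat_preview():
    optimizer = _get_fake_optimizer()
    # first half-cycle of the triangular schedule (10 events) ramps lr from 0.0 to 0.1
    warmup = LinearCyclicalScheduler(optimizer, "lr", start_value=0.0, end_value=0.1, cycle_size=20)
    annealing = CosineAnnealingScheduler(optimizer, "lr", start_value=0.1, end_value=0.0, cycle_size=30)
    # warm-up runs for the first 10 events, then the cosine schedule takes over
    return ConcatScheduler.simulate_values(num_events=40, schedulers=[warmup, annealing], durations=[10])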
class _CosineAnnealingWarmRestarts:
def __init__(self, lr_scheduler: CosineAnnealingWarmRestarts):
self._lr_scheduler = lr_scheduler
@property
def last_epoch(self) -> int:
return self._lr_scheduler.last_epoch
@last_epoch.setter
def last_epoch(self, value: int) -> None:
self._lr_scheduler.last_epoch = value
@property
def optimizer(self) -> torch.optim.Optimizer:
return self._lr_scheduler.optimizer
def get_lr(self, epoch: Optional[int] = None) -> List[float]:
T_mult = self._lr_scheduler.T_mult
eta_min = self._lr_scheduler.eta_min
if epoch is None and self.last_epoch < 0:
epoch = 0
if epoch is None:
epoch = self.last_epoch + 1
self._lr_scheduler.T_cur = self._lr_scheduler.T_cur + 1
if self._lr_scheduler.T_cur >= self._lr_scheduler.T_i:
self._lr_scheduler.T_cur = self._lr_scheduler.T_cur - self._lr_scheduler.T_i
self._lr_scheduler.T_i = self._lr_scheduler.T_i * T_mult
else:
if epoch < 0:
raise ValueError("Expected non-negative epoch, but got {}".format(epoch))
if epoch >= self._lr_scheduler.T_0:
if T_mult == 1:
self._lr_scheduler.T_cur = epoch % self._lr_scheduler.T_0
else:
n = int(math.log((epoch / self._lr_scheduler.T_0 * (T_mult - 1) + 1), T_mult))
self._lr_scheduler.T_cur = epoch - self._lr_scheduler.T_0 * (T_mult**n - 1) / (T_mult - 1)
self._lr_scheduler.T_i = self._lr_scheduler.T_0 * T_mult**n
else:
self._lr_scheduler.T_i = self._lr_scheduler.T_0
self._lr_scheduler.T_cur = epoch
self.last_epoch = math.floor(epoch)
return [
eta_min
+ (base_lr - eta_min) * (1 + math.cos(math.pi * self._lr_scheduler.T_cur / self._lr_scheduler.T_i)) / 2
for base_lr in self._lr_scheduler.base_lrs
]
class LRScheduler(ParamScheduler):
"""A wrapper class to call `torch.optim.lr_scheduler` objects as `ignite` handlers.
Args:
lr_scheduler: lr_scheduler object to wrap.
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
use_legacy: if True, scheduler should be attached to ``Events.ITERATION_COMPLETED``, (default=False).
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
default_trainer = get_default_trainer()
from torch.optim.lr_scheduler import StepLR
torch_lr_scheduler = StepLR(default_optimizer, step_size=3, gamma=0.1)
scheduler = LRScheduler(torch_lr_scheduler)
default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
@default_trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(default_optimizer.param_groups[0]["lr"])
default_trainer.run([0] * 8, max_epochs=1)
.. testoutput::
0.1
0.1
0.1
0.010...
0.010...
0.010...
0.001...
0.001...
.. versionadded:: 0.4.5
.. versionchanged:: 0.4.9
added `use_legacy` argument
"""
def __init__(
self,
lr_scheduler: PyTorchLRScheduler,
save_history: bool = False,
use_legacy: bool = False,
):
if not isinstance(lr_scheduler, PyTorchLRScheduler):
raise TypeError(
"Argument lr_scheduler should be a subclass of "
f"torch.optim.lr_scheduler.{PyTorchLRScheduler.__name__}, "
f"but given {type(lr_scheduler)}"
)
self.lr_scheduler: Union[PyTorchLRScheduler, _CosineAnnealingWarmRestarts] = lr_scheduler
if isinstance(lr_scheduler, CosineAnnealingWarmRestarts):
self.lr_scheduler = _CosineAnnealingWarmRestarts(lr_scheduler)
super(LRScheduler, self).__init__(
optimizer=self.lr_scheduler.optimizer,
param_name="lr",
save_history=save_history,
)
if use_legacy:
warnings.warn(
"Please make sure to attach scheduler to Events.ITERATION_COMPLETED "
"instead of Events.ITERATION_STARTED to make sure to use "
"the first lr value from the optimizer, otherwise it will be skipped"
)
self.lr_scheduler.last_epoch += 1
self._state_attrs += ["lr_scheduler"]
def __call__(self, engine: Optional[Engine], name: Optional[str] = None) -> None:
super(LRScheduler, self).__call__(engine, name)
self.lr_scheduler.last_epoch += 1
def get_param(self) -> Union[float, List[float]]:
"""Method to get current optimizer's parameter value"""
# Emulate context manager for pytorch>=1.4
self.lr_scheduler._get_lr_called_within_step = True # type: ignore[union-attr]
lr_list = cast(List[float], self.lr_scheduler.get_lr())
self.lr_scheduler._get_lr_called_within_step = False # type: ignore[union-attr]
if len(lr_list) == 1:
return lr_list[0]
else:
return lr_list
@classmethod
def simulate_values( # type: ignore[override]
cls, num_events: int, lr_scheduler: PyTorchLRScheduler, **kwargs: Any
) -> List[List[int]]:
"""Method to simulate scheduled values during num_events events.
Args:
num_events: number of events during the simulation.
lr_scheduler: lr_scheduler object to wrap.
Returns:
            list of (event_index, value) pairs
"""
if not isinstance(lr_scheduler, PyTorchLRScheduler):
raise TypeError(
"Argument lr_scheduler should be a subclass of "
f"torch.optim.lr_scheduler.{PyTorchLRScheduler.__name__}, "
f"but given {type(lr_scheduler)}"
)
# This scheduler uses `torch.optim.lr_scheduler.LRScheduler` which
# should be replicated in order to simulate LR values and
# not perturb original scheduler.
with tempfile.TemporaryDirectory() as tmpdirname:
cache_filepath = Path(tmpdirname) / "ignite_lr_scheduler_cache.pt"
obj = {
"lr_scheduler": lr_scheduler.state_dict(),
"optimizer": lr_scheduler.optimizer.state_dict(),
}
torch.save(obj, cache_filepath.as_posix())
values = []
scheduler = cls(save_history=False, lr_scheduler=lr_scheduler, **kwargs)
for i in range(num_events):
scheduler(engine=None)
params = [p[scheduler.param_name] for p in scheduler.optimizer_param_groups]
values.append([i] + params)
obj = torch.load(cache_filepath.as_posix())
lr_scheduler.load_state_dict(obj["lr_scheduler"])
lr_scheduler.optimizer.load_state_dict(obj["optimizer"])
return values
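# --- Illustrative sketch (editor's addition, not part of ignite) --------------
# Wrapping a native torch scheduler and previewing its values without an engine,
# using the ``simulate_values`` helper defined just above. The helper name
# ``_demo_wrap_torch_scheduler`` is hypothetical.
def _demo_wrap_torch_scheduler():
    from torch.optim.lr_scheduler import StepLR
    optimizer = _get_fake_optimizer(torch.optim.SGD, lr=0.1)
    torch_lr_scheduler = StepLR(optimizer, step_size=3, gamma=0.1)
    # roughly: three events at lr=0.1, then 0.01, then 0.001, ... (see the docstring above)
    return LRScheduler.simulate_values(num_events=8, lr_scheduler=torch_lr_scheduler)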
def create_lr_scheduler_with_warmup(
lr_scheduler: Union[ParamScheduler, PyTorchLRScheduler],
warmup_start_value: float,
warmup_duration: int,
warmup_end_value: Optional[float] = None,
save_history: bool = False,
output_simulated_values: Optional[List] = None,
) -> "ConcatScheduler":
"""
Helper method to create a learning rate scheduler with a linear warm-up.
Args:
lr_scheduler: learning rate scheduler after the warm-up.
warmup_start_value: learning rate start value of the warm-up phase.
warmup_duration: warm-up phase duration, number of events.
warmup_end_value: learning rate end value of the warm-up phase, (default=None). If None,
warmup_end_value is set to optimizer initial lr.
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
output_simulated_values: optional output of simulated learning rate values.
If output_simulated_values is a list of None, e.g. `[None] * 100`, after the execution it will be filled
by 100 simulated learning rate values.
Returns:
ConcatScheduler
Note:
If the first learning rate value provided by `lr_scheduler` is different from `warmup_end_value`, an additional
event is added after the warm-up phase such that the warm-up ends with `warmup_end_value` value and then
`lr_scheduler` provides its learning rate values as normally.
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
from torch.optim.lr_scheduler import ExponentialLR
torch_lr_scheduler = ExponentialLR(optimizer=default_optimizer, gamma=0.98)
default_trainer = get_default_trainer()
scheduler = create_lr_scheduler_with_warmup(torch_lr_scheduler,
warmup_start_value=0.0,
warmup_end_value=0.1,
warmup_duration=3)
default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
@default_trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(default_optimizer.param_groups[0]["lr"])
default_trainer.run([0] * 8, max_epochs=1)
.. testoutput::
0.0
0.05
0.1
0.098
0.09604
0.09411...
0.09223...
0.09039...
.. versionadded:: 0.4.5
"""
if not isinstance(lr_scheduler, (ParamScheduler, PyTorchLRScheduler)):
raise TypeError(
"Argument lr_scheduler should be a subclass of "
f"torch.optim.lr_scheduler.{PyTorchLRScheduler.__name__} or ParamScheduler, "
f"but given {type(lr_scheduler)}"
)
if not isinstance(warmup_duration, numbers.Integral):
raise TypeError(f"Argument warmup_duration should be integer, but given {warmup_duration}")
if not (warmup_duration > 1):
raise ValueError(f"Argument warmup_duration should be at least 2 events, but given {warmup_duration}")
warmup_schedulers: List[ParamScheduler] = []
for param_group_index, param_group in enumerate(lr_scheduler.optimizer.param_groups):
if warmup_end_value is None:
param_group_warmup_end_value = param_group["lr"]
else:
param_group_warmup_end_value = warmup_end_value
milestones_values = [(0, warmup_start_value), (warmup_duration - 1, param_group_warmup_end_value)]
if isinstance(lr_scheduler, PyTorchLRScheduler):
init_lr = param_group["lr"]
if init_lr != param_group_warmup_end_value:
milestones_values.append((warmup_duration, init_lr))
# We need to advance torch lr_scheduler to avoid duplicated lr value
# given by PiecewiseLinear and LRScheduler.
# We suggest to attach output scheduler on ITERATION_STARTED but
# torch lr_scheduler works with ITERATION_COMPLETED
# See also https://github.com/pytorch/ignite/pull/2496#issuecomment-1065984440
lr_scheduler.last_epoch += 1
lr_scheduler = LRScheduler(lr_scheduler, save_history=save_history)
else:
init_lr = lr_scheduler.get_param()
if init_lr == param_group_warmup_end_value:
if warmup_duration > 2:
d = (param_group_warmup_end_value - warmup_start_value) / (warmup_duration - 1)
milestones_values[-1] = (warmup_duration - 2, param_group_warmup_end_value - d)
else:
milestones_values.pop(-1)
warmup_schedulers.append(
PiecewiseLinear(
lr_scheduler.optimizer,
param_name="lr",
milestones_values=milestones_values,
param_group_index=param_group_index,
save_history=save_history,
)
)
warmup_scheduler = ParamGroupScheduler(warmup_schedulers, save_history=save_history)
schedulers: List[Union[ParamScheduler, ParamGroupScheduler, PyTorchLRScheduler]] = [
warmup_scheduler,
lr_scheduler,
]
durations = [milestones_values[-1][0] + 1]
combined_scheduler = ConcatScheduler(schedulers, durations=durations, save_history=save_history)
if output_simulated_values is not None:
if not isinstance(output_simulated_values, list):
raise TypeError(
"Argument output_simulated_values should be a list of None, e.g. `[None] * 100`, "
f"but given {type(output_simulated_values)}."
)
num_events = len(output_simulated_values)
result = ConcatScheduler.simulate_values(num_events=num_events, schedulers=schedulers, durations=durations)
for i in range(num_events):
output_simulated_values[i] = result[i]
return combined_scheduler
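# --- Illustrative sketch (editor's addition, not part of ignite) --------------
# Using ``output_simulated_values`` to inspect the combined warm-up schedule
# before attaching it to a trainer, as described in the docstring above.
# The helper name ``_demo_warmup_preview`` is hypothetical.
def _demo_warmup_preview():
    from torch.optim.lr_scheduler import ExponentialLR
    optimizer = _get_fake_optimizer(torch.optim.SGD, lr=0.1)
    torch_lr_scheduler = ExponentialLR(optimizer=optimizer, gamma=0.98)
    simulated = [None] * 20  # will be filled with [event_index, lr] pairs
    scheduler = create_lr_scheduler_with_warmup(
        torch_lr_scheduler,
        warmup_start_value=0.0,
        warmup_end_value=0.1,
        warmup_duration=3,
        output_simulated_values=simulated,
    )
    return scheduler, simulated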
class PiecewiseLinear(ParamScheduler):
"""
Piecewise linear parameter scheduler
Args:
optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
param_name: name of optimizer's parameter to update.
milestones_values: list of tuples (event index, parameter value)
represents milestones and parameter. Milestones should be increasing integers.
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
param_group_index: optimizer's parameters group to use.
.. code-block:: python
scheduler = PiecewiseLinear(optimizer, "lr",
milestones_values=[(10, 0.5), (20, 0.45), (21, 0.3), (30, 0.1), (40, 0.1)])
# Attach to the trainer
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
#
# Sets the learning rate to 0.5 over the first 10 iterations, then decreases linearly from 0.5 to 0.45 between
# 10th and 20th iterations. Next there is a jump to 0.3 at the 21st iteration and LR decreases linearly
# from 0.3 to 0.1 between 21st and 30th iterations and remains 0.1 until the end of the iterations.
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode:: 1
default_trainer = get_default_trainer()
milestones_values = [(1, 1.0), (3, 0.8), (5, 0.2)]
scheduler = PiecewiseLinear(
default_optimizer, "lr", milestones_values=milestones_values)
            # Sets lr equal to 1 till the first iteration
            # Then linearly reduces lr from 1 to 0.8 till the third iteration
            # Then linearly reduces lr from 0.8 to 0.2 till the fifth iteration
default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
@default_trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(default_optimizer.param_groups[0]["lr"])
default_trainer.run([0] * 6, max_epochs=1)
.. testoutput:: 1
1.0
1.0
0.9
0.8
0.5
0.2
.. testcode:: 2
default_trainer = get_default_trainer()
optimizer = torch.optim.SGD(
[
{"params": default_model.base.parameters(), "lr": 0.1},
{"params": default_model.fc.parameters(), "lr": 1.0},
]
)
milestones_values1 = [(1, 0.1), (3, 0.08), (5, 0.02)]
scheduler2 = PiecewiseLinear(
optimizer, "lr", milestones_values=milestones_values1, param_group_index=0)
            # Sets lr equal to 0.1 till the first iteration
            # Then linearly reduces lr from 0.1 to 0.08 till the third iteration
            # Then linearly reduces lr from 0.08 to 0.02 till the fifth iteration
milestones_values2 = [(1, 1.0), (3, 0.8), (5, 0.2)]
scheduler1 = PiecewiseLinear(
optimizer, "lr", milestones_values=milestones_values2, param_group_index=1)
            # Sets lr equal to 1 till the first iteration
            # Then linearly reduces lr from 1 to 0.8 till the third iteration
            # Then linearly reduces lr from 0.8 to 0.2 till the fifth iteration
default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler1)
default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler2)
@default_trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(optimizer.param_groups[0]["lr"],
optimizer.param_groups[1]["lr"])
default_trainer.run([0] * 6, max_epochs=1)
.. testoutput:: 2
0.1 1.0
0.1 1.0
0.09 0.9
0.08 0.8
0.05 0.5
0.02 0.2
.. versionadded:: 0.4.5
"""
def __init__(
self,
optimizer: Optimizer,
param_name: str,
milestones_values: List[Tuple[int, float]],
save_history: bool = False,
param_group_index: Optional[int] = None,
):
super(PiecewiseLinear, self).__init__(optimizer, param_name, save_history, param_group_index=param_group_index)
if not isinstance(milestones_values, Sequence):
raise TypeError(
f"Argument milestones_values should be a list or tuple, but given {type(milestones_values)}"
)
if len(milestones_values) < 1:
raise ValueError(
f"Argument milestones_values should be with at least one value, but given {milestones_values}"
)
values: List[float] = []
milestones: List[int] = []
for pair in milestones_values:
if not isinstance(pair, tuple) or len(pair) != 2:
raise ValueError("Argument milestones_values should be a list of pairs (milestone, param_value)")
if not isinstance(pair[0], numbers.Integral):
raise TypeError(f"Value of a milestone should be integer, but given {type(pair[0])}")
if len(milestones) > 0 and pair[0] < milestones[-1]:
raise ValueError(
f"Milestones should be increasing integers, but given {pair[0]} is smaller "
f"than the previous milestone {milestones[-1]}"
)
milestones.append(pair[0])
values.append(pair[1])
self.values = values
self.milestones = milestones
self._index = 0
self._state_attrs += ["values", "milestones", "_index"]
def _get_start_end(self) -> Tuple[int, int, float, float]:
if self.milestones[0] > self.event_index:
return self.event_index - 1, self.event_index, self.values[0], self.values[0]
elif self.milestones[-1] <= self.event_index:
return (self.event_index, self.event_index + 1, self.values[-1], self.values[-1])
elif self.milestones[self._index] <= self.event_index < self.milestones[self._index + 1]:
return (
self.milestones[self._index],
self.milestones[self._index + 1],
self.values[self._index],
self.values[self._index + 1],
)
else:
self._index += 1
return self._get_start_end()
def get_param(self) -> float:
start_index, end_index, start_value, end_value = self._get_start_end()
return start_value + (end_value - start_value) * (self.event_index - start_index) / (end_index - start_index)
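# --- Illustrative sketch (editor's addition, not part of ignite) --------------
# Checking the piecewise interpolation above against the milestones used in the
# first docstring example; the helper name ``_demo_piecewise_linear`` is hypothetical.
def _demo_piecewise_linear():
    values = PiecewiseLinear.simulate_values(
        num_events=6, param_name="lr", milestones_values=[(1, 1.0), (3, 0.8), (5, 0.2)]
    )
    # expected (matching the docstring above): [[0, 1.0], [1, 1.0], [2, 0.9], [3, 0.8], [4, 0.5], [5, 0.2]]
    return values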
class ParamGroupScheduler:
"""
Scheduler helper to group multiple schedulers into one.
Args:
schedulers: list/tuple of parameter schedulers.
names: list of names of schedulers.
save_history: whether to save history or not.
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
default_trainer = get_default_trainer()
optimizer = torch.optim.SGD(
[
{"params": default_model.base.parameters(), "lr": 0.001},
{"params": default_model.fc.parameters(), "lr": 0.01},
]
)
# CosineAnnealing increases the learning rate from 0.0 to 1.0
# over a cycle of 4 iterations
scheduler_1 = CosineAnnealingScheduler(optimizer, "lr", 0.0, 1.0, 4, param_group_index=0)
# CosineAnnealing increases the learning rate from 0.0 to 0.1
# over a cycle of 4 iterations
scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", 0.0, 0.1, 4, param_group_index=1)
scheduler = ParamGroupScheduler(schedulers=[scheduler_1, scheduler_2],
names=["lr (base)", "lr (fc)"])
default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
@default_trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(optimizer.param_groups[0]["lr"],
optimizer.param_groups[1]["lr"])
default_trainer.run([0] * 8, max_epochs=1)
.. testoutput::
0.0 0.0
0.1464... 0.01464...
0.4999... 0.04999...
0.8535... 0.08535...
...
.. versionadded:: 0.4.5
"""
def __init__(self, schedulers: List[ParamScheduler], names: Optional[List[str]] = None, save_history: bool = False):
if not isinstance(schedulers, Sequence):
raise TypeError(f"Argument schedulers should be a list/tuple, but given {schedulers}")
if not all(isinstance(scheduler, ParamScheduler) for scheduler in schedulers):
raise ValueError(
f"Argument schedulers should be a list/tuple of parameter schedulers, but given {schedulers}"
)
if names is None:
names = [s.param_name for s in schedulers]
if not isinstance(names, (list, tuple)):
raise TypeError(f"Argument names should be a list/tuple, but given {names}")
if not all(isinstance(n, str) for n in names):
raise ValueError(f"Argument names should be a list/tuple of parameter scheduler's names, but given {names}")
if len(names) != len(schedulers):
raise ValueError(f"{len(schedulers)} should be equal {len(names)}")
self.schedulers = schedulers
self.names = names
# schedulers should have save_history sync with ParamGroupScheduler
for s in schedulers:
s.save_history = save_history
self.optimizer = [s.optimizer for s in self.schedulers]
self.param_name = [s.param_name for s in self.schedulers]
def __call__(self, engine: Optional[Engine], name: Optional[str] = None) -> None:
for scheduler, name in zip(self.schedulers, self.names):
scheduler(engine, name)
@property
def optimizer_param_groups(self) -> List[Dict[str, Any]]:
return [pg for scheduler in self.schedulers for pg in scheduler.optimizer_param_groups]
@property
def save_history(self) -> bool:
return self.schedulers[0].save_history
@save_history.setter
def save_history(self, value: bool) -> None:
for s in self.schedulers:
s.save_history = value
def state_dict(self) -> Dict[str, List[Any]]:
"""Returns a dictionary containing a whole state of ParamGroupScheduler.
Returns:
dict:
a dictionary containing a whole state of ParamGroupScheduler
"""
state_dict: Dict[str, List[Any]] = OrderedDict()
state_dict["schedulers"] = []
for n, s in zip(self.names, self.schedulers):
state_dict["schedulers"].append((n, s.state_dict()))
return state_dict
def load_state_dict(self, state_dict: Mapping) -> None:
"""Copies parameters from :attr:`state_dict` into this ParamScheduler.
Args:
state_dict: a dict containing parameters.
"""
if not isinstance(state_dict, Mapping):
raise TypeError(f"Argument state_dict should be a dictionary, but given {type(state_dict)}")
if "schedulers" not in state_dict:
raise ValueError(
f"Required state attribute '{'schedulers'}' is absent in provided state_dict '{state_dict.keys()}'"
)
sds = state_dict["schedulers"]
if len(sds) != len(self.schedulers):
raise ValueError(
f"Input state_dict contains {len(sds)} state_dicts of param group schedulers, "
f"but {len(self.schedulers)} needed"
)
for req_n, s, (n, sd) in zip(self.names, self.schedulers, sds):
if req_n != n:
raise ValueError(
f"Name of scheduler from input state dict does not correspond to required one, {n} vs {req_n}"
)
s.load_state_dict(sd)
@classmethod
def simulate_values(
cls, num_events: int, schedulers: List[ParamScheduler], **kwargs: Any
) -> List[List[Union[List[float], float, int]]]:
"""Method to simulate scheduled values during num_events events.
Args:
num_events: number of events during the simulation.
            schedulers: list of parameter schedulers.
kwargs: kwargs passed to construct an instance of
:class:`ignite.handlers.param_scheduler.ParamGroupScheduler`.
Returns:
list:
list of [event_index, scheduler_0_value, scheduler_1_value, ...], where scheduler_i_value
corresponds to the simulated param of scheduler i at 'event_index'th event.
"""
        # This scheduler uses `ParamScheduler` objects which
        # should be replicated in order to simulate values and
        # not perturb original schedulers.
with tempfile.TemporaryDirectory() as tmpdirname:
cache_filepath = Path(tmpdirname) / "ignite_lr_scheduler_cache.pt"
objs = {f"lr_scheduler_{i}": s.state_dict() for i, s in enumerate(schedulers)}
# all schedulers should be related to the same optimizer
objs["optimizer"] = schedulers[0].optimizer.state_dict()
torch.save(objs, cache_filepath.as_posix())
values = []
scheduler = cls(schedulers=schedulers, **kwargs)
for i in range(num_events):
params = [scheduler.get_param() for scheduler in schedulers]
values.append([i] + params)
scheduler(engine=None)
objs = torch.load(cache_filepath.as_posix())
for i, s in enumerate(schedulers):
s.load_state_dict(objs[f"lr_scheduler_{i}"])
s.optimizer.load_state_dict(objs["optimizer"])
return values
def get_param(self) -> List[Union[float, List[float]]]:
"""
Method to get current `schedulers`' parameter values
.. versionadded:: 0.4.11
"""
return [scheduler.get_param() for scheduler in self.schedulers]
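# --- Illustrative sketch (editor's addition, not part of ignite) --------------
# state_dict / load_state_dict round trip for a group of per-param-group
# schedulers, e.g. when resuming training. All names below are local to this
# sketch and hypothetical.
def _demo_param_group_scheduler_state():
    optimizer = torch.optim.SGD(
        [
            {"params": [torch.zeros(1, requires_grad=True)], "lr": 0.001},
            {"params": [torch.zeros(1, requires_grad=True)], "lr": 0.01},
        ]
    )
    scheduler_1 = CosineAnnealingScheduler(optimizer, "lr", 0.0, 1.0, 4, param_group_index=0)
    scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", 0.0, 0.1, 4, param_group_index=1)
    scheduler = ParamGroupScheduler([scheduler_1, scheduler_2], names=["lr (base)", "lr (fc)"])
    state = scheduler.state_dict()
    scheduler.load_state_dict(state)  # restores each wrapped scheduler, matched by name
    return scheduler.get_param()  # current values of both wrapped schedulers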
class ReduceLROnPlateauScheduler(ParamScheduler):
"""Reduce LR when a metric stops improving.
Wrapper of `torch.optim.lr_scheduler.ReduceLROnPlateau
<https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.ReduceLROnPlateau.html>`_.
Args:
optimizer: Wrapped optimizer.
metric_name: metric whose improvement is monitored.
Must be attached to the same engine.
        trainer: Trainer engine to log LR history in its
            `state.param_history`. Is used if `save_history`
            is true. Default: None.
        save_history: Whether to save history or not. If true,
            history will be logged in `trainer`'s `state.param_history`.
            Default: False.
param_group_index: `optimizer`'s parameters group
            to use. Default: None. Use all `optimizer`'s parameter groups.
scheduler_kwargs: Keyword arguments to be passed to the wrapped ``ReduceLROnPlateau``.
Examples:
.. code-block:: python
# Metric "accuracy" should increase the best value by
# more than 1 unit after at most 2 epochs, otherwise LR
# would get multiplied by 0.5 .
scheduler = ReduceLROnPlateauScheduler(
default_optimizer,
metric_name="accuracy", mode="max",
factor=0.5, patience=1, threshold_mode='abs',
threshold=1, trainer=trainer
)
metric = Accuracy()
default_evaluator.attach(metric, "accuracy")
default_evaluator.add_event_handler(Events.COMPLETED, scheduler)
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
default_trainer = get_default_trainer()
# Metric "loss" should decrease more than
# 0.1 of best loss after at most
# three iterations. Then best loss would get
# updated, otherwise lr is multiplied by 0.5
scheduler = ReduceLROnPlateauScheduler(
default_optimizer, "loss",
save_history=True, mode="min",
factor=0.5, patience=3, threshold_mode='rel',
threshold=0.1, trainer=default_trainer
)
metric_values = iter([10, 5, 3, 4, 4, 4, 5, 1])
default_evaluator.state.metrics = {"loss": None}
@default_trainer.on(Events.ITERATION_COMPLETED)
def set_metric_val():
default_evaluator.state.metrics["loss"] = next(metric_values)
default_evaluator.add_event_handler(Events.COMPLETED, scheduler)
@default_trainer.on(Events.ITERATION_COMPLETED)
def trigger_eval():
default_evaluator.run([0.])
default_trainer.run([0.] * 8)
print(default_trainer.state.param_history["lr"])
.. testoutput::
[[0.1], [0.1], [0.1], [0.1], [0.1], [0.1], [0.05], [0.05]]
.. versionadded:: 0.4.9
"""
def __init__(
self,
optimizer: Optimizer,
metric_name: str,
trainer: Optional[Engine] = None,
save_history: bool = False,
param_group_index: Optional[int] = None,
**scheduler_kwargs: Any,
):
super(ReduceLROnPlateauScheduler, self).__init__(
optimizer, "lr", save_history=save_history, param_group_index=param_group_index
)
self.metric_name = metric_name
self.trainer = trainer
self.optimizer = optimizer
if "min_lr" in scheduler_kwargs and param_group_index is not None:
min_lr = scheduler_kwargs["min_lr"]
if not isinstance(min_lr, float):
raise TypeError(f"When param_group_index is given, min_lr should be a float, but given {type(min_lr)}")
_min_lr = min_lr
min_lr = [0] * len(optimizer.param_groups)
min_lr[param_group_index] = _min_lr
else:
min_lr = 0
_scheduler_kwargs = scheduler_kwargs.copy()
_scheduler_kwargs["min_lr"] = min_lr
if "verbose" in _scheduler_kwargs:
warnings.warn(
"Found verbose=True in provided scheduler_kwargs. "
"It would be set to False. Please use save_history instead."
)
_scheduler_kwargs["verbose"] = False
self.scheduler = ReduceLROnPlateau(optimizer, **_scheduler_kwargs)
self.scheduler._reduce_lr = self._reduce_lr # type: ignore[attr-defined]
self._state_attrs += ["metric_name", "scheduler"]
def __call__(self, engine: Engine, name: Optional[str] = None) -> None: # type: ignore[override]
if not hasattr(engine.state, "metrics") or self.metric_name not in engine.state.metrics:
raise ValueError(
"Argument engine should have in its 'state', attribute 'metrics' "
f"which itself has the metric {self.metric_name}."
)
self.scheduler.step(engine.state.metrics[self.metric_name])
super().__call__(self.trainer, name)
def get_param(self) -> Union[float, List[float]]:
lrs = [pg["lr"] for pg in self.optimizer_param_groups]
return lrs[0] if len(lrs) == 1 else lrs
def _reduce_lr(self, epoch: int) -> None:
for i, param_group in enumerate(self.optimizer_param_groups):
old_lr = float(param_group["lr"])
new_lr = max(old_lr * self.scheduler.factor, self.scheduler.min_lrs[i])
if old_lr - new_lr > self.scheduler.eps:
param_group["lr"] = new_lr
@classmethod
def simulate_values( # type: ignore[override]
cls, num_events: int, metric_values: List[float], init_lr: float, **scheduler_kwargs: Any
) -> List[List[int]]:
"""Method to simulate scheduled values during num_events events.
Args:
num_events: number of events during the simulation.
metric_values: values to change LR based on.
init_lr: initial LR to start with.
scheduler_kwargs: kwargs passed to construct an instance of
:class:`ignite.handlers.param_scheduler.ReduceLROnPlateauScheduler`.
Returns:
            list of (event_index, value) pairs
"""
if len(metric_values) != num_events:
raise ValueError(
"Length of argument metric_values should be equal to num_events. "
f"{len(metric_values)} != {num_events}"
)
keys_to_remove = ["optimizer", "metric_name", "save_history"]
for key in keys_to_remove:
if key in scheduler_kwargs:
del scheduler_kwargs[key]
values = []
scheduler = cls(
optimizer=_get_fake_optimizer(torch.optim.SGD, lr=init_lr),
metric_name="metric",
save_history=False,
**scheduler_kwargs,
)
engine = Engine(lambda _, __: None)
for i in range(num_events):
engine.state.metrics["metric"] = metric_values[i]
scheduler(engine=engine)
values.append([i, scheduler.optimizer_param_groups[0][scheduler.param_name]])
return values
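# --- Illustrative sketch (editor's addition, not part of ignite) --------------
# Previewing ReduceLROnPlateauScheduler behaviour on a fixed metric trace with
# the ``simulate_values`` helper defined just above. The helper name
# ``_demo_plateau_simulation`` and the metric trace are hypothetical.
def _demo_plateau_simulation():
    metric_values = [10.0, 9.0, 9.0, 9.0, 9.0, 9.0]
    # with mode="min", factor=0.5 and patience=2, the lr should stay at 0.1
    # until the patience is exhausted and then be halved
    return ReduceLROnPlateauScheduler.simulate_values(
        num_events=len(metric_values),
        metric_values=metric_values,
        init_lr=0.1,
        mode="min",
        factor=0.5,
        patience=2,
    )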
def _get_fake_optimizer(
optimizer_cls: Optional[Union[Type[Optimizer], Type[torch.optim.SGD]]] = None, **kwargs: Any
) -> Union[Optimizer, torch.optim.SGD]:
t = torch.zeros([1], requires_grad=True)
if optimizer_cls is None:
optimizer_cls = torch.optim.SGD
kwargs["lr"] = 0.01
return optimizer_cls([t], **kwargs)
|
import collections.abc as collections
import numbers
import os
import stat
import tempfile
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from pathlib import Path
from typing import Any, Callable, Dict, List, Mapping, NamedTuple, Optional, Tuple, Union
import torch
import torch.nn as nn
from packaging.version import Version
if Version(torch.__version__) >= Version("1.9.0"):
from torch.distributed.optim import ZeroRedundancyOptimizer
HAVE_ZERO = True
else:
HAVE_ZERO = False
import ignite.distributed as idist
from ignite.base import Serializable
from ignite.engine import Engine, Events
__all__ = ["Checkpoint", "DiskSaver", "ModelCheckpoint", "BaseSaveHandler"]
class BaseSaveHandler(metaclass=ABCMeta):
"""Base class for save handlers
Methods to override:
- :meth:`~ignite.handlers.checkpoint.BaseSaveHandler.__call__`
- :meth:`~ignite.handlers.checkpoint.BaseSaveHandler.remove`
Note:
        In a derived class, please make sure that in a distributed configuration the overridden methods are called by a
        single process. Distributed configuration on XLA devices should be treated slightly differently: for saving a
        checkpoint with `xm.save() <https://pytorch.org/xla/release/1.5/index.html#torch_xla.core.xla_model.save>`_ all
        processes should enter the function. Otherwise, the application gets stuck.
"""
@abstractmethod
def __call__(self, checkpoint: Mapping, filename: str, metadata: Optional[Mapping] = None) -> None:
"""Method to save `checkpoint` with `filename`. Additionally, metadata dictionary is provided.
Metadata contains:
- `basename`: file prefix (if provided) with checkpoint name, e.g. `epoch_checkpoint`.
- `score_name`: score name if provided, e.g `val_acc`.
- `priority`: checkpoint priority value (higher is better), e.g. `12` or `0.6554435`
Args:
checkpoint: checkpoint dictionary to save.
filename: filename associated with checkpoint.
metadata: metadata on checkpoint to save.
"""
@abstractmethod
def remove(self, filename: str) -> None:
"""Method to remove saved checkpoint.
Args:
filename: filename associated with checkpoint.
"""
class Checkpoint(Serializable):
"""Checkpoint handler can be used to periodically save and load objects which have attribute
``state_dict/load_state_dict``. This class can use specific save handlers to store on the disk or a cloud
storage, etc. The Checkpoint handler (if used with :class:`~ignite.handlers.DiskSaver`) also handles automatically
moving data on TPU to CPU before writing the checkpoint.
Args:
to_save: Dictionary with the objects to save. Objects should have implemented ``state_dict`` and
``load_state_dict`` methods. If contains objects of type torch `DistributedDataParallel`_ or
`DataParallel`_, their internal wrapped model is automatically saved (to avoid additional key ``module.`` in
the state dictionary).
        save_handler: String, function or callable object
            used to save engine and other provided objects. Function receives two objects: checkpoint as a dictionary
            and filename. If ``save_handler`` is a callable class, it can
            inherit from :class:`~ignite.handlers.checkpoint.BaseSaveHandler` and optionally implement a ``remove`` method
            to keep a fixed number of saved checkpoints. If the user needs to save the engine's checkpoint on disk,
            ``save_handler`` can be defined with :class:`~ignite.handlers.DiskSaver`, or a string specifying the
            directory name can be passed to ``save_handler``.
filename_prefix: Prefix for the file name to which objects will be saved. See Note for details.
score_function: If not None, it should be a function taking a single argument,
:class:`~ignite.engine.engine.Engine` object, and returning a score (`float`). Objects with highest scores
will be retained.
score_name: If ``score_function`` not None, it is possible to store its value using
``score_name``. If ``score_function`` is None, ``score_name`` can be used alone to define ``score_function``
as ``Checkpoint.get_default_score_fn(score_name)`` by default.
n_saved: Number of objects that should be kept on disk. Older files will be removed. If set to
`None`, all objects are kept.
global_step_transform: global step transform function to output a desired global step.
Input of the function is ``(engine, event_name)``. Output of function should be an integer.
            Default is None, in which case global_step is based on the attached engine. If provided, the function
            output is used as global_step.
To setup global step from another engine, please use :meth:`~ignite.handlers.global_step_from_engine`.
filename_pattern: If ``filename_pattern`` is provided, this pattern will be used to render
checkpoint filenames. If the pattern is not defined, the default pattern would be used. See Note for
details.
include_self: Whether to include the `state_dict` of this object in the checkpoint. If `True`, then
there must not be another object in ``to_save`` with key ``checkpointer``.
greater_or_equal: if `True`, the latest equally scored model is stored. Otherwise, the first model.
Default, `False`.
save_on_rank: Which rank to save the objects on, in the distributed configuration. If ``save_handler`` is
string or :class:`~pathlib.Path`, this is also used to instantiate a :class:`~ignite.handlers.DiskSaver`.
.. _DistributedDataParallel: https://pytorch.org/docs/stable/generated/
torch.nn.parallel.DistributedDataParallel.html
.. _DataParallel: https://pytorch.org/docs/stable/generated/torch.nn.DataParallel.html
Note:
This class stores a single file as a dictionary of provided objects to save.
The filename is defined by ``filename_pattern`` and by default has the following
structure: ``{filename_prefix}_{name}_{suffix}.{ext}`` where
- ``filename_prefix`` is the argument passed to the constructor,
        - `name` is the key in ``to_save`` if a single object is to be stored, otherwise `name` is "checkpoint".
        - `suffix` is composed as follows: ``{global_step}_{score_name}={score}``.
+----------------+------------+-----------------------+----------------------------------------------+
| score_function | score_name | global_step_transform | suffix |
+================+============+=======================+==============================================+
| None | None | None | ``{engine.state.iteration}`` |
+----------------+------------+-----------------------+----------------------------------------------+
| X | None | None | ``{score}`` |
+----------------+------------+-----------------------+----------------------------------------------+
| X | None | X | ``{global_step}_{score}`` |
+----------------+------------+-----------------------+----------------------------------------------+
| X | X | X | ``{global_step}_{score_name}={score}`` |
+----------------+------------+-----------------------+----------------------------------------------+
| None | None | X | ``{global_step}`` |
+----------------+------------+-----------------------+----------------------------------------------+
| X | X | None | ``{score_name}={score}`` |
+----------------+------------+-----------------------+----------------------------------------------+
        Above, `global_step` is defined by the output of `global_step_transform` and `score` is defined by the output
        of `score_function`.
        By default, none of ``score_function``, ``score_name`` and ``global_step_transform`` is defined, so the suffix is
        set up from the attached engine's current iteration. The filename will be
        `{filename_prefix}_{name}_{engine.state.iteration}.{ext}`.
        For example, with ``score_name="neg_val_loss"`` and a ``score_function`` that returns `-loss` (as objects with
        the highest scores will be retained), the saved filename will be ``{filename_prefix}_{name}_neg_val_loss=-0.1234.pt``.
Note:
If ``filename_pattern`` is given, it will be used to render the filenames. ``filename_pattern`` is a string
that can contain ``{filename_prefix}``, ``{name}``, ``{score}``, ``{score_name}`` and ``{global_step}`` as
templates.
For example, let ``filename_pattern="{global_step}-{name}-{score}.pt"`` then the saved filename will be
``30000-checkpoint-94.pt``
        **Warning:** Please keep in mind that if the filename collides with one already used to save a checkpoint,
        the new checkpoint will replace the older one. This means that a filename like ``checkpoint.pt`` will be saved
        on every call and will always be overwritten by newer checkpoints.
Note:
To get the last stored filename, handler exposes attribute ``last_checkpoint``:
.. code-block:: python
handler = Checkpoint(...)
...
print(handler.last_checkpoint)
> checkpoint_12345.pt
Note:
        This class is distributed configuration-friendly: it is not required to instantiate the class only in the
        rank 0 process. This class automatically supports distributed configuration and, if used with
        :class:`~ignite.handlers.DiskSaver`, the checkpoint is stored by the rank 0 process.
.. warning::
When running on XLA devices or using :class:`~torch.distributed.optim.ZeroRedundancyOptimizer`, it
should be run in all processes, otherwise application can get stuck while saving the checkpoint.
.. code-block:: python
# Wrong:
# if idist.get_rank() == 0:
# handler = Checkpoint(...)
# trainer.add_event_handler(Events.ITERATION_COMPLETED(every=1000), handler)
# Correct:
handler = Checkpoint(...)
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=1000), handler)
Examples:
Attach the handler to make checkpoints during training:
.. code-block:: python
from ignite.engine import Engine, Events
from ignite.handlers import Checkpoint
trainer = ...
model = ...
optimizer = ...
lr_scheduler = ...
to_save = {'model': model, 'optimizer': optimizer, 'lr_scheduler': lr_scheduler, 'trainer': trainer}
if (checkpoint_iters):
# A: Output is "checkpoint_<iteration>.pt"
handler = Checkpoint(
to_save, '/tmp/models', n_saved=2
)
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=1000), handler)
else:
                # B: Output is "checkpoint_<epoch>.pt"
gst = lambda *_: trainer.state.epoch
handler = Checkpoint(
to_save, '/tmp/models', n_saved=2, global_step_transform=gst
)
trainer.add_event_handler(Events.EPOCH_COMPLETED, handler)
trainer.run(data_loader, max_epochs=6)
> A: ["checkpoint_7000.pt", "checkpoint_8000.pt", ]
> B: ["checkpoint_5.pt", "checkpoint_6.pt", ]
Attach the handler to an evaluator to save best model during the training
according to computed validation metric:
.. code-block:: python
from ignite.engine import Engine, Events
from ignite.handlers import Checkpoint, global_step_from_engine
trainer = ...
evaluator = ...
# Setup Accuracy metric computation on evaluator.
# evaluator.state.metrics contain 'accuracy',
# which will be used to define ``score_function`` automatically.
# Run evaluation on epoch completed event
# ...
to_save = {'model': model}
handler = Checkpoint(
to_save, '/tmp/models',
n_saved=2, filename_prefix='best',
score_name="accuracy",
global_step_transform=global_step_from_engine(trainer)
)
evaluator.add_event_handler(Events.COMPLETED, handler)
trainer.run(data_loader, max_epochs=10)
> ["best_model_9_accuracy=0.77.pt", "best_model_10_accuracy=0.78.pt", ]
Customise the ``save_handler``:
.. code-block:: python
handler = Checkpoint(
to_save, save_handler=DiskSaver('/tmp/models', create_dir=True, **kwargs), n_saved=2
)
.. versionchanged:: 0.4.3
- Checkpoint can save model with same filename.
- Added ``greater_or_equal`` argument.
.. versionchanged:: 0.4.7
- `score_name` can be used to define `score_function` automatically without providing `score_function`.
- `save_handler` automatically saves to disk if path to directory is provided.
- `save_on_rank` saves objects on this rank in a distributed configuration.
"""
Item = NamedTuple("Item", [("priority", int), ("filename", str)])
_state_dict_all_req_keys = ("saved",)
def __init__(
self,
to_save: Mapping,
save_handler: Union[str, Path, Callable, BaseSaveHandler],
filename_prefix: str = "",
score_function: Optional[Callable] = None,
score_name: Optional[str] = None,
n_saved: Union[int, None] = 1,
global_step_transform: Optional[Callable] = None,
filename_pattern: Optional[str] = None,
include_self: bool = False,
greater_or_equal: bool = False,
save_on_rank: int = 0,
):
if not isinstance(to_save, collections.Mapping):
raise TypeError(f"Argument `to_save` should be a dictionary, but given {type(to_save)}")
self._check_objects(to_save, "state_dict")
if include_self:
if not isinstance(to_save, collections.MutableMapping):
raise TypeError(
f"If `include_self` is True, then `to_save` must be mutable, but given {type(to_save)}."
)
if "checkpointer" in to_save:
raise ValueError(f"Cannot have key 'checkpointer' if `include_self` is True: {to_save}")
if not (
isinstance(save_handler, str)
or isinstance(save_handler, Path)
or callable(save_handler)
or isinstance(save_handler, BaseSaveHandler)
):
raise TypeError(
"Argument `save_handler` should be a string or Path object or callable or inherit from BaseSaveHandler"
)
if global_step_transform is not None and not callable(global_step_transform):
raise TypeError(f"global_step_transform should be a function, got {type(global_step_transform)} instead.")
self.to_save = to_save
self.filename_prefix = filename_prefix
if isinstance(save_handler, str) or isinstance(save_handler, Path):
self.save_handler = DiskSaver(save_handler, create_dir=True, save_on_rank=save_on_rank)
else:
self.save_handler = save_handler # type: ignore
self.score_function = score_function
self.score_name = score_name
if self.score_name is not None and self.score_function is None:
self.score_function = self.get_default_score_fn(self.score_name)
self.n_saved = n_saved
self.ext = "pt"
self.global_step_transform = global_step_transform
self.filename_pattern = filename_pattern
self._saved: List["Checkpoint.Item"] = []
self.include_self = include_self
self.greater_or_equal = greater_or_equal
self.save_on_rank = save_on_rank
def _get_filename_pattern(self, global_step: Optional[int]) -> str:
if self.filename_pattern is None:
filename_pattern = self.setup_filename_pattern(
with_prefix=len(self.filename_prefix) > 0,
with_score=self.score_function is not None,
with_score_name=self.score_name is not None,
with_global_step=global_step is not None,
)
else:
filename_pattern = self.filename_pattern
return filename_pattern
def reset(self) -> None:
"""Method to reset saved checkpoint names.
Use this method if the engine will independently run multiple times:
.. code-block:: python
from ignite.handlers import Checkpoint
trainer = ...
checkpointer = Checkpoint(...)
trainer.add_event_handler(Events.COMPLETED, checkpointer)
trainer.add_event_handler(Events.STARTED, checkpointer.reset)
# fold 0
trainer.run(data0, max_epochs=max_epochs)
print("Last checkpoint:", checkpointer.last_checkpoint)
# fold 1
trainer.run(data1, max_epochs=max_epochs)
print("Last checkpoint:", checkpointer.last_checkpoint)
.. versionadded:: 0.4.3
"""
self._saved = []
@property
def last_checkpoint(self) -> Optional[Union[str, Path]]:
if len(self._saved) < 1:
return None
if not isinstance(self.save_handler, DiskSaver):
return self._saved[-1].filename
return self.save_handler.dirname / self._saved[-1].filename
def _check_lt_n_saved(self, or_equal: bool = False) -> bool:
if self.n_saved is None:
return True
return len(self._saved) < self.n_saved + int(or_equal)
def _compare_fn(self, new: Union[int, float]) -> bool:
if self.greater_or_equal:
return new >= self._saved[0].priority
else:
return new > self._saved[0].priority
def __call__(self, engine: Engine) -> None:
global_step = None
if self.global_step_transform is not None:
global_step = self.global_step_transform(engine, engine.last_event_name)
if self.score_function is not None:
priority = self.score_function(engine)
if not isinstance(priority, numbers.Number):
raise ValueError("Output of score_function should be a number")
else:
if global_step is None:
global_step = engine.state.get_event_attrib_value(Events.ITERATION_COMPLETED)
priority = global_step
if self._check_lt_n_saved() or self._compare_fn(priority):
priority_str = f"{priority}" if isinstance(priority, numbers.Integral) else f"{priority:.4f}"
checkpoint = self._setup_checkpoint()
name = "checkpoint"
if len(checkpoint) == 1:
for k in checkpoint:
name = k
checkpoint = checkpoint[name]
filename_pattern = self._get_filename_pattern(global_step)
filename_dict = {
"filename_prefix": self.filename_prefix,
"ext": self.ext,
"name": name,
"score_name": self.score_name,
"score": priority_str if (self.score_function is not None) else None,
"global_step": global_step,
}
filename = filename_pattern.format(**filename_dict)
metadata = {
"basename": f"{self.filename_prefix}{'_' * int(len(self.filename_prefix) > 0)}{name}",
"score_name": self.score_name,
"priority": priority,
}
try:
index = list(map(lambda it: it.filename == filename, self._saved)).index(True)
to_remove = True
except ValueError:
index = 0
to_remove = not self._check_lt_n_saved()
if to_remove:
item = self._saved.pop(index)
if isinstance(self.save_handler, BaseSaveHandler):
self.save_handler.remove(item.filename)
self._saved.append(Checkpoint.Item(priority, filename))
self._saved.sort(key=lambda it: it[0])
if self.include_self:
# Now that we've updated _saved, we can add our own state_dict.
checkpoint["checkpointer"] = self.state_dict()
try:
self.save_handler(checkpoint, filename, metadata)
except TypeError:
self.save_handler(checkpoint, filename)
def _setup_checkpoint(self) -> Dict[str, Dict[Any, Any]]:
checkpoint = {}
if self.to_save is not None:
for k, obj in self.to_save.items():
if isinstance(obj, (nn.DataParallel, nn.parallel.DistributedDataParallel)):
obj = obj.module
elif HAVE_ZERO and isinstance(obj, ZeroRedundancyOptimizer):
obj.consolidate_state_dict(to=self.save_on_rank)
if self.save_on_rank != idist.get_rank():
continue
checkpoint[k] = obj.state_dict()
return checkpoint
@staticmethod
def setup_filename_pattern(
with_prefix: bool = True, with_score: bool = True, with_score_name: bool = True, with_global_step: bool = True
) -> str:
"""Helper method to get the default filename pattern for a checkpoint.
Args:
with_prefix: If True, the ``filename_prefix`` is added to the filename pattern:
``{filename_prefix}_{name}...``. Default, True.
with_score: If True, ``score`` is added to the filename pattern: ``..._{score}.{ext}``.
Default, True. At least one of ``with_score`` and ``with_global_step`` should be True.
with_score_name: If True, ``score_name`` is added to the filename pattern:
``..._{score_name}={score}.{ext}``. If activated, argument ``with_score`` should be
also True, otherwise an error is raised. Default, True.
with_global_step: If True, ``{global_step}`` is added to the
filename pattern: ``...{name}_{global_step}...``.
At least one of ``with_score`` and ``with_global_step`` should be True.
Examples:
.. code-block:: python
from ignite.handlers import Checkpoint
filename_pattern = Checkpoint.setup_filename_pattern()
print(filename_pattern)
> "{filename_prefix}_{name}_{global_step}_{score_name}={score}.{ext}"
.. versionadded:: 0.4.3
"""
filename_pattern = "{name}"
if not (with_global_step or with_score):
raise ValueError("At least one of with_score and with_global_step should be True.")
if with_global_step:
filename_pattern += "_{global_step}"
if with_score_name and with_score:
filename_pattern += "_{score_name}={score}"
elif with_score:
filename_pattern += "_{score}"
elif with_score_name:
raise ValueError("If with_score_name is True, with_score should be also True")
if with_prefix:
filename_pattern = "{filename_prefix}_" + filename_pattern
filename_pattern += ".{ext}"
return filename_pattern
@staticmethod
def _check_objects(objs: Mapping, attr: str) -> None:
for k, obj in objs.items():
if not hasattr(obj, attr):
raise TypeError(f"Object {type(obj)} should have `{attr}` method")
@staticmethod
def load_objects(to_load: Mapping, checkpoint: Union[str, Mapping, Path], **kwargs: Any) -> None:
"""Helper method to apply ``load_state_dict`` on the objects from ``to_load`` using states from ``checkpoint``.
Args:
to_load: a dictionary with objects, e.g. `{"model": model, "optimizer": optimizer, ...}`
checkpoint: a path, a string filepath or a dictionary with state_dicts to load, e.g.
`{"model": model_state_dict, "optimizer": opt_state_dict}`. If `to_load` contains a single key,
then checkpoint can contain directly corresponding state_dict.
kwargs: Keyword arguments accepted for `nn.Module.load_state_dict()`. Passing `strict=False` enables
the user to load part of the pretrained model (useful for example, in Transfer Learning)
Examples:
.. code-block:: python
import tempfile
from pathlib import Path
import torch
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint, Checkpoint
trainer = Engine(lambda engine, batch: None)
with tempfile.TemporaryDirectory() as tmpdirname:
handler = ModelCheckpoint(tmpdirname, 'myprefix', n_saved=None, create_dir=True)
model = torch.nn.Linear(3, 3)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
to_save = {"weights": model, "optimizer": optimizer}
trainer.add_event_handler(Events.EPOCH_COMPLETED(every=2), handler, to_save)
trainer.run(torch.randn(10, 1), 5)
to_load = to_save
checkpoint_fp = Path(tmpdirname) / 'myprefix_checkpoint_40.pt'
checkpoint = torch.load(checkpoint_fp)
Checkpoint.load_objects(to_load=to_load, checkpoint=checkpoint)
# or using a string for checkpoint filepath
to_load = to_save
checkpoint_fp = Path(tmpdirname) / 'myprefix_checkpoint_40.pt'
Checkpoint.load_objects(to_load=to_load, checkpoint=checkpoint_fp)
Note:
If ``to_load`` contains objects of type torch `DistributedDataParallel`_ or
`DataParallel`_, method ``load_state_dict`` will be applied to their internal wrapped model (``obj.module``).
.. _DistributedDataParallel: https://pytorch.org/docs/stable/generated/
torch.nn.parallel.DistributedDataParallel.html
.. _DataParallel: https://pytorch.org/docs/stable/generated/torch.nn.DataParallel.html
"""
if isinstance(checkpoint, (str, Path)):
checkpoint_obj = torch.load(checkpoint)
else:
checkpoint_obj = checkpoint
Checkpoint._check_objects(to_load, "load_state_dict")
if not isinstance(checkpoint, (collections.Mapping, str, Path)):
raise TypeError(f"Argument checkpoint should be a string or a dictionary, but given {type(checkpoint)}")
if len(kwargs) > 1 or any(k for k in kwargs if k not in ["strict"]):
warnings.warn("kwargs contains keys other than strict and these will be ignored")
is_state_dict_strict = kwargs.get("strict", True)
def _load_object(obj: Any, chkpt_obj: Any) -> None:
if isinstance(obj, (nn.DataParallel, nn.parallel.DistributedDataParallel)):
obj = obj.module
if isinstance(obj, torch.nn.Module):
obj.load_state_dict(chkpt_obj, strict=is_state_dict_strict)
else:
obj.load_state_dict(chkpt_obj)
if len(to_load) == 1:
# single object and checkpoint is directly a state_dict
key, obj = list(to_load.items())[0]
if key not in checkpoint_obj:
_load_object(obj, checkpoint_obj)
return
# multiple objects to load
for k, obj in to_load.items():
if k not in checkpoint_obj:
raise ValueError(f"Object labeled by '{k}' from `to_load` is not found in the checkpoint")
_load_object(obj, checkpoint_obj[k])
def reload_objects(self, to_load: Mapping, load_kwargs: Optional[Dict] = None, **filename_components: Any) -> None:
"""Helper method to apply ``load_state_dict`` on the objects from ``to_load``. Filename components such as
name, score and global state can be configured.
Args:
to_load: a dictionary with objects, e.g. `{"model": model, "optimizer": optimizer, ...}`
load_kwargs: Keyword arguments accepted for `nn.Module.load_state_dict()`. Passing `strict=False` enables
the user to load part of the pretrained model (useful for example, in Transfer Learning)
filename_components: Filename components used to define the checkpoint file path.
Keyword arguments accepted are `name`, `score` and `global_state`.
Examples:
.. code-block:: python
import tempfile
import torch
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint
trainer = Engine(lambda engine, batch: None)
with tempfile.TemporaryDirectory() as tmpdirname:
checkpoint = ModelCheckpoint(tmpdirname, 'myprefix', n_saved=None, create_dir=True)
model = torch.nn.Linear(3, 3)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
to_save = {"weights": model, "optimizer": optimizer}
trainer.add_event_handler(Events.EPOCH_COMPLETED(every=2), checkpoint, to_save)
trainer.run(torch.randn(10, 1), 5)
to_load = to_save
# load checkpoint myprefix_checkpoint_40.pt
checkpoint.reload_objects(to_load=to_load, global_step=40)
Note:
If ``to_load`` contains objects of type torch `DistributedDataParallel`_ or
`DataParallel`_, method ``load_state_dict`` will be applied to their internal wrapped model (``obj.module``).
Note:
This method works only when the ``save_handler`` is of types string,
:class:`~pathlib.Path` or :class:`~ignite.handlers.checkpoint.DiskSaver`.
.. _DistributedDataParallel: https://pytorch.org/docs/stable/generated/
torch.nn.parallel.DistributedDataParallel.html
.. _DataParallel: https://pytorch.org/docs/stable/generated/torch.nn.DataParallel.html
"""
if not isinstance(self.save_handler, DiskSaver):
raise AttributeError(
f"Checkpoint's `save_handler` should be of type `DiskSaver`, given {type(self.save_handler)}"
)
global_step = filename_components.get("global_step", None)
filename_pattern = self._get_filename_pattern(global_step)
checkpoint = self._setup_checkpoint()
name = "checkpoint"
if len(checkpoint) == 1:
for k in checkpoint:
name = k
name = filename_components.get("name", name)
score = filename_components.get("score", None)
filename_dict = {
"filename_prefix": self.filename_prefix,
"ext": self.ext,
"name": name,
"score_name": self.score_name,
"score": score,
"global_step": global_step,
}
checkpoint_fp = filename_pattern.format(**filename_dict)
path = self.save_handler.dirname / checkpoint_fp
load_kwargs = {} if load_kwargs is None else load_kwargs
Checkpoint.load_objects(to_load=to_load, checkpoint=path, **load_kwargs)
def state_dict(self) -> "OrderedDict[str, List[Tuple[int, str]]]":
"""Method returns state dict with saved items: list of ``(priority, filename)`` pairs.
Can be used to save internal state of the class.
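A minimal round-trip sketch (``to_save`` and ``save_handler`` are assumed to be defined as in the
class-level examples above):
.. code-block:: python
checkpointer = Checkpoint(to_save, save_handler, n_saved=2)
# ... training runs and checkpoints are saved ...
saved = checkpointer.state_dict()
# restore the bookkeeping of saved checkpoints on a fresh handler
new_checkpointer = Checkpoint(to_save, save_handler, n_saved=2)
new_checkpointer.load_state_dict(saved)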
"""
return OrderedDict([("saved", [(p, f) for p, f in self._saved])])
def load_state_dict(self, state_dict: Mapping) -> None:
"""Method replaces internal state of the class with provided state dict data.
Args:
state_dict: a dict with "saved" key and list of ``(priority, filename)`` pairs as values.
"""
super().load_state_dict(state_dict)
self._saved = [Checkpoint.Item(p, f) for p, f in state_dict["saved"]]
@staticmethod
def get_default_score_fn(metric_name: str, score_sign: float = 1.0) -> Callable:
"""Helper method to get default score function based on the metric name.
Args:
metric_name: metric name to get the value from ``engine.state.metrics``.
Engine is the one to which :class:`~ignite.handlers.checkpoint.Checkpoint` handler is added.
score_sign: sign of the score: 1.0 or -1.0. For error-like metrics, e.g. smaller is better,
a negative score sign should be used (objects with larger score are retained). Default, 1.0.
Examples:
.. code-block:: python
from ignite.handlers import Checkpoint
best_acc_score = Checkpoint.get_default_score_fn("accuracy")
best_model_handler = Checkpoint(
to_save, save_handler, score_name="val_accuracy", score_function=best_acc_score
)
evaluator.add_event_handler(Events.COMPLETED, best_model_handler)
Usage with error-like metric:
.. code-block:: python
from ignite.handlers import Checkpoint
neg_loss_score = Checkpoint.get_default_score_fn("loss", -1.0)
best_model_handler = Checkpoint(
to_save, save_handler, score_name="val_neg_loss", score_function=neg_loss_score
)
evaluator.add_event_handler(Events.COMPLETED, best_model_handler)
.. versionadded:: 0.4.3
"""
if score_sign not in (1.0, -1.0):
raise ValueError("Argument score_sign should be 1 or -1")
def wrapper(engine: Engine) -> float:
return score_sign * engine.state.metrics[metric_name]
return wrapper
class DiskSaver(BaseSaveHandler):
"""Handler that saves input checkpoint on a disk.
Args:
dirname: Directory path where the checkpoint will be saved
atomic: if True, checkpoint is serialized to a temporary file, and then
moved to final destination, so that files are guaranteed to not be damaged
(for example if exception occurs during saving).
create_dir: if True, will create directory ``dirname`` if it doesn't exist.
require_empty: If True, will raise exception if there are any files in the
directory ``dirname``.
save_on_rank: The rank on which the checkpoint will be saved. Used in distributed
configuration.
kwargs: Accepted keyword arguments for `torch.save` or `xm.save`.
.. versionchanged:: 0.4.2
Accept ``kwargs`` for `torch.save` or `xm.save`.
.. versionchanged:: 0.4.10
Argument ``save_on_rank`` was added to specify the rank on which checkpoint should be saved.
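Examples:
A minimal usage sketch; ``model`` and ``trainer`` are assumed to be defined elsewhere, and
``DiskSaver`` is passed to :class:`~ignite.handlers.checkpoint.Checkpoint` as its ``save_handler``:
.. code-block:: python
from ignite.engine import Events
from ignite.handlers import Checkpoint, DiskSaver
to_save = {'model': model}
handler = Checkpoint(
    to_save, DiskSaver('/tmp/models', create_dir=True, require_empty=False), n_saved=2
)
trainer.add_event_handler(Events.EPOCH_COMPLETED, handler)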
"""
def __init__(
self,
dirname: Union[str, Path],
atomic: bool = True,
create_dir: bool = True,
require_empty: bool = True,
save_on_rank: int = 0,
**kwargs: Any,
):
self.dirname = Path(dirname).expanduser()
self._atomic = atomic
self.save_on_rank = save_on_rank
if idist.get_rank() == save_on_rank:
self._check_and_setup(self.dirname, create_dir, require_empty)
self.kwargs = kwargs
@staticmethod
def _check_and_setup(dirname: Path, create_dir: bool, require_empty: bool) -> None:
if create_dir:
if not dirname.exists():
dirname.mkdir(parents=True)
# Ensure that dirname exists
if not dirname.exists():
raise ValueError(f"Directory path '{dirname}' is not found")
if require_empty:
matched = [fname for fname in os.listdir(dirname) if fname.endswith(".pt")]
if len(matched) > 0:
raise ValueError(
f"Files {matched} with extension '.pt' are already present "
f"in the directory {dirname}. If you want to use this "
"directory anyway, pass `require_empty=False`."
""
)
def __call__(self, checkpoint: Mapping, filename: str, metadata: Optional[Mapping] = None) -> None:
path = self.dirname / filename
if idist.has_xla_support:
import torch_xla.core.xla_model as xm
# all tpu procs should enter here as internally performs sync across device
self._save_func(checkpoint, path, xm.save)
elif self.save_on_rank == idist.get_rank():
self._save_func(checkpoint, path, torch.save)
def _save_func(self, checkpoint: Mapping, path: Path, func: Callable) -> None:
if not self._atomic:
func(checkpoint, path, **self.kwargs)
else:
tmp = tempfile.NamedTemporaryFile(delete=False, dir=self.dirname)
tmp_file = tmp.file
tmp_name = tmp.name
try:
func(checkpoint, tmp_file, **self.kwargs)
except BaseException:
tmp.close()
os.remove(tmp_name)
raise
else:
tmp.close()
os.replace(tmp.name, path)
# append group/others read mode
os.chmod(path, os.stat(path).st_mode | stat.S_IRGRP | stat.S_IROTH)
def remove(self, filename: str) -> None:
if idist.get_rank() == self.save_on_rank:
path = self.dirname / filename
path.unlink()
class ModelCheckpoint(Checkpoint):
"""ModelCheckpoint handler, inherits from :class:`~ignite.handlers.checkpoint.Checkpoint`, can be used
to periodically save objects to disk only. If you need to store checkpoints in
another storage type, please consider :class:`~ignite.handlers.checkpoint.Checkpoint`.
It also provides `last_checkpoint` attribute to show the last saved checkpoint.
This handler expects two arguments:
- an :class:`~ignite.engine.engine.Engine` object
- a `dict` mapping names (`str`) to objects that should be saved to disk.
See Examples for further details.
.. warning::
Behaviour of this class has been changed since v0.3.0.
There is no longer an internal counter used to indicate the number of save actions. Previously, its value
appeared as `step_number` in the filename, e.g. `{filename_prefix}_{name}_{step_number}.pt`. Now,
`step_number` is replaced by the current engine's epoch if `score_function` is specified and by the
current iteration otherwise.
A single `pt` file is created instead of multiple files.
Args:
dirname: Directory path where objects will be saved.
filename_prefix: Prefix for the file names to which objects will be saved. See Notes of
:class:`~ignite.handlers.checkpoint.Checkpoint` for more details.
score_function: if not None, it should be a function taking a single argument, an
:class:`~ignite.engine.engine.Engine` object, and return a score (`float`). Objects with highest scores
will be retained.
score_name: if ``score_function`` is not None, it is possible to store its value using
`score_name`. See Examples of :class:`~ignite.handlers.checkpoint.Checkpoint` for more details.
n_saved: Number of objects that should be kept on disk. Older files will be removed. If set to
`None`, all objects are kept.
atomic: If True, objects are serialized to a temporary file, and then moved to final
destination, so that files are guaranteed to not be damaged (for example if exception
occurs during saving).
require_empty: If True, will raise exception if there are any files starting with
``filename_prefix`` in the directory ``dirname``.
create_dir: If True, will create directory ``dirname`` if it does not exist.
global_step_transform: global step transform function to output a desired global step.
Input of the function is `(engine, event_name)`. Output of function should be an integer.
Default is None, meaning global_step is based on the attached engine. If provided, the function output is used as global_step.
To setup global step from another engine, please use :meth:`~ignite.handlers.global_step_from_engine`.
filename_pattern: If ``filename_pattern`` is provided, this pattern will be used to render
checkpoint filenames. If the pattern is not defined, the default pattern would be used.
See :class:`~ignite.handlers.checkpoint.Checkpoint` for details.
include_self: Whether to include the `state_dict` of this object in the checkpoint. If `True`, then
there must not be another object in ``to_save`` with key ``checkpointer``.
greater_or_equal: if `True`, the latest equally scored model is stored. Otherwise, the first model.
Default, `False`.
save_on_rank: Which rank to save the objects on, in the distributed configuration. Used to
instantiate a :class:`~ignite.handlers.DiskSaver` and is also passed to the parent class.
kwargs: Accepted keyword arguments for `torch.save` or `xm.save` in `DiskSaver`.
.. versionchanged:: 0.4.2
Accept ``kwargs`` for `torch.save` or `xm.save`
.. versionchanged:: 0.4.9
Accept ``filename_pattern`` and ``greater_or_equal`` for parity
with :class:`~ignite.handlers.checkpoint.Checkpoint`
.. versionchanged:: 0.4.10
Added `save_on_rank` arg to save objects on this rank in a distributed configuration
Examples:
.. testcode:: python
import os
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint
from torch import nn
trainer = Engine(lambda engine, batch: None)
handler = ModelCheckpoint('/tmp/models', 'myprefix', n_saved=2, create_dir=True, require_empty=False)
model = nn.Linear(3, 3)
trainer.add_event_handler(Events.EPOCH_COMPLETED(every=2), handler, {'mymodel': model})
trainer.run([0, 1, 2, 3, 4], max_epochs=6)
print(sorted(os.listdir('/tmp/models')))
print(handler.last_checkpoint)
.. testoutput:: python
['myprefix_mymodel_20.pt', 'myprefix_mymodel_30.pt']
/tmp/models/myprefix_mymodel_30.pt
"""
def __init__(
self,
dirname: Union[str, Path],
filename_prefix: str = "",
score_function: Optional[Callable] = None,
score_name: Optional[str] = None,
n_saved: Union[int, None] = 1,
atomic: bool = True,
require_empty: bool = True,
create_dir: bool = True,
global_step_transform: Optional[Callable] = None,
filename_pattern: Optional[str] = None,
include_self: bool = False,
greater_or_equal: bool = False,
save_on_rank: int = 0,
**kwargs: Any,
):
disk_saver = DiskSaver(
dirname,
atomic=atomic,
create_dir=create_dir,
require_empty=require_empty,
save_on_rank=save_on_rank,
**kwargs,
)
super(ModelCheckpoint, self).__init__(
to_save={},
save_handler=disk_saver,
filename_prefix=filename_prefix,
score_function=score_function,
score_name=score_name,
n_saved=n_saved,
global_step_transform=global_step_transform,
filename_pattern=filename_pattern,
include_self=include_self,
greater_or_equal=greater_or_equal,
save_on_rank=save_on_rank,
)
@property
def last_checkpoint(self) -> Optional[Union[str, Path]]:
if len(self._saved) < 1:
return None
if not isinstance(self.save_handler, DiskSaver):
raise RuntimeError(f"Internal error, save_handler should be DiskSaver, but has {type(self.save_handler)}.")
return self.save_handler.dirname / self._saved[-1].filename
def __call__(self, engine: Engine, to_save: Mapping): # type: ignore
if len(to_save) == 0:
raise RuntimeError("No objects to checkpoint found.")
self._check_objects(to_save, "state_dict")
self.to_save = to_save
super(ModelCheckpoint, self).__call__(engine)
|
import logging
import numbers
from typing import Callable, Union
import torch
from ignite.engine import Engine
from ignite.utils import apply_to_type, setup_logger
__all__ = ["TerminateOnNan"]
class TerminateOnNan:
"""TerminateOnNan handler can be used to stop the training if the `process_function`'s output
contains a NaN or infinite number or `torch.tensor`.
The output can be of type: number, tensor or collection of them. The training is stopped if
there is at least one number/tensor with a NaN or infinite value. For example, if the output is
`[1.23, torch.tensor(...), torch.tensor(float('nan'))]` the handler will stop the training.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into a number or `torch.tensor`
or collection of them. This can be useful if, for example, you have a multi-output model and
you want to check one or multiple values of the output.
Examples:
.. code-block:: python
trainer.add_event_handler(Events.ITERATION_COMPLETED, TerminateOnNan())
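If the engine's output is, for example, a dictionary (a hypothetical structure shown only for
illustration), ``output_transform`` can select the value to check:
.. code-block:: python
trainer.add_event_handler(
    Events.ITERATION_COMPLETED,
    TerminateOnNan(output_transform=lambda output: output["loss"])
)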
"""
def __init__(self, output_transform: Callable = lambda x: x):
self.logger = setup_logger(__name__ + "." + self.__class__.__name__)
self.logger.addHandler(logging.StreamHandler())
self._output_transform = output_transform
def __call__(self, engine: Engine) -> None:
output = self._output_transform(engine.state.output)
def raise_error(x: Union[float, torch.Tensor]) -> None:
if isinstance(x, numbers.Number):
x = torch.tensor(x)
if isinstance(x, torch.Tensor) and not bool(torch.isfinite(x).all()):
raise RuntimeError("Infinite or NaN tensor found.")
try:
apply_to_type(output, (numbers.Number, torch.Tensor), raise_error)
except RuntimeError:
self.logger.warning(f"{self.__class__.__name__}: Output '{output}' contains NaN or Inf. Stop training")
engine.terminate()
|
# coding: utf-8
import contextlib
import logging
import tempfile
import warnings
from math import ceil
from pathlib import Path
from typing import Any, Callable, Dict, List, Mapping, Optional, Union
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
import ignite.distributed as idist
from ignite.engine import Engine, Events
from ignite.handlers import Checkpoint
from ignite.handlers.param_scheduler import LRScheduler, ParamGroupScheduler, PiecewiseLinear
class FastaiLRFinder:
"""Learning rate finder handler for supervised trainers.
While attached, the handler increases the learning rate between two
boundaries in a linear or exponential manner. It provides valuable
information on how well the network can be trained over a range of learning
rates and what the optimal learning rate might be.
Examples:
.. code-block:: python
from ignite.handlers import FastaiLRFinder
trainer = ...
model = ...
optimizer = ...
lr_finder = FastaiLRFinder()
to_save = {"model": model, "optimizer": optimizer}
with lr_finder.attach(trainer, to_save=to_save) as trainer_with_lr_finder:
trainer_with_lr_finder.run(dataloader)
# Get lr_finder results
lr_finder.get_results()
# Plot lr_finder results (requires matplotlib)
lr_finder.plot()
# get lr_finder suggestion for lr
lr_finder.lr_suggestion()
Note:
When the context manager is exited, all LR finder handlers are removed.
Note:
Please also keep in mind that all other handlers attached to the trainer will be executed during the LR finder's run.
Note:
This class may require `matplotlib` package to be installed to plot learning rate range test:
.. code-block:: bash
pip install matplotlib
References:
Cyclical Learning Rates for Training Neural Networks:
https://arxiv.org/abs/1506.01186
fastai/lr_find: https://github.com/fastai/fastai
.. versionadded:: 0.4.6
"""
_lr_schedule: Union[LRScheduler, PiecewiseLinear, ParamGroupScheduler]
def __init__(self) -> None:
self._diverge_flag = False
self._history: Dict[str, List[Any]] = {}
self._best_loss = None
self.logger = logging.getLogger(__name__ + "." + self.__class__.__name__)
def _run(
self,
trainer: Engine,
optimizer: Optimizer,
output_transform: Callable,
num_iter: int,
start_lrs: List[float],
end_lrs: List[float],
step_mode: str,
smooth_f: float,
diverge_th: float,
) -> None:
self._history = {"lr": [], "loss": []}
self._best_loss = None
self._diverge_flag = False
# attach LRScheduler to trainer.
if num_iter is None:
num_iter = trainer.state.epoch_length * trainer.state.max_epochs
else:
max_iter = trainer.state.epoch_length * trainer.state.max_epochs # type: ignore[operator]
if max_iter < num_iter:
max_iter = num_iter
trainer.state.max_iters = num_iter
trainer.state.max_epochs = ceil(num_iter / trainer.state.epoch_length) # type: ignore[operator]
if not trainer.has_event_handler(self._reached_num_iterations):
trainer.add_event_handler(Events.ITERATION_COMPLETED, self._reached_num_iterations, num_iter)
# attach loss and lr logging
if not trainer.has_event_handler(self._log_lr_and_loss):
trainer.add_event_handler(
Events.ITERATION_COMPLETED, self._log_lr_and_loss, output_transform, smooth_f, diverge_th
)
self.logger.debug(f"Running LR finder for {num_iter} iterations")
# Initialize the proper learning rate policy
if step_mode.lower() == "exp":
self._lr_schedule = LRScheduler(_ExponentialLR(optimizer, start_lrs, end_lrs, num_iter))
else:
if len(start_lrs) == 1:
self._lr_schedule = PiecewiseLinear(
optimizer,
param_name="lr",
milestones_values=[(0, start_lrs[0]), (num_iter, end_lrs[0])],
)
else:
self._lr_schedule = ParamGroupScheduler(
[
PiecewiseLinear(
optimizer,
param_name="lr",
milestones_values=[(0, start_lrs[i]), (num_iter, end_lrs[i])],
param_group_index=i,
)
for i in range(len(optimizer.param_groups))
]
)
if not trainer.has_event_handler(self._lr_schedule):
trainer.add_event_handler(Events.ITERATION_COMPLETED, self._lr_schedule, num_iter)
def _reset(self, trainer: Engine) -> None:
self.logger.debug("Completed LR finder run")
trainer.remove_event_handler(self._lr_schedule, Events.ITERATION_COMPLETED)
trainer.remove_event_handler(self._log_lr_and_loss, Events.ITERATION_COMPLETED)
trainer.remove_event_handler(self._reached_num_iterations, Events.ITERATION_COMPLETED)
def _log_lr_and_loss(self, trainer: Engine, output_transform: Callable, smooth_f: float, diverge_th: float) -> None:
output = trainer.state.output
loss = output_transform(output)
if not isinstance(loss, float):
if isinstance(loss, torch.Tensor):
if (loss.ndimension() == 0) or (loss.ndimension() == 1 and len(loss) == 1):
loss = loss.item()
else:
raise ValueError(
"if output of the engine is torch.Tensor, then "
"it must be 0d torch.Tensor or 1d torch.Tensor with 1 element, "
f"but got torch.Tensor of shape {loss.shape}"
)
else:
raise TypeError(
"output of the engine should be of type float or 0d torch.Tensor "
"or 1d torch.Tensor with 1 element, "
f"but got output of type {type(loss).__name__}"
)
loss = idist.all_reduce(loss)
lr = self._lr_schedule.get_param()
self._history["lr"].append(lr)
if trainer.state.iteration == 1:
self._best_loss = loss
else:
if smooth_f > 0:
loss = smooth_f * loss + (1 - smooth_f) * self._history["loss"][-1]
if loss < self._best_loss:
self._best_loss = loss
self._history["loss"].append(loss)
# Check if the loss has diverged; if it has, stop the trainer
if self._history["loss"][-1] > diverge_th * self._best_loss: # type: ignore[operator]
self._diverge_flag = True
self.logger.info("Stopping early, the loss has diverged")
trainer.terminate()
def _reached_num_iterations(self, trainer: Engine, num_iter: int) -> None:
if trainer.state.iteration > num_iter:
trainer.terminate()
def _warning(self, _: Any) -> None:
if not self._diverge_flag:
warnings.warn(
"Run completed without loss diverging, increase end_lr, decrease diverge_th or look"
" at lr_finder.plot()",
UserWarning,
)
def _detach(self, trainer: Engine) -> None:
"""
Detaches lr_finder from trainer.
Args:
trainer: the trainer to detach from.
"""
if trainer.has_event_handler(self._run, Events.STARTED):
trainer.remove_event_handler(self._run, Events.STARTED)
if trainer.has_event_handler(self._warning, Events.COMPLETED):
trainer.remove_event_handler(self._warning, Events.COMPLETED)
if trainer.has_event_handler(self._reset, Events.COMPLETED):
trainer.remove_event_handler(self._reset, Events.COMPLETED)
def get_results(self) -> Dict[str, List[Any]]:
"""
Returns:
Dictionary with loss and lr logs from the previous run
"""
return self._history
def plot(
self,
skip_start: int = 10,
skip_end: int = 5,
log_lr: bool = True,
display_suggestion: bool = True,
ax: Optional[Any] = None,
**kwargs: Any,
) -> Any:
"""Plots the learning rate range test.
This method requires ``matplotlib`` package to be installed:
.. code-block:: bash
pip install matplotlib
Args:
skip_start: number of batches to trim from the start.
Default: 10.
skip_end: number of batches to trim from the end.
Default: 5.
log_lr: True to plot the learning rate in a logarithmic
scale; otherwise, plotted in a linear scale. Default: True.
display_suggestion: if True, red dot shows the suggested learning rate.
ax: Pre-existing axes for the plot. Default: None.
kwargs: optional kwargs passed to ``plt.subplots`` if ``ax`` is not provided.
.. code-block:: python
ax = lr_finder.plot(skip_end=0)
ax.figure.savefig("output.jpg")
"""
try:
from matplotlib import pyplot as plt
except ImportError:
raise ModuleNotFoundError(
"This method requires matplotlib to be installed. "
"Please install it with command: \n pip install matplotlib"
)
if not self._history:
raise RuntimeError("learning rate finder didn't run yet so results can't be plotted")
if skip_start < 0:
raise ValueError("skip_start cannot be negative")
if skip_end < 0:
raise ValueError("skip_end cannot be negative")
# Get the data to plot from the history dictionary.
lrs = self._history["lr"]
losses = self._history["loss"]
num_groups = len(lrs[0]) if isinstance(lrs[0], list) else 1
legends = [f"suggested lr for param_groups {i}" for i in range(num_groups)]
if ax is None:
fig, ax = plt.subplots(**kwargs)
# Check to show the suggested learning rate
if display_suggestion:
sug_lr = self.lr_suggestion()
idx = self._history["lr"].index(sug_lr)
if skip_start >= idx:
warnings.warn(
"skip_start is larger than the index of the suggested LR,"
" so it will not be visible on the plot. Please make the value smaller.",
UserWarning,
)
corresponding_loss = self._history["loss"][int(idx)]
# Check if optimizer has multiple param_groups
if not isinstance(sug_lr, list):
sug_lr = [
sug_lr,
]
for lr in sug_lr:
ax.scatter(
lr, corresponding_loss, color="red" if len(sug_lr) == 1 else None, s=75, marker="o", zorder=3
)
# handle skip_end=0 properly
if skip_end == 0:
lrs = lrs[skip_start:]
losses = losses[skip_start:]
else:
lrs = lrs[skip_start:-skip_end]
losses = losses[skip_start:-skip_end]
plt.legend(legends)
# Plot loss as a function of the learning rate
ax.plot(lrs, losses)
if log_lr:
ax.set_xscale("log")
lr_min = min(lrs[0]) if isinstance(lrs[0], list) else lrs[0]
lr_max = max(lrs[-1]) if isinstance(lrs[-1], list) else lrs[-1]
ax.set_xlim([lr_min, lr_max])
ax.set_xlabel("Learning rate")
ax.set_ylabel("Loss")
plt.show()
return ax
def lr_suggestion(self) -> Any:
"""
Returns:
Learning rate at the minimum numerical gradient
(ignoring the increasing part of the curve)
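A minimal sketch, assuming the finder has already been run via ``attach``:
.. code-block:: python
suggested_lr = lr_finder.lr_suggestion()
print(suggested_lr)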
"""
if not self._history:
raise RuntimeError("learning rate finder didn't run yet so lr_suggestion can't be returned")
loss = self._history["loss"]
min_loss_idx = torch.tensor(loss).argmin()
# Ignore the increasing part of the curve
decreasing_losses = self._history["loss"][: int(min_loss_idx.item()) + 1]
if len(decreasing_losses) < 3:
raise RuntimeError(
"FastaiLRFinder got unexpected curve shape, the curve should be somehow U-shaped, "
"please decrease start_lr or increase end_lr to resolve this issue."
)
losses = torch.tensor(decreasing_losses)
grads = torch.tensor([0.5 * (losses[i + 1] - losses[i - 1]) for i in range(1, len(losses) - 1)])
min_grad_idx = grads.argmin() + 1
return self._history["lr"][int(min_grad_idx)]
def apply_suggested_lr(self, optimizer: Optimizer) -> None:
"""
Applying the suggested learning rate(s) on the given optimizer.
Args:
optimizer: the optimizer to apply the suggested learning rate(s) on.
Note:
The given optimizer must be the same one for which the suggested learning rate was found.
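A minimal sketch, assuming ``lr_finder`` has already been run via ``attach`` with this ``optimizer``:
.. code-block:: python
lr_finder.apply_suggested_lr(optimizer)
print([pg["lr"] for pg in optimizer.param_groups])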
"""
sug_lr = self.lr_suggestion()
if not isinstance(sug_lr, list):
sug_lr = [
sug_lr,
]
if len(sug_lr) != len(optimizer.param_groups):
raise RuntimeError(
"The number of parameter groups does not match between "
"given optimizer and the one used for estimating the "
f"learning rate: {len(sug_lr)} vs {len(optimizer.param_groups)}"
)
for i, lr in enumerate(sug_lr):
optimizer.param_groups[i]["lr"] = lr
@contextlib.contextmanager
def attach(
self,
trainer: Engine,
to_save: Mapping,
output_transform: Callable = lambda output: output,
num_iter: Optional[int] = None,
start_lr: Optional[Union[float, List[float]]] = None,
end_lr: Optional[Union[float, List[float]]] = 10.0,
step_mode: str = "exp",
smooth_f: float = 0.05,
diverge_th: float = 5.0,
) -> Any:
"""Attaches lr_finder to a given trainer. It also resets model and optimizer at the end of the run.
Args:
trainer: lr_finder is attached to this trainer. Please, keep in mind that all attached handlers
will be executed.
to_save: dictionary with optimizer and other objects that need to be restored after running
the LR finder. For example, ``to_save={'optimizer': optimizer, 'model': model}``.
It should contain "optimizer" key for the optimizer.
Also all objects should implement ``state_dict`` and ``load_state_dict`` methods.
output_transform: function that transforms the trainer's ``state.output`` after each
iteration. It must return the loss of that iteration.
num_iter: number of iterations for lr schedule between base lr and end_lr. By default, it
runs for ``trainer.state.epoch_length * trainer.state.max_epochs``.
start_lr: lower bound for lr search. Default, the learning rate specified by the optimizer.
end_lr: upper bound for lr search. Default, 10.0.
step_mode: "exp" or "linear", which way should the lr be increased from ``start_lr``
to ``end_lr``. Default, "exp".
smooth_f: loss smoothing factor in range ``[0, 1)``. Default, 0.05
diverge_th: Used for stopping the search when ``current loss > diverge_th * best_loss``.
Default, 5.0.
Returns:
trainer_with_lr_finder (trainer used for finding the lr)
Examples:
.. code-block:: python
to_save = {"model": model, "optimizer": optimizer}
with lr_finder.attach(trainer, to_save=to_save) as trainer_with_lr_finder:
trainer_with_lr_finder.run(dataloader)
Note:
lr_finder cannot be attached to more than one trainer at a time.
"""
if not isinstance(to_save, Mapping):
raise TypeError(f"Argument to_save should be a mapping, but given {type(to_save)}")
Checkpoint._check_objects(to_save, "state_dict")
Checkpoint._check_objects(to_save, "load_state_dict")
if "optimizer" not in to_save:
raise ValueError("Mapping to_save should contain 'optimizer' key")
if not isinstance(to_save["optimizer"], torch.optim.Optimizer):
raise TypeError(
f"Object to_save['optimizer'] should be torch optimizer, but given {type(to_save['optimizer'])}"
)
if smooth_f < 0 or smooth_f >= 1:
raise ValueError("smooth_f is outside the range [0, 1)")
if diverge_th < 1:
raise ValueError("diverge_th should be larger than 1")
if step_mode not in ["exp", "linear"]:
raise ValueError(f"step_mode should be 'exp' or 'linear', but given {step_mode}")
if num_iter is not None:
if not isinstance(num_iter, int):
raise TypeError(f"if provided, num_iter should be an integer, but given {num_iter}")
if num_iter <= 0:
raise ValueError(f"if provided, num_iter should be positive, but given {num_iter}")
optimizer = to_save["optimizer"]
if start_lr is None:
start_lrs = [pg["lr"] for pg in optimizer.param_groups]
elif isinstance(start_lr, float):
start_lrs = [start_lr] * len(optimizer.param_groups)
elif isinstance(start_lr, list):
if len(start_lr) != len(optimizer.param_groups):
raise ValueError(
"Number of values of start_lr should be equal to the number of optimizer's parameter groups. "
f"start_lr values: {len(start_lr)}, optimizer parameter groups: {len(optimizer.param_groups)}"
)
start_lrs = start_lr
else:
raise TypeError(f"start_lr should be a float or list of floats, but given {type(start_lr)}")
if isinstance(end_lr, float):
end_lrs = [end_lr] * len(optimizer.param_groups)
elif isinstance(end_lr, list):
if len(end_lr) != len(optimizer.param_groups):
raise ValueError(
"Number of values of end_lr should be equal to the number of optimizer's parameter groups. "
f"end_lr values: {len(end_lr)}, optimizer parameter groups: {len(optimizer.param_groups)}"
)
end_lrs = end_lr
else:
raise TypeError(f"end_lr should be a float or list of floats, but given {type(end_lr)}")
for start, end in zip(start_lrs, end_lrs):
if start >= end:
raise ValueError(f"start_lr must be less than end_lr, start_lr={start} vs end_lr={end}")
# store to_save
with tempfile.TemporaryDirectory() as tmpdirname:
obj = {k: o.state_dict() for k, o in to_save.items()}
# add trainer
obj["trainer"] = trainer.state_dict()
cache_filepath = Path(tmpdirname) / "ignite_lr_finder_cache.pt"
torch.save(obj, cache_filepath.as_posix())
# Attach handlers
if not trainer.has_event_handler(self._run):
trainer.add_event_handler(
Events.STARTED,
self._run,
optimizer,
output_transform,
num_iter,
start_lrs,
end_lrs,
step_mode,
smooth_f,
diverge_th,
)
if not trainer.has_event_handler(self._warning):
trainer.add_event_handler(Events.COMPLETED, self._warning)
if not trainer.has_event_handler(self._reset):
trainer.add_event_handler(Events.COMPLETED, self._reset)
yield trainer
self._detach(trainer)
# restore to_save and reset trainer's state
obj = torch.load(cache_filepath.as_posix())
trainer.load_state_dict(obj["trainer"])
for k, o in obj.items():
if k in to_save:
to_save[k].load_state_dict(o)
class _ExponentialLR(_LRScheduler):
"""Exponentially increases the learning rate between two boundaries over a number of
iterations.
Args:
optimizer: wrapped optimizer.
start_lrs: the initial learning rate for parameter groups.
end_lrs: the final learning rate for parameter groups.
num_iter: the number of iterations over which the test
occurs. Default: 100.
last_epoch: the index of last epoch. Default: -1.
"""
def __init__(
self, optimizer: Optimizer, start_lrs: List[float], end_lrs: List[float], num_iter: int, last_epoch: int = -1
):
self.end_lrs = end_lrs
self.num_iter = num_iter
super(_ExponentialLR, self).__init__(optimizer, last_epoch)
# override base_lrs
self.base_lrs = start_lrs
def get_lr(self) -> List[float]: # type: ignore[override]
curr_iter = self.last_epoch + 1
r = curr_iter / self.num_iter
return [base_lr * (end_lr / base_lr) ** r for end_lr, base_lr in zip(self.end_lrs, self.base_lrs)]
|
from typing import Any, Callable, Optional
from ignite.engine import Engine
from ignite.engine.events import Events
from ignite.handlers.checkpoint import Checkpoint, DiskSaver, ModelCheckpoint
from ignite.handlers.early_stopping import EarlyStopping
from ignite.handlers.ema_handler import EMAHandler
from ignite.handlers.lr_finder import FastaiLRFinder
from ignite.handlers.param_scheduler import (
BaseParamScheduler,
ConcatScheduler,
CosineAnnealingScheduler,
create_lr_scheduler_with_warmup,
CyclicalScheduler,
LinearCyclicalScheduler,
LRScheduler,
ParamGroupScheduler,
ParamScheduler,
PiecewiseLinear,
ReduceLROnPlateauScheduler,
)
from ignite.handlers.state_param_scheduler import (
ExpStateScheduler,
LambdaStateScheduler,
MultiStepStateScheduler,
PiecewiseLinearStateScheduler,
StateParamScheduler,
StepStateScheduler,
)
from ignite.handlers.stores import EpochOutputStore
from ignite.handlers.terminate_on_nan import TerminateOnNan
from ignite.handlers.time_limit import TimeLimit
from ignite.handlers.time_profilers import BasicTimeProfiler, HandlersTimeProfiler
from ignite.handlers.timing import Timer
__all__ = [
"ModelCheckpoint",
"Checkpoint",
"DiskSaver",
"Timer",
"EarlyStopping",
"TerminateOnNan",
"global_step_from_engine",
"TimeLimit",
"EpochOutputStore",
"ConcatScheduler",
"CosineAnnealingScheduler",
"LinearCyclicalScheduler",
"LRScheduler",
"ParamGroupScheduler",
"ParamScheduler",
"PiecewiseLinear",
"CyclicalScheduler",
"create_lr_scheduler_with_warmup",
"FastaiLRFinder",
"EMAHandler",
"BasicTimeProfiler",
"HandlersTimeProfiler",
"BaseParamScheduler",
"StateParamScheduler",
"LambdaStateScheduler",
"PiecewiseLinearStateScheduler",
"ExpStateScheduler",
"StepStateScheduler",
"MultiStepStateScheduler",
"ReduceLROnPlateauScheduler",
]
def global_step_from_engine(engine: Engine, custom_event_name: Optional[Events] = None) -> Callable:
"""Helper method to setup `global_step_transform` function using another engine.
This can be helpful for logging trainer epoch/iteration while output handler is attached to an evaluator.
Args:
engine: engine whose state is used to provide the global step
custom_event_name: registered event name to use instead of the event that triggered the handler. Optional argument.
Returns:
global step based on provided engine
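Examples:
A sketch of using the trainer's epoch as the global step for a checkpoint attached to an
evaluator; ``trainer``, ``evaluator`` and ``model`` are assumed to be defined elsewhere:
.. code-block:: python
from ignite.engine import Events
from ignite.handlers import Checkpoint, global_step_from_engine
handler = Checkpoint(
    {'model': model}, '/tmp/models', n_saved=2,
    global_step_transform=global_step_from_engine(trainer)
)
evaluator.add_event_handler(Events.COMPLETED, handler)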
"""
def wrapper(_: Any, event_name: Events) -> int:
if custom_event_name is not None:
event_name = custom_event_name
return engine.state.get_event_attrib_value(event_name)
return wrapper
|
from typing import Any, Callable, List, Optional
from ignite.engine import Engine, Events
class EpochOutputStore:
"""EpochOutputStore handler to save output prediction and target history
after every epoch. It could be useful, e.g., for visualization purposes.
Note:
This can potentially lead to a memory error if the output data is
larger than available RAM.
Args:
output_transform: a callable that is used to
transform the :class:`~ignite.engine.engine.Engine`'s
``process_function``'s output, e.g., lambda x: x[0]
Attributes:
data: a list of :class:`~ignite.engine.engine.Engine` outputs,
optionally transformed by `output_transform`.
Examples:
.. code-block:: python
eos = EpochOutputStore()
trainer = create_supervised_trainer(model, optimizer, loss)
train_evaluator = create_supervised_evaluator(model, metrics)
eos.attach(train_evaluator, 'output')
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
train_evaluator.run(train_loader)
output = train_evaluator.state.output
# output = [(y_pred0, y0), (y_pred1, y1), ...]
# do something with output, e.g., plotting
.. versionadded:: 0.4.5
.. versionchanged:: 0.4.5
`attach` now accepts an optional argument `name`
"""
def __init__(self, output_transform: Callable = lambda x: x):
self.data: List[Any] = []
self.output_transform = output_transform
def reset(self) -> None:
"""Reset the attribute data to empty list."""
self.data = []
def update(self, engine: Engine) -> None:
"""Append the output of Engine to attribute data."""
output = self.output_transform(engine.state.output)
self.data.append(output)
def store(self, engine: Engine) -> None:
"""Store `self.data` on `engine.state.{self.name}`"""
setattr(engine.state, self.name, self.data)
def attach(self, engine: Engine, name: Optional[str] = None) -> None:
"""Attaching `reset` method at EPOCH_STARTED and
`update` method at ITERATION_COMPLETED.
If `name` is passed, will store `self.data` on `engine.state`
under `name`.
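A short sketch, assuming ``evaluator`` is defined elsewhere:
.. code-block:: python
eos = EpochOutputStore()
eos.attach(evaluator, name='output')
# after evaluator.run(...), the collected outputs are available as evaluator.state.output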
"""
engine.add_event_handler(Events.EPOCH_STARTED, self.reset)
engine.add_event_handler(Events.ITERATION_COMPLETED, self.update)
if name:
self.name = name
engine.add_event_handler(Events.EPOCH_COMPLETED, self.store)
|
import functools
from collections import OrderedDict
from typing import Any, Callable, cast, Dict, List, Mapping, Sequence, Tuple, Union
import torch
from ignite.engine import Engine, EventEnum, Events
from ignite.handlers.timing import Timer
class BasicTimeProfiler:
"""
BasicTimeProfiler can be used to profile the handlers,
events, data loading and data processing times.
Examples:
.. code-block:: python
from ignite.handlers import BasicTimeProfiler
trainer = Engine(train_updater)
# Create an object of the profiler and attach an engine to it
profiler = BasicTimeProfiler()
profiler.attach(trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def log_intermediate_results():
profiler.print_results(profiler.get_results())
trainer.run(dataloader, max_epochs=3)
profiler.write_results('path_to_dir/time_profiling.csv')
.. versionadded:: 0.4.6
"""
events_to_ignore = [
Events.EXCEPTION_RAISED,
Events.TERMINATE,
Events.TERMINATE_SINGLE_EPOCH,
Events.DATALOADER_STOP_ITERATION,
Events.INTERRUPT,
]
def __init__(self) -> None:
self._dataflow_timer = Timer()
self._processing_timer = Timer()
self._event_handlers_timer = Timer()
self.dataflow_times = torch.zeros(1)
self.processing_times = torch.zeros(1)
self.event_handlers_times: Dict[EventEnum, torch.Tensor] = {}
self._events = [
Events.EPOCH_STARTED,
Events.EPOCH_COMPLETED,
Events.ITERATION_STARTED,
Events.ITERATION_COMPLETED,
Events.GET_BATCH_STARTED,
Events.GET_BATCH_COMPLETED,
Events.COMPLETED,
]
self._fmethods = [
self._as_first_epoch_started,
self._as_first_epoch_completed,
self._as_first_iter_started,
self._as_first_iter_completed,
self._as_first_get_batch_started,
self._as_first_get_batch_completed,
self._as_first_completed,
]
self._lmethods = [
self._as_last_epoch_started,
self._as_last_epoch_completed,
self._as_last_iter_started,
self._as_last_iter_completed,
self._as_last_get_batch_started,
self._as_last_get_batch_completed,
self._as_last_completed,
]
def _reset(self, num_epochs: int, total_num_iters: int) -> None:
self.dataflow_times = torch.zeros(total_num_iters)
self.processing_times = torch.zeros(total_num_iters)
self.event_handlers_times = {
Events.STARTED: torch.zeros(1),
Events.COMPLETED: torch.zeros(1),
Events.EPOCH_STARTED: torch.zeros(num_epochs),
Events.EPOCH_COMPLETED: torch.zeros(num_epochs),
Events.ITERATION_STARTED: torch.zeros(total_num_iters),
Events.ITERATION_COMPLETED: torch.zeros(total_num_iters),
Events.GET_BATCH_COMPLETED: torch.zeros(total_num_iters),
Events.GET_BATCH_STARTED: torch.zeros(total_num_iters),
}
def _as_first_started(self, engine: Engine) -> None:
if hasattr(engine.state.dataloader, "__len__"):
num_iters_per_epoch = len(engine.state.dataloader) # type: ignore[arg-type]
else:
if engine.state.epoch_length is None:
raise ValueError(
"As epoch_length is not set, we cannot use BasicTimeProfiler in this case. "
"Please, set trainer.run(..., epoch_length=epoch_length) in order to fix this."
)
num_iters_per_epoch = engine.state.epoch_length
self.max_epochs = cast(int, engine.state.max_epochs)
self.total_num_iters = self.max_epochs * num_iters_per_epoch
self._reset(self.max_epochs, self.total_num_iters)
self.event_handlers_names = {
e: [
h.__qualname__ if hasattr(h, "__qualname__") else h.__class__.__name__
for (h, _, _) in engine._event_handlers[e]
if "BasicTimeProfiler." not in repr(h) # avoid adding internal handlers into output
]
for e in Events
if e not in self.events_to_ignore
}
# Setup all other handlers:
engine._event_handlers[Events.STARTED].append((self._as_last_started, (engine,), {}))
for e, m in zip(self._events, self._fmethods):
engine._event_handlers[e].insert(0, (m, (engine,), {}))
for e, m in zip(self._events, self._lmethods):
engine._event_handlers[e].append((m, (engine,), {}))
# Let's go
self._event_handlers_timer.reset()
def _as_last_started(self, engine: Engine) -> None:
self.event_handlers_times[Events.STARTED][0] = self._event_handlers_timer.value()
def _as_first_epoch_started(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_epoch_started(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
e = engine.state.epoch - 1
self.event_handlers_times[Events.EPOCH_STARTED][e] = t
def _as_first_get_batch_started(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
self._dataflow_timer.reset()
def _as_last_get_batch_started(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.GET_BATCH_STARTED][i] = t
def _as_first_get_batch_completed(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_get_batch_completed(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.GET_BATCH_COMPLETED][i] = t
d = self._dataflow_timer.value()
self.dataflow_times[i] = d
self._dataflow_timer.reset()
def _as_first_iter_started(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_iter_started(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.ITERATION_STARTED][i] = t
self._processing_timer.reset()
def _as_first_iter_completed(self, engine: Engine) -> None:
t = self._processing_timer.value()
i = engine.state.iteration - 1
self.processing_times[i] = t
self._event_handlers_timer.reset()
def _as_last_iter_completed(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.ITERATION_COMPLETED][i] = t
def _as_first_epoch_completed(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_epoch_completed(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
e = engine.state.epoch - 1
self.event_handlers_times[Events.EPOCH_COMPLETED][e] = t
def _as_first_completed(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_completed(self, engine: Engine) -> None:
self.event_handlers_times[Events.COMPLETED][0] = self._event_handlers_timer.value()
# Remove added handlers:
engine.remove_event_handler(self._as_last_started, Events.STARTED)
for e, m in zip(self._events, self._fmethods):
engine.remove_event_handler(m, e)
for e, m in zip(self._events, self._lmethods):
engine.remove_event_handler(m, e)
def attach(self, engine: Engine) -> None:
"""Attach BasicTimeProfiler to the given engine.
Args:
engine: the instance of Engine to attach
"""
if not isinstance(engine, Engine):
raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")
if not engine.has_event_handler(self._as_first_started):
engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {}))
@staticmethod
def _compute_basic_stats(data: torch.Tensor) -> Dict[str, Union[str, float, Tuple[float, float]]]:
# compute on non-zero data:
data = data[data > 0]
out: List[Tuple[str, Union[str, float, Tuple[float, float]]]] = [
("total", torch.sum(data).item() if len(data) > 0 else "not yet triggered")
]
if len(data) > 1:
out.extend(
[
("min/index", (torch.min(data).item(), torch.argmin(data).item())),
("max/index", (torch.max(data).item(), torch.argmax(data).item())),
("mean", torch.mean(data).item()),
("std", torch.std(data).item()),
]
)
return OrderedDict(out)
def get_results(self) -> Dict[str, Dict[str, Any]]:
"""
Method to fetch the aggregated profiler results after the engine is run
.. code-block:: python
results = profiler.get_results()
"""
total_eh_time: Union[int, torch.Tensor] = sum(
[(self.event_handlers_times[e]).sum() for e in Events if e not in self.events_to_ignore]
)
event_handlers_stats = dict(
[
(str(e.name).replace(".", "_"), self._compute_basic_stats(self.event_handlers_times[e]))
for e in Events
if e not in self.events_to_ignore
]
+ [("total_time", total_eh_time)]
)
return OrderedDict(
[
("processing_stats", self._compute_basic_stats(self.processing_times)),
("dataflow_stats", self._compute_basic_stats(self.dataflow_times)),
("event_handlers_stats", event_handlers_stats),
(
"event_handlers_names",
{str(e.name).replace(".", "_") + "_names": v for e, v in self.event_handlers_names.items()},
),
]
)
def write_results(self, output_path: str) -> None:
"""
Method to store the unaggregated profiling results to a csv file
Args:
output_path: file output path containing a filename
.. code-block:: python
profiler.write_results('path_to_dir/awesome_filename.csv')
Examples:
.. code-block:: text
-----------------------------------------------------------------
epoch iteration processing_stats dataflow_stats Event_STARTED ...
1.0 1.0 0.00003 0.252387 0.125676
1.0 2.0 0.00029 0.252342 0.125123
"""
try:
import pandas as pd
except ImportError:
raise ModuleNotFoundError("Need pandas to write results as files")
iters_per_epoch = self.total_num_iters // self.max_epochs
epochs = torch.arange(self.max_epochs, dtype=torch.float32).repeat_interleave(iters_per_epoch) + 1
iterations = torch.arange(self.total_num_iters, dtype=torch.float32) + 1
processing_stats = self.processing_times
dataflow_stats = self.dataflow_times
event_started = self.event_handlers_times[Events.STARTED].repeat_interleave(self.total_num_iters)
event_completed = self.event_handlers_times[Events.COMPLETED].repeat_interleave(self.total_num_iters)
event_epoch_started = self.event_handlers_times[Events.EPOCH_STARTED].repeat_interleave(iters_per_epoch)
event_epoch_completed = self.event_handlers_times[Events.EPOCH_COMPLETED].repeat_interleave(iters_per_epoch)
event_iter_started = self.event_handlers_times[Events.ITERATION_STARTED]
event_iter_completed = self.event_handlers_times[Events.ITERATION_COMPLETED]
event_batch_started = self.event_handlers_times[Events.GET_BATCH_STARTED]
event_batch_completed = self.event_handlers_times[Events.GET_BATCH_COMPLETED]
results_dump = torch.stack(
[
epochs,
iterations,
processing_stats,
dataflow_stats,
event_started,
event_completed,
event_epoch_started,
event_epoch_completed,
event_iter_started,
event_iter_completed,
event_batch_started,
event_batch_completed,
],
dim=1,
).numpy()
results_df = pd.DataFrame(
data=results_dump,
columns=[
"epoch",
"iteration",
"processing_stats",
"dataflow_stats",
"Event_STARTED",
"Event_COMPLETED",
"Event_EPOCH_STARTED",
"Event_EPOCH_COMPLETED",
"Event_ITERATION_STARTED",
"Event_ITERATION_COMPLETED",
"Event_GET_BATCH_STARTED",
"Event_GET_BATCH_COMPLETED",
],
)
results_df.to_csv(output_path, index=False)
@staticmethod
def print_results(results: Dict) -> str:
"""
Method to print the aggregated results from the profiler
Args:
results: the aggregated results from the profiler
.. code-block:: python
profiler.print_results(results)
Examples:
.. code-block:: text
----------------------------------------------------
| Time profiling stats (in seconds): |
----------------------------------------------------
total | min/index | max/index | mean | std
Processing function:
157.46292 | 0.01452/1501 | 0.26905/0 | 0.07730 | 0.01258
Dataflow:
6.11384 | 0.00008/1935 | 0.28461/1551 | 0.00300 | 0.02693
Event handlers:
2.82721
- Events.STARTED: []
0.00000
- Events.EPOCH_STARTED: []
0.00006 | 0.00000/0 | 0.00000/17 | 0.00000 | 0.00000
- Events.ITERATION_STARTED: ['PiecewiseLinear']
0.03482 | 0.00001/188 | 0.00018/679 | 0.00002 | 0.00001
- Events.ITERATION_COMPLETED: ['TerminateOnNan']
0.20037 | 0.00006/866 | 0.00089/1943 | 0.00010 | 0.00003
- Events.EPOCH_COMPLETED: ['empty_cuda_cache', 'training.<locals>.log_elapsed_time', ]
2.57860 | 0.11529/0 | 0.14977/13 | 0.12893 | 0.00790
- Events.COMPLETED: []
not yet triggered
"""
def to_str(v: Union[str, tuple]) -> str:
if isinstance(v, str):
return v
elif isinstance(v, tuple):
return f"{v[0]:.5f}/{v[1]}"
return f"{v:.5f}"
def odict_to_str(d: Mapping) -> str:
out = " | ".join([to_str(v) for v in d.values()])
return out
others = {
k: odict_to_str(v) if isinstance(v, OrderedDict) else v for k, v in results["event_handlers_stats"].items()
}
others.update(results["event_handlers_names"])
output_message = """
----------------------------------------------------
| Time profiling stats (in seconds): |
----------------------------------------------------
total | min/index | max/index | mean | std
Processing function:
{processing_stats}
Dataflow:
{dataflow_stats}
Event handlers:
{total_time:.5f}
- Events.STARTED: {STARTED_names}
{STARTED}
- Events.EPOCH_STARTED: {EPOCH_STARTED_names}
{EPOCH_STARTED}
- Events.ITERATION_STARTED: {ITERATION_STARTED_names}
{ITERATION_STARTED}
- Events.ITERATION_COMPLETED: {ITERATION_COMPLETED_names}
{ITERATION_COMPLETED}
- Events.EPOCH_COMPLETED: {EPOCH_COMPLETED_names}
{EPOCH_COMPLETED}
- Events.COMPLETED: {COMPLETED_names}
{COMPLETED}
""".format(
processing_stats=odict_to_str(results["processing_stats"]),
dataflow_stats=odict_to_str(results["dataflow_stats"]),
**others,
)
print(output_message)
return output_message
class HandlersTimeProfiler:
"""
HandlersTimeProfiler can be used to profile the handlers,
data loading and data processing times. Custom events are also
    profiled by this profiler.
Examples:
.. code-block:: python
from ignite.handlers import HandlersTimeProfiler
trainer = Engine(train_updater)
# Create an object of the profiler and attach an engine to it
profiler = HandlersTimeProfiler()
profiler.attach(trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def log_intermediate_results():
profiler.print_results(profiler.get_results())
trainer.run(dataloader, max_epochs=3)
profiler.write_results('path_to_dir/time_profiling.csv')
.. versionadded:: 0.4.6
"""
    EVENT_FILTER_THRESHOLD_TIME = 0.0001
def __init__(self) -> None:
self._dataflow_timer = Timer()
self._processing_timer = Timer()
self._event_handlers_timer = Timer()
self.dataflow_times: List[float] = []
self.processing_times: List[float] = []
self.event_handlers_times: Dict[EventEnum, Dict[str, List[float]]] = {}
@staticmethod
def _get_callable_name(handler: Callable) -> str:
# get name of the callable handler
return getattr(handler, "__qualname__", handler.__class__.__name__)
def _create_wrapped_handler(self, handler: Callable, event: EventEnum) -> Callable:
@functools.wraps(handler)
def _timeit_handler(*args: Any, **kwargs: Any) -> None:
self._event_handlers_timer.reset()
handler(*args, **kwargs)
t = self._event_handlers_timer.value()
hname = self._get_callable_name(handler)
# filter profiled time if the handler was attached to event with event filter
            if not hasattr(handler, "_parent") or t >= self.EVENT_FILTER_THRESHOLD_TIME:
self.event_handlers_times[event][hname].append(t)
# required to revert back to original handler after profiling
setattr(_timeit_handler, "_profiler_original", handler)
return _timeit_handler
def _timeit_processing(self) -> None:
# handler used for profiling processing times
t = self._processing_timer.value()
self.processing_times.append(t)
def _timeit_dataflow(self) -> None:
# handler used for profiling dataflow times
t = self._dataflow_timer.value()
self.dataflow_times.append(t)
def _reset(self, event_handlers_names: Mapping[EventEnum, List[str]]) -> None:
# reset the variables used for profiling
self.dataflow_times = []
self.processing_times = []
self.event_handlers_times = {e: {h: [] for h in event_handlers_names[e]} for e in event_handlers_names}
@staticmethod
def _is_internal_handler(handler: Callable) -> bool:
# checks whether the handler is internal
return any(n in repr(handler) for n in ["HandlersTimeProfiler.", "Timer."])
def _detach_profiler_handlers(self, engine: Engine) -> None:
# reverts handlers to original handlers
for e in engine._event_handlers:
for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]):
if hasattr(func, "_profiler_original"):
engine._event_handlers[e][i] = (func._profiler_original, args, kwargs)
def _as_first_started(self, engine: Engine) -> None:
# wraps original handlers for profiling
self.event_handlers_names = {
e: [
self._get_callable_name(h)
for (h, _, _) in engine._event_handlers[e]
if not self._is_internal_handler(h)
]
for e in engine._allowed_events
}
self._reset(self.event_handlers_names)
for e in engine._allowed_events:
for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]):
if not self._is_internal_handler(func):
engine._event_handlers[e][i] = (self._create_wrapped_handler(func, e), args, kwargs)
# processing timer
engine.add_event_handler(Events.ITERATION_STARTED, self._processing_timer.reset)
engine._event_handlers[Events.ITERATION_COMPLETED].insert(0, (self._timeit_processing, (), {}))
# dataflow timer
engine.add_event_handler(Events.GET_BATCH_STARTED, self._dataflow_timer.reset)
engine._event_handlers[Events.GET_BATCH_COMPLETED].insert(0, (self._timeit_dataflow, (), {}))
# revert back the wrapped handlers with original handlers at the end
engine.add_event_handler(Events.COMPLETED, self._detach_profiler_handlers)
def attach(self, engine: Engine) -> None:
"""Attach HandlersTimeProfiler to the given engine.
Args:
engine: the instance of Engine to attach
"""
if not isinstance(engine, Engine):
raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")
if not engine.has_event_handler(self._as_first_started):
engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {}))
def get_results(self) -> List[List[Union[str, float, Tuple[Union[str, float], Union[str, float]]]]]:
"""
Method to fetch the aggregated profiler results after the engine is run
.. code-block:: python
results = profiler.get_results()
"""
total_eh_time = sum(
[
sum(self.event_handlers_times[e][h])
for e in self.event_handlers_times
for h in self.event_handlers_times[e]
]
)
total_eh_time = round(float(total_eh_time), 5)
def compute_basic_stats(
times: Union[Sequence, torch.Tensor]
) -> List[Union[str, float, Tuple[Union[str, float], Union[str, float]]]]:
data = torch.as_tensor(times, dtype=torch.float32)
# compute on non-zero data:
data = data[data > 0]
total: Union[str, float] = round(torch.sum(data).item(), 5) if len(data) > 0 else "not triggered"
min_index: Tuple[Union[str, float], Union[str, float]] = ("None", "None")
max_index: Tuple[Union[str, float], Union[str, float]] = ("None", "None")
mean: Union[str, float] = "None"
std: Union[str, float] = "None"
if len(data) > 0:
min_index = (round(torch.min(data).item(), 5), torch.argmin(data).item())
max_index = (round(torch.max(data).item(), 5), torch.argmax(data).item())
mean = round(torch.mean(data).item(), 5)
if len(data) > 1:
std = round(torch.std(data).item(), 5)
return [total, min_index, max_index, mean, std]
event_handler_stats = [
[
h,
getattr(e, "name", str(e)),
*compute_basic_stats(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32)),
]
for e in self.event_handlers_times
for h in self.event_handlers_times[e]
]
event_handler_stats.append(["Total", "", total_eh_time, "", "", "", ""])
event_handler_stats.append(["Processing", "None", *compute_basic_stats(self.processing_times)])
event_handler_stats.append(["Dataflow", "None", *compute_basic_stats(self.dataflow_times)])
return event_handler_stats
def write_results(self, output_path: str) -> None:
"""
Method to store the unaggregated profiling results to a csv file
Args:
output_path: file output path containing a filename
.. code-block:: python
profiler.write_results('path_to_dir/awesome_filename.csv')
Examples:
.. code-block:: text
-----------------------------------------------------------------
# processing_stats dataflow_stats training.<locals>.log_elapsed_time (EPOCH_COMPLETED) ...
1 0.00003 0.252387 0.125676
2 0.00029 0.252342 0.125123
"""
try:
import pandas as pd
except ImportError:
raise ModuleNotFoundError("Need pandas to write results as files")
processing_stats = torch.tensor(self.processing_times, dtype=torch.float32)
dataflow_stats = torch.tensor(self.dataflow_times, dtype=torch.float32)
cols = [processing_stats, dataflow_stats]
headers = ["processing_stats", "dataflow_stats"]
for e in self.event_handlers_times:
for h in self.event_handlers_times[e]:
headers.append(f"{h} ({getattr(e, 'name', str(e))})")
cols.append(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32))
# Determine maximum length
max_len = max([x.numel() for x in cols])
count_col = torch.arange(max_len, dtype=torch.float32) + 1
cols.insert(0, count_col)
headers.insert(0, "#")
# pad all tensors to have same length
cols = [torch.nn.functional.pad(x, pad=(0, max_len - x.numel()), mode="constant", value=0) for x in cols]
results_dump = torch.stack(cols, dim=1).numpy()
results_df = pd.DataFrame(data=results_dump, columns=headers)
results_df.to_csv(output_path, index=False)
@staticmethod
def print_results(results: List[List[Union[str, float]]]) -> None:
"""
Method to print the aggregated results from the profiler
Args:
results: the aggregated results from the profiler
.. code-block:: python
profiler.print_results(results)
Examples:
.. code-block:: text
----------------------------------------- ----------------------- -------------- ...
Handler Event Name Total(s)
----------------------------------------- ----------------------- --------------
run.<locals>.log_training_results EPOCH_COMPLETED 19.43245
run.<locals>.log_validation_results EPOCH_COMPLETED 2.55271
run.<locals>.log_time EPOCH_COMPLETED 0.00049
run.<locals>.log_intermediate_results EPOCH_COMPLETED 0.00106
run.<locals>.log_training_loss ITERATION_COMPLETED 0.059
run.<locals>.log_time COMPLETED not triggered
----------------------------------------- ----------------------- --------------
Total 22.04571
----------------------------------------- ----------------------- --------------
Processing took total 11.29543s [min/index: 0.00393s/1875, max/index: 0.00784s/0,
mean: 0.00602s, std: 0.00034s]
Dataflow took total 16.24365s [min/index: 0.00533s/1874, max/index: 0.01129s/937,
mean: 0.00866s, std: 0.00113s]
"""
        # adapted from the implementation of torch.autograd.profiler.build_table
handler_column_width = max([len(item[0]) for item in results]) + 4 # type: ignore[arg-type]
event_column_width = max([len(item[1]) for item in results]) + 4 # type: ignore[arg-type]
DEFAULT_COLUMN_WIDTH = 14
headers = [
"Handler",
"Event Name",
"Total(s)",
"Min(s)/IDX",
"Max(s)/IDX",
"Mean(s)",
"Std(s)",
]
# Have to use a list because nonlocal is Py3 only...
SPACING_SIZE = 2
row_format_lst = [""]
header_sep_lst = [""]
line_length_lst = [-SPACING_SIZE]
def add_column(padding: int, text_dir: str = ">") -> None:
row_format_lst[0] += "{: " + text_dir + str(padding) + "}" + (" " * SPACING_SIZE)
header_sep_lst[0] += "-" * padding + (" " * SPACING_SIZE)
line_length_lst[0] += padding + SPACING_SIZE
add_column(handler_column_width, text_dir="<")
add_column(event_column_width, text_dir="<")
for _ in headers[2:]:
add_column(DEFAULT_COLUMN_WIDTH)
row_format = row_format_lst[0]
header_sep = header_sep_lst[0]
result = []
def append(s: str) -> None:
result.append(s)
result.append("\n")
result.append("\n")
append(header_sep)
append(row_format.format(*headers))
append(header_sep)
for row in results[:-3]:
# format min/idx and max/idx
row[3] = "{}/{}".format(*row[3]) # type: ignore[misc]
row[4] = "{}/{}".format(*row[4]) # type: ignore[misc]
append(row_format.format(*row))
append(header_sep)
# print total handlers time row
append(row_format.format(*results[-3]))
append(header_sep)
summary_format = "{} took total {}s [min/index: {}, max/index: {}, mean: {}s, std: {}s]"
for row in results[-2:]:
row[3] = "{}s/{}".format(*row[3]) # type: ignore[misc]
row[4] = "{}s/{}".format(*row[4]) # type: ignore[misc]
del row[1]
append(summary_format.format(*row))
print("".join(result))
|
import numbers
import warnings
from bisect import bisect_right
from typing import Any, List, Sequence, Tuple, Union
from ignite.engine import CallableEventWithFilter, Engine, Events, EventsList
from ignite.handlers.param_scheduler import BaseParamScheduler
class StateParamScheduler(BaseParamScheduler):
"""An abstract class for updating an engine state parameter values during training.
Args:
param_name: name of parameter to update.
save_history: whether to log the parameter values to ``engine.state.param_history``, (default=False).
create_new: whether to create ``param_name`` on ``engine.state`` taking into account whether ``param_name``
attribute already exists or not. Overrides existing attribute by default, (default=False).
Note:
        The parameter scheduler works independently of the internal state of the attached engine.
        More precisely, whatever the state of the engine (newly created or used by another scheduler), the scheduler
        sets the defined absolute values.
.. versionadded:: 0.4.7
"""
def __init__(self, param_name: str, save_history: bool = False, create_new: bool = False):
super(StateParamScheduler, self).__init__(param_name, save_history)
self.create_new = create_new
def attach(
self,
engine: Engine,
event: Union[str, Events, CallableEventWithFilter, EventsList] = Events.ITERATION_COMPLETED,
) -> None:
"""Attach the handler to the engine. Once the handler is attached, the ``Engine.state`` will have a new
attribute with the name ``param_name``. Then the current value of the parameter can be retrieved from
``Engine.state`` when the engine is running.
Args:
engine: trainer to which the handler will be attached.
event: trigger ``param_name`` value update.
"""
if hasattr(engine.state, self.param_name):
if self.create_new:
raise ValueError(
f"Attribute '{self.param_name}' already exists in the engine.state. "
f"This may be a conflict between multiple handlers. "
f"Please choose another name."
)
else:
if not self.create_new:
warnings.warn(
f"Attribute '{self.param_name}' is not defined in the engine.state. "
f"{type(self).__name__} will create it. Remove this warning by setting create_new=True."
)
setattr(engine.state, self.param_name, None)
if self.save_history:
if not hasattr(engine.state, "param_history") or engine.state.param_history is None:
setattr(engine.state, "param_history", {})
engine.state.param_history.setdefault(self.param_name, []) # type: ignore[attr-defined]
engine.add_event_handler(event, self)
def __call__(self, engine: Engine) -> None:
self.event_index += 1
value = self.get_param()
setattr(engine.state, self.param_name, value)
if self.save_history:
engine.state.param_history[self.param_name].append(value) # type: ignore[attr-defined]
@classmethod
    def simulate_values(cls, num_events: int, **scheduler_kwargs: Any) -> List[List[Union[int, float]]]:
"""Method to simulate scheduled engine state parameter values during `num_events` events.
Args:
num_events: number of events during the simulation.
scheduler_kwargs: parameter scheduler configuration kwargs.
Returns:
event_index, value
Examples:
.. code-block:: python
import matplotlib.pyplot as plt
import numpy as np
step_state_param_values = np.array(
StepStateScheduler.simulate_values(
num_events=20, param_name="step_scheduled_param", initial_value=10, gamma=0.99, step_size=5
)
)
plt.plot(step_state_param_values[:, 0], step_state_param_values[:, 1], label="learning rate")
plt.xlabel("events")
plt.ylabel("values")
plt.legend()
"""
for key in ["save_history"]:
if key in scheduler_kwargs:
del scheduler_kwargs[key]
values = []
scheduler = cls(save_history=False, **scheduler_kwargs)
engine = Engine(lambda e, b: None)
for i in range(num_events):
scheduler(engine=engine)
values.append([i, getattr(engine.state, scheduler_kwargs["param_name"])])
return values
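# Illustrative sketch (not part of the library): a concrete StateParamScheduler only needs to
# implement ``get_param``; the base class handles attaching to an event, incrementing
# ``event_index`` and writing the value to ``engine.state``. The names below
# (_ExampleLinearStateScheduler, warmup_factor) are hypothetical.
def _example_custom_state_scheduler() -> None:
    class _ExampleLinearStateScheduler(StateParamScheduler):
        def __init__(self, initial_value: float, slope: float, param_name: str, create_new: bool = False):
            super().__init__(param_name, save_history=False, create_new=create_new)
            self.initial_value = initial_value
            self.slope = slope

        def get_param(self) -> float:
            # the value grows linearly with the number of triggered events
            return self.initial_value + self.slope * self.event_index

    trainer = Engine(lambda engine, batch: None)
    scheduler = _ExampleLinearStateScheduler(0.0, 0.1, param_name="warmup_factor", create_new=True)
    scheduler.attach(trainer, Events.ITERATION_COMPLETED)
    trainer.run([0] * 5, max_epochs=1)
    # after 5 iterations, trainer.state.warmup_factor should be 0.5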
class LambdaStateScheduler(StateParamScheduler):
"""Update a parameter during training by using a user defined callable object.
User defined callable object is taking an event index as input and returns parameter value.
Args:
lambda_obj: user defined callable object.
param_name: name of parameter to update.
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
create_new: whether to create ``param_name`` on
``engine.state`` taking into account whether
``param_name`` attribute already exists or not.
Overrides existing attribute by default, (default=False).
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
default_trainer = get_default_trainer()
class LambdaState:
def __init__(self, initial_value, gamma):
self.initial_value = initial_value
self.gamma = gamma
def __call__(self, event_index):
return self.initial_value * self.gamma ** (event_index % 9)
param_scheduler = LambdaStateScheduler(
param_name="param", lambda_obj=LambdaState(1, 0.9), create_new=True
)
            # parameter is param, initial_value sets param to 1 and in this example gamma = 0.9
# using class 'LambdaState' user defined callable object can be created
# update a parameter during training by using a user defined callable object
# user defined callable object is taking an event index as input and returns parameter value
            # in this example, we update as initial_value * gamma ** (event_index % 9)
# in every Epoch the parameter is updated as 1 * 0.9 ** (Epoch % 9)
# In Epoch 3, parameter param = 1 * 0.9 ** (3 % 9) = 0.729
# In Epoch 10, parameter param = 1 * 0.9 ** (10 % 9) = 0.9
param_scheduler.attach(default_trainer, Events.EPOCH_COMPLETED)
@default_trainer.on(Events.EPOCH_COMPLETED)
def print_param():
print(default_trainer.state.param)
default_trainer.run([0], max_epochs=10)
.. testoutput::
0.9
0.81
0.7290...
0.6561
0.5904...
0.5314...
0.4782...
0.4304...
1.0
0.9
.. versionadded:: 0.4.7
"""
def __init__(self, lambda_obj: Any, param_name: str, save_history: bool = False, create_new: bool = False):
super(LambdaStateScheduler, self).__init__(param_name, save_history, create_new)
if not callable(lambda_obj):
raise ValueError("Expected lambda_obj to be callable.")
self.lambda_obj = lambda_obj
self._state_attrs += ["lambda_obj"]
def get_param(self) -> Union[List[float], float]:
return self.lambda_obj(self.event_index)
class PiecewiseLinearStateScheduler(StateParamScheduler):
"""Piecewise linear state parameter scheduler.
Args:
milestones_values: list of tuples (event index, parameter value)
represents milestones and parameter values. Milestones should be increasing integers.
param_name: name of parameter to update.
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
create_new: whether to create ``param_name`` on
``engine.state`` taking into account whether
``param_name`` attribute already exists or not.
Overrides existing attribute by default, (default=False).
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
default_trainer = get_default_trainer()
param_scheduler = PiecewiseLinearStateScheduler(
param_name="param", milestones_values=[(5, 1.0), (10, 0.8), (15, 0.6)], create_new=True
)
# parameter is param, milestone (5, 1.0) sets param to 1.0
# milestone is (5, 1.0), param=1 for Epoch 1 to 5,
# next milestone is (10, 0.8), param linearly reduces from 1.0 to 0.8
# Epoch 10, param = 0.8
            # next milestone is (15, 0.6), param linearly reduces from 0.8 to 0.6
# Epoch 15, param = 0.6
param_scheduler.attach(default_trainer, Events.EPOCH_COMPLETED)
@default_trainer.on(Events.EPOCH_COMPLETED)
def print_param():
print(default_trainer.state.param)
default_trainer.run([0], max_epochs=15)
.. testoutput::
1.0
1.0
1.0
1.0
1.0
0.96
0.92
0.88
0.8400...
0.8
0.76
0.72
0.68
0.64
0.6
.. versionadded:: 0.4.7
"""
def __init__(
self,
milestones_values: List[Tuple[int, float]],
param_name: str,
save_history: bool = False,
create_new: bool = False,
):
super(PiecewiseLinearStateScheduler, self).__init__(param_name, save_history, create_new)
if not isinstance(milestones_values, Sequence):
raise TypeError(
f"Argument milestones_values should be a list or tuple, but given {type(milestones_values)}"
)
if len(milestones_values) < 1:
raise ValueError(
f"Argument milestones_values should be with at least one value, but given {milestones_values}"
)
values: List[float] = []
milestones: List[int] = []
for pair in milestones_values:
if not isinstance(pair, tuple) or len(pair) != 2:
raise ValueError("Argument milestones_values should be a list of pairs (milestone, param_value)")
if not isinstance(pair[0], numbers.Integral):
raise TypeError(f"Value of a milestone should be integer, but given {type(pair[0])}")
if len(milestones) > 0 and pair[0] < milestones[-1]:
raise ValueError(
f"Milestones should be increasing integers, but given {pair[0]} is smaller "
f"than the previous milestone {milestones[-1]}"
)
milestones.append(pair[0])
values.append(pair[1])
self.values = values
self.milestones = milestones
self._index = 0
self._state_attrs += ["values", "milestones", "_index"]
def _get_start_end(self) -> Tuple[int, int, float, float]:
if self.milestones[0] > self.event_index:
return self.event_index - 1, self.event_index, self.values[0], self.values[0]
elif self.milestones[-1] <= self.event_index:
return (self.event_index, self.event_index + 1, self.values[-1], self.values[-1])
elif self.milestones[self._index] <= self.event_index < self.milestones[self._index + 1]:
return (
self.milestones[self._index],
self.milestones[self._index + 1],
self.values[self._index],
self.values[self._index + 1],
)
else:
self._index += 1
return self._get_start_end()
def get_param(self) -> Union[List[float], float]:
start_index, end_index, start_value, end_value = self._get_start_end()
return start_value + (end_value - start_value) * (self.event_index - start_index) / (end_index - start_index)
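# Illustrative sketch (not part of the library): for milestones_values=[(5, 1.0), (10, 0.8)],
# the interpolation above gives, at event_index=7,
# 1.0 + (0.8 - 1.0) * (7 - 5) / (10 - 5) = 0.92, which can be checked with simulate_values.
def _example_piecewise_linear_value() -> None:
    values = PiecewiseLinearStateScheduler.simulate_values(
        num_events=10, param_name="param", milestones_values=[(5, 1.0), (10, 0.8)]
    )
    # values[6] corresponds to event_index=7 and should be approximately [6, 0.92]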
class ExpStateScheduler(StateParamScheduler):
"""Update a parameter during training by using exponential function.
The function decays the parameter value by gamma every step.
Based on the closed form of ExponentialLR from PyTorch
https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.ExponentialLR.html
Args:
initial_value: Starting value of the parameter.
gamma: Multiplicative factor of parameter value decay.
param_name: name of parameter to update.
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
create_new: whether to create ``param_name`` on
``engine.state`` taking into account whether
``param_name`` attribute already exists or not.
Overrides existing attribute by default, (default=False).
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
default_trainer = get_default_trainer()
param_scheduler = ExpStateScheduler(
param_name="param", initial_value=1, gamma=0.9, create_new=True
)
# parameter is param, initial_value sets param to 1, gamma is set as 0.9
# Epoch 1, param changes from 1 to 1*0.9, param = 0.9
# Epoch 2, param changes from 0.9 to 0.9*0.9, param = 0.81
# Epoch 3, param changes from 0.81 to 0.81*0.9, param = 0.729
            # Epoch 4, param changes from 0.729 to 0.729*0.9, param = 0.6561
param_scheduler.attach(default_trainer, Events.EPOCH_COMPLETED)
@default_trainer.on(Events.EPOCH_COMPLETED)
def print_param():
print(default_trainer.state.param)
default_trainer.run([0], max_epochs=4)
.. testoutput::
0.9
0.81
0.7290...
0.6561
.. versionadded:: 0.4.7
"""
def __init__(
self, initial_value: float, gamma: float, param_name: str, save_history: bool = False, create_new: bool = False
):
super(ExpStateScheduler, self).__init__(param_name, save_history, create_new)
self.initial_value = initial_value
self.gamma = gamma
self._state_attrs += ["initial_value", "gamma"]
def get_param(self) -> Union[List[float], float]:
return self.initial_value * self.gamma**self.event_index
class StepStateScheduler(StateParamScheduler):
"""Update a parameter during training by using a step function.
This function decays the parameter value by gamma every step_size.
Based on StepLR from PyTorch.
https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.StepLR.html
Args:
initial_value: Starting value of the parameter.
gamma: Multiplicative factor of parameter value decay.
step_size: Period of parameter value decay.
param_name: name of parameter to update.
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
create_new: whether to create ``param_name`` on
``engine.state`` taking into account whether
``param_name`` attribute already exists or not.
Overrides existing attribute by default, (default=False).
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
default_trainer = get_default_trainer()
param_scheduler = StepStateScheduler(
param_name="param", initial_value=1, gamma=0.9, step_size=5, create_new=True
)
# parameter is param, initial_value sets param to 1, gamma is set as 0.9
# Epoch 1 to 4, param does not change as step size is 5,
# Epoch 5, param changes from 1 to 1*0.9, param = 0.9
# Epoch 5 to 9, param = 0.9 as step size is 5,
# Epoch 10, param changes from 0.9 to 0.9*0.9, param = 0.81
# Epoch 10 to 14, param = 0.81, as step size is 5
# Epoch 15, param changes from 0.81 to 0.81*0.9, param = 0.729
# and so on ... the param change at Epoch = 5, 10, 15, 20, . . .
param_scheduler.attach(default_trainer, Events.EPOCH_COMPLETED)
@default_trainer.on(Events.EPOCH_COMPLETED(every=5))
def print_param():
print(default_trainer.state.param)
default_trainer.run([0], max_epochs=25)
.. testoutput::
0.9
0.81
0.7290...
0.6561
0.5904...
.. versionadded:: 0.4.7
"""
def __init__(
self,
initial_value: float,
gamma: float,
step_size: int,
param_name: str,
save_history: bool = False,
create_new: bool = False,
):
super(StepStateScheduler, self).__init__(param_name, save_history, create_new)
self.initial_value = initial_value
self.gamma = gamma
self.step_size = step_size
self._state_attrs += ["initial_value", "gamma", "step_size"]
def get_param(self) -> Union[List[float], float]:
return self.initial_value * self.gamma ** (self.event_index // self.step_size)
class MultiStepStateScheduler(StateParamScheduler):
"""Update a parameter during training by using a multi step function.
The function decays the parameter value by gamma once the number of steps reaches one of the milestones.
Based on MultiStepLR from PyTorch.
https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.MultiStepLR.html
Args:
initial_value: Starting value of the parameter.
gamma: Multiplicative factor of parameter value decay.
milestones: List of step indices. Must be increasing.
param_name: name of parameter to update.
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
create_new: whether to create ``param_name`` on
``engine.state`` taking into account whether
``param_name`` attribute already exists or not.
Overrides existing attribute by default, (default=False).
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
default_trainer = get_default_trainer()
param_scheduler = MultiStepStateScheduler(
param_name="param", initial_value=1, gamma=0.9, milestones=[3, 6, 9, 12], create_new=True
)
# parameter is param, initial_value sets param to 1, gamma is set as 0.9
# Epoch 1 to 2, param does not change as milestone is 3
# Epoch 3, param changes from 1 to 1*0.9, param = 0.9
# Epoch 3 to 5, param does not change as milestone is 6
# Epoch 6, param changes from 0.9 to 0.9*0.9, param = 0.81
# Epoch 6 to 8, param does not change as milestone is 9
# Epoch 9, param changes from 0.81 to 0.81*0.9, param = 0.729
# Epoch 9 to 11, param does not change as milestone is 12
# Epoch 12, param changes from 0.729 to 0.729*0.9, param = 0.6561
param_scheduler.attach(default_trainer, Events.EPOCH_COMPLETED)
@default_trainer.on(Events.EPOCH_COMPLETED)
def print_param():
print(default_trainer.state.param)
default_trainer.run([0], max_epochs=12)
.. testoutput::
1.0
1.0
0.9
0.9
0.9
0.81
0.81
0.81
0.7290...
0.7290...
0.7290...
0.6561
.. versionadded:: 0.4.7
"""
def __init__(
self,
initial_value: float,
gamma: float,
milestones: List[int],
param_name: str,
save_history: bool = False,
create_new: bool = False,
):
super(MultiStepStateScheduler, self).__init__(param_name, save_history, create_new)
self.initial_value = initial_value
self.gamma = gamma
self.milestones = milestones
self._state_attrs += ["initial_value", "gamma", "milestones"]
def get_param(self) -> Union[List[float], float]:
return self.initial_value * self.gamma ** bisect_right(self.milestones, self.event_index)
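# Illustrative sketch (not part of the library): bisect_right counts how many milestones have
# already been reached, which is exactly the decay exponent used by get_param above.
def _example_multistep_decay_exponent() -> None:
    milestones = [3, 6, 9]
    assert bisect_right(milestones, 2) == 0  # before the first milestone: gamma ** 0
    assert bisect_right(milestones, 3) == 1  # at milestone 3: gamma ** 1
    assert bisect_right(milestones, 7) == 2  # between milestones 6 and 9: gamma ** 2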
|
import time
from typing import Optional
from ignite.engine import Engine
__all__ = ["TimeLimit"]
from ignite.utils import setup_logger
class TimeLimit:
"""TimeLimit handler can be used to control training time for computing environments where session time is limited.
    The timer starts when the handler is created, not when the training starts.
    This handler gracefully terminates the training once the elapsed training time exceeds the limit.
Args:
limit_sec: Maximum time before training terminates (in seconds). Defaults to 28800.
Examples:
.. code-block:: python
from ignite.engine import Events
from ignite.handlers import TimeLimit
handler = TimeLimit() # 8 hours of training
trainer.add_event_handler(Events.ITERATION_COMPLETED, handler)
.. versionadded:: 0.4.3
"""
def __init__(self, limit_sec: Optional[int] = 28800):
if not isinstance(limit_sec, int):
raise TypeError("Argument limit_sec should be an integer.")
if limit_sec <= 0:
raise ValueError("Argument limit_sec should be a positive integer.")
self.limit_sec = limit_sec
self.start_time = time.time()
self.logger = setup_logger(__name__ + "." + self.__class__.__name__)
def __call__(self, engine: Engine) -> None:
elapsed_time = time.time() - self.start_time
if elapsed_time > self.limit_sec:
self.logger.info("Reached the time limit: {} sec. Stop training".format(self.limit_sec))
engine.terminate()
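# Illustrative sketch (not part of the library): the elapsed time is only checked when the
# event the handler is attached to fires, so attaching it to a frequent event such as
# ITERATION_COMPLETED gives a tighter bound on the total run time.
def _example_time_limit_usage() -> None:
    from ignite.engine import Events

    trainer = Engine(lambda engine, batch: None)
    trainer.add_event_handler(Events.ITERATION_COMPLETED, TimeLimit(limit_sec=3600))  # 1 hour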
|
from time import perf_counter
from typing import Any, Optional
from ignite.engine import Engine, Events
__all__ = ["Timer"]
class Timer:
"""Timer object can be used to measure (average) time between events.
Args:
average: if True, then when ``.value()`` method is called, the returned value
will be equal to total time measured, divided by the value of internal counter.
Attributes:
total (float): total time elapsed when the Timer was running (in seconds).
step_count (int): internal counter, useful to measure average time, e.g. of processing a single batch.
Incremented with the ``.step()`` method.
running (bool): flag indicating if timer is measuring time.
Note:
When using ``Timer(average=True)`` do not forget to call ``timer.step()`` every time an event occurs. See
the examples below.
Examples:
Measuring total time of the epoch:
.. code-block:: python
from ignite.handlers import Timer
import time
work = lambda : time.sleep(0.1)
idle = lambda : time.sleep(0.1)
t = Timer(average=False)
for _ in range(10):
work()
idle()
t.value()
# 2.003073937026784
Measuring average time of the epoch:
.. code-block:: python
t = Timer(average=True)
for _ in range(10):
work()
idle()
t.step()
t.value()
# 0.2003182829997968
Measuring average time it takes to execute a single ``work()`` call:
.. code-block:: python
t = Timer(average=True)
for _ in range(10):
t.resume()
work()
t.pause()
idle()
t.step()
t.value()
# 0.10016545779653825
Using the Timer to measure average time it takes to process a single batch of examples:
.. code-block:: python
from ignite.engine import Engine, Events
from ignite.handlers import Timer
trainer = Engine(training_update_function)
timer = Timer(average=True)
timer.attach(
trainer,
start=Events.STARTED,
resume=Events.ITERATION_STARTED,
pause=Events.ITERATION_COMPLETED,
step=Events.ITERATION_COMPLETED
)
"""
def __init__(self, average: bool = False):
self._average = average
self.reset()
def attach(
self,
engine: Engine,
start: Events = Events.STARTED,
pause: Events = Events.COMPLETED,
resume: Optional[Events] = None,
step: Optional[Events] = None,
) -> "Timer":
"""Register callbacks to control the timer.
Args:
engine: Engine that this timer will be attached to.
start: Event which should start (reset) the timer.
pause: Event which should pause the timer.
resume: Event which should resume the timer.
step: Event which should call the `step` method of the counter.
Returns:
this timer
"""
engine.add_event_handler(start, self.reset)
engine.add_event_handler(pause, self.pause)
if resume is not None:
engine.add_event_handler(resume, self.resume)
if step is not None:
engine.add_event_handler(step, self.step)
return self
def reset(self, *args: Any) -> "Timer":
"""Reset the timer to zero."""
self._t0 = perf_counter()
self.total = 0.0
self.step_count = 0.0
self.running = True
return self
def pause(self, *args: Any) -> None:
"""Pause the current running timer."""
if self.running:
self.total += self._elapsed()
self.running = False
def resume(self, *args: Any) -> None:
"""Resume the current running timer."""
if not self.running:
self.running = True
self._t0 = perf_counter()
def value(self) -> float:
"""Return the average timer value."""
total = self.total
if self.running:
total += self._elapsed()
if self._average:
denominator = max(self.step_count, 1.0)
else:
denominator = 1.0
return total / denominator
def step(self, *args: Any) -> None:
"""Increment the timer."""
self.step_count += 1.0
def _elapsed(self) -> float:
return perf_counter() - self._t0
|
from collections import OrderedDict
from typing import Callable, cast, Mapping, Optional
from ignite.base import Serializable
from ignite.engine import Engine
from ignite.utils import setup_logger
__all__ = ["EarlyStopping"]
class EarlyStopping(Serializable):
"""EarlyStopping handler can be used to stop the training if no improvement after a given number of events.
Args:
patience: Number of events to wait if no improvement and then stop the training.
score_function: It should be a function taking a single argument, an :class:`~ignite.engine.engine.Engine`
            object, and returning a score `float`. An improvement is considered if the score is higher.
trainer: Trainer engine to stop the run if no improvement.
min_delta: A minimum increase in the score to qualify as an improvement,
i.e. an increase of less than or equal to `min_delta`, will count as no improvement.
        cumulative_delta: If True, `min_delta` defines an increase since the last `patience` reset, otherwise,
it defines an increase after the last event. Default value is False.
Examples:
.. code-block:: python
from ignite.engine import Engine, Events
from ignite.handlers import EarlyStopping
def score_function(engine):
val_loss = engine.state.metrics['nll']
return -val_loss
handler = EarlyStopping(patience=10, score_function=score_function, trainer=trainer)
# Note: the handler is attached to an *Evaluator* (runs one epoch on validation dataset).
evaluator.add_event_handler(Events.COMPLETED, handler)
"""
_state_dict_all_req_keys = (
"counter",
"best_score",
)
def __init__(
self,
patience: int,
score_function: Callable,
trainer: Engine,
min_delta: float = 0.0,
cumulative_delta: bool = False,
):
if not callable(score_function):
raise TypeError("Argument score_function should be a function.")
if patience < 1:
raise ValueError("Argument patience should be positive integer.")
if min_delta < 0.0:
raise ValueError("Argument min_delta should not be a negative number.")
if not isinstance(trainer, Engine):
raise TypeError("Argument trainer should be an instance of Engine.")
self.score_function = score_function
self.patience = patience
self.min_delta = min_delta
self.cumulative_delta = cumulative_delta
self.trainer = trainer
self.counter = 0
self.best_score: Optional[float] = None
self.logger = setup_logger(__name__ + "." + self.__class__.__name__)
def __call__(self, engine: Engine) -> None:
score = self.score_function(engine)
if self.best_score is None:
self.best_score = score
elif score <= self.best_score + self.min_delta:
if not self.cumulative_delta and score > self.best_score:
self.best_score = score
self.counter += 1
self.logger.debug("EarlyStopping: %i / %i" % (self.counter, self.patience))
if self.counter >= self.patience:
self.logger.info("EarlyStopping: Stop training")
self.trainer.terminate()
else:
self.best_score = score
self.counter = 0
def state_dict(self) -> "OrderedDict[str, float]":
"""Method returns state dict with ``counter`` and ``best_score``.
Can be used to save internal state of the class.
"""
return OrderedDict([("counter", self.counter), ("best_score", cast(float, self.best_score))])
def load_state_dict(self, state_dict: Mapping) -> None:
"""Method replace internal state of the class with provided state dict data.
Args:
state_dict: a dict with "counter" and "best_score" keys/values.
"""
super().load_state_dict(state_dict)
self.counter = state_dict["counter"]
self.best_score = state_dict["best_score"]
|
from collections import OrderedDict
from collections.abc import Mapping
from typing import Tuple
class Serializable:
_state_dict_all_req_keys: Tuple = ()
_state_dict_one_of_opt_keys: Tuple = ()
def state_dict(self) -> OrderedDict:
raise NotImplementedError
def load_state_dict(self, state_dict: Mapping) -> None:
if not isinstance(state_dict, Mapping):
raise TypeError(f"Argument state_dict should be a dictionary, but given {type(state_dict)}")
for k in self._state_dict_all_req_keys:
if k not in state_dict:
raise ValueError(
f"Required state attribute '{k}' is absent in provided state_dict '{state_dict.keys()}'"
)
opts = [k in state_dict for k in self._state_dict_one_of_opt_keys]
if len(opts) > 0 and ((not any(opts)) or (all(opts))):
raise ValueError(f"state_dict should contain only one of '{self._state_dict_one_of_opt_keys}' keys")
|
from ignite.base.mixins import Serializable
|
# Needed to collect coverage data
|
import logging
import sys
from collections import namedtuple
import pytest
import torch
from packaging.version import Version
from ignite.engine import Engine, Events
from ignite.utils import convert_tensor, deprecated, hash_checkpoint, setup_logger, to_onehot
def test_convert_tensor():
x = torch.tensor([0.0])
tensor = convert_tensor(x)
assert torch.is_tensor(tensor)
x = torch.tensor([0.0])
tensor = convert_tensor(x, device="cpu", non_blocking=True)
assert torch.is_tensor(tensor)
x = torch.tensor([0.0])
tensor = convert_tensor(x, device="cpu", non_blocking=False)
assert torch.is_tensor(tensor)
x = [torch.tensor([0.0]), torch.tensor([0.0])]
list_ = convert_tensor(x)
assert isinstance(list_, list)
assert torch.is_tensor(list_[0])
assert torch.is_tensor(list_[1])
x = (torch.tensor([0.0]), torch.tensor([0.0]))
tuple_ = convert_tensor(x)
assert isinstance(tuple_, tuple)
assert torch.is_tensor(tuple_[0])
assert torch.is_tensor(tuple_[1])
Point = namedtuple("Point", ["x", "y"])
x = Point(torch.tensor([0.0]), torch.tensor([0.0]))
tuple_ = convert_tensor(x)
assert isinstance(tuple_, Point)
assert torch.is_tensor(tuple_[0])
assert torch.is_tensor(tuple_[1])
x = {"a": torch.tensor([0.0]), "b": torch.tensor([0.0])}
dict_ = convert_tensor(x)
assert isinstance(dict_, dict)
assert torch.is_tensor(dict_["a"])
assert torch.is_tensor(dict_["b"])
assert convert_tensor("a") == "a"
with pytest.raises(TypeError):
convert_tensor(12345)
def test_to_onehot():
indices = torch.tensor([0, 1, 2, 3], dtype=torch.long)
actual = to_onehot(indices, 4)
expected = torch.eye(4, dtype=torch.uint8)
assert actual.equal(expected)
y = torch.randint(0, 21, size=(1000,))
y_ohe = to_onehot(y, num_classes=21)
y2 = torch.argmax(y_ohe, dim=1)
assert y.equal(y2)
y = torch.randint(0, 21, size=(4, 250, 255))
y_ohe = to_onehot(y, num_classes=21)
y2 = torch.argmax(y_ohe, dim=1)
assert y.equal(y2)
y = torch.randint(0, 21, size=(4, 150, 155, 4, 6))
y_ohe = to_onehot(y, num_classes=21)
y2 = torch.argmax(y_ohe, dim=1)
assert y.equal(y2)
# Test with `TorchScript`
x = torch.tensor([0, 1, 2, 3])
# Test the raw `to_onehot` function
scripted_to_onehot = torch.jit.script(to_onehot)
assert scripted_to_onehot(x, 4).allclose(to_onehot(x, 4))
# Test inside `torch.nn.Module`
class SLP(torch.nn.Module):
def __init__(self):
super(SLP, self).__init__()
self.linear = torch.nn.Linear(4, 1)
def forward(self, x):
x = to_onehot(x, 4)
return self.linear(x.to(torch.float))
eager_model = SLP()
scripted_model = torch.jit.script(eager_model)
assert eager_model(x).allclose(scripted_model(x))
def test_dist_setup_logger():
logger = setup_logger("trainer", level=logging.CRITICAL, distributed_rank=1)
assert logger.level != logging.CRITICAL
def test_setup_logger(capsys, dirname):
trainer = Engine(lambda e, b: None)
evaluator = Engine(lambda e, b: None)
assert len(trainer.logger.handlers) == 0
trainer.logger.addHandler(logging.NullHandler())
trainer.logger.addHandler(logging.NullHandler())
trainer.logger.addHandler(logging.NullHandler())
fp = dirname / "log"
def _test(stream):
trainer.logger = setup_logger("trainer", stream=stream, filepath=fp, reset=True)
evaluator.logger = setup_logger("evaluator", stream=stream, filepath=fp, reset=True)
assert len(trainer.logger.handlers) == 2
assert len(evaluator.logger.handlers) == 2
@trainer.on(Events.EPOCH_COMPLETED)
def _(_):
evaluator.run([0, 1, 2])
trainer.run([0, 1, 2, 3, 4, 5], max_epochs=5)
captured = capsys.readouterr()
if stream is sys.stdout:
err = captured.out.split("\n")
else:
err = captured.err.split("\n")
with open(fp, "r") as h:
data = h.readlines()
for source in [err, data]:
assert "trainer INFO: Engine run starting with max_epochs=5." in source[0]
assert "evaluator INFO: Engine run starting with max_epochs=1." in source[1]
_test(stream=None)
_test(stream=sys.stderr)
_test(stream=sys.stdout)
# Needed by windows to release FileHandler in the loggers
logging.shutdown()
def _setup_a_logger_and_dump(name, message):
logger = setup_logger(name)
logger.info(message)
def test_override_setup_logger(capsys):
_setup_a_logger_and_dump(__name__, "test_override_setup_logger")
source = capsys.readouterr().err.split("\n")
assert "tests.ignite.test_utils INFO: test_override_setup_logger" in source[0]
# change the logger level of _setup_a_logger_and_dump
setup_logger(name=__name__, level=logging.WARNING, reset=True)
_setup_a_logger_and_dump(__name__, "test_override_setup_logger")
source = capsys.readouterr().err.split("\n")
assert source[0] == ""
# Needed by windows to release FileHandler in the loggers
logging.shutdown()
def test_deprecated():
# Test on function without docs, @deprecated without reasons
@deprecated("0.4.2", "0.6.0")
def func_no_docs():
return 24
assert func_no_docs.__doc__ == "**Deprecated function**.\n\n .. deprecated:: 0.4.2"
# Test on function with docs, @deprecated without reasons
@deprecated("0.4.2", "0.6.0")
def func_no_reasons():
"""Docs are cool"""
return 24
assert func_no_reasons.__doc__ == "**Deprecated function**.\n\n Docs are cool.. deprecated:: 0.4.2"
# Test on function with docs, @deprecated with reasons
@deprecated("0.4.2", "0.6.0", reasons=("r1", "r2"))
def func_no_warnings():
"""Docs are very cool"""
return 24
assert (
func_no_warnings.__doc__
== "**Deprecated function**.\n\n Docs are very cool.. deprecated:: 0.4.2\n\n\t\n\t- r1\n\t- r2"
)
# Tests that the function emits DeprecationWarning
@deprecated("0.4.2", "0.6.0", reasons=("r1", "r2"))
def func_check_warning():
"""Docs are very ..."""
return 24
with pytest.deprecated_call():
assert func_check_warning() == 24
with pytest.warns(
DeprecationWarning,
match="This function has been deprecated since version 0.4.2 and will be removed in version 0.6.0."
+ "\n Please refer to the documentation for more details.",
):
# Trigger a warning.
func_check_warning()
# Test that the function raises Exception
@deprecated("0.4.2", "0.6.0", reasons=("reason1", "reason2"), raise_exception=True)
def func_with_everything():
return 1
with pytest.raises(Exception) as exec_info:
func_with_everything()
assert (
str(exec_info.value)
== "This function has been deprecated since version 0.4.2 and will be removed in version 0.6.0."
+ "\n Please refer to the documentation for more details."
)
def test_smoke__utils():
from ignite._utils import apply_to_tensor, apply_to_type, convert_tensor, to_onehot # noqa: F401
@pytest.mark.skipif(Version(torch.__version__) < Version("1.5.0"), reason="Skip if < 1.5.0")
def test_hash_checkpoint(tmp_path):
# download lightweight model
from torchvision.models import squeezenet1_0
model = squeezenet1_0()
torch.hub.download_url_to_file(
"https://download.pytorch.org/models/squeezenet1_0-b66bff10.pth", f"{tmp_path}/squeezenet1_0.pt"
)
hash_checkpoint_path, sha_hash = hash_checkpoint(f"{tmp_path}/squeezenet1_0.pt", str(tmp_path))
model.load_state_dict(torch.load(str(hash_checkpoint_path), "cpu"), True)
assert sha_hash[:8] == "b66bff10"
assert hash_checkpoint_path.name == f"squeezenet1_0-{sha_hash[:8]}.pt"
# test non-existent checkpoint_path
with pytest.raises(FileNotFoundError, match=r"not_found.pt does not exist in *"):
hash_checkpoint(f"{tmp_path}/not_found.pt", tmp_path)
|
import functools
import os
import shutil
import sys
import tempfile
import time
from pathlib import Path
import pytest
import torch
import torch.distributed as dist
import ignite.distributed as idist
@pytest.fixture(
params=[
"cpu",
pytest.param("cuda", marks=pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no CUDA support")),
]
)
def available_device(request):
return request.param
@pytest.fixture()
def dirname():
path = Path(tempfile.mkdtemp())
yield path
shutil.rmtree(path)
@pytest.fixture()
def get_fixed_dirname(worker_id):
# multi-proc friendly fixed tmp dirname
path = "/tmp/fixed_tmp_dirname_"
lrank = int(worker_id.replace("gw", "")) if "gw" in worker_id else 0
def getter(name="test"):
nonlocal path
path += name
time.sleep(0.5 * lrank)
os.makedirs(path, exist_ok=True)
return path
yield getter
time.sleep(1.0 * lrank + 1.0)
if Path(path).exists():
shutil.rmtree(path)
# sort of sync
time.sleep(1.0)
@pytest.fixture()
def get_rank_zero_dirname(dirname):
def func():
import ignite.distributed as idist
zero_rank_dirname = Path(idist.all_gather(str(dirname))[0])
return zero_rank_dirname
yield func
@pytest.fixture(scope="module")
def local_rank(worker_id):
"""use a different account in each xdist worker"""
if "gw" in worker_id:
lrank = int(worker_id.replace("gw", ""))
elif "master" == worker_id:
lrank = 0
else:
raise RuntimeError(f"Can not get rank from worker_id={worker_id}")
os.environ["LOCAL_RANK"] = f"{lrank}"
yield lrank
del os.environ["LOCAL_RANK"]
@pytest.fixture(scope="module")
def world_size():
remove_env_var = False
if "WORLD_SIZE" not in os.environ:
os.environ["WORLD_SIZE"] = "1"
remove_env_var = True
yield int(os.environ["WORLD_SIZE"])
if remove_env_var:
del os.environ["WORLD_SIZE"]
@pytest.fixture()
def clean_env():
for k in ["RANK", "LOCAL_RANK", "WORLD_SIZE"]:
if k in os.environ:
del os.environ[k]
def _create_dist_context(dist_info, lrank):
dist.init_process_group(**dist_info)
dist.barrier()
if torch.cuda.is_available():
torch.cuda.set_device(lrank)
return {"local_rank": lrank, "world_size": dist_info["world_size"], "rank": dist_info["rank"]}
def _destroy_dist_context():
if dist.get_rank() == 0:
# To support Python 3.7; Otherwise we could do `.unlink(missing_ok=True)`
try:
Path("/tmp/free_port").unlink()
except FileNotFoundError:
pass
dist.barrier()
dist.destroy_process_group()
from ignite.distributed.utils import _SerialModel, _set_model
# We need to set synced model to initial state
_set_model(_SerialModel())
def _find_free_port():
# Taken from https://github.com/facebookresearch/detectron2/blob/master/detectron2/engine/launch.py
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 0))
port = sock.getsockname()[1]
sock.close()
return port
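# Rank 0 picks a free TCP port and publishes it through a temp file ("/tmp/free_port");
# the other local ranks poll that file (for up to ~10 seconds) so that every process
# builds the same tcp:// init_method address.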
def _setup_free_port(local_rank):
port_file = "/tmp/free_port"
if local_rank == 0:
port = _find_free_port()
with open(port_file, "w") as h:
h.write(str(port))
return port
else:
counter = 10
while counter > 0:
counter -= 1
time.sleep(1)
if not Path(port_file).exists():
continue
with open(port_file, "r") as h:
port = h.readline()
return int(port)
raise RuntimeError(f"Failed to fetch free port on local rank {local_rank}")
@pytest.fixture()
def distributed_context_single_node_nccl(local_rank, world_size):
free_port = _setup_free_port(local_rank)
dist_info = {
"backend": "nccl",
"world_size": world_size,
"rank": local_rank,
"init_method": f"tcp://localhost:{free_port}",
}
yield _create_dist_context(dist_info, local_rank)
_destroy_dist_context()
@pytest.fixture()
def distributed_context_single_node_gloo(local_rank, world_size):
from datetime import timedelta
if sys.platform.startswith("win"):
temp_file = tempfile.NamedTemporaryFile(delete=False)
# can't use backslashes in f-strings
backslash = "\\"
init_method = f'file:///{temp_file.name.replace(backslash, "/")}'
else:
free_port = _setup_free_port(local_rank)
init_method = f"tcp://localhost:{free_port}"
temp_file = None
dist_info = {
"backend": "gloo",
"world_size": world_size,
"rank": local_rank,
"init_method": init_method,
"timeout": timedelta(seconds=60),
}
yield _create_dist_context(dist_info, local_rank)
_destroy_dist_context()
if temp_file:
temp_file.close()
@pytest.fixture()
def multi_node_conf(local_rank):
assert "node_id" in os.environ
assert "nnodes" in os.environ
assert "nproc_per_node" in os.environ
node_id = int(os.environ["node_id"])
nnodes = int(os.environ["nnodes"])
nproc_per_node = int(os.environ["nproc_per_node"])
out = {
"world_size": nnodes * nproc_per_node,
"rank": local_rank + node_id * nproc_per_node,
"local_rank": local_rank,
}
return out
def _create_mnodes_dist_context(dist_info, mnodes_conf):
dist.init_process_group(**dist_info)
dist.barrier()
if torch.cuda.is_available():
torch.cuda.device(mnodes_conf["local_rank"])
return mnodes_conf
def _destroy_mnodes_dist_context():
dist.barrier()
dist.destroy_process_group()
from ignite.distributed.utils import _SerialModel, _set_model
# We need to set synced model to initial state
_set_model(_SerialModel())
@pytest.fixture()
def distributed_context_multi_node_gloo(multi_node_conf):
assert "MASTER_ADDR" in os.environ
assert "MASTER_PORT" in os.environ
dist_info = {
"backend": "gloo",
"init_method": "env://",
"world_size": multi_node_conf["world_size"],
"rank": multi_node_conf["rank"],
}
yield _create_mnodes_dist_context(dist_info, multi_node_conf)
_destroy_mnodes_dist_context()
@pytest.fixture()
def distributed_context_multi_node_nccl(multi_node_conf):
assert "MASTER_ADDR" in os.environ
assert "MASTER_PORT" in os.environ
os.environ["MASTER_PORT"] = str(int(os.getenv("MASTER_PORT")) + 1)
dist_info = {
"backend": "nccl",
"init_method": "env://",
"world_size": multi_node_conf["world_size"],
"rank": multi_node_conf["rank"],
}
yield _create_mnodes_dist_context(dist_info, multi_node_conf)
_destroy_mnodes_dist_context()
def _xla_template_worker_task(index, fn, args):
import torch_xla.core.xla_model as xm
xm.rendezvous("init")
fn(index, *args)
def _xla_execute(fn, args, nprocs):
import torch_xla.distributed.xla_multiprocessing as xmp
spawn_kwargs = {}
if "COLAB_TPU_ADDR" in os.environ:
spawn_kwargs["start_method"] = "fork"
try:
xmp.spawn(_xla_template_worker_task, args=(fn, args), nprocs=nprocs, **spawn_kwargs)
except SystemExit as ex_:
assert ex_.code == 0, "Didn't successfully exit in XLA test"
@pytest.fixture()
def xmp_executor():
yield _xla_execute
@pytest.fixture()
def mock_gpu_is_not_available():
from unittest.mock import patch
with patch("torch.cuda") as mock_cuda:
mock_cuda.is_available.return_value = False
yield mock_cuda
def _hvd_task_with_init(func, args):
import horovod.torch as hvd
hvd.init()
lrank = hvd.local_rank()
if torch.cuda.is_available():
torch.cuda.set_device(lrank)
func(*args)
# Added a sleep to avoid flaky failures on circle ci
# Sometimes a rank is terminated before final collective
# op is finished.
# https://github.com/pytorch/ignite/pull/2357
time.sleep(2)
hvd.shutdown()
def _gloo_hvd_execute(func, args, np=1, do_init=False):
try:
# old API
from horovod.run.runner import run
except ImportError:
# new API: https://github.com/horovod/horovod/pull/2099
from horovod import run
kwargs = dict(use_gloo=True, num_proc=np)
if do_init:
return run(_hvd_task_with_init, args=(func, args), **kwargs)
return run(func, args=args, **kwargs)
@pytest.fixture()
def gloo_hvd_executor():
yield _gloo_hvd_execute
skip_if_no_gpu = pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
skip_if_has_not_native_dist_support = pytest.mark.skipif(
not idist.has_native_dist_support, reason="Skip if no native dist support"
)
skip_if_has_not_xla_support = pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
skip_if_has_not_horovod_support = pytest.mark.skipif(
not idist.has_hvd_support, reason="Skip if no Horovod dist support"
)
# Unlike other backends, Horovod and multi-process XLA run user code by
# providing a utility function which accepts user code as a callable argument.
# To keep distributed tests backend-agnostic, we mark Horovod and multi-process XLA
# tests during fixture preparation and replace their function with the proper one
# just before running the test. PyTest stash is a safe way to share state between
# different stages of tool runtime and we use it to mark the tests.
is_horovod_stash_key = pytest.StashKey[bool]()
is_xla_stash_key = pytest.StashKey[bool]()
is_xla_single_device_stash_key = pytest.StashKey[bool]()
@pytest.fixture(
params=[
pytest.param("nccl", marks=[pytest.mark.distributed, skip_if_has_not_native_dist_support, skip_if_no_gpu]),
pytest.param("gloo_cpu", marks=[pytest.mark.distributed, skip_if_has_not_native_dist_support]),
pytest.param("gloo", marks=[pytest.mark.distributed, skip_if_has_not_native_dist_support, skip_if_no_gpu]),
pytest.param(
"horovod",
marks=[
pytest.mark.distributed,
skip_if_has_not_horovod_support,
pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc"),
],
),
pytest.param(
"single_device_xla",
marks=[
pytest.mark.tpu,
skip_if_has_not_xla_support,
pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars"),
],
),
pytest.param(
"xla_nprocs",
marks=[
pytest.mark.tpu,
skip_if_has_not_xla_support,
pytest.mark.skipif(
"NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars"
),
],
),
],
)
def distributed(request, local_rank, world_size):
if request.param in ("nccl", "gloo_cpu", "gloo"):
if "gloo" in request.param and sys.platform.startswith("win"):
temp_file = tempfile.NamedTemporaryFile(delete=False)
# can't use backslashes in f-strings
backslash = "\\"
init_method = f'file:///{temp_file.name.replace(backslash, "/")}'
else:
temp_file = None
free_port = _setup_free_port(local_rank)
init_method = f"tcp://localhost:{free_port}"
dist_info = {
"world_size": world_size,
"rank": local_rank,
"init_method": init_method,
}
if request.param == "nccl":
dist_info["backend"] = "nccl"
else:
dist_info["backend"] = "gloo"
from datetime import timedelta
dist_info["timeout"] = timedelta(seconds=60)
yield _create_dist_context(dist_info, local_rank)
_destroy_dist_context()
if temp_file:
temp_file.close()
elif request.param == "horovod":
request.node.stash[is_horovod_stash_key] = True
yield None
elif request.param in ("single_device_xla", "xla_nprocs"):
request.node.stash[is_xla_stash_key] = True
request.node.stash[is_xla_single_device_stash_key] = request.param == "single_device_xla"
yield {"xla_index": -1} if request.param == "xla_nprocs" else None
else:
raise RuntimeError(f"Invalid parameter value for `distributed` fixture, given {request.param}")
@pytest.hookimpl
def pytest_pyfunc_call(pyfuncitem: pytest.Function) -> None:
if pyfuncitem.stash.get(is_horovod_stash_key, False):
def testfunc_wrapper(test_func, **kwargs):
def hvd_worker():
import horovod.torch as hvd
hvd.init()
lrank = hvd.local_rank()
if torch.cuda.is_available():
torch.cuda.set_device(lrank)
test_func(**kwargs)
hvd.shutdown()
try:
# old API
from horovod.run.runner import run
except ImportError:
# new API: https://github.com/horovod/horovod/pull/2099
from horovod import run
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
hvd_kwargs = dict(use_gloo=True, num_proc=nproc)
run(hvd_worker, **hvd_kwargs)
pyfuncitem.obj = functools.partial(testfunc_wrapper, pyfuncitem.obj)
elif pyfuncitem.stash.get(is_xla_stash_key, False) and not pyfuncitem.stash[is_xla_single_device_stash_key]:
def testfunc_wrapper(testfunc, **kwargs):
def xla_worker(index, fn):
import torch_xla.core.xla_model as xm
kwargs["distributed"]["xla_index"] = index
xm.rendezvous("init")
fn(**kwargs)
import torch_xla.distributed.xla_multiprocessing as xmp
spawn_kwargs = {"nprocs": int(os.environ["NUM_TPU_WORKERS"])}
if "COLAB_TPU_ADDR" in os.environ:
spawn_kwargs["start_method"] = "fork"
try:
xmp.spawn(xla_worker, args=(testfunc,), **spawn_kwargs)
except SystemExit as ex_:
assert ex_.code == 0, "Didn't successfully exit in XLA test"
pyfuncitem.obj = functools.partial(testfunc_wrapper, pyfuncitem.obj)
|
import torch
def cpu_and_maybe_cuda():
return ("cpu",) + (("cuda",) if torch.cuda.is_available() else ())
|
import warnings
from functools import partial
from itertools import accumulate
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.engine import Engine, Events
from ignite.metrics import Accuracy, RunningAverage
from ignite.metrics.metric import RunningBatchWise, RunningEpochWise, SingleEpochRunningBatchWise
def test_wrong_input_args():
with pytest.raises(TypeError, match=r"Argument src should be a Metric or None."):
RunningAverage(src=[12, 34])
with pytest.raises(ValueError, match=r"Argument alpha should be a float between"):
RunningAverage(alpha=-1.0)
with pytest.raises(ValueError, match=r"Argument output_transform should be None if src is a Metric"):
RunningAverage(Accuracy(), output_transform=lambda x: x[0])
with pytest.raises(ValueError, match=r"Argument output_transform should not be None if src corresponds"):
RunningAverage()
with pytest.raises(ValueError, match=r"Argument device should be None if src is a Metric"):
RunningAverage(Accuracy(), device="cpu")
with pytest.warns(UserWarning, match=r"`epoch_bound` is deprecated and will be removed in the future."):
m = RunningAverage(Accuracy(), epoch_bound=True)
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize("epoch_bound, usage", [(False, RunningBatchWise()), (True, SingleEpochRunningBatchWise())])
def test_epoch_bound(epoch_bound, usage):
with warnings.catch_warnings():
metric = RunningAverage(output_transform=lambda _: _, epoch_bound=epoch_bound)
e1 = Engine(lambda _, __: None)
e2 = Engine(lambda _, __: None)
metric.attach(e1, "")
metric.epoch_bound = None
metric.attach(e2, "", usage)
    assert e1._event_handlers == e2._event_handlers
@pytest.mark.parametrize("usage", [RunningBatchWise(), SingleEpochRunningBatchWise()])
def test_integration_batchwise(usage):
torch.manual_seed(10)
alpha = 0.98
n_iters = 10
batch_size = 10
n_classes = 10
max_epochs = 3
data = list(range(n_iters))
loss = torch.arange(n_iters, dtype=torch.float)
y_true = torch.randint(0, n_classes, size=(n_iters, batch_size))
y_pred = torch.rand(n_iters, batch_size, n_classes)
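    # Reference values: an exponential moving average (weight `alpha`) of the per-batch accuracy;
    # for SingleEpochRunningBatchWise the average restarts each epoch, so the one-epoch sequence is tiled below.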
accuracy_running_averages = torch.tensor(
list(
accumulate(
map(
lambda y_yp: torch.sum(y_yp[1].argmax(dim=-1) == y_yp[0]).item() / y_yp[0].size(0),
zip(
y_true if isinstance(usage, SingleEpochRunningBatchWise) else y_true.repeat(max_epochs, 1),
y_pred if isinstance(usage, SingleEpochRunningBatchWise) else y_pred.repeat(max_epochs, 1, 1),
),
),
lambda ra, acc: ra * alpha + (1 - alpha) * acc,
)
)
)
if isinstance(usage, SingleEpochRunningBatchWise):
accuracy_running_averages = accuracy_running_averages.repeat(max_epochs)
loss_running_averages = torch.tensor(
list(
accumulate(
loss if isinstance(usage, SingleEpochRunningBatchWise) else loss.repeat(max_epochs),
lambda ra, loss_item: ra * alpha + (1 - alpha) * loss_item,
)
)
)
if isinstance(usage, SingleEpochRunningBatchWise):
loss_running_averages = loss_running_averages.repeat(max_epochs)
def update_fn(_, i):
loss_value = loss[i]
y_true_batch = y_true[i]
y_pred_batch = y_pred[i]
return loss_value, y_pred_batch, y_true_batch
trainer = Engine(update_fn)
acc_metric = RunningAverage(Accuracy(output_transform=lambda x: [x[1], x[2]]), alpha=alpha)
acc_metric.attach(trainer, "running_avg_accuracy", usage)
avg_output = RunningAverage(output_transform=lambda x: x[0], alpha=alpha)
avg_output.attach(trainer, "running_avg_loss", usage)
metric_acc_running_averages = []
metric_loss_running_averages = []
@trainer.on(Events.ITERATION_COMPLETED)
def _(engine):
metric_acc_running_averages.append(engine.state.metrics["running_avg_accuracy"])
metric_loss_running_averages.append(engine.state.metrics["running_avg_loss"])
trainer.run(data, max_epochs=3)
assert (torch.tensor(metric_acc_running_averages) == accuracy_running_averages).all()
assert (torch.tensor(metric_loss_running_averages) == loss_running_averages).all()
def test_integration_epochwise():
torch.manual_seed(10)
alpha = 0.98
n_iters = 10
batch_size = 10
n_classes = 10
max_epochs = 3
data = list(range(n_iters))
y_true = torch.randint(0, n_classes, size=(n_iters, batch_size))
y_pred = torch.rand(max_epochs, n_iters, batch_size, n_classes)
accuracy_running_averages = torch.tensor(
list(
accumulate(
map(
lambda y_pred_epoch: torch.sum(y_pred_epoch.argmax(dim=-1) == y_true).item() / y_true.numel(),
y_pred,
),
lambda ra, acc: ra * alpha + (1 - alpha) * acc,
)
)
)
def update_fn(engine, i):
y_true_batch = y_true[i]
y_pred_batch = y_pred[engine.state.epoch - 1, i]
return y_pred_batch, y_true_batch
trainer = Engine(update_fn)
acc_metric = RunningAverage(Accuracy(), alpha=alpha)
acc_metric.attach(trainer, "running_avg_accuracy", RunningEpochWise())
metric_acc_running_averages = []
@trainer.on(Events.EPOCH_COMPLETED)
def _(engine):
metric_acc_running_averages.append(engine.state.metrics["running_avg_accuracy"])
trainer.run(data, max_epochs=3)
assert (torch.tensor(metric_acc_running_averages) == accuracy_running_averages).all()
@pytest.mark.parametrize("usage", [RunningBatchWise(), SingleEpochRunningBatchWise(), RunningEpochWise()])
def test_multiple_attach(usage):
n_iters = 100
errD_values = iter(np.random.rand(n_iters))
errG_values = iter(np.random.rand(n_iters))
D_x_values = iter(np.random.rand(n_iters))
D_G_z1 = iter(np.random.rand(n_iters))
D_G_z2 = iter(np.random.rand(n_iters))
def update_fn(engine, batch):
return {
"errD": next(errD_values),
"errG": next(errG_values),
"D_x": next(D_x_values),
"D_G_z1": next(D_G_z1),
"D_G_z2": next(D_G_z2),
}
trainer = Engine(update_fn)
alpha = 0.98
# attach running average
monitoring_metrics = ["errD", "errG", "D_x", "D_G_z1", "D_G_z2"]
for metric in monitoring_metrics:
foo = partial(lambda x, metric: x[metric], metric=metric)
RunningAverage(alpha=alpha, output_transform=foo).attach(trainer, metric, usage)
@trainer.on(usage.COMPLETED)
def check_values(engine):
values = []
for metric in monitoring_metrics:
values.append(engine.state.metrics[metric])
values = set(values)
assert len(values) == len(monitoring_metrics)
data = list(range(n_iters))
trainer.run(data)
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize("epoch_bound", [True, False, None])
@pytest.mark.parametrize("src", [Accuracy(), None])
@pytest.mark.parametrize("usage", [RunningBatchWise(), SingleEpochRunningBatchWise(), RunningEpochWise()])
def test_detach(epoch_bound, src, usage):
with warnings.catch_warnings():
m = RunningAverage(src, output_transform=(lambda _: _) if src is None else None, epoch_bound=epoch_bound)
e = Engine(lambda _, __: None)
m.attach(e, "m", usage)
for event_handlers in e._event_handlers.values():
assert len(event_handlers) != 0
m.detach(e, usage)
for event_handlers in e._event_handlers.values():
assert len(event_handlers) == 0
def test_output_is_tensor():
m = RunningAverage(output_transform=lambda x: x)
m.update(torch.rand(10, requires_grad=True).mean())
v = m.compute()
assert isinstance(v, torch.Tensor)
assert not v.requires_grad
m.update(torch.rand(10, requires_grad=True).mean())
v = m.compute()
assert isinstance(v, torch.Tensor)
assert not v.requires_grad
m.update(torch.rand(10, requires_grad=True).mean())
v = m.compute()
assert isinstance(v, torch.Tensor)
assert not v.requires_grad
@pytest.mark.parametrize("usage", [RunningBatchWise(), SingleEpochRunningBatchWise()])
def test_distrib_on_output(distributed, usage):
device = idist.device()
rank = idist.get_rank()
n_iters = 10
n_epochs = 3
# Data per rank
data = list(range(n_iters))
rank_loss_count = n_epochs * n_iters
all_loss_values = torch.arange(0, rank_loss_count * idist.get_world_size(), dtype=torch.float64).to(device)
loss_values = iter(all_loss_values[rank_loss_count * rank : rank_loss_count * (rank + 1)])
def update_fn(engine, batch):
loss_value = next(loss_values)
return loss_value.item()
trainer = Engine(update_fn)
alpha = 0.98
metric_device = device if device.type != "xla" else "cpu"
avg_output = RunningAverage(output_transform=lambda x: x, alpha=alpha, device=metric_device)
avg_output.attach(trainer, "running_avg_output", usage)
@trainer.on(usage.STARTED)
def reset_running_avg_output(engine):
engine.state.running_avg_output = None
@trainer.on(usage.ITERATION_COMPLETED)
def running_avg_output_update(engine):
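        # Manually average this iteration's loss over all ranks, then fold it into the reference running average.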
i = engine.state.iteration - 1
o = sum([all_loss_values[i + r * rank_loss_count] for r in range(idist.get_world_size())]).item()
o /= idist.get_world_size()
if engine.state.running_avg_output is None:
engine.state.running_avg_output = o
else:
engine.state.running_avg_output = engine.state.running_avg_output * alpha + (1.0 - alpha) * o
@trainer.on(usage.COMPLETED)
def assert_equal_running_avg_output_values(engine):
it = engine.state.iteration
assert (
engine.state.running_avg_output == engine.state.metrics["running_avg_output"]
), f"{it}: {engine.state.running_avg_output} vs {engine.state.metrics['running_avg_output']}"
trainer.run(data, max_epochs=3)
@pytest.mark.parametrize("usage", [RunningBatchWise(), SingleEpochRunningBatchWise(), RunningEpochWise()])
def test_distrib_on_metric(distributed, usage):
device = idist.device()
rank = idist.get_rank()
n_iters = 10
n_epochs = 3
batch_size = 10
n_classes = 10
def _test(metric_device):
data = list(range(n_iters))
np.random.seed(12)
all_y_true_batch_values = np.random.randint(
0, n_classes, size=(idist.get_world_size(), n_epochs * n_iters, batch_size)
)
all_y_pred_batch_values = np.random.rand(idist.get_world_size(), n_epochs * n_iters, batch_size, n_classes)
y_true_batch_values = iter(all_y_true_batch_values[rank, ...])
y_pred_batch_values = iter(all_y_pred_batch_values[rank, ...])
def update_fn(engine, batch):
y_true_batch = next(y_true_batch_values)
y_pred_batch = next(y_pred_batch_values)
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
trainer = Engine(update_fn)
alpha = 0.98
acc_metric = RunningAverage(Accuracy(device=metric_device), alpha=alpha)
acc_metric.attach(trainer, "running_avg_accuracy", usage)
running_avg_acc = [
None,
]
true_acc_metric = Accuracy(device=metric_device)
@trainer.on(Events.ITERATION_COMPLETED)
def manual_running_avg_acc(engine):
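            # Rebuild the reference running accuracy by hand from every rank's batch,
            # resetting the accumulators where the chosen usage requires it.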
iteration = engine.state.iteration
if not isinstance(usage, RunningEpochWise) or ((iteration - 1) % n_iters) == 0:
true_acc_metric.reset()
if ((iteration - 1) % n_iters) == 0 and isinstance(usage, SingleEpochRunningBatchWise):
running_avg_acc[0] = None
for j in range(idist.get_world_size()):
output = (
torch.from_numpy(all_y_pred_batch_values[j, iteration - 1, :, :]),
torch.from_numpy(all_y_true_batch_values[j, iteration - 1, :]),
)
true_acc_metric.update(output)
if not isinstance(usage, RunningEpochWise) or (iteration % n_iters) == 0:
batch_acc = true_acc_metric._num_correct.item() * 1.0 / true_acc_metric._num_examples
if running_avg_acc[0] is None:
running_avg_acc[0] = batch_acc
else:
running_avg_acc[0] = running_avg_acc[0] * alpha + (1.0 - alpha) * batch_acc
engine.state.running_avg_acc = running_avg_acc[0]
@trainer.on(Events.ITERATION_COMPLETED)
def assert_equal_running_avg_acc_values(engine):
print(engine.state.iteration)
if not isinstance(usage, RunningEpochWise) or (
(engine.state.iteration > 1) and ((engine.state.iteration % n_iters) == 1)
):
assert (
engine.state.running_avg_acc == engine.state.metrics["running_avg_accuracy"]
), f"{engine.state.running_avg_acc} vs {engine.state.metrics['running_avg_accuracy']}"
trainer.run(data, max_epochs=3)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def test_distrib_accumulator_device(distributed):
device = idist.device()
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
# Don't test the src=Metric case because compute() returns a scalar,
# so the metric doesn't accumulate on the device specified
avg = RunningAverage(output_transform=lambda x: x, device=metric_device)
assert avg._device == metric_device
        # `_value` stays None until the first update followed by a compute call
for _ in range(3):
avg.update(torch.tensor(1.0, device=device))
avg.compute()
assert (
avg._value.device == metric_device
), f"{type(avg._value.device)}:{avg._value.device} vs {type(metric_device)}:{metric_device}"
|
from typing import Sequence, Union
import numpy as np
import pytest
import torch
from skimage.metrics import structural_similarity as ski_ssim
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import SSIM
def test_zero_div():
ssim = SSIM(data_range=1.0)
with pytest.raises(NotComputableError):
ssim.compute()
def test_invalid_ssim():
y_pred = torch.rand(1, 1, 4, 4)
y = y_pred + 0.125
with pytest.raises(ValueError, match=r"Expected kernel_size to have odd positive number."):
ssim = SSIM(data_range=1.0, kernel_size=2)
ssim.update((y_pred, y))
ssim.compute()
with pytest.raises(ValueError, match=r"Expected kernel_size to have odd positive number."):
ssim = SSIM(data_range=1.0, kernel_size=-1)
ssim.update((y_pred, y))
ssim.compute()
with pytest.raises(ValueError, match=r"Argument kernel_size should be either int or a sequence of int."):
ssim = SSIM(data_range=1.0, kernel_size=1.0)
ssim.update((y_pred, y))
ssim.compute()
with pytest.raises(ValueError, match=r"Argument sigma should be either float or a sequence of float."):
ssim = SSIM(data_range=1.0, sigma=-1)
ssim.update((y_pred, y))
ssim.compute()
with pytest.raises(ValueError, match=r"Expected sigma to have positive number."):
ssim = SSIM(data_range=1.0, sigma=(-1, -1))
ssim.update((y_pred, y))
ssim.compute()
with pytest.raises(ValueError, match=r"Argument sigma should be either float or a sequence of float."):
ssim = SSIM(data_range=1.0, sigma=1)
ssim.update((y_pred, y))
ssim.compute()
with pytest.raises(ValueError, match=r"Expected y_pred and y to have the same shape."):
y = y.squeeze(dim=0)
ssim = SSIM(data_range=1.0)
ssim.update((y_pred, y))
ssim.compute()
with pytest.raises(ValueError, match=r"Expected y_pred and y to have BxCxHxW shape."):
y = y.squeeze(dim=0)
ssim = SSIM(data_range=1.0)
ssim.update((y, y))
ssim.compute()
with pytest.raises(TypeError, match=r"Expected y_pred and y to have the same data type."):
y = y.double()
ssim = SSIM(data_range=1.0)
ssim.update((y_pred, y))
ssim.compute()
@pytest.mark.parametrize(
"shape, kernel_size, gaussian, use_sample_covariance",
[[(8, 3, 224, 224), 7, False, True], [(12, 3, 28, 28), 11, True, False]],
)
def test_ssim(available_device, shape, kernel_size, gaussian, use_sample_covariance):
y_pred = torch.rand(shape, device=available_device)
y = y_pred * 0.8
compare_ssim_ignite_skiimg(
y_pred,
y,
available_device,
kernel_size=kernel_size,
gaussian=gaussian,
use_sample_covariance=use_sample_covariance,
)
def compare_ssim_ignite_skiimg(
y_pred: torch.Tensor,
y: torch.Tensor,
device: torch.device,
precision: float = 2e-5, # default to float32 expected precision
*,
skimg_y_pred: Union[np.ndarray, None] = None,
skimg_y: Union[np.ndarray, None] = None,
data_range: float = 1.0,
kernel_size: Union[int, Sequence[int]] = 11,
gaussian: bool = True,
use_sample_covariance: bool = False,
):
sigma = 1.5
ssim = SSIM(data_range=data_range, sigma=sigma, device=device)
ssim.update((y_pred, y))
ignite_ssim = ssim.compute()
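    # numpy has no bfloat16 dtype, so cast before building the scikit-image reference inputs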
if y_pred.dtype == torch.bfloat16:
y_pred = y_pred.to(dtype=torch.float16)
if skimg_y_pred is None:
skimg_y_pred = y_pred.cpu().numpy()
if skimg_y is None:
skimg_y = skimg_y_pred * 0.8
skimg_ssim = ski_ssim(
skimg_y_pred,
skimg_y,
win_size=kernel_size,
sigma=sigma,
channel_axis=1,
gaussian_weights=gaussian,
data_range=data_range,
use_sample_covariance=use_sample_covariance,
)
assert isinstance(ignite_ssim, float)
assert np.allclose(ignite_ssim, skimg_ssim, atol=precision)
@pytest.mark.parametrize(
"metric_device, y_pred_device",
[
[torch.device("cpu"), torch.device("cpu")],
[torch.device("cpu"), torch.device("cuda")],
[torch.device("cuda"), torch.device("cpu")],
[torch.device("cuda"), torch.device("cuda")],
],
)
def test_ssim_device(available_device, metric_device, y_pred_device):
if available_device == "cpu":
pytest.skip("This test requires a cuda device.")
data_range = 1.0
sigma = 1.5
shape = (12, 5, 256, 256)
ssim = SSIM(data_range=data_range, sigma=sigma, device=metric_device)
y_pred = torch.rand(shape, device=y_pred_device)
y = y_pred * 0.8
if metric_device == torch.device("cuda") and y_pred_device == torch.device("cpu"):
with pytest.warns(UserWarning):
ssim.update((y_pred, y))
else:
ssim.update((y_pred, y))
if metric_device == torch.device("cuda") or y_pred_device == torch.device("cuda"):
        # A tensor will always have the device index set
        expected_device = torch.device("cuda:0")
    else:
        expected_device = torch.device("cpu")
    assert ssim._kernel.device == expected_device
def test_ssim_variable_batchsize(available_device):
# Checks https://github.com/pytorch/ignite/issues/2532
sigma = 1.5
data_range = 1.0
ssim = SSIM(data_range=data_range, sigma=sigma)
y_preds = [
torch.rand(12, 3, 28, 28, device=available_device),
torch.rand(12, 3, 28, 28, device=available_device),
torch.rand(8, 3, 28, 28, device=available_device),
torch.rand(16, 3, 28, 28, device=available_device),
torch.rand(1, 3, 28, 28, device=available_device),
torch.rand(30, 3, 28, 28, device=available_device),
]
y_true = [v * 0.8 for v in y_preds]
for y_pred, y in zip(y_preds, y_true):
ssim.update((y_pred, y))
out = ssim.compute()
ssim.reset()
ssim.update((torch.cat(y_preds), torch.cat(y_true)))
expected = ssim.compute()
assert np.allclose(out, expected)
def test_ssim_variable_channel(available_device):
y_preds = [
torch.rand(12, 5, 28, 28, device=available_device),
torch.rand(12, 4, 28, 28, device=available_device),
torch.rand(12, 7, 28, 28, device=available_device),
torch.rand(12, 3, 28, 28, device=available_device),
torch.rand(12, 11, 28, 28, device=available_device),
torch.rand(12, 6, 28, 28, device=available_device),
]
y_true = [v * 0.8 for v in y_preds]
for y_pred, y in zip(y_preds, y_true):
compare_ssim_ignite_skiimg(y_pred, y, available_device)
@pytest.mark.parametrize(
"dtype, precision", [(torch.bfloat16, 2e-3), (torch.float16, 4e-4), (torch.float32, 2e-5), (torch.float64, 2e-5)]
)
def test_cuda_ssim_dtypes(available_device, dtype, precision):
# Checks https://github.com/pytorch/ignite/pull/3034
if available_device == "cpu" and dtype in [torch.float16, torch.bfloat16]:
pytest.skip(reason=f"Unsupported dtype {dtype} on CPU device")
shape = (12, 3, 28, 28)
y_pred = torch.rand(shape, device=available_device, dtype=dtype)
y = y_pred * 0.8
compare_ssim_ignite_skiimg(y_pred, y, available_device, precision)
@pytest.mark.parametrize(
"shape, kernel_size, gaussian, use_sample_covariance",
[[(8, 3, 224, 224), 7, False, True], [(12, 3, 28, 28), 11, True, False]],
)
def test_ssim_uint8(available_device, shape, kernel_size, gaussian, use_sample_covariance):
y_pred = torch.randint(0, 255, shape, device=available_device, dtype=torch.uint8)
y = (y_pred * 0.8).to(dtype=torch.uint8)
sigma = 1.5
data_range = 255
ssim = SSIM(data_range=data_range, sigma=sigma, device=available_device)
ssim.update((y_pred, y))
ignite_ssim = ssim.compute()
skimg_pred = y_pred.cpu().numpy()
skimg_y = (skimg_pred * 0.8).astype(np.uint8)
skimg_ssim = ski_ssim(
skimg_pred,
skimg_y,
win_size=kernel_size,
sigma=sigma,
channel_axis=1,
gaussian_weights=gaussian,
data_range=data_range,
use_sample_covariance=use_sample_covariance,
)
assert isinstance(ignite_ssim, float)
assert np.allclose(ignite_ssim, skimg_ssim, atol=1e-5)
@pytest.mark.parametrize("metric_device", ["cpu", "process_device"])
def test_distrib_integration(distributed, metric_device):
from ignite.engine import Engine
rank = idist.get_rank()
torch.manual_seed(12 + rank)
n_iters = 100
batch_size = 10
device = idist.device()
if metric_device == "process_device":
metric_device = device if device.type != "xla" else "cpu"
y_pred = torch.rand(n_iters * batch_size, 3, 28, 28, dtype=torch.float, device=device)
y = y_pred * 0.65
def update(engine, i):
return (
y_pred[i * batch_size : (i + 1) * batch_size, ...],
y[i * batch_size : (i + 1) * batch_size, ...],
)
engine = Engine(update)
SSIM(data_range=1.0, device=metric_device).attach(engine, "ssim")
data = list(range(n_iters))
engine.run(data=data, max_epochs=1)
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
assert "ssim" in engine.state.metrics
res = engine.state.metrics["ssim"]
np_pred = y_pred.cpu().numpy()
np_true = np_pred * 0.65
true_res = ski_ssim(
np_pred,
np_true,
win_size=11,
sigma=1.5,
channel_axis=1,
gaussian_weights=True,
data_range=1.0,
use_sample_covariance=False,
)
    tol = 1e-3 if device.type == "xla" else 1e-4  # wouldn't it be better to ask the `distributed` fixture about the backend?
assert pytest.approx(res, abs=tol) == true_res
engine = Engine(update)
SSIM(data_range=1.0, gaussian=False, kernel_size=7, device=metric_device).attach(engine, "ssim")
data = list(range(n_iters))
engine.run(data=data, max_epochs=1)
assert "ssim" in engine.state.metrics
res = engine.state.metrics["ssim"]
np_pred = y_pred.cpu().numpy()
np_true = np_pred * 0.65
true_res = ski_ssim(np_pred, np_true, win_size=7, channel_axis=1, gaussian_weights=False, data_range=1.0)
assert pytest.approx(res, abs=tol) == true_res
@pytest.mark.parametrize("metric_device", [torch.device("cpu"), "process_device"])
def test_distrib_accumulator_device(distributed, metric_device):
device = idist.device()
if metric_device == "process_device":
metric_device = torch.device(device if device.type != "xla" else "cpu")
ssim = SSIM(data_range=1.0, device=metric_device)
assert ssim._kernel is None
assert isinstance(ssim._kernel_2d, torch.Tensor)
for dev in [ssim._device, ssim._kernel_2d.device]:
assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
y_pred = torch.rand(2, 3, 28, 28, dtype=torch.float, device=device)
y = y_pred * 0.65
ssim.update((y_pred, y))
dev = ssim._sum_of_ssim.device
assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
|
import numbers
import os
from unittest.mock import MagicMock
import numpy as np
import pytest
import torch
from pytest import approx, raises
from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
import ignite.distributed as idist
from ignite.engine import Engine, Events, State
from ignite.metrics import ConfusionMatrix, Precision, Recall
from ignite.metrics.metric import (
BatchFiltered,
BatchWise,
EpochWise,
Metric,
reinit__is_reduced,
RunningBatchWise,
RunningEpochWise,
SingleEpochRunningBatchWise,
sync_all_reduce,
)
class DummyMetric1(Metric):
def __init__(self, true_output, output_transform=lambda x: x):
super(DummyMetric1, self).__init__(output_transform=output_transform)
self.true_output = true_output
def reset(self):
pass
def compute(self):
pass
def update(self, output):
assert output == self.true_output
def test_no_transform():
y_pred = torch.tensor([[2.0], [-2.0]])
y = torch.zeros(2)
metric = DummyMetric1(true_output=(y_pred, y))
state = State(output=(y_pred, y))
engine = MagicMock(state=state)
metric.iteration_completed(engine)
def test_transform():
y_pred = torch.tensor([[2.0], [-2.0]])
y = torch.zeros(2)
def transform(output):
pred_dict, target_dict = output
return pred_dict["y"], target_dict["y"]
metric = DummyMetric1(true_output=(y_pred, y), output_transform=transform)
state = State(output=({"y": y_pred}, {"y": y}))
engine = MagicMock(state=state)
metric.iteration_completed(engine)
def test_output_as_mapping_wrong_keys():
metric = DummyMetric1(true_output=(0, 1))
state = State(output=({"y1": 0, "y2": 1}))
engine = MagicMock(state=state)
with pytest.raises(
ValueError, match=r"When transformed engine's output is a mapping, " r"it should contain \('y_pred', 'y'\) keys"
):
metric.iteration_completed(engine)
def test_output_as_mapping_keys_is_none():
class DummyMetric(Metric):
required_output_keys = None
def reset(self):
pass
def compute(self):
pass
def update(self, output):
pass
metric = DummyMetric()
assert metric.required_output_keys is None
state = State(output=({"y1": 0, "y2": 1}))
engine = MagicMock(state=state)
with pytest.raises(TypeError, match=r"Transformed engine output for DummyMetric metric should be a tuple/list"):
metric.iteration_completed(engine)
def test_output_as_mapping():
y_pred = torch.tensor([[2.0], [-2.0]])
y = torch.zeros(2)
metric = DummyMetric1(true_output=(y_pred, y))
state = State(output=({"y_pred": y_pred, "y": y}))
engine = MagicMock(state=state)
metric.iteration_completed(engine)
def test_no_grad():
y_pred = torch.zeros(4, requires_grad=True)
y = torch.zeros(4, requires_grad=False)
class DummyMetric(Metric):
def reset(self):
pass
def compute(self):
pass
def update(self, output):
y_pred, y = output
mse = torch.pow(y_pred - y.view_as(y_pred), 2)
assert y_pred.requires_grad
assert not mse.requires_grad
metric = DummyMetric()
state = State(output=(y_pred, y))
engine = MagicMock(state=state)
metric.iteration_completed(engine)
def test_arithmetics():
class ListGatherMetric(Metric):
def __init__(self, index):
self.index = index
super(ListGatherMetric, self).__init__()
def reset(self):
self.list_ = []
def update(self, output):
self.list_ = output
def compute(self):
return self.list_[self.index]
m0 = ListGatherMetric(0)
m1 = ListGatherMetric(1)
m2 = ListGatherMetric(2)
# __add__
m0_plus_m1 = m0 + m1
m0.update([1, 10, 100])
m1.update([1, 10, 100])
assert m0_plus_m1.compute() == 11
m0.update([2, 20, 200])
m1.update([2, 20, 200])
assert m0_plus_m1.compute() == 22
m2_plus_2 = m2 + 2
m2.update([1, 10, 100])
assert m2_plus_2.compute() == 102
m2_plus_2 = 2 + m2
m2.update([1, 10, 100])
assert m2_plus_2.compute() == 102
# __sub__
m0_minus_m1 = m0 - m1
m0.update([1, 10, 100])
m1.update([1, 10, 100])
assert m0_minus_m1.compute() == -9
m0.update([2, 20, 200])
m1.update([2, 20, 200])
assert m0_minus_m1.compute() == -18
m2_minus_2 = m2 - 2
m2.update([1, 10, 100])
assert m2_minus_2.compute() == 98
m2_minus_2 = 2 - m2
m2.update([1, 10, 100])
assert m2_minus_2.compute() == -98
# __mul__
m0_times_m1 = m0 * m1
m0.update([1, 10, 100])
m1.update([1, 10, 100])
assert m0_times_m1.compute() == 10
m0.update([2, 20, 200])
m1.update([2, 20, 200])
assert m0_times_m1.compute() == 40
m2_times_2 = m2 * 2
m2.update([1, 10, 100])
assert m2_times_2.compute() == 200
m2_times_2 = 2 * m2
m2.update([1, 10, 100])
assert m2_times_2.compute() == 200
# __pow__
m0_pow_m1 = m0**m1
m0.update([1, 10, 100])
m1.update([1, 10, 100])
assert m0_pow_m1.compute() == 1
m0.update([2, 20, 200])
m1.update([2, 20, 200])
assert m0_pow_m1.compute() == 2**20
m2_pow_2 = m2**2
m2.update([1, 10, 100])
assert m2_pow_2.compute() == 10000
m2_pow_2 = 0.99**m2
m2.update([1, 10, 100])
assert m2_pow_2.compute() == 0.3660323412732292
# __mod__
m0_mod_m1 = m0 % m1
m0.update([1, 10, 100])
m1.update([1, 10, 100])
assert m0_mod_m1.compute() == 1
m0.update([2, 20, 200])
m1.update([2, 20, 200])
assert m0_mod_m1.compute() == 2
m2_mod_2 = m2 % 2
m2.update([1, 10, 100])
assert m2_mod_2.compute() == 0
# __truediv__
m0_truediv_m1 = m0 / m1
m0.update([1, 10, 100])
m1.update([1, 10, 100])
assert m0_truediv_m1.compute() == approx(0.1)
m0.update([2, 20, 200])
m1.update([2, 20, 200])
assert m0_truediv_m1.compute() == approx(0.1)
m2_truediv_2 = m2 / 2
m2.update([1, 10, 100])
assert m2_truediv_2.compute() == approx(50.0)
m2_truediv_2 = 200 / m2
m2.update([1, 10, 100])
assert m2_truediv_2.compute() == approx(2.0)
m0_truediv_m1 = m0.__truediv__(m1)
m0.update([1, 10, 100])
m1.update([1, 10, 100])
assert m0_truediv_m1.compute() == approx(0.1)
m0.update([2, 20, 200])
m1.update([2, 20, 200])
assert m0_truediv_m1.compute() == approx(0.1)
m2_truediv_2 = m2.__truediv__(2)
m2.update([1, 10, 100])
assert m2_truediv_2.compute() == approx(50.0)
m2_truediv_2 = m2.__rtruediv__(200)
m2.update([1, 10, 100])
assert m2_truediv_2.compute() == approx(2.0)
# __floordiv__
m0_floordiv_m1 = m0 // m1
m0.update([1, 10, 100])
m1.update([1, 10, 100])
assert m0_floordiv_m1.compute() == 0
m0.update([2, 20, 200])
m1.update([2, 20, 200])
assert m0_floordiv_m1.compute() == 0
m2_floordiv_2 = m2 // 2
m2.update([1, 10, 100])
assert m2_floordiv_2.compute() == 50
def test_attach():
class CountMetric(Metric):
def __init__(self, value):
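            # `reset_count` must exist before Metric.__init__ runs (it calls `reset()`);
            # all counters are then (re)initialized below so only engine-driven resets are counted.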
self.reset_count = 0
super(CountMetric, self).__init__()
self.reset_count = 0
self.compute_count = 0
self.update_count = 0
self.value = value
def reset(self):
self.reset_count += 1
def compute(self):
self.compute_count += 1
return self.value
def update(self, output):
self.update_count += 1
def process_function(*args, **kwargs):
return 1
engine = Engine(process_function)
m1 = CountMetric(123)
m2 = CountMetric(456)
m1.attach(engine, "m1")
m2.attach(engine, "m2_1")
m2.attach(engine, "m2_2")
engine.run(range(10), 5)
assert engine.state.metrics["m1"] == 123
assert engine.state.metrics["m2_1"] == 456
assert engine.state.metrics["m2_2"] == 456
assert m1.reset_count == 5
assert m1.compute_count == 5
assert m1.update_count == 50
assert m2.reset_count == 5
assert m2.compute_count == 10
assert m2.update_count == 50
assert m1.is_attached(engine)
assert m2.is_attached(engine)
def test_detach():
class DummyMetric(Metric):
required_output_keys = None
def reset(self):
pass
def compute(self):
pass
def update(self, output):
pass
def process_function(*args, **kwargs):
return 1
engine = Engine(process_function)
m1 = DummyMetric()
m2 = DummyMetric()
m1.attach(engine, "m1")
m2.attach(engine, "m2_1")
m2.attach(engine, "m2_2")
m1.detach(engine)
m2.detach(engine)
engine.run(range(10), 5)
assert "m1" not in engine.state.metrics
assert "m2_1" not in engine.state.metrics
assert "m2_2" not in engine.state.metrics
assert not m1.is_attached(engine)
assert not m2.is_attached(engine)
def test_integration():
np.random.seed(1)
n_iters = 10
batch_size = 10
n_classes = 10
y_true = np.arange(0, n_iters * batch_size, dtype="int64") % n_classes
y_pred = 0.2 * np.random.rand(n_iters * batch_size, n_classes)
for i in range(n_iters * batch_size):
if np.random.rand() > 0.4:
y_pred[i, y_true[i]] = 1.0
else:
j = np.random.randint(0, n_classes)
y_pred[i, j] = 0.7
y_true_batch_values = iter(y_true.reshape(n_iters, batch_size))
y_pred_batch_values = iter(y_pred.reshape(n_iters, batch_size, n_classes))
def update_fn(engine, batch):
y_true_batch = next(y_true_batch_values)
y_pred_batch = next(y_pred_batch_values)
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
evaluator = Engine(update_fn)
precision = Precision(average=False)
recall = Recall(average=False)
F1 = precision * recall * 2 / (precision + recall)
precision.attach(evaluator, "precision")
recall.attach(evaluator, "recall")
F1.attach(evaluator, "f1")
data = list(range(n_iters))
state = evaluator.run(data, max_epochs=1)
precision_true = precision_score(y_true, np.argmax(y_pred, axis=-1), average=None)
recall_true = recall_score(y_true, np.argmax(y_pred, axis=-1), average=None)
f1_true = f1_score(y_true, np.argmax(y_pred, axis=-1), average=None)
precision = state.metrics["precision"].numpy()
recall = state.metrics["recall"].numpy()
f1 = state.metrics["f1"].numpy()
assert precision_true == approx(precision), f"{precision_true} vs {precision}"
assert recall_true == approx(recall), f"{recall_true} vs {recall}"
assert f1_true == approx(f1), f"{f1_true} vs {f1}"
def test_abstract_class():
with raises(TypeError):
Metric()
def test_pytorch_operators():
def _test(composed_metric, metric_name, compute_true_value_fn):
metrics = {
metric_name: composed_metric,
}
y_pred = torch.rand(15, 10, 5).float()
y = torch.randint(0, 5, size=(15, 10)).long()
def update_fn(engine, batch):
y_pred, y = batch
return y_pred, y
validator = Engine(update_fn)
for name, metric in metrics.items():
metric.attach(validator, name)
def data(y_pred, y):
for i in range(y_pred.shape[0]):
yield (y_pred[i], y[i])
d = data(y_pred, y)
state = validator.run(d, max_epochs=1, epoch_length=y_pred.shape[0])
assert set(state.metrics.keys()) == set([metric_name])
np_y_pred = np.argmax(y_pred.numpy(), axis=-1).ravel()
np_y = y.numpy().ravel()
assert state.metrics[metric_name] == approx(compute_true_value_fn(np_y_pred, np_y))
precision_1 = Precision(average=False)
precision_2 = Precision(average=False)
norm_summed_precision = (precision_1 + precision_2).norm(p=10)
def compute_true_norm_summed_precision(y_pred, y):
p1 = precision_score(y, y_pred, average=None)
p2 = precision_score(y, y_pred, average=None)
return np.linalg.norm(p1 + p2, ord=10)
    _test(norm_summed_precision, "norm summed precision", compute_true_value_fn=compute_true_norm_summed_precision)
precision = Precision(average=False)
recall = Recall(average=False)
sum_precision_recall = (precision + recall).sum()
def compute_sum_precision_recall(y_pred, y):
p = precision_score(y, y_pred, average=None)
r = recall_score(y, y_pred, average=None)
return np.sum(p + r)
_test(sum_precision_recall, "sum precision recall", compute_true_value_fn=compute_sum_precision_recall)
precision = Precision(average=False)
recall = Recall(average=False)
f1 = (precision * recall * 2 / (precision + recall + 1e-20)).mean()
def compute_f1(y_pred, y):
f1 = f1_score(y, y_pred, average="macro")
return f1
_test(f1, "f1", compute_true_value_fn=compute_f1)
def test_indexing_metric():
    def _test(ignite_metric, sklearn_metric, sklearn_args, index, num_classes=5):
y_pred = torch.rand(15, 10, num_classes).float()
y = torch.randint(0, num_classes, size=(15, 10)).long()
def update_fn(engine, batch):
y_pred, y = batch
return y_pred, y
metrics = {"metric": ignite_metric[index], "metric_wo_index": ignite_metric}
validator = Engine(update_fn)
for name, metric in metrics.items():
metric.attach(validator, name)
def data(y_pred, y):
for i in range(y_pred.shape[0]):
yield (y_pred[i], y[i])
d = data(y_pred, y)
state = validator.run(d, max_epochs=1, epoch_length=y_pred.shape[0])
        sklearn_output = sklearn_metric(
y.view(-1).numpy(), y_pred.view(-1, num_classes).argmax(dim=1).numpy(), **sklearn_args
)
assert (state.metrics["metric_wo_index"][index] == state.metrics["metric"]).all()
assert np.allclose(state.metrics["metric"].numpy(), sklearn_output)
num_classes = 5
labels = list(range(0, num_classes, 2))
_test(Precision(), precision_score, {"labels": labels, "average": None}, index=labels)
labels = list(range(num_classes - 1, 0, -2))
_test(Precision(), precision_score, {"labels": labels, "average": None}, index=labels)
labels = [1]
_test(Precision(), precision_score, {"labels": labels, "average": None}, index=labels)
labels = list(range(0, num_classes, 2))
_test(Recall(), recall_score, {"labels": labels, "average": None}, index=labels)
labels = list(range(num_classes - 1, 0, -2))
_test(Recall(), recall_score, {"labels": labels, "average": None}, index=labels)
labels = [1]
_test(Recall(), recall_score, {"labels": labels, "average": None}, index=labels)
# np.ix_ is used to allow for a 2D slice of a matrix. This is required to get accurate result from
# ConfusionMatrix. ConfusionMatrix must be sliced the same row-wise and column-wise.
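    # Tiny illustration of np.ix_ (added for clarity, not part of the original assertions):
    # picking rows and columns [0, 2] of a 3x3 matrix keeps the 2x2 corner sub-matrix.
    _demo = np.arange(9).reshape(3, 3)
    assert (_demo[np.ix_([0, 2], [0, 2])] == np.array([[0, 2], [6, 8]])).all()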
labels = list(range(0, num_classes, 2))
_test(ConfusionMatrix(num_classes), confusion_matrix, {"labels": labels}, index=np.ix_(labels, labels))
labels = list(range(num_classes - 1, 0, -2))
_test(ConfusionMatrix(num_classes), confusion_matrix, {"labels": labels}, index=np.ix_(labels, labels))
labels = [1]
_test(ConfusionMatrix(num_classes), confusion_matrix, {"labels": labels}, index=np.ix_(labels, labels))
class DummyMetric2(Metric):
@reinit__is_reduced
def reset(self):
pass
def compute(self):
pass
@reinit__is_reduced
def update(self, output):
pass
def _test_compute_with_sync_all_reduce_doesnt_change_attributes(device):
class DummyMetric3(Metric):
@reinit__is_reduced
def reset(self):
self.a = torch.tensor(0.0, device=self._device)
self.b = 0.0
def update(self, output):
self.a += torch.tensor(1.0)
self.b += 1.0
@sync_all_reduce("a", "b")
def compute(self):
return self.a.item(), self.b
metric_device = device if torch.device(device).type != "xla" else "cpu"
metric = DummyMetric3(device=metric_device)
metric.update(None)
assert metric.a.item() == metric.b == 1.0
metric.compute()
assert metric.a.item() == metric.b == 1.0
def _test_invalid_sync_all_reduce(device):
class InvalidMetric(Metric):
@reinit__is_reduced
def reset(self):
self.a = torch.tensor([0.0, 1.0, 2.0, 3.0], requires_grad=False)
self.c = 0.0
self.n = 0
self.m = -1
self.d = "a string"
def compute(self):
pass
def update(self):
pass
@sync_all_reduce("a:sum")
def invalid_reduction_op_1(self):
pass
@sync_all_reduce("c:MaX")
def invalid_reduction_op_2(self):
pass
@sync_all_reduce("n:MINN")
def invalid_reduction_op_3(self):
pass
@sync_all_reduce("m:PROduCT")
def invalid_reduction_op_4(self):
pass
@sync_all_reduce("missingattr")
def invalid_reduction_op_5(self):
pass
@sync_all_reduce("d")
def invalid_reduction_op_6(self):
pass
metric_device = device if torch.device(device).type != "xla" else "cpu"
m = InvalidMetric(device=metric_device)
m.reset()
if idist.get_world_size() > 1:
with pytest.raises(ValueError, match=r"Reduction operation is not valid"):
m.invalid_reduction_op_1()
with pytest.raises(ValueError, match=r"Reduction operation is not valid"):
m.invalid_reduction_op_2()
with pytest.raises(ValueError, match=r"Reduction operation is not valid"):
m.invalid_reduction_op_3()
with pytest.raises(ValueError, match=r"Reduction operation is not valid"):
m.invalid_reduction_op_4()
with pytest.raises(ValueError, match=r"has no attribute named `missingattr`."):
m.invalid_reduction_op_5()
with pytest.raises(
TypeError, match=r"Attribute provided to sync_all_reduce should be a number or tensor but `d`"
):
m.invalid_reduction_op_6()
def _test_distrib_sync_all_reduce_decorator(device):
class DummyMetric(Metric):
@reinit__is_reduced
def reset(self):
# SUM op
self.a = torch.tensor([0.0, 1.0, 2.0, 3.0], device=self._device, requires_grad=False)
self.a_nocomp = self.a.clone().to("cpu")
self.b = torch.tensor(1.0, dtype=torch.float64, device=self._device, requires_grad=False)
self.b_nocomp = self.b.clone().to("cpu")
self.c = 0.0
self.c_nocomp = self.c
self.n = 0
self.n_nocomp = self.n
# MAX op
self.m = -1
# MIN op
self.k = 10000
# initialize number of updates to test (MAX, MIN) ops
self.num_updates = 0
# PRODUCT op
self.prod = torch.tensor([2.0, 3.0], device=self._device, requires_grad=False)
self.prod_nocomp = self.prod.clone().to("cpu")
@sync_all_reduce("a", "b", "c", "n:SUM", "m:MAX", "k:MIN", "prod:PRODUCT")
def compute(self):
assert (self.a.cpu() == (self.a_nocomp + 10) * idist.get_world_size()).all()
assert (self.b.cpu() == (self.b_nocomp - 5) * idist.get_world_size()).all()
assert self.c == pytest.approx((self.c_nocomp + 1.23456) * idist.get_world_size())
assert self.n == (self.n_nocomp + 1) * idist.get_world_size()
assert self.m == self.num_updates * (idist.get_world_size() - 1) - 1
assert self.k == 10000 - self.num_updates * (idist.get_world_size() - 1)
temp_prod_nocomp = 5 * self.prod_nocomp # new variable for the recomputing
temp_prod_nocomp = temp_prod_nocomp.pow(idist.get_world_size())
assert (self.prod.cpu() == temp_prod_nocomp).all()
@reinit__is_reduced
def update(self, output):
# SUM op
self.n += 1
self.c += 1.23456
self.a += 10.0
self.b -= 5.0
# MAX op
self.m += idist.get_rank()
# MIN op
self.k -= idist.get_rank()
            # number of updates for (MAX, MIN) ops
self.num_updates += 1
# PRODUCT op
self.prod *= 5
metric_device = device if torch.device(device).type != "xla" else "cpu"
m = DummyMetric(device=metric_device)
m.update(None)
m.compute()
# check if attributes are restored to their original values after previous `compute`
m.compute()
def _test_creating_on_xla_fails(device):
with pytest.raises(ValueError, match=r"Cannot create metric on an XLA device. Use device='cpu' instead."):
DummyMetric2(device=device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_sync_all_reduce_decorator(device)
_test_invalid_sync_all_reduce(device)
_test_compute_with_sync_all_reduce_doesnt_change_attributes(device)
_test_distrib_state_dict(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_sync_all_reduce_decorator(device)
_test_invalid_sync_all_reduce(device)
_test_compute_with_sync_all_reduce_doesnt_change_attributes(device)
_test_distrib_state_dict(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = "cpu" if not torch.cuda.is_available() else "cuda"
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_sync_all_reduce_decorator, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_invalid_sync_all_reduce, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_compute_with_sync_all_reduce_doesnt_change_attributes, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_sync_all_reduce_decorator(device)
_test_invalid_sync_all_reduce(device)
_test_compute_with_sync_all_reduce_doesnt_change_attributes(device)
_test_distrib_state_dict(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_sync_all_reduce_decorator(device)
_test_invalid_sync_all_reduce(device)
_test_compute_with_sync_all_reduce_doesnt_change_attributes(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_sync_all_reduce_decorator(device)
_test_creating_on_xla_fails(device)
_test_invalid_sync_all_reduce(device)
_test_compute_with_sync_all_reduce_doesnt_change_attributes(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_sync_all_reduce_decorator(device)
_test_creating_on_xla_fails(device)
_test_invalid_sync_all_reduce(device)
_test_compute_with_sync_all_reduce_doesnt_change_attributes(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
def test_completed():
class DummyMetric(Metric):
def reset(self):
pass
def compute(self):
pass
def update(self, output):
pass
m = DummyMetric()
# tensor
engine = MagicMock(state=State(metrics={}))
m.compute = MagicMock(return_value=torch.tensor(1.0))
m.completed(engine, "metric")
assert engine.state.metrics == {"metric": 1.0}
assert isinstance(engine.state.metrics["metric"], numbers.Number)
# mapping
engine = MagicMock(state=State(metrics={}))
metrics = {"foo": 1, "bar": torch.tensor(2.0), "baz": {"qux": "quux"}}
m.compute = MagicMock(return_value=metrics)
with pytest.raises(ValueError, match=r"Argument name 'foo' is conflicting with mapping keys"):
m.completed(engine, "foo")
m.completed(engine, "metric")
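    # When compute() returns a mapping, its entries are merged into engine.state.metrics and the
    # whole mapping is additionally stored under the metric's own name, hence the mutation below.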
metrics["metric"] = metrics
assert engine.state.metrics == metrics
# other
engine = MagicMock(state=State(metrics={}))
m.compute = MagicMock(return_value="foo")
m.completed(engine, "metric")
assert engine.state.metrics == {"metric": "foo"}
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_completed_on_cuda():
# Checks https://github.com/pytorch/ignite/issues/1635#issuecomment-863026919
class DummyMetric(Metric):
def reset(self):
pass
def compute(self):
return torch.tensor([1.0, 2.0, 3.0], device="cuda")
def update(self, output):
pass
m = DummyMetric()
# tensor
engine = MagicMock(state=State(metrics={}))
m.completed(engine, "metric")
assert "metric" in engine.state.metrics
assert isinstance(engine.state.metrics["metric"], torch.Tensor)
assert engine.state.metrics["metric"].device.type == "cpu"
def test_usage_exception():
engine = Engine(lambda e, b: b)
m = DummyMetric2()
with pytest.raises(TypeError, match=r"Unhandled usage type"):
m.attach(engine, "dummy", usage=1)
with pytest.raises(
ValueError,
match=r"usage should be '\(Running\)EpochWise.usage_name' or '\(\(SingleEpoch\)Running\)BatchWise.usage_name'",
):
m.attach(engine, "dummy", usage="fake")
class DummyAccumulateInListMetric(Metric):
def __init__(self):
super(DummyAccumulateInListMetric, self).__init__()
self.value = []
def reset(self):
self.value = []
def compute(self):
return self.value
def update(self, output):
self.value.append(output)
@pytest.mark.parametrize("usage", ["epoch_wise", EpochWise.usage_name, EpochWise()])
def test_epochwise_usage(usage):
engine = Engine(lambda e, b: b)
m = DummyAccumulateInListMetric()
m.attach(engine, "ewm", usage=usage)
@engine.on(Events.EPOCH_COMPLETED)
def _():
ewm = engine.state.metrics["ewm"]
assert len(ewm) == 3
assert ewm == [0, 1, 2]
engine.run([0, 1, 2], max_epochs=10)
m.detach(engine, usage=usage)
class DummyAccumulateMetric(Metric):
def __init__(self):
super(DummyAccumulateMetric, self).__init__()
self.value = 0
def reset(self):
self.value = 0
def compute(self):
return self.value
def update(self, output):
self.value += output
@pytest.mark.parametrize("usage", ["running_epoch_wise", RunningEpochWise.usage_name, RunningEpochWise()])
def test_running_epochwise_usage(usage):
engine = Engine(lambda e, b: e.state.metrics["ewm"])
engine.state.metrics["ewm"] = 0
@engine.on(Events.EPOCH_STARTED)
def _():
engine.state.metrics["ewm"] += 1
m = DummyAccumulateMetric()
m.attach(engine, "rewm", usage=usage)
@engine.on(Events.EPOCH_COMPLETED)
def _():
assert engine.state.metrics["rewm"] == sum(range(engine.state.epoch + 1))
engine.run([0, 1, 2], max_epochs=10)
m.detach(engine, usage=usage)
@pytest.mark.parametrize("usage", ["batch_wise", BatchWise.usage_name, BatchWise()])
def test_batchwise_usage(usage):
engine = Engine(lambda e, b: b)
m = DummyAccumulateInListMetric()
m.attach(engine, "bwm", usage=usage)
@engine.on(Events.ITERATION_COMPLETED)
def _():
bwm = engine.state.metrics["bwm"]
assert len(bwm) == 1
assert bwm[0] == (engine.state.iteration - 1) % 3
engine.run([0, 1, 2], max_epochs=10)
m.detach(engine, usage=usage)
@pytest.mark.parametrize("usage", ["running_batch_wise", RunningBatchWise.usage_name, RunningBatchWise()])
def test_running_batchwise_usage(usage):
engine = Engine(lambda e, b: b)
m = DummyAccumulateMetric()
m.attach(engine, "rbwm", usage=usage)
@engine.on(Events.EPOCH_COMPLETED)
def _():
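        # each epoch iterates over [0, 1, 2, 3], so the running sum grows by 6 per epoch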
assert engine.state.metrics["rbwm"] == 6 * engine.state.epoch
engine.run([0, 1, 2, 3], max_epochs=10)
m.detach(engine, usage=usage)
@pytest.mark.parametrize(
"usage", ["single_epoch_running_batch_wise", SingleEpochRunningBatchWise.usage_name, SingleEpochRunningBatchWise()]
)
def test_single_epoch_running_batchwise_usage(usage):
engine = Engine(lambda e, b: b)
m = DummyAccumulateMetric()
m.attach(engine, "rbwm", usage=usage)
@engine.on(Events.EPOCH_COMPLETED)
def _():
assert engine.state.metrics["rbwm"] == 6
engine.run([0, 1, 2, 3], max_epochs=10)
m.detach(engine, usage=usage)
def test_batchfiltered_usage():
class MyMetric(Metric):
def __init__(self):
super(MyMetric, self).__init__()
self.value = []
def reset(self):
self.value = []
def compute(self):
return self.value
def update(self, output):
self.value.append(output)
engine = Engine(lambda e, b: b)
m = MyMetric()
usage = BatchFiltered(every=2)
m.attach(engine, "bfm", usage=usage)
@engine.on(Events.EPOCH_COMPLETED)
def _():
bfm = engine.state.metrics["bfm"]
assert len(bfm) == 2
assert bfm[0] == 1
engine.run([0, 1, 2, 3], max_epochs=10)
def test_override_required_output_keys():
# https://discuss.pytorch.org/t/how-access-inputs-in-custom-ignite-metric/91221/5
import torch.nn as nn
from ignite.engine import create_supervised_evaluator
counter = [0]
class CustomMetric(Metric):
required_output_keys = ("y_pred", "y", "x")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def update(self, output):
y_pred, y, x = output
assert y_pred.shape == (4, 3)
assert y.shape == (4,)
assert x.shape == (4, 10)
assert x.equal(data[counter[0]][0])
assert y.equal(data[counter[0]][1])
counter[0] += 1
def reset(self):
pass
def compute(self):
pass
model = nn.Linear(10, 3)
metrics = {"Precision": Precision(), "CustomMetric": CustomMetric()}
evaluator = create_supervised_evaluator(
model, metrics=metrics, output_transform=lambda x, y, y_pred: {"x": x, "y": y, "y_pred": y_pred}
)
data = [
(torch.rand(4, 10), torch.randint(0, 3, size=(4,))),
(torch.rand(4, 10), torch.randint(0, 3, size=(4,))),
(torch.rand(4, 10), torch.randint(0, 3, size=(4,))),
]
evaluator.run(data)
@pytest.mark.parametrize("shapes", [[(10,), ()], [(5, 32, 32), (5, 32, 32)]])
def test_list_of_tensors_and_numbers(shapes):
def check_fn(output):
assert len(output) == 2
assert isinstance(output[0], torch.Tensor)
assert isinstance(output[1], torch.Tensor)
assert output[0].shape == (1,) + shapes[0]
assert output[1].shape == (1,) + shapes[1]
def get_data(gt_as_scalar=False):
return [
(
[torch.rand(shapes[0]) for _ in range(3 + i)], # predictions
[
torch.rand(shapes[1]).item() if gt_as_scalar else torch.rand(shapes[1]) for _ in range(3 + i)
], # ground truth
)
for i in range(5)
]
class MyMetric(Metric):
def __init__(self, check_fn):
super(MyMetric, self).__init__()
self.check_fn = check_fn
def reset(self):
pass
def compute(self):
pass
def update(self, output):
self.check_fn(output)
engine = Engine(lambda e, b: b)
m = MyMetric(check_fn)
m.attach(engine, "m")
data = get_data()
engine.run(data)
if len(shapes[1]) == 0:
data = get_data(gt_as_scalar=True)
engine.run(data)
def test_list_of_tensors_and_numbers_unsupported_output():
class MyMetric(Metric):
def reset(self):
pass
def compute(self):
pass
def update(self, output):
pass
engine = Engine(lambda e, b: ([0, 1, 2], [0, 1, 2], [0, 1, 2]))
m = MyMetric()
m.attach(engine, "m")
with pytest.raises(ValueError, match=r"Output should have 2 items of the same length"):
engine.run([0] * 10)
engine = Engine(lambda e, b: ([0, 1, 2], [0, 1, 2, 4]))
m = MyMetric()
m.attach(engine, "m")
with pytest.raises(ValueError, match=r"Output should have 2 items of the same length"):
engine.run([0] * 10)
class DummyMetric4(Metric):
_state_dict_all_req_keys = ("dnumber", "fnumber", "tensor")
def __init__(self, value: int):
        super().__init__()
self.dnumber = value
self.fnumber = float(value + 1)
self.tensor = torch.tensor([value + 2])
def reset(self):
self.dnumber = -1
self.fnumber = -2.0
self.tensor = torch.tensor([-3])
def update(self, output):
pass
def compute(self):
pass
def test_wrong_state_dict():
class WrongMetric(Metric):
_state_dict_all_req_keys = ("object",)
def __init__(self, value):
super().__init__()
self.object = {"a": [value]}
def reset(self):
pass
def update(self, output):
pass
def compute(self):
pass
metric = WrongMetric(2)
with pytest.raises(TypeError, match="Currently, only numeric or tensor-typed attributes of the metric"):
metric.state_dict()
delattr(metric, "object")
with pytest.raises(ValueError, match="Found a value in _state_dict_all_req_keys that is not among"):
metric.state_dict()
def test_state_dict():
metric = DummyMetric4(1)
state = metric.state_dict()
assert state.keys() == {"dnumber", "fnumber", "tensor"}
metric.reset()
metric.load_state_dict(state)
assert metric.dnumber == 1
assert metric.fnumber == 2
assert metric.tensor == torch.tensor([3])
def _test_distrib_state_dict(device):
rank = idist.get_local_rank()
metric = DummyMetric4(rank)
state = metric.state_dict()
assert isinstance(state["dnumber"][rank], int)
assert isinstance(state["fnumber"][rank], float)
metric.reset()
metric.load_state_dict(state)
assert metric.dnumber == rank and isinstance(metric.dnumber, int)
assert metric.fnumber == rank + 1 and isinstance(metric.fnumber, float)
assert metric.tensor == torch.tensor([rank + 2])
|
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import RootMeanSquaredError
def test_zero_sample():
rmse = RootMeanSquaredError()
with pytest.raises(
NotComputableError, match=r"MeanSquaredError must have at least one example before it can be computed"
):
rmse.compute()
@pytest.fixture(params=[0, 1, 2, 3])
def test_data(request):
return [
(torch.empty(10).uniform_(0, 10), torch.empty(10).uniform_(0, 10), 1),
(torch.empty(10, 1).uniform_(-10, 10), torch.empty(10, 1).uniform_(-10, 10), 1),
# updated batches
(torch.empty(50).uniform_(0, 10), torch.empty(50).uniform_(0, 10), 16),
(torch.empty(50, 1).uniform_(-10, 10), torch.empty(50, 1).uniform_(-10, 10), 16),
][request.param]
@pytest.mark.parametrize("n_times", range(3))
def test_compute(n_times, test_data):
rmse = RootMeanSquaredError()
y_pred, y, batch_size = test_data
rmse.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
rmse.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
rmse.update((y_pred, y))
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
np_res = np.sqrt(np.power((np_y - np_y_pred), 2.0).sum() / np_y.shape[0])
res = rmse.compute()
assert isinstance(res, float)
assert pytest.approx(res) == np_res
def _test_distrib_integration(device, tol=1e-6):
from ignite.engine import Engine
rank = idist.get_rank()
def _test(metric_device):
n_iters = 2
batch_size = 3
torch.manual_seed(12 + rank)
y_true = torch.arange(0, n_iters * batch_size, dtype=torch.float).to(device)
y_preds = (rank + 1) * torch.ones(n_iters * batch_size, dtype=torch.float).to(device)
def update(engine, i):
return y_preds[i * batch_size : (i + 1) * batch_size], y_true[i * batch_size : (i + 1) * batch_size]
engine = Engine(update)
m = RootMeanSquaredError(device=metric_device)
m.attach(engine, "rmse")
data = list(range(n_iters))
engine.run(data=data, max_epochs=1)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "rmse" in engine.state.metrics
res = engine.state.metrics["rmse"]
true_res = np.sqrt(np.mean(np.square((y_true - y_preds).cpu().numpy())))
assert pytest.approx(res, rel=tol) == true_res
_test("cpu")
if device.type != "xla":
_test(idist.device())
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_integration(device, tol=1e-4)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_integration(device, tol=1e-4)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import dill
from ignite.metrics import Metric
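# Minimal stateful metric: it simply sums everything passed to update(). It is used below to
# verify that dill serialization round-trips preserve both a fresh and a partially updated state.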
class Accumulation(Metric):
def __init__(self):
self.value = 0
super(Accumulation, self).__init__()
def reset(self):
self.value = 0
def compute(self):
return self.value
def update(self, output):
self.value += output
def test_metric():
def _test(m, values, e):
for v in values:
m.update(v)
assert m.compute() == e
metric = Accumulation()
m1 = dill.loads(dill.dumps(metric))
values = list(range(10))
expected = sum(values)
_test(m1, values, expected)
metric.update(5)
m2 = dill.loads(dill.dumps(metric))
_test(m2, values, expected + 5)
|
import json
import os
import pytest
import torch
import ignite.distributed as idist
from ignite.engine import Engine
from ignite.metrics.classification_report import ClassificationReport
def _test_integration_multiclass(device, output_dict):
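# Attaches ClassificationReport to an Engine and compares the result (parsed from JSON when
# output_dict=False) against sklearn's classification_report on the all-gathered predictions.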
rank = idist.get_rank()
def _test(metric_device, n_classes, labels=None):
classification_report = ClassificationReport(device=metric_device, output_dict=output_dict, labels=labels)
n_iters = 80
batch_size = 16
y_true = torch.randint(0, n_classes, size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(n_iters * batch_size, n_classes).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size, :],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
classification_report.attach(engine, "cr")
data = list(range(n_iters))
engine.run(data=data)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "cr" in engine.state.metrics
res = engine.state.metrics["cr"]
res2 = classification_report.compute()
assert res == res2
assert isinstance(res, dict if output_dict else str)
if not output_dict:
res = json.loads(res)
from sklearn.metrics import classification_report as sklearn_classification_report
sklearn_result = sklearn_classification_report(
y_true.cpu().numpy(), torch.argmax(y_preds, dim=1).cpu().numpy(), output_dict=True, zero_division=1
)
for i in range(n_classes):
label_i = labels[i] if labels else str(i)
assert sklearn_result[str(i)]["precision"] == pytest.approx(res[label_i]["precision"])
assert sklearn_result[str(i)]["f1-score"] == pytest.approx(res[label_i]["f1-score"])
assert sklearn_result[str(i)]["recall"] == pytest.approx(res[label_i]["recall"])
assert sklearn_result["macro avg"]["precision"] == pytest.approx(res["macro avg"]["precision"])
assert sklearn_result["macro avg"]["recall"] == pytest.approx(res["macro avg"]["recall"])
assert sklearn_result["macro avg"]["f1-score"] == pytest.approx(res["macro avg"]["f1-score"])
for i in range(5):
torch.manual_seed(12 + rank + i)
# check multiple random inputs as random exact occurrences are rare
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
_test(metric_device, 2, ["label0", "label1"])
_test(metric_device, 2)
_test(metric_device, 3, ["label0", "label1", "label2"])
_test(metric_device, 3)
_test(metric_device, 4, ["label0", "label1", "label2", "label3"])
_test(metric_device, 4)
def _test_integration_multilabel(device, output_dict):
rank = idist.get_rank()
def _test(metric_device, n_epochs, labels=None):
classification_report = ClassificationReport(device=metric_device, output_dict=output_dict, is_multilabel=True, labels=labels)
n_iters = 10
batch_size = 16
n_classes = 7
y_true = torch.randint(0, 2, size=(n_iters * batch_size, n_classes, 6, 8)).to(device)
y_preds = torch.randint(0, 2, size=(n_iters * batch_size, n_classes, 6, 8)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size, ...],
y_true[i * batch_size : (i + 1) * batch_size, ...],
)
engine = Engine(update)
classification_report.attach(engine, "cr")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "cr" in engine.state.metrics
res = engine.state.metrics["cr"]
res2 = classification_report.compute()
assert res == res2
assert isinstance(res, dict if output_dict else str)
if not output_dict:
res = json.loads(res)
np_y_preds = to_numpy_multilabel(y_preds)
np_y_true = to_numpy_multilabel(y_true)
from sklearn.metrics import classification_report as sklearn_classification_report
sklearn_result = sklearn_classification_report(np_y_true, np_y_preds, output_dict=True, zero_division=1)
for i in range(n_classes):
torch.manual_seed(12 + rank + i)
label_i = labels[i] if labels else str(i)
assert sklearn_result[str(i)]["precision"] == pytest.approx(res[label_i]["precision"])
assert sklearn_result[str(i)]["f1-score"] == pytest.approx(res[label_i]["f1-score"])
assert sklearn_result[str(i)]["recall"] == pytest.approx(res[label_i]["recall"])
assert sklearn_result["macro avg"]["precision"] == pytest.approx(res["macro avg"]["precision"])
assert sklearn_result["macro avg"]["recall"] == pytest.approx(res["macro avg"]["recall"])
assert sklearn_result["macro avg"]["f1-score"] == pytest.approx(res["macro avg"]["f1-score"])
for _ in range(3):
# check multiple random inputs as random exact occurrences are rare
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
_test(metric_device, 1)
_test(metric_device, 2)
_test(metric_device, 1, ["0", "1", "2", "3", "4", "5", "6"])
_test(metric_device, 2, ["0", "1", "2", "3", "4", "5", "6"])
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_integration_multiclass(device, True)
_test_integration_multiclass(device, False)
_test_integration_multilabel(device, True)
_test_integration_multilabel(device, False)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(local_rank, distributed_context_single_node_gloo):
device = idist.device()
_test_integration_multiclass(device, True)
_test_integration_multiclass(device, False)
_test_integration_multilabel(device, True)
_test_integration_multilabel(device, False)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_integration_multiclass, (device, True), np=nproc, do_init=True)
gloo_hvd_executor(_test_integration_multiclass, (device, False), np=nproc, do_init=True)
gloo_hvd_executor(_test_integration_multilabel, (device, True), np=nproc, do_init=True)
gloo_hvd_executor(_test_integration_multilabel, (device, False), np=nproc, do_init=True)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_integration_multiclass(device, True)
_test_integration_multiclass(device, False)
_test_integration_multilabel(device, True)
_test_integration_multilabel(device, False)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
def to_numpy_multilabel(y):
# reshapes input array to (N x ..., C)
y = y.transpose(1, 0).cpu().numpy()
num_classes = y.shape[0]
y = y.reshape((num_classes, -1)).transpose(1, 0)
return y
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_integration_multiclass(device, True)
_test_integration_multiclass(device, False)
_test_integration_multilabel(device, True)
_test_integration_multilabel(device, False)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_integration_multiclass(device, True)
_test_integration_multiclass(device, False)
_test_integration_multilabel(device, True)
_test_integration_multilabel(device, False)
|
import os
import numpy as np
import pytest
import torch
from sklearn.metrics import accuracy_score, confusion_matrix, precision_score, recall_score
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import ConfusionMatrix, IoU, JaccardIndex, mIoU
from ignite.metrics.confusion_matrix import cmAccuracy, cmPrecision, cmRecall, DiceCoefficient
torch.manual_seed(12)
def test_no_update():
cm = ConfusionMatrix(10)
with pytest.raises(NotComputableError, match=r"Confusion matrix must have at least one example before it "):
cm.compute()
def test_num_classes_wrong_input():
with pytest.raises(ValueError, match="Argument num_classes needs to be > 1"):
ConfusionMatrix(num_classes=1)
def test_multiclass_wrong_inputs():
cm = ConfusionMatrix(10)
with pytest.raises(
ValueError, match=r"y_pred must have shape \(batch_size, num_classes " r"\(currently set to 10\), ...\)"
):
cm.update((torch.rand(10), torch.randint(0, 2, size=(10,)).long()))
with pytest.raises(ValueError, match=r"y_pred does not have correct number of classes:"):
cm.update((torch.rand(10, 5, 4), torch.randint(0, 2, size=(10,)).long()))
with pytest.raises(
ValueError,
match=r"y_pred must have shape \(batch_size, num_classes "
r"\(currently set to 10\), ...\) "
r"and y must have ",
):
cm.update((torch.rand(4, 10, 12, 12), torch.randint(0, 10, size=(10,)).long()))
with pytest.raises(ValueError, match=r"y and y_pred must have compatible shapes."):
cm.update((torch.rand(4, 10, 12, 14), torch.randint(0, 10, size=(4, 5, 6)).long()))
with pytest.raises(ValueError, match=r"Argument average can None or one of"):
ConfusionMatrix(num_classes=10, average="abc")
with pytest.raises(ValueError, match=r"Argument average should be one of 'samples', 'recall', 'precision'"):
ConfusionMatrix.normalize(None, None)
@pytest.fixture(params=list(range(10)))
def test_data(request):
return [
# Multiclass input data of shape (N, )
(torch.rand(10, 4), torch.randint(0, 4, size=(10,)).long(), 4, 1),
(torch.rand(4, 10), torch.randint(0, 10, size=(4,)).long(), 10, 1),
(torch.rand(4, 2), torch.randint(0, 2, size=(4,)).long(), 2, 1),
(torch.rand(100, 5), torch.randint(0, 5, size=(100,)).long(), 5, 16),
# Multiclass input data of shape (N, L)
(torch.rand(10, 4, 5), torch.randint(0, 4, size=(10, 5)).long(), 4, 1),
(torch.rand(4, 10, 5), torch.randint(0, 10, size=(4, 5)).long(), 10, 1),
(torch.rand(100, 9, 7), torch.randint(0, 9, size=(100, 7)).long(), 9, 16),
# Multiclass input data of shape (N, H, W, ...)
(torch.rand(4, 5, 12, 10), torch.randint(0, 5, size=(4, 12, 10)).long(), 5, 1),
(torch.rand(4, 5, 10, 12, 8), torch.randint(0, 5, size=(4, 10, 12, 8)).long(), 5, 1),
(torch.rand(100, 3, 8, 8), torch.randint(0, 3, size=(100, 8, 8)).long(), 3, 16),
][request.param]
@pytest.mark.parametrize("n_times", range(5))
def test_multiclass_input(n_times, test_data):
y_pred, y, num_classes, batch_size = test_data
cm = ConfusionMatrix(num_classes=num_classes)
cm.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
cm.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
cm.update((y_pred, y))
np_y_pred = y_pred.numpy().argmax(axis=1).ravel()
np_y = y.numpy().ravel()
assert np.all(confusion_matrix(np_y, np_y_pred, labels=list(range(num_classes))) == cm.compute().numpy())
def test_ignored_out_of_num_classes_indices():
num_classes = 21
cm = ConfusionMatrix(num_classes=num_classes)
y_pred = torch.rand(4, num_classes, 12, 10)
y = torch.randint(0, 255, size=(4, 12, 10)).long()
cm.update((y_pred, y))
np_y_pred = y_pred.numpy().argmax(axis=1).ravel()
np_y = y.numpy().ravel()
assert np.all(confusion_matrix(np_y, np_y_pred, labels=list(range(num_classes))) == cm.compute().numpy())
def get_y_true_y_pred():
# Generate a 30x30 image with 3 classes: 0 (background), 1 and 2
y_true = np.zeros((30, 30), dtype=np.int32)
y_true[1:11, 1:11] = 1
y_true[15:25, 15:25] = 2
y_pred = np.zeros((30, 30), dtype=np.int32)
y_pred[5:15, 1:11] = 1
y_pred[20:30, 20:30] = 2
return y_true, y_pred
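# Builds synthetic logits from hard predictions: every entry is -10 except a large value (720)
# at the predicted class, so argmax over the class dimension recovers y_pred exactly.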
def compute_th_y_true_y_logits(y_true, y_pred):
# Create torch.tensor from numpy
th_y_true = torch.from_numpy(y_true).unsqueeze(0)
# Create logits torch.tensor:
num_classes = max(np.max(y_true), np.max(y_pred)) + 1
y_probas = np.ones((num_classes,) + y_true.shape) * -10
for i in range(num_classes):
y_probas[i, (y_pred == i)] = 720
th_y_logits = torch.from_numpy(y_probas).unsqueeze(0)
return th_y_true, th_y_logits
def test_multiclass_images():
num_classes = 3
cm = ConfusionMatrix(num_classes=num_classes)
y_true, y_pred = get_y_true_y_pred()
# Compute confusion matrix with sklearn
true_res = confusion_matrix(y_true.reshape(-1), y_pred.reshape(-1))
th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = cm.compute().numpy()
assert np.all(true_res == res)
# Another test on batch of 2 images
num_classes = 3
cm = ConfusionMatrix(num_classes=num_classes)
# Create a batch of two images:
th_y_true1 = torch.from_numpy(y_true).reshape(1, 30, 30)
th_y_true2 = torch.from_numpy(y_true.transpose()).reshape(1, 30, 30)
th_y_true = torch.cat([th_y_true1, th_y_true2], dim=0)
# Create a batch of 2 logits tensors
y_probas = np.ones((3, 30, 30)) * -10
y_probas[0, (y_pred == 0)] = 720
y_probas[1, (y_pred == 1)] = 720
y_probas[2, (y_pred == 2)] = 768
th_y_logits1 = torch.from_numpy(y_probas).reshape(1, 3, 30, 30)
y_probas = np.ones((3, 30, 30)) * -10
y_probas[0, (y_pred.transpose() == 0)] = 720
y_probas[1, (y_pred.transpose() == 2)] = 720
y_probas[2, (y_pred.transpose() == 1)] = 768
th_y_logits2 = torch.from_numpy(y_probas).reshape(1, 3, 30, 30)
th_y_logits = torch.cat([th_y_logits1, th_y_logits2], dim=0)
# Update metric & compute
output = (th_y_logits, th_y_true)
cm.update(output)
res = cm.compute().numpy()
# Compute confusion matrix with sklearn
true_res = confusion_matrix(th_y_true.numpy().reshape(-1), np.argmax(th_y_logits.numpy(), axis=1).reshape(-1))
assert np.all(true_res == res)
def test_iou_wrong_input():
with pytest.raises(TypeError, match="Argument cm should be instance of ConfusionMatrix"):
IoU(None)
cm = ConfusionMatrix(num_classes=10)
with pytest.raises(ValueError, match=r"ignore_index should be integer and in the range of \[0, 10\), but given -1"):
IoU(cm, ignore_index=-1)
with pytest.raises(ValueError, match=r"ignore_index should be integer and in the range of \[0, 10\), but given a"):
IoU(cm, ignore_index="a")
with pytest.raises(ValueError, match=r"ignore_index should be integer and in the range of \[0, 10\), but given 10"):
IoU(cm, ignore_index=10)
with pytest.raises(ValueError, match=r"ignore_index should be integer and in the range of \[0, 10\), but given 11"):
IoU(cm, ignore_index=11)
@pytest.mark.parametrize("average", [None, "samples"])
def test_iou(average):
y_true, y_pred = get_y_true_y_pred()
th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)
true_res = [0, 0, 0]
for index in range(3):
bin_y_true = y_true == index
bin_y_pred = y_pred == index
intersection = bin_y_true & bin_y_pred
union = bin_y_true | bin_y_pred
true_res[index] = intersection.sum() / union.sum()
cm = ConfusionMatrix(num_classes=3, average=average)
iou_metric = IoU(cm)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = iou_metric.compute().numpy()
assert np.all(res == true_res)
for ignore_index in range(3):
cm = ConfusionMatrix(num_classes=3)
iou_metric = IoU(cm, ignore_index=ignore_index)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = iou_metric.compute().numpy()
true_res_ = true_res[:ignore_index] + true_res[ignore_index + 1 :]
assert np.all(res == true_res_), f"{ignore_index}: {res} vs {true_res_}"
with pytest.raises(ValueError, match=r"ConfusionMatrix should have average attribute either"):
cm = ConfusionMatrix(num_classes=3, average="precision")
IoU(cm)
def test_miou():
y_true, y_pred = get_y_true_y_pred()
th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)
true_res = [0, 0, 0]
for index in range(3):
bin_y_true = y_true == index
bin_y_pred = y_pred == index
intersection = bin_y_true & bin_y_pred
union = bin_y_true | bin_y_pred
true_res[index] = intersection.sum() / union.sum()
true_res_ = np.mean(true_res)
cm = ConfusionMatrix(num_classes=3)
iou_metric = mIoU(cm)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = iou_metric.compute().numpy()
assert res == true_res_
for ignore_index in range(3):
cm = ConfusionMatrix(num_classes=3)
iou_metric = mIoU(cm, ignore_index=ignore_index)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = iou_metric.compute().numpy()
true_res_ = np.mean(true_res[:ignore_index] + true_res[ignore_index + 1 :])
assert res == true_res_, f"{ignore_index}: {res} vs {true_res_}"
def test_cm_accuracy():
y_true, y_pred = get_y_true_y_pred()
th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)
true_acc = accuracy_score(y_true.reshape(-1), y_pred.reshape(-1))
cm = ConfusionMatrix(num_classes=3)
acc_metric = cmAccuracy(cm)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = acc_metric.compute().numpy()
assert pytest.approx(res) == true_acc
def test_cm_precision():
y_true, y_pred = np.random.randint(0, 10, size=(1000,)), np.random.randint(0, 10, size=(1000,))
th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)
true_pr = precision_score(y_true.reshape(-1), y_pred.reshape(-1), average="macro")
cm = ConfusionMatrix(num_classes=10)
pr_metric = cmPrecision(cm, average=True)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = pr_metric.compute().numpy()
assert pytest.approx(res) == true_pr
true_pr = precision_score(y_true.reshape(-1), y_pred.reshape(-1), average=None)
cm = ConfusionMatrix(num_classes=10)
pr_metric = cmPrecision(cm, average=False)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = pr_metric.compute().numpy()
assert np.all(res == true_pr)
def test_cm_recall():
y_true, y_pred = np.random.randint(0, 10, size=(1000,)), np.random.randint(0, 10, size=(1000,))
th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)
true_re = recall_score(y_true.reshape(-1), y_pred.reshape(-1), average="macro")
cm = ConfusionMatrix(num_classes=10)
re_metric = cmRecall(cm, average=True)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = re_metric.compute().numpy()
assert pytest.approx(res) == true_re
true_re = recall_score(y_true.reshape(-1), y_pred.reshape(-1), average=None)
cm = ConfusionMatrix(num_classes=10)
re_metric = cmRecall(cm, average=False)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = re_metric.compute().numpy()
assert np.all(res == true_re)
def test_cm_with_average():
num_classes = 5
y_pred = torch.rand(40, num_classes)
y = torch.randint(0, num_classes, size=(40,)).long()
np_y_pred = y_pred.numpy().argmax(axis=1).ravel()
np_y = y.numpy().ravel()
cm = ConfusionMatrix(num_classes=num_classes, average="samples")
cm.update((y_pred, y))
true_res = confusion_matrix(np_y, np_y_pred, labels=list(range(num_classes))) * 1.0 / len(np_y)
res = cm.compute().numpy()
np.testing.assert_almost_equal(true_res, res)
cm = ConfusionMatrix(num_classes=num_classes, average="recall")
cm.update((y_pred, y))
true_re = recall_score(np_y, np_y_pred, average=None, labels=list(range(num_classes)))
res = cm.compute().numpy().diagonal()
np.testing.assert_almost_equal(true_re, res)
res = cm.compute().numpy()
true_res = confusion_matrix(np_y, np_y_pred, normalize="true")
np.testing.assert_almost_equal(true_res, res)
cm = ConfusionMatrix(num_classes=num_classes, average="precision")
cm.update((y_pred, y))
true_pr = precision_score(np_y, np_y_pred, average=None, labels=list(range(num_classes)))
res = cm.compute().numpy().diagonal()
np.testing.assert_almost_equal(true_pr, res)
res = cm.compute().numpy()
true_res = confusion_matrix(np_y, np_y_pred, normalize="pred")
np.testing.assert_almost_equal(true_res, res)
def test_dice_coefficient_wrong_input():
with pytest.raises(TypeError, match="Argument cm should be instance of ConfusionMatrix"):
DiceCoefficient(None)
cm = ConfusionMatrix(num_classes=10)
with pytest.raises(ValueError, match=r"ignore_index should be integer and in the range of \[0, 10\), but given -1"):
DiceCoefficient(cm, ignore_index=-1)
with pytest.raises(ValueError, match=r"ignore_index should be integer and in the range of \[0, 10\), but given a"):
DiceCoefficient(cm, ignore_index="a")
with pytest.raises(ValueError, match=r"ignore_index should be integer and in the range of \[0, 10\), but given 10"):
DiceCoefficient(cm, ignore_index=10)
with pytest.raises(ValueError, match=r"ignore_index should be integer and in the range of \[0, 10\), but given 11"):
DiceCoefficient(cm, ignore_index=11)
def test_dice_coefficient():
y_true, y_pred = get_y_true_y_pred()
th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)
true_res = [0, 0, 0]
for index in range(3):
bin_y_true = y_true == index
bin_y_pred = y_pred == index
# dice coefficient: 2*intersection(x, y) / (|x| + |y|)
# union(x, y) = |x| + |y| - intersection(x, y)
intersection = bin_y_true & bin_y_pred
union = bin_y_true | bin_y_pred
true_res[index] = 2.0 * intersection.sum() / (union.sum() + intersection.sum())
cm = ConfusionMatrix(num_classes=3)
dice_metric = DiceCoefficient(cm)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = dice_metric.compute().numpy()
np.testing.assert_allclose(res, true_res)
for ignore_index in range(3):
cm = ConfusionMatrix(num_classes=3)
dice_metric = DiceCoefficient(cm, ignore_index=ignore_index)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = dice_metric.compute().numpy()
true_res_ = true_res[:ignore_index] + true_res[ignore_index + 1 :]
assert np.all(res == true_res_), f"{ignore_index}: {res} vs {true_res_}"
def _test_distrib_multiclass_images(device):
def _test(metric_device):
num_classes = 3
cm = ConfusionMatrix(num_classes=num_classes, device=metric_device)
y_true, y_pred = get_y_true_y_pred()
# Compute confusion matrix with sklearn
true_res = confusion_matrix(y_true.reshape(-1), y_pred.reshape(-1))
th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)
th_y_true = th_y_true.to(device)
th_y_logits = th_y_logits.to(device)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = cm.compute().cpu().numpy() / idist.get_world_size()
assert np.all(true_res == res)
# Another test on batch of 2 images
num_classes = 3
cm = ConfusionMatrix(num_classes=num_classes, device=metric_device)
# Create a batch of two images:
th_y_true1 = torch.from_numpy(y_true).reshape(1, 30, 30)
th_y_true2 = torch.from_numpy(y_true.transpose()).reshape(1, 30, 30)
th_y_true = torch.cat([th_y_true1, th_y_true2], dim=0)
th_y_true = th_y_true.to(device)
# Create a batch of 2 logits tensors
y_probas = np.ones((3, 30, 30)) * -10
y_probas[0, (y_pred == 0)] = 720
y_probas[1, (y_pred == 1)] = 720
y_probas[2, (y_pred == 2)] = 768
th_y_logits1 = torch.from_numpy(y_probas).reshape(1, 3, 30, 30)
y_probas = np.ones((3, 30, 30)) * -10
y_probas[0, (y_pred.transpose() == 0)] = 720
y_probas[1, (y_pred.transpose() == 2)] = 720
y_probas[2, (y_pred.transpose() == 1)] = 768
th_y_logits2 = torch.from_numpy(y_probas).reshape(1, 3, 30, 30)
th_y_logits = torch.cat([th_y_logits1, th_y_logits2], dim=0)
# check update if input is on another device
th_y_logits = th_y_logits.to(device)
# Update metric & compute
output = (th_y_logits, th_y_true)
cm.update(output)
res = cm.compute().cpu().numpy()
# Compute confusion matrix with sklearn
th_y_true = idist.all_gather(th_y_true)
th_y_logits = idist.all_gather(th_y_logits)
np_y_true = th_y_true.cpu().numpy().reshape(-1)
np_y_pred = np.argmax(th_y_logits.cpu().numpy(), axis=1).reshape(-1)
true_res = confusion_matrix(np_y_true, np_y_pred)
assert np.all(true_res == res)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_accumulator_device(device):
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
cm = ConfusionMatrix(num_classes=3, device=metric_device)
assert cm._device == metric_device
assert (
cm.confusion_matrix.device == metric_device
), f"{type(cm.confusion_matrix.device)}:{cm._num_correct.device} vs {type(metric_device)}:{metric_device}"
y_true, y_pred = get_y_true_y_pred()
th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)
cm.update((th_y_logits, th_y_true))
assert (
cm.confusion_matrix.device == metric_device
), f"{type(cm.confusion_matrix.device)}:{cm._num_correct.device} vs {type(metric_device)}:{metric_device}"
@pytest.mark.parametrize("average", [None, "samples"])
def test_jaccard_index(average):
y_true, y_pred = get_y_true_y_pred()
th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)
true_res = [0, 0, 0]
for index in range(3):
bin_y_true = y_true == index
bin_y_pred = y_pred == index
intersection = bin_y_true & bin_y_pred
union = bin_y_true | bin_y_pred
true_res[index] = intersection.sum() / union.sum()
cm = ConfusionMatrix(num_classes=3, average=average)
jaccard_index = JaccardIndex(cm)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = jaccard_index.compute().numpy()
assert np.all(res == true_res)
for ignore_index in range(3):
cm = ConfusionMatrix(num_classes=3)
jaccard_index_metric = JaccardIndex(cm, ignore_index=ignore_index)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = jaccard_index_metric.compute().numpy()
true_res_ = true_res[:ignore_index] + true_res[ignore_index + 1 :]
assert np.all(res == true_res_), f"{ignore_index}: {res} vs {true_res_}"
with pytest.raises(ValueError, match=r"ConfusionMatrix should have average attribute either"):
cm = ConfusionMatrix(num_classes=3, average="precision")
JaccardIndex(cm)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_multiclass_images(device)
_test_distrib_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_multiclass_images(device)
_test_distrib_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_multiclass_images, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_accumulator_device, (device,), np=nproc, do_init=True)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_multiclass_images(device)
_test_distrib_accumulator_device(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_multiclass_images(device)
_test_distrib_accumulator_device(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_multiclass_images(device)
_test_distrib_accumulator_device(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_multiclass_images(device)
_test_distrib_accumulator_device(device)
|
import warnings
import pytest
import torch
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.metrics import precision_score
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import Precision
torch.manual_seed(12)
def test_no_update():
precision = Precision()
assert precision._updated is False
with pytest.raises(NotComputableError, match=r"Precision must have at least one example before it can be computed"):
precision.compute()
assert precision._updated is False
def test_average_parameter():
with pytest.raises(ValueError, match="Argument average should be None or a boolean or one of values"):
Precision(average=1)
pr = Precision(average="samples")
with pytest.raises(
ValueError, match=r"Argument average='samples' is incompatible with binary and multiclass input data."
):
pr.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10,)).long()))
assert pr._updated is False
pr = Precision(average="samples")
with pytest.raises(
ValueError, match=r"Argument average='samples' is incompatible with binary and multiclass input data."
):
pr.update((torch.rand(10, 3), torch.randint(0, 3, size=(10,)).long()))
assert pr._updated is False
pr = Precision(average=True)
assert pr._average == "macro"
def test_binary_wrong_inputs():
pr = Precision()
assert pr._updated is False
with pytest.raises(ValueError, match=r"For binary cases, y must be comprised of 0's and 1's"):
# y contains values other than 0 and 1
pr.update((torch.randint(0, 2, size=(10,)).long(), torch.arange(0, 10).long()))
assert pr._updated is False
with pytest.raises(ValueError, match=r"For binary cases, y_pred must be comprised of 0's and 1's"):
# y_pred values are not thresholded to 0, 1 values
pr.update((torch.rand(10), torch.randint(0, 2, size=(10,)).long()))
assert pr._updated is False
with pytest.raises(ValueError, match=r"y must have shape of"):
# incompatible shapes
pr.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10, 5)).long()))
assert pr._updated is False
with pytest.raises(ValueError, match=r"y must have shape of"):
# incompatible shapes
pr.update((torch.randint(0, 2, size=(10, 5, 6)).long(), torch.randint(0, 2, size=(10,)).long()))
assert pr._updated is False
with pytest.raises(ValueError, match=r"y must have shape of"):
# incompatible shapes
pr.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10, 5, 6)).long()))
assert pr._updated is False
with pytest.warns(
RuntimeWarning,
match="`y` and `y_pred` should be of dtype long when entry type is binary and average!=False",
):
pr = Precision(average=None)
pr.update((torch.randint(0, 2, size=(10,)).float(), torch.randint(0, 2, size=(10,))))
with pytest.warns(
RuntimeWarning,
match="`y` and `y_pred` should be of dtype long when entry type is binary and average!=False",
):
pr = Precision(average=None)
pr.update((torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10,)).float()))
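# Maps ignite's `average` argument to the closest scikit-learn equivalent: string values and None
# pass through, False maps to "binary" for binary data (per-class, i.e. None, otherwise) and
# True maps to "macro".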
def ignite_average_to_scikit_average(average, data_type: str):
if average in [None, "micro", "samples", "weighted", "macro"]:
return average
if average is False:
if data_type == "binary":
return "binary"
else:
return None
elif average is True:
return "macro"
else:
raise ValueError(f"Wrong average parameter `{average}`")
@pytest.mark.parametrize("average", [None, False, "macro", "micro", "weighted"])
def test_binary_input(average):
pr = Precision(average=average)
assert pr._updated is False
def _test(y_pred, y, batch_size):
pr.reset()
assert pr._updated is False
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
pr.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
pr.update((y_pred, y))
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
assert pr._type == "binary"
assert pr._updated is True
assert isinstance(pr.compute(), torch.Tensor if not average else float)
pr_compute = pr.compute().numpy() if not average else pr.compute()
sk_average_parameter = ignite_average_to_scikit_average(average, "binary")
assert precision_score(
np_y, np_y_pred, average=sk_average_parameter, labels=[0, 1], zero_division=0
) == pytest.approx(pr_compute)
def get_test_cases():
test_cases = [
# Binary accuracy on input of shape (N, 1) or (N, )
(torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10,)), 1),
(torch.randint(0, 2, size=(10, 1)), torch.randint(0, 2, size=(10, 1)), 1),
# updated batches
(torch.randint(0, 2, size=(50,)), torch.randint(0, 2, size=(50,)), 16),
(torch.randint(0, 2, size=(50, 1)), torch.randint(0, 2, size=(50, 1)), 16),
# Binary accuracy on input of shape (N, L)
(torch.randint(0, 2, size=(10, 5)), torch.randint(0, 2, size=(10, 5)), 1),
(torch.randint(0, 2, size=(10, 1, 5)), torch.randint(0, 2, size=(10, 1, 5)), 1),
# updated batches
(torch.randint(0, 2, size=(50, 5)), torch.randint(0, 2, size=(50, 5)), 16),
(torch.randint(0, 2, size=(50, 1, 5)), torch.randint(0, 2, size=(50, 1, 5)), 16),
# Binary accuracy on input of shape (N, H, W)
(torch.randint(0, 2, size=(10, 12, 10)), torch.randint(0, 2, size=(10, 12, 10)), 1),
(torch.randint(0, 2, size=(10, 1, 12, 10)), torch.randint(0, 2, size=(10, 1, 12, 10)), 1),
# updated batches
(torch.randint(0, 2, size=(50, 12, 10)), torch.randint(0, 2, size=(50, 12, 10)), 16),
(torch.randint(0, 2, size=(50, 1, 12, 10)), torch.randint(0, 2, size=(50, 1, 12, 10)), 16),
# Corner case with all zeros predictions
(torch.zeros(size=(10,), dtype=torch.long), torch.randint(0, 2, size=(10,)), 1),
(torch.zeros(size=(10, 1), dtype=torch.long), torch.randint(0, 2, size=(10, 1)), 1),
]
return test_cases
for _ in range(5):
# check multiple random inputs as random exact occurrences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def test_multiclass_wrong_inputs():
pr = Precision()
assert pr._updated is False
with pytest.raises(ValueError):
# incompatible shapes
pr.update((torch.rand(10, 5, 4), torch.randint(0, 2, size=(10,)).long()))
assert pr._updated is False
with pytest.raises(ValueError):
# incompatible shapes
pr.update((torch.rand(10, 5, 6), torch.randint(0, 5, size=(10, 5)).long()))
assert pr._updated is False
with pytest.raises(ValueError):
# incompatible shapes
pr.update((torch.rand(10), torch.randint(0, 5, size=(10, 5, 6)).long()))
assert pr._updated is False
pr = Precision(average=True)
assert pr._updated is False
with pytest.raises(ValueError):
# incompatible shapes between two updates
pr.update((torch.rand(10, 5), torch.randint(0, 5, size=(10,)).long()))
pr.update((torch.rand(10, 6), torch.randint(0, 5, size=(10,)).long()))
assert pr._updated is True
with pytest.raises(ValueError):
# incompatible shapes between two updates
pr.update((torch.rand(10, 5, 12, 14), torch.randint(0, 5, size=(10, 12, 14)).long()))
pr.update((torch.rand(10, 6, 12, 14), torch.randint(0, 5, size=(10, 12, 14)).long()))
assert pr._updated is True
pr = Precision(average=False)
assert pr._updated is False
with pytest.raises(ValueError):
# incompatible shapes between two updates
pr.update((torch.rand(10, 5), torch.randint(0, 5, size=(10,)).long()))
pr.update((torch.rand(10, 6), torch.randint(0, 5, size=(10,)).long()))
assert pr._updated is True
with pytest.raises(ValueError):
# incompatible shapes between two updates
pr.update((torch.rand(10, 5, 12, 14), torch.randint(0, 5, size=(10, 12, 14)).long()))
pr.update((torch.rand(10, 6, 12, 14), torch.randint(0, 5, size=(10, 12, 14)).long()))
assert pr._updated is True
with pytest.warns(
RuntimeWarning,
match="`y` should be of dtype long when entry type is multiclass",
):
pr = Precision()
pr.update((torch.rand(10, 5), torch.randint(0, 5, size=(10,)).float()))
@pytest.mark.parametrize("average", [None, False, "macro", "micro", "weighted"])
def test_multiclass_input(average):
pr = Precision(average=average)
assert pr._updated is False
def _test(y_pred, y, batch_size):
pr.reset()
assert pr._updated is False
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
pr.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
pr.update((y_pred, y))
num_classes = y_pred.shape[1]
np_y_pred = y_pred.argmax(dim=1).numpy().ravel()
np_y = y.numpy().ravel()
assert pr._type == "multiclass"
assert pr._updated is True
assert isinstance(pr.compute(), torch.Tensor if not average else float)
pr_compute = pr.compute().numpy() if not average else pr.compute()
sk_average_parameter = ignite_average_to_scikit_average(average, "multiclass")
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UndefinedMetricWarning)
sk_compute = precision_score(np_y, np_y_pred, labels=range(0, num_classes), average=sk_average_parameter)
assert sk_compute == pytest.approx(pr_compute)
def get_test_cases():
test_cases = [
# Multiclass input data of shape (N, ) and (N, C)
(torch.rand(10, 6), torch.randint(0, 6, size=(10,)), 1),
(torch.rand(10, 4), torch.randint(0, 4, size=(10,)), 1),
# updated batches
(torch.rand(50, 6), torch.randint(0, 6, size=(50,)), 16),
(torch.rand(50, 4), torch.randint(0, 4, size=(50,)), 16),
# Multiclass input data of shape (N, L) and (N, C, L)
(torch.rand(10, 5, 8), torch.randint(0, 5, size=(10, 8)), 1),
(torch.rand(10, 8, 12), torch.randint(0, 8, size=(10, 12)), 1),
# updated batches
(torch.rand(50, 5, 8), torch.randint(0, 5, size=(50, 8)), 16),
(torch.rand(50, 8, 12), torch.randint(0, 8, size=(50, 12)), 16),
# Multiclass input data of shape (N, H, W, ...) and (N, C, H, W, ...)
(torch.rand(10, 5, 18, 16), torch.randint(0, 5, size=(10, 18, 16)), 1),
(torch.rand(10, 7, 20, 12), torch.randint(0, 7, size=(10, 20, 12)), 1),
# updated batches
(torch.rand(50, 5, 18, 16), torch.randint(0, 5, size=(50, 18, 16)), 16),
(torch.rand(50, 7, 20, 12), torch.randint(0, 7, size=(50, 20, 12)), 16),
# Corner case with all zeros predictions
(torch.zeros(size=(10, 6)), torch.randint(0, 6, size=(10,)), 1),
(torch.zeros(size=(10, 4)), torch.randint(0, 4, size=(10,)), 1),
]
return test_cases
for _ in range(5):
# check multiple random inputs as random exact occurrences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def test_multilabel_wrong_inputs():
pr = Precision(is_multilabel=True)
assert pr._updated is False
with pytest.raises(ValueError):
# incompatible shapes
pr.update((torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10,)).long()))
assert pr._updated is False
with pytest.raises(ValueError):
# incompatible y_pred
pr.update((torch.rand(10, 5), torch.randint(0, 2, size=(10, 5)).long()))
assert pr._updated is False
with pytest.raises(ValueError):
# incompatible y
pr.update((torch.randint(0, 5, size=(10, 5, 6)), torch.rand(10)))
assert pr._updated is False
with pytest.raises(ValueError):
# incompatible shapes between two updates
pr.update((torch.randint(0, 2, size=(20, 5)), torch.randint(0, 2, size=(20, 5)).long()))
pr.update((torch.randint(0, 2, size=(20, 6)), torch.randint(0, 2, size=(20, 6)).long()))
assert pr._updated is True
def to_numpy_multilabel(y):
# reshapes input array to (N x ..., C)
y = y.transpose(1, 0).cpu().numpy()
num_classes = y.shape[0]
y = y.reshape((num_classes, -1)).transpose(1, 0)
return y
@pytest.mark.parametrize("average", [None, False, "macro", "micro", "weighted", "samples"])
def test_multilabel_input(average):
pr = Precision(average=average, is_multilabel=True)
assert pr._updated is False
def _test(y_pred, y, batch_size):
pr.reset()
assert pr._updated is False
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
pr.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
pr.update((y_pred, y))
np_y_pred = to_numpy_multilabel(y_pred)
np_y = to_numpy_multilabel(y)
assert pr._type == "multilabel"
assert pr._updated is True
pr_compute = pr.compute().numpy() if not average else pr.compute()
sk_average_parameter = ignite_average_to_scikit_average(average, "multilabel")
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UndefinedMetricWarning)
assert precision_score(np_y, np_y_pred, average=sk_average_parameter) == pytest.approx(pr_compute)
def get_test_cases():
test_cases = [
# Multilabel input data of shape (N, C)
(torch.randint(0, 2, size=(10, 5)), torch.randint(0, 2, size=(10, 5)), 1),
(torch.randint(0, 2, size=(10, 4)), torch.randint(0, 2, size=(10, 4)), 1),
# updated batches
(torch.randint(0, 2, size=(50, 5)), torch.randint(0, 2, size=(50, 5)), 16),
(torch.randint(0, 2, size=(50, 4)), torch.randint(0, 2, size=(50, 4)), 16),
# Multilabel input data of shape (N, C, L)
(torch.randint(0, 2, size=(10, 5, 10)), torch.randint(0, 2, size=(10, 5, 10)), 1),
(torch.randint(0, 2, size=(10, 4, 10)), torch.randint(0, 2, size=(10, 4, 10)), 1),
# updated batches
(torch.randint(0, 2, size=(50, 5, 10)), torch.randint(0, 2, size=(50, 5, 10)), 16),
(torch.randint(0, 2, size=(50, 4, 10)), torch.randint(0, 2, size=(50, 4, 10)), 16),
# Multilabel input data of shape (N, C, H, W)
(torch.randint(0, 2, size=(10, 5, 18, 16)), torch.randint(0, 2, size=(10, 5, 18, 16)), 1),
(torch.randint(0, 2, size=(10, 4, 20, 23)), torch.randint(0, 2, size=(10, 4, 20, 23)), 1),
# updated batches
(torch.randint(0, 2, size=(50, 5, 18, 16)), torch.randint(0, 2, size=(50, 5, 18, 16)), 16),
(torch.randint(0, 2, size=(50, 4, 20, 23)), torch.randint(0, 2, size=(50, 4, 20, 23)), 16),
# Corner case with all zeros predictions
(torch.zeros(size=(10, 5)), torch.randint(0, 2, size=(10, 5)), 1),
(torch.zeros(size=(10, 4)), torch.randint(0, 2, size=(10, 4)), 1),
]
return test_cases
for _ in range(5):
# check multiple random inputs as random exact occurrences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
@pytest.mark.parametrize("average", [None, False, "macro", "micro", "weighted"])
def test_incorrect_type(average):
# Tests changing of type during training
pr = Precision(average=average)
assert pr._updated is False
y_pred = torch.softmax(torch.rand(4, 4), dim=1)
y = torch.ones(4).long()
pr.update((y_pred, y))
assert pr._updated is True
y_pred = torch.randint(0, 2, size=(4,))
y = torch.ones(4).long()
with pytest.raises(RuntimeError):
pr.update((y_pred, y))
assert pr._updated is True
@pytest.mark.parametrize("average", [None, False, "macro", "micro", "weighted"])
def test_incorrect_y_classes(average):
pr = Precision(average=average)
assert pr._updated is False
y_pred = torch.randint(0, 2, size=(10, 4)).float()
y = torch.randint(4, 5, size=(10,)).long()
with pytest.raises(ValueError):
pr.update((y_pred, y))
assert pr._updated is False
def test_distrib_integration_multiclass(distributed):
from ignite.engine import Engine
rank = idist.get_rank()
torch.manual_seed(12)
def _test(average, n_epochs, metric_device):
n_iters = 60
s = 16
n_classes = 7
offset = n_iters * s
y_true = torch.randint(0, n_classes, size=(offset * idist.get_world_size(),)).to(device)
y_preds = torch.rand(offset * idist.get_world_size(), n_classes).to(device)
def update(engine, i):
return (
y_preds[i * s + rank * offset : (i + 1) * s + rank * offset, :],
y_true[i * s + rank * offset : (i + 1) * s + rank * offset],
)
engine = Engine(update)
pr = Precision(average=average, device=metric_device)
pr.attach(engine, "pr")
assert pr._updated is False
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
assert "pr" in engine.state.metrics
assert pr._updated is True
res = engine.state.metrics["pr"]
if isinstance(res, torch.Tensor):
# Fixes https://github.com/pytorch/ignite/issues/1635#issuecomment-863026919
assert res.device.type == "cpu"
res = res.cpu().numpy()
sk_average_parameter = ignite_average_to_scikit_average(average, "multiclass")
true_res = precision_score(
y_true.cpu().numpy(), torch.argmax(y_preds, dim=1).cpu().numpy(), average=sk_average_parameter
)
assert pytest.approx(res) == true_res
metric_devices = [torch.device("cpu")]
device = idist.device()
if device.type != "xla":
metric_devices.append(idist.device())
for _ in range(2):
for metric_device in metric_devices:
_test(average=False, n_epochs=1, metric_device=metric_device)
_test(average=False, n_epochs=2, metric_device=metric_device)
_test(average="macro", n_epochs=1, metric_device=metric_device)
_test(average="macro", n_epochs=2, metric_device=metric_device)
_test(average="weighted", n_epochs=1, metric_device=metric_device)
_test(average="weighted", n_epochs=2, metric_device=metric_device)
_test(average="micro", n_epochs=1, metric_device=metric_device)
_test(average="micro", n_epochs=2, metric_device=metric_device)
def test_distrib_integration_multilabel(distributed):
from ignite.engine import Engine
rank = idist.get_rank()
torch.manual_seed(12)
def _test(average, n_epochs, metric_device):
n_iters = 60
s = 16
n_classes = 7
offset = n_iters * s
y_true = torch.randint(0, 2, size=(offset * idist.get_world_size(), n_classes, 6, 8)).to(device)
y_preds = torch.randint(0, 2, size=(offset * idist.get_world_size(), n_classes, 6, 8)).to(device)
def update(engine, i):
return (
y_preds[i * s + rank * offset : (i + 1) * s + rank * offset, ...],
y_true[i * s + rank * offset : (i + 1) * s + rank * offset, ...],
)
engine = Engine(update)
pr = Precision(average=average, is_multilabel=True, device=metric_device)
pr.attach(engine, "pr")
assert pr._updated is False
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
assert "pr" in engine.state.metrics
assert pr._updated is True
res = engine.state.metrics["pr"]
res2 = pr.compute()
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
res2 = res2.cpu().numpy()
assert (res == res2).all()
else:
assert res == res2
np_y_preds = to_numpy_multilabel(y_preds)
np_y_true = to_numpy_multilabel(y_true)
assert pr._type == "multilabel"
sk_average_parameter = ignite_average_to_scikit_average(average, "multilabel")
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UndefinedMetricWarning)
assert precision_score(np_y_true, np_y_preds, average=sk_average_parameter) == pytest.approx(res)
metric_devices = ["cpu"]
device = idist.device()
if device.type != "xla":
metric_devices.append(idist.device())
for _ in range(2):
for metric_device in metric_devices:
_test(average=False, n_epochs=1, metric_device=metric_device)
_test(average=False, n_epochs=2, metric_device=metric_device)
_test(average="macro", n_epochs=1, metric_device=metric_device)
_test(average="macro", n_epochs=2, metric_device=metric_device)
_test(average="micro", n_epochs=1, metric_device=metric_device)
_test(average="micro", n_epochs=2, metric_device=metric_device)
_test(average="weighted", n_epochs=1, metric_device=metric_device)
_test(average="weighted", n_epochs=2, metric_device=metric_device)
_test(average="samples", n_epochs=1, metric_device=metric_device)
_test(average="samples", n_epochs=2, metric_device=metric_device)
def test_distrib_accumulator_device(distributed):
# Binary input of shape (N, ); checks that Precision's accumulators end up on the requested metric device
def _test(average, metric_device):
pr = Precision(average=average, device=metric_device)
assert pr._device == metric_device
assert pr._updated is False
# Since the shape of the accumulated amount isn't known before the first update
# call, the internal variables aren't tensors on the right device yet.
y_pred = torch.randint(0, 2, size=(10,))
y = torch.randint(0, 2, size=(10,)).long()
pr.update((y_pred, y))
assert pr._updated is True
assert (
pr._numerator.device == metric_device
), f"{type(pr._numerator.device)}:{pr._numerator.device} vs {type(metric_device)}:{metric_device}"
if average != "samples":
# For average='samples', `_denominator` is a plain `int` and has no `device` attribute.
assert (
pr._denominator.device == metric_device
), f"{type(pr._denominator.device)}:{pr._denominator.device} vs {type(metric_device)}:{metric_device}"
if average == "weighted":
assert pr._weight.device == metric_device, f"{type(pr._weight.device)}:{pr._weight.device} vs {type(metric_device)}:{metric_device}"
metric_devices = [torch.device("cpu")]
device = idist.device()
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
_test(False, metric_device=metric_device)
_test("macro", metric_device=metric_device)
_test("micro", metric_device=metric_device)
_test("weighted", metric_device=metric_device)
def test_distrib_multilabel_accumulator_device(distributed):
# Multilabel input of shape (N, C, H, W); checks that the accumulators end up on the requested metric device
def _test(average, metric_device):
pr = Precision(is_multilabel=True, average=average, device=metric_device)
assert pr._updated is False
assert pr._device == metric_device
y_pred = torch.randint(0, 2, size=(10, 4, 20, 23))
y = torch.randint(0, 2, size=(10, 4, 20, 23)).long()
pr.update((y_pred, y))
assert pr._updated is True
assert (
pr._numerator.device == metric_device
), f"{type(pr._numerator.device)}:{pr._numerator.device} vs {type(metric_device)}:{metric_device}"
if average != "samples":
# For average='samples', `_denominator` is a plain `int` and has no `device` attribute.
assert (
pr._denominator.device == metric_device
), f"{type(pr._denominator.device)}:{pr._denominator.device} vs {type(metric_device)}:{metric_device}"
if average == "weighted":
assert pr._weight.device == metric_device, f"{type(pr._weight.device)}:{pr._weight.device} vs {type(metric_device)}:{metric_device}"
metric_devices = [torch.device("cpu")]
device = idist.device()
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
_test(False, metric_device=metric_device)
_test("macro", metric_device=metric_device)
_test("micro", metric_device=metric_device)
_test("weighted", metric_device=metric_device)
_test("samples", metric_device=metric_device)
|
import numpy as np
import pytest
import torch
from sklearn.metrics import multilabel_confusion_matrix
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics.multilabel_confusion_matrix import MultiLabelConfusionMatrix
torch.manual_seed(12)
def test_no_update():
cm = MultiLabelConfusionMatrix(10)
with pytest.raises(
NotComputableError, match=r"Confusion matrix must have at least one example before it can be computed"
):
cm.compute()
def test_num_classes_wrong_input():
with pytest.raises(ValueError, match="Argument num_classes needs to be > 1"):
MultiLabelConfusionMatrix(num_classes=1)
def test_multiclass_wrong_inputs():
cm = MultiLabelConfusionMatrix(10)
with pytest.raises(
ValueError, match=r"y_pred must at least have shape \(batch_size, num_classes \(currently set to 10\), ...\)"
):
cm.update((torch.rand(10), torch.randint(0, 2, size=(10, 10)).long()))
with pytest.raises(
ValueError, match=r"y must at least have shape \(batch_size, num_classes \(currently set to 10\), ...\)"
):
cm.update((torch.rand(10, 10), torch.randint(0, 2, size=(10,)).long()))
with pytest.raises(ValueError, match=r"y_pred and y have different batch size: 10 vs 8"):
cm.update((torch.rand(10, 10), torch.randint(0, 2, size=(8, 10)).long()))
with pytest.raises(ValueError, match=r"y does not have correct number of classes: 9 vs 10"):
cm.update((torch.rand(10, 10), torch.randint(0, 2, size=(10, 9)).long()))
with pytest.raises(ValueError, match=r"y_pred does not have correct number of classes: 3 vs 10"):
cm.update((torch.rand(10, 3), torch.randint(0, 2, size=(10, 10)).long()))
with pytest.raises(ValueError, match=r"y and y_pred shapes must match."):
cm.update((torch.rand(10, 10, 2), torch.randint(0, 2, size=(10, 10)).long()))
with pytest.raises(
ValueError,
match=r"y_pred must be of any type: \(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64\)",
):
cm.update((torch.rand(10, 10), torch.rand(10, 10)))
with pytest.raises(
ValueError, match=r"y must be of any type: \(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64\)"
):
cm.update((torch.rand(10, 10).type(torch.int32), torch.rand(10, 10)))
with pytest.raises(ValueError, match=r"y_pred must be a binary tensor"):
y = torch.randint(0, 2, size=(10, 10)).long()
y_pred = torch.randint(0, 2, size=(10, 10)).long()
y_pred[0, 0] = 2
cm.update((y_pred, y))
with pytest.raises(ValueError, match=r"y must be a binary tensor"):
y = torch.randint(0, 2, size=(10, 10)).long()
y_pred = torch.randint(0, 2, size=(10, 10)).long()
y[0, 0] = 2
cm.update((y_pred, y))
def get_y_true_y_pred():
# Generate a (1, 3, 30, 30) binary indicator image with 3 classes: 0 (background), 1 and 2
y_true = np.zeros((1, 3, 30, 30), dtype=np.int64)
y_true[0, 0, 5:17, 7:11] = 1
y_true[0, 1, 1:11, 1:11] = 1
y_true[0, 2, 15:25, 15:25] = 1
y_pred = np.zeros((1, 3, 30, 30), dtype=np.int64)
y_pred[0, 0, 0:7, 8:15] = 1
y_pred[0, 1, 5:15, 1:11] = 1
y_pred[0, 2, 20:30, 20:30] = 1
return y_true, y_pred
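# sklearn's multilabel_confusion_matrix expects inputs of shape (n_samples, n_classes), so the
# (N, C, H, W) indicator tensors are flattened via transpose + reshape before the comparison.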
def test_multiclass_images():
num_classes = 3
cm = MultiLabelConfusionMatrix(num_classes=num_classes)
y_true, y_pred = get_y_true_y_pred()
# Compute confusion matrix with sklearn
sklearn_CM = multilabel_confusion_matrix(
y_true.transpose((0, 2, 3, 1)).reshape(-1, 3), y_pred.transpose((0, 2, 3, 1)).reshape(-1, 3)
)
# Update metric
output = (torch.tensor(y_pred), torch.tensor(y_true))
cm.update(output)
ignite_CM = cm.compute().cpu().numpy()
assert np.all(ignite_CM == sklearn_CM)
# Another test on batch of 2 images
cm = MultiLabelConfusionMatrix(num_classes=num_classes)
# Create a batch of two images:
th_y_true1 = torch.tensor(y_true)
th_y_true2 = torch.tensor(y_true.transpose(0, 1, 3, 2))
th_y_true = torch.cat([th_y_true1, th_y_true2], dim=0)
th_y_pred1 = torch.tensor(y_pred)
th_y_pred2 = torch.tensor(y_pred.transpose(0, 1, 3, 2))
th_y_pred = torch.cat([th_y_pred1, th_y_pred2], dim=0)
# Update metric & compute
output = (th_y_pred, th_y_true)
cm.update(output)
ignite_CM = cm.compute().cpu().numpy()
# Compute confusion matrix with sklearn
th_y_true = idist.all_gather(th_y_true)
th_y_pred = idist.all_gather(th_y_pred)
np_y_true = th_y_true.cpu().numpy().transpose((0, 2, 3, 1)).reshape(-1, 3)
np_y_pred = th_y_pred.cpu().numpy().transpose((0, 2, 3, 1)).reshape(-1, 3)
sklearn_CM = multilabel_confusion_matrix(np_y_true, np_y_pred)
assert np.all(ignite_CM == sklearn_CM)
def _test_distrib_multiclass_images(device):
def _test(metric_device):
num_classes = 3
cm = MultiLabelConfusionMatrix(num_classes=num_classes, device=metric_device)
y_true, y_pred = get_y_true_y_pred()
# Compute confusion matrix with sklearn
sklearn_CM = multilabel_confusion_matrix(
y_true.transpose((0, 2, 3, 1)).reshape(-1, 3), y_pred.transpose((0, 2, 3, 1)).reshape(-1, 3)
)
# Update metric
output = (torch.tensor(y_pred).to(device), torch.tensor(y_true).to(device))
cm.update(output)
ignite_CM = cm.compute().cpu().numpy()
assert np.all(ignite_CM == sklearn_CM)
        # Another test on a batch of 2 images
num_classes = 3
cm = MultiLabelConfusionMatrix(num_classes=num_classes, device=metric_device)
# Create a batch of two images:
th_y_true1 = torch.tensor(y_true)
th_y_true2 = torch.tensor(y_true.transpose(0, 1, 3, 2))
th_y_true = torch.cat([th_y_true1, th_y_true2], dim=0)
th_y_true = th_y_true.to(device)
th_y_pred1 = torch.tensor(y_pred)
th_y_pred2 = torch.tensor(y_pred.transpose(0, 1, 3, 2))
th_y_pred = torch.cat([th_y_pred1, th_y_pred2], dim=0)
th_y_pred = th_y_pred.to(device)
# Update metric & compute
output = (th_y_pred, th_y_true)
cm.update(output)
ignite_CM = cm.compute().cpu().numpy()
# Compute confusion matrix with sklearn
th_y_true = idist.all_gather(th_y_true)
th_y_pred = idist.all_gather(th_y_pred)
np_y_true = th_y_true.cpu().numpy().transpose((0, 2, 3, 1)).reshape(-1, 3)
np_y_pred = th_y_pred.cpu().numpy().transpose((0, 2, 3, 1)).reshape(-1, 3)
sklearn_CM = multilabel_confusion_matrix(np_y_true, np_y_pred)
assert np.all(ignite_CM == sklearn_CM)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_accumulator_device(device):
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
cm = MultiLabelConfusionMatrix(num_classes=3, device=metric_device)
assert cm._device == metric_device
        assert (
            cm.confusion_matrix.device == metric_device
        ), f"{type(cm.confusion_matrix.device)}:{cm.confusion_matrix.device} vs {type(metric_device)}:{metric_device}"
y_true, y_pred = get_y_true_y_pred()
cm.update((torch.tensor(y_pred), torch.tensor(y_true)))
        assert (
            cm.confusion_matrix.device == metric_device
        ), f"{type(cm.confusion_matrix.device)}:{cm.confusion_matrix.device} vs {type(metric_device)}:{metric_device}"
def test_simple_2D_input():
# Tests for 2D inputs with normalized = True and False
num_iters = 5
num_samples = 100
num_classes = 10
torch.manual_seed(0)
for _ in range(num_iters):
target = torch.randint(0, 2, size=(num_samples, num_classes))
prediction = torch.randint(0, 2, size=(num_samples, num_classes))
sklearn_CM = multilabel_confusion_matrix(target.numpy(), prediction.numpy())
mlcm = MultiLabelConfusionMatrix(num_classes, normalized=False)
mlcm.update([prediction, target])
ignite_CM = mlcm.compute().numpy()
assert np.all(sklearn_CM.astype(np.int64) == ignite_CM.astype(np.int64))
mlcm = MultiLabelConfusionMatrix(num_classes, normalized=True)
mlcm.update([prediction, target])
ignite_CM_normalized = mlcm.compute().numpy()
sklearn_CM_normalized = sklearn_CM / sklearn_CM.sum(axis=(1, 2))[:, None, None]
assert np.allclose(sklearn_CM_normalized, ignite_CM_normalized)
def test_simple_ND_input():
num_iters = 5
num_samples = 100
num_classes = 10
torch.manual_seed(0)
size_3d = 4
for _ in range(num_iters): # 3D tests
target = torch.randint(0, 2, size=(num_samples, num_classes, size_3d))
prediction = torch.randint(0, 2, size=(num_samples, num_classes, size_3d))
mlcm = MultiLabelConfusionMatrix(num_classes, normalized=False)
mlcm.update([prediction, target])
ignite_CM = mlcm.compute().numpy()
target_reshaped = target.permute(0, 2, 1).reshape(size_3d * num_samples, num_classes)
prediction_reshaped = prediction.permute(0, 2, 1).reshape(size_3d * num_samples, num_classes)
sklearn_CM = multilabel_confusion_matrix(target_reshaped.numpy(), prediction_reshaped.numpy())
assert np.all(sklearn_CM.astype(np.int64) == ignite_CM.astype(np.int64))
size_4d = 4
for _ in range(num_iters): # 4D tests
target = torch.randint(0, 2, size=(num_samples, num_classes, size_3d, size_4d))
prediction = torch.randint(0, 2, size=(num_samples, num_classes, size_3d, size_4d))
mlcm = MultiLabelConfusionMatrix(num_classes, normalized=False)
mlcm.update([prediction, target])
ignite_CM = mlcm.compute().numpy()
target_reshaped = target.permute(0, 2, 3, 1).reshape(size_3d * size_4d * num_samples, num_classes)
prediction_reshaped = prediction.permute(0, 2, 3, 1).reshape(size_3d * size_4d * num_samples, num_classes)
sklearn_CM = multilabel_confusion_matrix(target_reshaped.numpy(), prediction_reshaped.numpy())
assert np.all(sklearn_CM.astype(np.int64) == ignite_CM.astype(np.int64))
size_5d = 4
for _ in range(num_iters): # 5D tests
target = torch.randint(0, 2, size=(num_samples, num_classes, size_3d, size_4d, size_5d))
prediction = torch.randint(0, 2, size=(num_samples, num_classes, size_3d, size_4d, size_5d))
mlcm = MultiLabelConfusionMatrix(num_classes, normalized=False)
mlcm.update([prediction, target])
ignite_CM = mlcm.compute().numpy()
target_reshaped = target.permute(0, 2, 3, 4, 1).reshape(size_3d * size_4d * size_5d * num_samples, num_classes)
prediction_reshaped = prediction.permute(0, 2, 3, 4, 1).reshape(
size_3d * size_4d * size_5d * num_samples, num_classes
)
sklearn_CM = multilabel_confusion_matrix(target_reshaped.numpy(), prediction_reshaped.numpy())
assert np.all(sklearn_CM.astype(np.int64) == ignite_CM.astype(np.int64))
def test_simple_batched():
num_iters = 5
num_samples = 100
num_classes = 10
batch_size = 1
torch.manual_seed(0)
for _ in range(num_iters): # 2D tests
mlcm = MultiLabelConfusionMatrix(num_classes, normalized=False)
targets = torch.randint(0, 2, size=(int(num_samples / batch_size), batch_size, num_classes))
predictions = torch.randint(0, 2, size=(int(num_samples / batch_size), batch_size, num_classes))
for i in range(int(num_samples / batch_size)):
target_sample = targets[i]
prediction_sample = predictions[i]
mlcm.update([prediction_sample, target_sample])
ignite_CM = mlcm.compute().numpy()
targets_reshaped = targets.reshape(-1, num_classes)
predictions_reshaped = predictions.reshape(-1, num_classes)
sklearn_CM = multilabel_confusion_matrix(targets_reshaped.numpy(), predictions_reshaped.numpy())
assert np.all(sklearn_CM.astype(np.int64) == ignite_CM.astype(np.int64))
size_3d = 4
for _ in range(num_iters): # 3D tests
mlcm = MultiLabelConfusionMatrix(num_classes, normalized=False)
targets = torch.randint(0, 2, size=(int(num_samples / batch_size), batch_size, num_classes, size_3d))
predictions = torch.randint(0, 2, size=(int(num_samples / batch_size), batch_size, num_classes, size_3d))
for i in range(int(num_samples / batch_size)):
target_sample = targets[i]
prediction_sample = predictions[i]
mlcm.update([prediction_sample, target_sample])
ignite_CM = mlcm.compute().numpy()
targets_reshaped = targets.permute(0, 1, 3, 2).reshape(-1, num_classes)
predictions_reshaped = predictions.permute(0, 1, 3, 2).reshape(-1, num_classes)
sklearn_CM = multilabel_confusion_matrix(targets_reshaped.numpy(), predictions_reshaped.numpy())
assert np.all(sklearn_CM.astype(np.int64) == ignite_CM.astype(np.int64))
size_4d = 4
for _ in range(num_iters): # 4D tests
mlcm = MultiLabelConfusionMatrix(num_classes, normalized=False)
targets = torch.randint(0, 2, size=(int(num_samples / batch_size), batch_size, num_classes, size_3d, size_4d))
predictions = torch.randint(
0, 2, size=(int(num_samples / batch_size), batch_size, num_classes, size_3d, size_4d)
)
for i in range(int(num_samples / batch_size)):
target_sample = targets[i]
prediction_sample = predictions[i]
mlcm.update([prediction_sample, target_sample])
ignite_CM = mlcm.compute().numpy()
targets_reshaped = targets.permute(0, 1, 3, 4, 2).reshape(-1, num_classes)
predictions_reshaped = predictions.permute(0, 1, 3, 4, 2).reshape(-1, num_classes)
sklearn_CM = multilabel_confusion_matrix(targets_reshaped.numpy(), predictions_reshaped.numpy())
assert np.all(sklearn_CM.astype(np.int64) == ignite_CM.astype(np.int64))
size_5d = 4
for _ in range(num_iters): # 5D tests
mlcm = MultiLabelConfusionMatrix(num_classes, normalized=False)
targets = torch.randint(
0, 2, size=(int(num_samples / batch_size), batch_size, num_classes, size_3d, size_4d, size_5d)
)
predictions = torch.randint(
0, 2, size=(int(num_samples / batch_size), batch_size, num_classes, size_3d, size_4d, size_5d)
)
for i in range(int(num_samples / batch_size)):
target_sample = targets[i]
prediction_sample = predictions[i]
mlcm.update([prediction_sample, target_sample])
ignite_CM = mlcm.compute().numpy()
targets_reshaped = targets.permute(0, 1, 3, 4, 5, 2).reshape(-1, num_classes)
predictions_reshaped = predictions.permute(0, 1, 3, 4, 5, 2).reshape(-1, num_classes)
sklearn_CM = multilabel_confusion_matrix(targets_reshaped.numpy(), predictions_reshaped.numpy())
assert np.all(sklearn_CM.astype(np.int64) == ignite_CM.astype(np.int64))
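# Distributed and TPU variants of the tests above are kept commented out below.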
# @pytest.mark.distributed
# @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
# @pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
# def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
# device = idist.device()
# _test_distrib_multiclass_images(device)
# _test_distrib_accumulator_device(device)
# @pytest.mark.distributed
# @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
# def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
# device = idist.device()
# _test_distrib_multiclass_images(device)
# _test_distrib_accumulator_device(device)
# @pytest.mark.distributed
# @pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
# @pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
# def test_distrib_hvd(gloo_hvd_executor):
# device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
# nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
# gloo_hvd_executor(_test_distrib_multiclass_images, (device,), np=nproc, do_init=True)
# gloo_hvd_executor(_test_distrib_accumulator_device, (device,), np=nproc, do_init=True)
# @pytest.mark.multinode_distributed
# @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
# @pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
# def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
#
# device = idist.device()
# _test_distrib_multiclass_images(device)
# _test_distrib_accumulator_device(device)
# @pytest.mark.multinode_distributed
# @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
# @pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
# def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
#
# device = idist.device()
# _test_distrib_multiclass_images(device)
# _test_distrib_accumulator_device(device)
# @pytest.mark.tpu
# @pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
# @pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
# def test_distrib_single_device_xla():
# device = idist.device()
# _test_distrib_multiclass_images(device)
# _test_distrib_accumulator_device(device)
# def _test_distrib_xla_nprocs(index):
# device = idist.device()
# _test_distrib_multiclass_images(device)
# _test_distrib_accumulator_device(device)
# @pytest.mark.tpu
# @pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
# @pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
# def test_distrib_xla_nprocs(xmp_executor):
# n = int(os.environ["NUM_TPU_WORKERS"])
# xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import MeanPairwiseDistance
def test_zero_sample():
mpd = MeanPairwiseDistance()
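    # The match string below mirrors the metric's current error message, which reuses
    # the MeanAbsoluteError wording.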
with pytest.raises(
NotComputableError, match=r"MeanAbsoluteError must have at least one example before it can be computed"
):
mpd.compute()
@pytest.fixture(params=[item for item in range(4)])
def test_case(request):
return [
(torch.randint(0, 10, size=(100, 1)), torch.randint(0, 10, size=(100, 1)), 1),
(torch.randint(-20, 20, size=(100, 5)), torch.randint(-20, 20, size=(100, 5)), 1),
# updated batches
(torch.randint(0, 10, size=(100, 1)), torch.randint(0, 10, size=(100, 1)), 16),
(torch.randint(-20, 20, size=(100, 5)), torch.randint(-20, 20, size=(100, 5)), 16),
][request.param]
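# Each case is a (y_pred, y, batch_size) triple; batch_size == 16 exercises incremental
# updates in test_compute below.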
@pytest.mark.parametrize("n_times", range(5))
def test_compute(n_times, test_case):
mpd = MeanPairwiseDistance()
y_pred, y, batch_size = test_case
mpd.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
mpd.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
mpd.update((y_pred, y))
np_res = np.mean(torch.pairwise_distance(y_pred, y, p=mpd._p, eps=mpd._eps).numpy())
assert isinstance(mpd.compute(), float)
assert pytest.approx(mpd.compute()) == np_res
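# Distributed integration: the metric is attached to an Engine, tensors are gathered
# across processes, and the result is compared to torch.pairwise_distance per batch.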
def _test_distrib_integration(device):
from ignite.engine import Engine
rank = idist.get_rank()
torch.manual_seed(12 + rank)
def _test(metric_device):
n_iters = 100
batch_size = 50
y_true = torch.rand(n_iters * batch_size, 10).to(device)
y_preds = torch.rand(n_iters * batch_size, 10).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size, ...],
y_true[i * batch_size : (i + 1) * batch_size, ...],
)
engine = Engine(update)
m = MeanPairwiseDistance(device=metric_device)
m.attach(engine, "mpwd")
data = list(range(n_iters))
engine.run(data=data, max_epochs=1)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "mpwd" in engine.state.metrics
res = engine.state.metrics["mpwd"]
true_res = []
for i in range(n_iters * idist.get_world_size()):
true_res.append(
torch.pairwise_distance(
y_true[i * batch_size : (i + 1) * batch_size, ...],
y_preds[i * batch_size : (i + 1) * batch_size, ...],
p=m._p,
eps=m._eps,
)
.cpu()
.numpy()
)
true_res = np.array(true_res).ravel()
true_res = true_res.mean()
assert pytest.approx(res) == true_res
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_accumulator_device(device):
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
mpd = MeanPairwiseDistance(device=metric_device)
for dev in [mpd._device, mpd._sum_of_distances.device]:
assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
y_pred = torch.tensor([[3.0, 4.0], [-3.0, -4.0]])
y = torch.zeros(2, 2)
mpd.update((y_pred, y))
for dev in [mpd._device, mpd._sum_of_distances.device]:
assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
def test_accumulator_detached():
mpd = MeanPairwiseDistance()
y_pred = torch.tensor([[3.0, 4.0], [-3.0, -4.0]], requires_grad=True)
y = torch.zeros(2, 2)
mpd.update((y_pred, y))
assert not mpd._sum_of_distances.requires_grad
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_accumulator_device, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import pytest
import torch
import ignite.distributed as idist
from ignite.engine import Engine
from ignite.metrics import EpochMetric
from ignite.metrics.epoch_metric import EpochMetricWarning, NotComputableError
def test_epoch_metric_wrong_setup_or_input():
# Wrong compute function
with pytest.raises(TypeError, match=r"Argument compute_fn should be callable."):
EpochMetric(12345)
def compute_fn(y_preds, y_targets):
return 0.0
em = EpochMetric(compute_fn)
# Wrong input dims
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
output = (torch.tensor(0), torch.tensor(0))
em.update(output)
# Wrong input dims
with pytest.raises(ValueError, match=r"Targets should be of shape"):
output = (torch.rand(4, 3), torch.rand(4, 3, 1))
em.update(output)
# Wrong input dims
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
output = (torch.rand(4, 3, 1), torch.rand(4, 3))
em.update(output)
em.reset()
output1 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
em.update(output1)
with pytest.raises(ValueError, match=r"Incoherent types between input y_pred and stored predictions"):
output2 = (torch.randint(0, 5, size=(4, 3)), torch.randint(0, 2, size=(4, 3)))
em.update(output2)
with pytest.raises(ValueError, match=r"Incoherent types between input y and stored targets"):
output2 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3)).to(torch.int32))
em.update(output2)
with pytest.raises(
NotComputableError, match="EpochMetric must have at least one example before it can be computed"
):
em = EpochMetric(compute_fn)
em.compute()
def test_epoch_metric():
def compute_fn(y_preds, y_targets):
return 0.0
em = EpochMetric(compute_fn)
em.reset()
output1 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
em.update(output1)
output2 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
em.update(output2)
assert all([t.device.type == "cpu" for t in em._predictions + em._targets])
assert torch.equal(em._predictions[0], output1[0])
assert torch.equal(em._predictions[1], output2[0])
assert torch.equal(em._targets[0], output1[1])
assert torch.equal(em._targets[1], output2[1])
assert em.compute() == 0.0
    # test when y and y_pred have shape (batch_size, 1), which is squeezed to (batch_size, )
em.reset()
output1 = (torch.rand(4, 1), torch.randint(0, 2, size=(4, 1), dtype=torch.long))
em.update(output1)
output2 = (torch.rand(4, 1), torch.randint(0, 2, size=(4, 1), dtype=torch.long))
em.update(output2)
assert all([t.device.type == "cpu" for t in em._predictions + em._targets])
assert torch.equal(em._predictions[0], output1[0][:, 0])
assert torch.equal(em._predictions[1], output2[0][:, 0])
assert torch.equal(em._targets[0], output1[1][:, 0])
assert torch.equal(em._targets[1], output2[1][:, 0])
assert em.compute() == 0.0
def test_mse_epoch_metric():
def compute_fn(y_preds, y_targets):
return torch.mean(((y_preds - y_targets.type_as(y_preds)) ** 2)).item()
em = EpochMetric(compute_fn)
em.reset()
output1 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
em.update(output1)
output2 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
em.update(output2)
output3 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
em.update(output3)
preds = torch.cat([output1[0], output2[0], output3[0]], dim=0)
targets = torch.cat([output1[1], output2[1], output3[1]], dim=0)
result = em.compute()
assert result == compute_fn(preds, targets)
em.reset()
output1 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
em.update(output1)
output2 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
em.update(output2)
output3 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
em.update(output3)
preds = torch.cat([output1[0], output2[0], output3[0]], dim=0)
targets = torch.cat([output1[1], output2[1], output3[1]], dim=0)
result = em.compute()
assert result == compute_fn(preds, targets)
def test_bad_compute_fn():
def compute_fn(y_preds, y_targets):
        # The following will raise the error:
# The size of tensor a (3) must match the size of tensor b (4)
# at non-singleton dimension 1
return torch.mean(y_preds - y_targets).item()
em = EpochMetric(compute_fn)
em.reset()
output1 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 4), dtype=torch.long))
with pytest.warns(EpochMetricWarning, match=r"Probably, there can be a problem with `compute_fn`"):
em.update(output1)
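# With check_compute_fn=True, EpochMetric probes compute_fn during update and emits an
# EpochMetricWarning if it raises; with check_compute_fn=False the probe is skipped.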
def test_check_compute_fn():
def compute_fn(y_preds, y_targets):
raise Exception
em = EpochMetric(compute_fn, check_compute_fn=True)
em.reset()
output1 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
with pytest.warns(EpochMetricWarning, match=r"Probably, there can be a problem with `compute_fn`"):
em.update(output1)
em = EpochMetric(compute_fn, check_compute_fn=False)
em.update(output1)
def test_distrib_integration(distributed):
device = idist.device() if idist.device().type != "xla" else "cpu"
rank = idist.get_rank()
torch.manual_seed(40 + rank)
n_iters = 3
batch_size = 2
n_classes = 7
y_true = torch.randint(0, n_classes, size=(n_iters * batch_size,), device=device)
y_preds = torch.rand(n_iters * batch_size, n_classes, device=device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size, :],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
def assert_data_fn(all_preds, all_targets):
return (all_preds.argmax(dim=1) == all_targets).sum().item()
ep_metric = EpochMetric(assert_data_fn, check_compute_fn=False, device=device)
ep_metric.attach(engine, "epm")
data = list(range(n_iters))
engine.run(data=data, max_epochs=3)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
ep_metric_true = (y_preds.argmax(dim=1) == y_true).sum().item()
assert engine.state.metrics["epm"] == ep_metric_true
assert ep_metric.compute() == ep_metric_true
|
# Needed to collect coverage data
|
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import MeanAbsoluteError
def test_no_update():
mae = MeanAbsoluteError()
with pytest.raises(
NotComputableError, match=r"MeanAbsoluteError must have at least one example before it can be computed"
):
mae.compute()
@pytest.fixture(params=[item for item in range(4)])
def test_case(request):
return [
(torch.randint(0, 10, size=(100, 1)), torch.randint(0, 10, size=(100, 1)), 1),
(torch.randint(-10, 10, size=(100, 5)), torch.randint(-10, 10, size=(100, 5)), 1),
# updated batches
(torch.randint(0, 10, size=(100, 1)), torch.randint(0, 10, size=(100, 1)), 16),
(torch.randint(-20, 20, size=(100, 5)), torch.randint(-20, 20, size=(100, 5)), 16),
][request.param]
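# Each case is a (y_pred, y, batch_size) triple; batch_size == 16 checks batched updates.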
@pytest.mark.parametrize("n_times", range(5))
def test_compute(n_times, test_case):
mae = MeanAbsoluteError()
y_pred, y, batch_size = test_case
mae.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
mae.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
        mae.update((y_pred, y))
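    # Reference value: sum of absolute errors over all elements divided by the number of
    # samples (y.shape[0]).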
np_y = y.numpy()
np_y_pred = y_pred.numpy()
np_res = (np.abs(np_y_pred - np_y)).sum() / np_y.shape[0]
assert isinstance(mae.compute(), float)
assert mae.compute() == np_res
def _test_distrib_integration(device):
import numpy as np
from ignite.engine import Engine
rank = idist.get_rank()
def _test(metric_device):
n_iters = 80
batch_size = 50
torch.manual_seed(12 + rank)
y_true = torch.arange(0, n_iters * batch_size, dtype=torch.float).to(device)
y_preds = torch.ones(n_iters * batch_size, dtype=torch.float).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
m = MeanAbsoluteError(device=metric_device)
m.attach(engine, "mae")
data = list(range(n_iters))
engine.run(data=data, max_epochs=1)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "mae" in engine.state.metrics
res = engine.state.metrics["mae"]
true_res = np.mean(np.abs((y_true - y_preds).cpu().numpy()))
assert pytest.approx(res) == true_res
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_accumulator_device(device):
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
mae = MeanAbsoluteError(device=metric_device)
for dev in [mae._device, mae._sum_of_absolute_errors.device]:
assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
y_pred = torch.tensor([[2.0], [-2.0]])
y = torch.zeros(2)
mae.update((y_pred, y))
for dev in [mae._device, mae._sum_of_absolute_errors.device]:
assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
def test_accumulator_detached():
mae = MeanAbsoluteError()
y_pred = torch.tensor([[2.0], [-2.0]], requires_grad=True)
y = torch.zeros(2)
mae.update((y_pred, y))
assert not mae._sum_of_absolute_errors.requires_grad
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_accumulator_device, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
from pytest import approx
from sklearn.metrics import f1_score, precision_score, recall_score
import ignite.distributed as idist
from ignite.engine import Engine
from ignite.metrics import Metric, MetricsLambda, Precision, Recall
class ListGatherMetric(Metric):
def __init__(self, index):
super(ListGatherMetric, self).__init__()
self.index = index
def reset(self):
self.list_ = None
def update(self, output):
self.list_ = output
def compute(self):
return self.list_[self.index]
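# ListGatherMetric returns a single element of the last update; it is a minimal building
# block for composing MetricsLambda expressions in the tests below.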
def test_metrics_lambda():
m0 = ListGatherMetric(0)
m1 = ListGatherMetric(1)
m2 = ListGatherMetric(2)
def process_function(engine, data):
return data
engine = Engine(process_function)
def plus(this, other):
return this + other
m0_plus_m1 = MetricsLambda(plus, m0, other=m1)
m2_plus_2 = MetricsLambda(plus, m2, 2)
m0_plus_m1.attach(engine, "m0_plus_m1")
m2_plus_2.attach(engine, "m2_plus_2")
engine.run([[1, 10, 100]])
assert engine.state.metrics["m0_plus_m1"] == 11
assert engine.state.metrics["m2_plus_2"] == 102
engine.run([[2, 20, 200]])
assert engine.state.metrics["m0_plus_m1"] == 22
assert engine.state.metrics["m2_plus_2"] == 202
# metrics are partially attached
assert not m0.is_attached(engine)
assert not m1.is_attached(engine)
assert not m2.is_attached(engine)
# a dependency is detached
m0.detach(engine)
# so the lambda metric is too
assert not m0_plus_m1.is_attached(engine)
# the lambda is attached again
m0_plus_m1.attach(engine, "m0_plus_m1")
assert m0_plus_m1.is_attached(engine)
# metrics are always partially attached
assert not m0.is_attached(engine)
m0_plus_m1.detach(engine)
assert not m0_plus_m1.is_attached(engine)
# detached (and no longer partially attached)
assert not m0.is_attached(engine)
def test_metrics_lambda_reset():
m0 = ListGatherMetric(0)
m1 = ListGatherMetric(1)
m2 = ListGatherMetric(2)
m0.update([1, 10, 100])
m1.update([1, 10, 100])
m2.update([1, 10, 100])
def fn(x, y, z, t):
return 1
m = MetricsLambda(fn, m0, m1, z=m2, t=0)
    # instantiating a new MetricsLambda must reset
    # its argument metrics
assert m0.list_ is None
assert m1.list_ is None
assert m2.list_ is None
m0.update([1, 10, 100])
m1.update([1, 10, 100])
m2.update([1, 10, 100])
m.reset()
assert m0.list_ is None
assert m1.list_ is None
assert m2.list_ is None
def test_metrics_lambda_update_and_attach_together():
y_pred = torch.randint(0, 2, size=(15, 10, 4)).float()
y = torch.randint(0, 2, size=(15, 10, 4)).long()
def update_fn(engine, batch):
y_pred, y = batch
return y_pred, y
engine = Engine(update_fn)
precision = Precision(average=False)
recall = Recall(average=False)
def Fbeta(r, p, beta):
return torch.mean((1 + beta**2) * p * r / (beta**2 * p + r)).item()
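    # F-beta = (1 + beta^2) * P * R / (beta^2 * P + R), reduced to a scalar with torch.mean.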
F1 = MetricsLambda(Fbeta, recall, precision, 1)
F1.attach(engine, "f1")
with pytest.raises(ValueError, match=r"MetricsLambda is already attached to an engine"):
F1.update((y_pred, y))
y_pred = torch.randint(0, 2, size=(15, 10, 4)).float()
y = torch.randint(0, 2, size=(15, 10, 4)).long()
F1 = MetricsLambda(Fbeta, recall, precision, 1)
F1.update((y_pred, y))
engine = Engine(update_fn)
with pytest.raises(ValueError, match=r"The underlying metrics are already updated"):
F1.attach(engine, "f1")
F1.reset()
F1.attach(engine, "f1")
def test_metrics_lambda_update():
"""
Test if the underlying metrics are updated
"""
y_pred = torch.randint(0, 2, size=(15, 10, 4)).float()
y = torch.randint(0, 2, size=(15, 10, 4)).long()
precision = Precision(average=False)
recall = Recall(average=False)
def Fbeta(r, p, beta):
return torch.mean((1 + beta**2) * p * r / (beta**2 * p + r)).item()
F1 = MetricsLambda(Fbeta, recall, precision, 1)
F1.update((y_pred, y))
assert precision._updated
assert recall._updated
F1.reset()
assert not precision._updated
assert not recall._updated
"""
Test multiple updates and if the inputs of
the underlying metrics are updated multiple times
"""
y_pred1 = torch.randint(0, 2, size=(15,))
y1 = torch.randint(0, 2, size=(15,))
y_pred2 = torch.randint(0, 2, size=(15,))
y2 = torch.randint(0, 2, size=(15,))
F1.update((y_pred1, y1))
F1.update((y_pred2, y2))
# Compute true_positives and positives for precision
correct1 = y1 * y_pred1
all_positives1 = y_pred1.sum(dim=0)
if correct1.sum() == 0:
true_positives1 = torch.zeros_like(all_positives1)
else:
true_positives1 = correct1.sum(dim=0)
correct2 = y2 * y_pred2
all_positives2 = y_pred2.sum(dim=0)
if correct2.sum() == 0:
true_positives2 = torch.zeros_like(all_positives2)
else:
true_positives2 = correct2.sum(dim=0)
true_positives = true_positives1 + true_positives2
positives = all_positives1 + all_positives2
assert precision._type == "binary"
assert precision._numerator == true_positives
assert precision._denominator == positives
    # Computing positives for recall is different
positives1 = y1.sum(dim=0)
positives2 = y2.sum(dim=0)
positives = positives1 + positives2
assert recall._type == "binary"
assert recall._numerator == true_positives
assert recall._denominator == positives
"""
Test compute
"""
F1.reset()
F1.update((y_pred1, y1))
F1_metrics_lambda = F1.compute()
F1_sklearn = f1_score(y1.numpy(), y_pred1.numpy())
assert pytest.approx(F1_metrics_lambda) == F1_sklearn
@pytest.mark.parametrize("attach_pr_re", [True, False])
def test_integration(attach_pr_re):
torch.manual_seed(1)
n_iters = 10
batch_size = 10
n_classes = 10
y_true = torch.arange(0, n_iters * batch_size) % n_classes
y_pred = 0.2 * torch.rand(n_iters * batch_size, n_classes)
for i in range(n_iters * batch_size):
if torch.rand(1) > 0.4:
y_pred[i, y_true[i]] = 1.0
else:
j = torch.randint(0, n_classes, size=(1,))
y_pred[i, j] = 0.7
y_true_batch_values = iter(y_true.reshape(n_iters, batch_size))
y_pred_batch_values = iter(y_pred.reshape(n_iters, batch_size, n_classes))
def update_fn(engine, batch):
y_true_batch = next(y_true_batch_values)
y_pred_batch = next(y_pred_batch_values)
return y_pred_batch, y_true_batch
evaluator = Engine(update_fn)
precision = Precision(average=False)
recall = Recall(average=False)
def Fbeta(r, p, beta):
return torch.mean((1 + beta**2) * p * r / (beta**2 * p + r)).item()
F1 = MetricsLambda(Fbeta, recall, precision, 1)
if attach_pr_re:
precision.attach(evaluator, "precision")
recall.attach(evaluator, "recall")
F1.attach(evaluator, "f1")
data = list(range(n_iters))
state = evaluator.run(data, max_epochs=1)
precision_true = precision_score(y_true, y_pred.argmax(dim=-1), average=None)
recall_true = recall_score(y_true, y_pred.argmax(dim=-1), average=None)
f1_true = f1_score(y_true, y_pred.argmax(dim=-1), average="macro")
assert f1_true == approx(state.metrics["f1"]), f"{f1_true} vs {state.metrics['f1']}"
if attach_pr_re:
precision = state.metrics["precision"].numpy()
recall = state.metrics["recall"].numpy()
assert precision_true == approx(precision), f"{precision_true} vs {precision}"
assert recall_true == approx(recall), f"{recall_true} vs {recall}"
def test_state_metrics():
y_pred = torch.randint(0, 2, size=(15, 10, 4)).float()
y = torch.randint(0, 2, size=(15, 10, 4)).long()
def update_fn(engine, batch):
y_pred, y = batch
return y_pred, y
evaluator = Engine(update_fn)
precision = Precision(average=False)
recall = Recall(average=False)
F1 = precision * recall * 2 / (precision + recall + 1e-20)
F1 = MetricsLambda(lambda t: torch.mean(t).item(), F1)
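    # Arithmetic on Metric objects builds MetricsLambda instances lazily; the outer
    # MetricsLambda reduces the resulting per-class tensor to a scalar.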
precision.attach(evaluator, "precision")
recall.attach(evaluator, "recall")
F1.attach(evaluator, "f1")
def data(y_pred, y):
for i in range(y_pred.shape[0]):
yield (y_pred[i], y[i])
d = data(y_pred, y)
state = evaluator.run(d, max_epochs=1, epoch_length=y_pred.shape[0])
assert set(state.metrics.keys()) == set(["precision", "recall", "f1"])
def test_state_metrics_ingredients_not_attached():
y_pred = torch.randint(0, 2, size=(15, 10, 4)).float()
y = torch.randint(0, 2, size=(15, 10, 4)).long()
def update_fn(engine, batch):
y_pred, y = batch
return y_pred, y
evaluator = Engine(update_fn)
precision = Precision(average=False)
recall = Recall(average=False)
F1 = precision * recall * 2 / (precision + recall + 1e-20)
F1 = MetricsLambda(lambda t: torch.mean(t).item(), F1)
F1.attach(evaluator, "F1")
def data(y_pred, y):
for i in range(y_pred.shape[0]):
yield (y_pred[i], y[i])
d = data(y_pred, y)
state = evaluator.run(d, max_epochs=1, epoch_length=y_pred.shape[0])
assert set(state.metrics.keys()) == set(["F1"])
def test_recursive_attachment():
def _test(composed_metric, metric_name, compute_true_value_fn):
metrics = {
metric_name: composed_metric,
}
y_pred = torch.randint(0, 2, size=(15, 10, 4)).float()
y = torch.randint(0, 2, size=(15, 10, 4)).long()
def update_fn(engine, batch):
y_pred, y = batch
return y_pred, y
validator = Engine(update_fn)
for name, metric in metrics.items():
metric.attach(validator, name)
def data(y_pred, y):
for i in range(y_pred.shape[0]):
yield (y_pred[i], y[i])
d = data(y_pred, y)
state = validator.run(d, max_epochs=1, epoch_length=y_pred.shape[0])
assert set(state.metrics.keys()) == set([metric_name])
np_y_pred = y_pred.numpy().ravel()
np_y = y.numpy().ravel()
assert state.metrics[metric_name] == approx(compute_true_value_fn(np_y_pred, np_y))
precision_1 = Precision()
precision_2 = Precision()
summed_precision = precision_1 + precision_2
def compute_true_summed_precision(y_pred, y):
p1 = precision_score(y, y_pred)
p2 = precision_score(y, y_pred)
return p1 + p2
_test(summed_precision, "summed precision", compute_true_value_fn=compute_true_summed_precision)
precision_1 = Precision()
precision_2 = Precision()
mean_precision = (precision_1 + precision_2) / 2
def compute_true_mean_precision(y_pred, y):
p1 = precision_score(y, y_pred)
p2 = precision_score(y, y_pred)
return (p1 + p2) * 0.5
_test(mean_precision, "mean precision", compute_true_value_fn=compute_true_mean_precision)
precision_1 = Precision()
precision_2 = Precision()
some_metric = 2.0 + 0.2 * (precision_1 * precision_2 + precision_1 - precision_2) ** 0.5
def compute_true_somemetric(y_pred, y):
p1 = precision_score(y, y_pred)
p2 = precision_score(y, y_pred)
return 2.0 + 0.2 * (p1 * p2 + p1 - p2) ** 0.5
_test(some_metric, "some metric", compute_true_somemetric)
def _test_distrib_integration(device):
rank = idist.get_rank()
n_iters = 10
batch_size = 10
n_classes = 10
def _test(metric_device):
y_true = torch.arange(0, n_iters * batch_size, dtype=torch.int64).to(device) % n_classes
y_pred = 0.2 * torch.rand(n_iters * batch_size, n_classes).to(device)
for i in range(n_iters * batch_size):
if np.random.rand() > 0.4:
y_pred[i, y_true[i]] = 1.0
else:
j = np.random.randint(0, n_classes)
y_pred[i, j] = 0.7
def update_fn(engine, i):
y_true_batch = y_true[i * batch_size : (i + 1) * batch_size, ...]
y_pred_batch = y_pred[i * batch_size : (i + 1) * batch_size, ...]
return y_pred_batch, y_true_batch
evaluator = Engine(update_fn)
precision = Precision(average=False, device=metric_device)
recall = Recall(average=False, device=metric_device)
def Fbeta(r, p, beta):
return torch.mean((1 + beta**2) * p * r / (beta**2 * p + r)).item()
F1 = MetricsLambda(Fbeta, recall, precision, 1)
F1.attach(evaluator, "f1")
another_f1 = (1.0 + precision * recall * 2 / (precision + recall + 1e-20)).mean().item()
another_f1.attach(evaluator, "ff1")
data = list(range(n_iters))
state = evaluator.run(data, max_epochs=1)
y_pred = idist.all_gather(y_pred)
y_true = idist.all_gather(y_true)
assert "f1" in state.metrics
assert "ff1" in state.metrics
f1_true = f1_score(y_true.view(-1).cpu(), y_pred.view(-1, n_classes).argmax(dim=-1).cpu(), average="macro")
assert f1_true == approx(state.metrics["f1"])
assert 1.0 + f1_true == approx(state.metrics["ff1"])
for i in range(3):
torch.manual_seed(12 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_metrics_on_diff_devices(device):
n_classes = 10
n_iters = 12
batch_size = 16
rank = idist.get_rank()
torch.manual_seed(12 + rank)
y_true = torch.randint(0, n_classes, size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(n_iters * batch_size, n_classes).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size, :],
y_true[i * batch_size : (i + 1) * batch_size],
)
evaluator = Engine(update)
precision = Precision(average=False, device="cpu")
recall = Recall(average=False, device=device)
def Fbeta(r, p, beta):
return torch.mean((1 + beta**2) * p * r / (beta**2 * p + r)).item()
F1 = MetricsLambda(Fbeta, recall, precision, 1)
F1.attach(evaluator, "f1")
another_f1 = (1.0 + precision * recall * 2 / (precision + recall + 1e-20)).mean().item()
another_f1.attach(evaluator, "ff1")
data = list(range(n_iters))
state = evaluator.run(data, max_epochs=1)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "f1" in state.metrics
assert "ff1" in state.metrics
f1_true = f1_score(y_true.view(-1).cpu(), y_preds.view(-1, n_classes).argmax(dim=-1).cpu(), average="macro")
assert f1_true == approx(state.metrics["f1"])
assert 1.0 + f1_true == approx(state.metrics["ff1"])
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_metrics_on_diff_devices(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_metrics_on_diff_devices, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_metrics_on_diff_devices(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import numpy as np
import pytest
import torch
from skimage.metrics import peak_signal_noise_ratio as ski_psnr
import ignite.distributed as idist
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
from ignite.metrics import PSNR
from ignite.utils import manual_seed
def test_zero_div():
psnr = PSNR(1.0)
with pytest.raises(NotComputableError, match="PSNR must have at least one example before it can be computed"):
psnr.compute()
def test_invalid_psnr():
y_pred = torch.rand(1, 3, 8, 8)
y = torch.rand(1, 3, 8, 8)
psnr = PSNR(1.0)
with pytest.raises(TypeError, match="Expected y_pred and y to have the same data type."):
psnr.update((y_pred, y.double()))
with pytest.raises(ValueError, match="Expected y_pred and y to have the same shape."):
psnr.update((y_pred, y.squeeze(dim=0)))
@pytest.fixture(params=["float", "YCbCr", "uint8", "NHW shape"])
def test_data(request, available_device):
manual_seed(42)
if request.param == "float":
y_pred = torch.rand(8, 3, 28, 28, device=available_device)
y = y_pred * 0.8
elif request.param == "YCbCr":
y_pred = torch.randint(16, 236, (4, 1, 12, 12), dtype=torch.uint8, device=available_device)
y = torch.randint(16, 236, (4, 1, 12, 12), dtype=torch.uint8, device=available_device)
elif request.param == "uint8":
y_pred = torch.randint(0, 256, (4, 3, 16, 16), dtype=torch.uint8, device=available_device)
y = (y_pred * 0.8).to(torch.uint8)
elif request.param == "NHW shape":
y_pred = torch.rand(8, 28, 28, device=available_device)
y = y_pred * 0.8
else:
raise ValueError(f"Wrong fixture parameter, given {request.param}")
return (y_pred, y)
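# Cases cover float inputs in [0, 1], single-channel uint8 YCbCr-style data, 3-channel
# uint8 data, and a channel-less NHW shape.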
def test_psnr(test_data, available_device):
y_pred, y = test_data
data_range = (y.max() - y.min()).cpu().item()
psnr = PSNR(data_range=data_range, device=available_device)
psnr.update(test_data)
psnr_compute = psnr.compute()
np_y_pred = y_pred.cpu().numpy()
np_y = y.cpu().numpy()
np_psnr = 0
for np_y_pred_, np_y_ in zip(np_y_pred, np_y):
np_psnr += ski_psnr(np_y_, np_y_pred_, data_range=data_range)
assert psnr_compute > 0.0
assert isinstance(psnr_compute, float)
assert np.allclose(psnr_compute, np_psnr / np_y.shape[0])
def _test(
y_pred,
y,
data_range,
metric_device,
n_iters,
batch_size,
atol,
output_transform=lambda x: x,
compute_y_channel=False,
):
def update(engine, i):
return (
y_pred[i * batch_size : (i + 1) * batch_size],
y[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
psnr = PSNR(data_range=data_range, output_transform=output_transform, device=metric_device)
psnr.attach(engine, "psnr")
data = list(range(n_iters))
engine.run(data=data, max_epochs=1)
y = idist.all_gather(y)
y_pred = idist.all_gather(y_pred)
assert "psnr" in engine.state.metrics
result = engine.state.metrics["psnr"]
assert result > 0.0
if compute_y_channel:
np_y_pred = y_pred[:, 0, ...].cpu().numpy()
np_y = y[:, 0, ...].cpu().numpy()
else:
np_y_pred = y_pred.cpu().numpy()
np_y = y.cpu().numpy()
np_psnr = 0
for np_y_pred_, np_y_ in zip(np_y_pred, np_y):
np_psnr += ski_psnr(np_y_, np_y_pred_, data_range=data_range)
assert np.allclose(result, np_psnr / np_y.shape[0], atol=atol)
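# The tests below run _test on CPU and, when not on XLA, on the current distributed
# device, repeating with several random seeds per configuration.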
def test_distrib_input_float(distributed):
device = idist.device()
def get_test_cases():
y_pred = torch.rand(n_iters * batch_size, 2, 2, device=device)
y = y_pred * 0.65
return y_pred, y
n_iters = 100
batch_size = 10
rank = idist.get_rank()
for i in range(3):
        # check multiple random inputs as exact random matches are rare
torch.manual_seed(42 + rank + i)
y_pred, y = get_test_cases()
_test(y_pred, y, 1, "cpu", n_iters, batch_size, atol=1e-8)
if device.type != "xla":
_test(y_pred, y, 1, idist.device(), n_iters, batch_size, atol=1e-8)
def test_distrib_multilabel_input_YCbCr(distributed):
device = idist.device()
def get_test_cases():
y_pred = torch.randint(16, 236, (n_iters * batch_size, 1, 12, 12), dtype=torch.uint8, device=device)
cbcr_pred = torch.randint(16, 241, (n_iters * batch_size, 2, 12, 12), dtype=torch.uint8, device=device)
y = torch.randint(16, 236, (n_iters * batch_size, 1, 12, 12), dtype=torch.uint8, device=device)
cbcr = torch.randint(16, 241, (n_iters * batch_size, 2, 12, 12), dtype=torch.uint8, device=device)
y_pred, y = torch.cat((y_pred, cbcr_pred), dim=1), torch.cat((y, cbcr), dim=1)
return y_pred, y
n_iters = 100
batch_size = 10
def out_fn(x):
return x[0][:, 0, ...], x[1][:, 0, ...]
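    # out_fn keeps only channel 0 (the Y plane), so PSNR is computed on luminance alone,
    # matching the compute_y_channel=True reference in _test.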
rank = idist.get_rank()
for i in range(3):
        # check multiple random inputs as exact random matches are rare
torch.manual_seed(42 + rank + i)
y_pred, y = get_test_cases()
_test(y_pred, y, 220, "cpu", n_iters, batch_size, atol=1e-8, output_transform=out_fn, compute_y_channel=True)
if device.type != "xla":
dev = idist.device()
_test(y_pred, y, 220, dev, n_iters, batch_size, atol=1e-8, output_transform=out_fn, compute_y_channel=True)
def test_distrib_multilabel_input_uint8(distributed):
device = idist.device()
def get_test_cases():
y_pred = torch.randint(0, 256, (n_iters * batch_size, 3, 16, 16), device=device, dtype=torch.uint8)
y = (y_pred * 0.65).to(torch.uint8)
return y_pred, y
n_iters = 100
batch_size = 10
rank = idist.get_rank()
for i in range(3):
        # check multiple random inputs as exact random matches are rare
torch.manual_seed(42 + rank + i)
y_pred, y = get_test_cases()
_test(y_pred, y, 100, "cpu", n_iters, batch_size, atol=1e-8)
if device.type != "xla":
_test(y_pred, y, 100, idist.device(), n_iters, batch_size, atol=1e-8)
def test_distrib_multilabel_input_NHW(distributed):
device = idist.device()
def get_test_cases():
y_pred = torch.rand(n_iters * batch_size, 28, 28, device=device)
y = y_pred * 0.8
return y_pred, y
n_iters = 100
batch_size = 10
rank = idist.get_rank()
for i in range(3):
        # check multiple random inputs as exact random matches are rare
torch.manual_seed(42 + rank + i)
y_pred, y = get_test_cases()
_test(y_pred, y, 10, "cpu", n_iters, batch_size, atol=1e-8)
if device.type != "xla":
_test(y_pred, y, 10, idist.device(), n_iters, batch_size, atol=1e-8)
def test_distrib_accumulator_device(distributed):
device = idist.device()
metric_devices = [torch.device("cpu")]
if torch.device(device).type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
psnr = PSNR(data_range=1.0, device=metric_device)
dev = psnr._device
assert dev == metric_device, f"{dev} vs {metric_device}"
y_pred = torch.rand(2, 3, 28, 28, dtype=torch.float, device=device)
y = y_pred * 0.65
psnr.update((y_pred, y))
dev = psnr._sum_of_batchwise_psnr.device
assert dev == metric_device, f"{dev} vs {metric_device}"
|
import os
import pytest
import torch
from sklearn.metrics import accuracy_score
import ignite.distributed as idist
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
from ignite.metrics import Accuracy
torch.manual_seed(12)
def test_no_update():
acc = Accuracy()
with pytest.raises(NotComputableError, match=r"Accuracy must have at least one example before it can be computed"):
acc.compute()
def test__check_shape():
acc = Accuracy()
with pytest.raises(ValueError, match=r"y and y_pred must have compatible shapes"):
acc._check_shape((torch.randint(0, 2, size=(10, 1, 5, 12)).long(), torch.randint(0, 2, size=(10, 5, 6)).long()))
with pytest.raises(ValueError, match=r"y and y_pred must have compatible shapes"):
acc._check_shape((torch.randint(0, 2, size=(10, 1, 6)).long(), torch.randint(0, 2, size=(10, 5, 6)).long()))
with pytest.raises(ValueError, match=r"y and y_pred must have compatible shapes"):
acc._check_shape((torch.randint(0, 2, size=(10, 1)).long(), torch.randint(0, 2, size=(10, 5)).long()))
def test__check_type():
acc = Accuracy()
with pytest.raises(RuntimeError, match=r"Invalid shapes of y"):
acc._check_type((torch.rand([1, 1, 1]), torch.rand([1])))
def test_binary_wrong_inputs():
acc = Accuracy()
with pytest.raises(ValueError, match=r"For binary cases, y must be comprised of 0's and 1's"):
# y has not only 0 or 1 values
acc.update((torch.randint(0, 2, size=(10,)).long(), torch.arange(0, 10).long()))
with pytest.raises(ValueError, match=r"For binary cases, y_pred must be comprised of 0's and 1's"):
# y_pred values are not thresholded to 0, 1 values
acc.update((torch.rand(10), torch.randint(0, 2, size=(10,)).long()))
with pytest.raises(ValueError, match=r"y must have shape of "):
# incompatible shapes
acc.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10, 5)).long()))
with pytest.raises(ValueError, match=r"y must have shape of "):
# incompatible shapes
acc.update((torch.randint(0, 2, size=(10, 5, 6)).long(), torch.randint(0, 2, size=(10,)).long()))
with pytest.raises(ValueError, match=r"y must have shape of "):
# incompatible shapes
acc.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10, 5, 6)).long()))
@pytest.fixture(params=range(12))
def test_data_binary(request):
return [
# Binary accuracy on input of shape (N, 1) or (N, )
(torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10,)).long(), 1),
(torch.randint(0, 2, size=(10, 1)).long(), torch.randint(0, 2, size=(10, 1)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50,)).long(), torch.randint(0, 2, size=(50,)).long(), 16),
(torch.randint(0, 2, size=(50, 1)).long(), torch.randint(0, 2, size=(50, 1)).long(), 16),
# Binary accuracy on input of shape (N, L)
(torch.randint(0, 2, size=(10, 5)).long(), torch.randint(0, 2, size=(10, 5)).long(), 1),
(torch.randint(0, 2, size=(10, 8)).long(), torch.randint(0, 2, size=(10, 8)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50, 5)).long(), torch.randint(0, 2, size=(50, 5)).long(), 16),
(torch.randint(0, 2, size=(50, 8)).long(), torch.randint(0, 2, size=(50, 8)).long(), 16),
# Binary accuracy on input of shape (N, H, W, ...)
(torch.randint(0, 2, size=(4, 1, 12, 10)).long(), torch.randint(0, 2, size=(4, 1, 12, 10)).long(), 1),
(torch.randint(0, 2, size=(15, 1, 20, 10)).long(), torch.randint(0, 2, size=(15, 1, 20, 10)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50, 1, 12, 10)).long(), torch.randint(0, 2, size=(50, 1, 12, 10)).long(), 16),
(torch.randint(0, 2, size=(50, 1, 20, 10)).long(), torch.randint(0, 2, size=(50, 1, 20, 10)).long(), 16),
][request.param]
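# Each case is a (y_pred, y, batch_size) triple covering (N,), (N, 1), (N, L) and
# (N, 1, H, W) binary inputs; batch_size == 16 exercises batched updates.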
@pytest.mark.parametrize("n_times", range(5))
def test_binary_input(n_times, test_data_binary):
acc = Accuracy()
y_pred, y, batch_size = test_data_binary
acc.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
acc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
acc.update((y_pred, y))
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
assert acc._type == "binary"
assert isinstance(acc.compute(), float)
assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute())
def test_multiclass_wrong_inputs():
acc = Accuracy()
with pytest.raises(ValueError):
# incompatible shapes
acc.update((torch.rand(10, 5, 4), torch.randint(0, 2, size=(10,)).long()))
with pytest.raises(ValueError):
# incompatible shapes
acc.update((torch.rand(10, 5, 6), torch.randint(0, 5, size=(10, 5)).long()))
with pytest.raises(ValueError):
# incompatible shapes
acc.update((torch.rand(10), torch.randint(0, 5, size=(10, 5, 6)).long()))
@pytest.fixture(params=range(11))
def test_data_multiclass(request):
return [
# Multiclass input data of shape (N, ) and (N, C)
(torch.rand(10, 4), torch.randint(0, 4, size=(10,)).long(), 1),
(torch.rand(10, 10, 1), torch.randint(0, 18, size=(10, 1)).long(), 1),
(torch.rand(10, 18), torch.randint(0, 18, size=(10,)).long(), 1),
(torch.rand(4, 10), torch.randint(0, 10, size=(4,)).long(), 1),
# 2-classes
(torch.rand(4, 2), torch.randint(0, 2, size=(4,)).long(), 1),
(torch.rand(100, 5), torch.randint(0, 5, size=(100,)).long(), 16),
# Multiclass input data of shape (N, L) and (N, C, L)
(torch.rand(10, 4, 5), torch.randint(0, 4, size=(10, 5)).long(), 1),
(torch.rand(4, 10, 5), torch.randint(0, 10, size=(4, 5)).long(), 1),
(torch.rand(100, 9, 7), torch.randint(0, 9, size=(100, 7)).long(), 16),
# Multiclass input data of shape (N, H, W, ...) and (N, C, H, W, ...)
(torch.rand(4, 5, 12, 10), torch.randint(0, 5, size=(4, 12, 10)).long(), 1),
(torch.rand(100, 3, 8, 8), torch.randint(0, 3, size=(100, 8, 8)).long(), 16),
][request.param]
@pytest.mark.parametrize("n_times", range(5))
def test_multiclass_input(n_times, test_data_multiclass):
acc = Accuracy()
y_pred, y, batch_size = test_data_multiclass
acc.reset()
if batch_size > 1:
# Batched Updates
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
acc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
acc.update((y_pred, y))
np_y_pred = y_pred.numpy().argmax(axis=1).ravel()
np_y = y.numpy().ravel()
assert acc._type == "multiclass"
assert isinstance(acc.compute(), float)
assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute())
def to_numpy_multilabel(y):
# reshapes input array to (N x ..., C)
y = y.transpose(1, 0).cpu().numpy()
num_classes = y.shape[0]
y = y.reshape((num_classes, -1)).transpose(1, 0)
return y
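# e.g. a (N=4, C=5, H=12, W=10) multilabel tensor becomes a (4 * 12 * 10, 5) = (480, 5)
# array: one row per (sample, spatial position) and one column per class, which is
# the layout scikit-learn's accuracy_score expects for multilabel targets.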
def test_multilabel_wrong_inputs():
acc = Accuracy(is_multilabel=True)
with pytest.raises(ValueError):
# incompatible shapes
acc.update((torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10,)).long()))
with pytest.raises(ValueError):
# incompatible y_pred
acc.update((torch.rand(10, 5), torch.randint(0, 2, size=(10, 5)).long()))
with pytest.raises(ValueError):
# incompatible y
acc.update((torch.randint(0, 5, size=(10, 5, 6)), torch.rand(10)))
with pytest.raises(ValueError):
# incompatible binary shapes
acc.update((torch.randint(0, 2, size=(10, 1)), torch.randint(0, 2, size=(10, 1)).long()))
@pytest.fixture(params=range(12))
def test_data_multilabel(request):
return [
# Multilabel input data of shape (N, C) and (N, C)
(torch.randint(0, 2, size=(10, 4)).long(), torch.randint(0, 2, size=(10, 4)).long(), 1),
(torch.randint(0, 2, size=(10, 7)).long(), torch.randint(0, 2, size=(10, 7)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50, 4)).long(), torch.randint(0, 2, size=(50, 4)).long(), 16),
(torch.randint(0, 2, size=(50, 7)).long(), torch.randint(0, 2, size=(50, 7)).long(), 16),
# Multilabel input data of shape (N, H, W)
(torch.randint(0, 2, size=(10, 5, 10)).long(), torch.randint(0, 2, size=(10, 5, 10)).long(), 1),
(torch.randint(0, 2, size=(10, 4, 10)).long(), torch.randint(0, 2, size=(10, 4, 10)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50, 5, 10)).long(), torch.randint(0, 2, size=(50, 5, 10)).long(), 16),
(torch.randint(0, 2, size=(50, 4, 10)).long(), torch.randint(0, 2, size=(50, 4, 10)).long(), 16),
# Multilabel input data of shape (N, C, H, W, ...) and (N, C, H, W, ...)
(torch.randint(0, 2, size=(4, 5, 12, 10)).long(), torch.randint(0, 2, size=(4, 5, 12, 10)).long(), 1),
(torch.randint(0, 2, size=(4, 10, 12, 8)).long(), torch.randint(0, 2, size=(4, 10, 12, 8)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50, 5, 12, 10)).long(), torch.randint(0, 2, size=(50, 5, 12, 10)).long(), 16),
(torch.randint(0, 2, size=(50, 10, 12, 8)).long(), torch.randint(0, 2, size=(50, 10, 12, 8)).long(), 16),
][request.param]
@pytest.mark.parametrize("n_times", range(5))
def test_multilabel_input(n_times, test_data_multilabel):
acc = Accuracy(is_multilabel=True)
y_pred, y, batch_size = test_data_multilabel
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
acc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
acc.update((y_pred, y))
np_y_pred = to_numpy_multilabel(y_pred)
np_y = to_numpy_multilabel(y)
assert acc._type == "multilabel"
assert isinstance(acc.compute(), float)
assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute())
def test_incorrect_type():
acc = Accuracy()
# Start as binary data
y_pred = torch.randint(0, 2, size=(4,))
y = torch.ones(4).long()
acc.update((y_pred, y))
# And add a multiclass data
y_pred = torch.rand(4, 4)
y = torch.ones(4).long()
with pytest.raises(RuntimeError):
acc.update((y_pred, y))
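# The _test_distrib_* helpers below are shared by all distributed entry points
# (NCCL, Gloo, Horovod, XLA). In most of them each rank updates the metric on its
# own random data, the tensors are then all-gathered, and the metric result is
# compared against scikit-learn computed on the gathered inputs; the
# accumulator-device helper only checks that internal state lives on the
# requested device.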
def _test_distrib_multilabel_input_NHW(device):
# Multilabel input data of shape (N, C, H, W, ...) and (N, C, H, W, ...)
rank = idist.get_rank()
def _test(metric_device):
metric_device = torch.device(metric_device)
acc = Accuracy(is_multilabel=True, device=metric_device)
torch.manual_seed(10 + rank)
y_pred = torch.randint(0, 2, size=(4, 5, 8, 10), device=device).long()
y = torch.randint(0, 2, size=(4, 5, 8, 10), device=device).long()
acc.update((y_pred, y))
assert (
acc._num_correct.device == metric_device
), f"{type(acc._num_correct.device)}:{acc._num_correct.device} vs {type(metric_device)}:{metric_device}"
n = acc._num_examples
assert n == y.numel() / y.size(dim=1)
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = to_numpy_multilabel(y_pred.cpu()) # (N, C, H, W, ...) -> (N * H * W ..., C)
np_y = to_numpy_multilabel(y.cpu()) # (N, C, H, W, ...) -> (N * H * W ..., C)
assert acc._type == "multilabel"
res = acc.compute()
assert n == acc._num_examples
assert isinstance(res, float)
assert accuracy_score(np_y, np_y_pred) == pytest.approx(res)
acc.reset()
torch.manual_seed(10 + rank)
y_pred = torch.randint(0, 2, size=(4, 7, 10, 8), device=device).long()
y = torch.randint(0, 2, size=(4, 7, 10, 8), device=device).long()
acc.update((y_pred, y))
assert (
acc._num_correct.device == metric_device
), f"{type(acc._num_correct.device)}:{acc._num_correct.device} vs {type(metric_device)}:{metric_device}"
n = acc._num_examples
assert n == y.numel() / y.size(dim=1)
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = to_numpy_multilabel(y_pred.cpu()) # (N, C, H, W, ...) -> (N * H * W ..., C)
np_y = to_numpy_multilabel(y.cpu()) # (N, C, H, W, ...) -> (N * H * W ..., C)
assert acc._type == "multilabel"
res = acc.compute()
assert n == acc._num_examples
assert isinstance(res, float)
assert accuracy_score(np_y, np_y_pred) == pytest.approx(res)
# check that result is not changed
res = acc.compute()
assert n == acc._num_examples
assert isinstance(res, float)
assert accuracy_score(np_y, np_y_pred) == pytest.approx(res)
# Batched Updates
acc.reset()
torch.manual_seed(10 + rank)
y_pred = torch.randint(0, 2, size=(80, 5, 8, 10), device=device).long()
y = torch.randint(0, 2, size=(80, 5, 8, 10), device=device).long()
batch_size = 16
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
acc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
assert (
acc._num_correct.device == metric_device
), f"{type(acc._num_correct.device)}:{acc._num_correct.device} vs {type(metric_device)}:{metric_device}"
n = acc._num_examples
assert n == y.numel() / y.size(dim=1)
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = to_numpy_multilabel(y_pred.cpu()) # (N, C, L, ...) -> (N * L * ..., C)
np_y = to_numpy_multilabel(y.cpu()) # (N, C, L, ...) -> (N * L ..., C)
assert acc._type == "multilabel"
res = acc.compute()
assert n == acc._num_examples
assert isinstance(res, float)
assert accuracy_score(np_y, np_y_pred) == pytest.approx(res)
# check multiple random inputs as coincidental exact matches are rare
for _ in range(3):
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration_multiclass(device):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
n_classes = 10
torch.manual_seed(12 + rank)
y_true = torch.randint(0, n_classes, size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(n_iters * batch_size, n_classes).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size, :],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
acc = Accuracy(device=metric_device)
acc.attach(engine, "acc")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_true = idist.all_gather(y_true)
y_preds = idist.all_gather(y_preds)
assert (
acc._num_correct.device == metric_device
), f"{type(acc._num_correct.device)}:{acc._num_correct.device} vs {type(metric_device)}:{metric_device}"
assert "acc" in engine.state.metrics
res = engine.state.metrics["acc"]
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
true_res = accuracy_score(y_true.cpu().numpy(), torch.argmax(y_preds, dim=1).cpu().numpy())
assert pytest.approx(res) == true_res
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for _ in range(2):
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
def _test_distrib_integration_multilabel(device):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
n_classes = 10
torch.manual_seed(12 + rank)
y_true = torch.randint(0, 2, size=(n_iters * batch_size, n_classes, 8, 10)).to(device)
y_preds = torch.randint(0, 2, size=(n_iters * batch_size, n_classes, 8, 10)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size, ...],
y_true[i * batch_size : (i + 1) * batch_size, ...],
)
engine = Engine(update)
acc = Accuracy(is_multilabel=True, device=metric_device)
acc.attach(engine, "acc")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_true = idist.all_gather(y_true)
y_preds = idist.all_gather(y_preds)
assert (
acc._num_correct.device == metric_device
), f"{type(acc._num_correct.device)}:{acc._num_correct.device} vs {type(metric_device)}:{metric_device}"
assert "acc" in engine.state.metrics
res = engine.state.metrics["acc"]
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
true_res = accuracy_score(to_numpy_multilabel(y_true), to_numpy_multilabel(y_preds))
assert pytest.approx(res) == true_res
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for _ in range(2):
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
def _test_distrib_accumulator_device(device):
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
acc = Accuracy(device=metric_device)
assert acc._device == metric_device
assert (
acc._num_correct.device == metric_device
), f"{type(acc._num_correct.device)}:{acc._num_correct.device} vs {type(metric_device)}:{metric_device}"
y_pred = torch.randint(0, 2, size=(10,), device=device, dtype=torch.long)
y = torch.randint(0, 2, size=(10,), device=device, dtype=torch.long)
acc.update((y_pred, y))
assert (
acc._num_correct.device == metric_device
), f"{type(acc._num_correct.device)}:{acc._num_correct.device} vs {type(metric_device)}:{metric_device}"
def _test_distrib_integration_list_of_tensors_or_numbers(device):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
n_classes = 10
torch.manual_seed(12 + rank)
y_true = torch.randint(0, n_classes, size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(n_iters * batch_size, n_classes).to(device)
def update(_, i):
return (
[v for v in y_preds[i * batch_size : (i + 1) * batch_size, ...]],
[v.item() for v in y_true[i * batch_size : (i + 1) * batch_size]],
)
engine = Engine(update)
acc = Accuracy(device=metric_device)
acc.attach(engine, "acc")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_true = idist.all_gather(y_true)
y_preds = idist.all_gather(y_preds)
assert (
acc._num_correct.device == metric_device
), f"{type(acc._num_correct.device)}:{acc._num_correct.device} vs {type(metric_device)}:{metric_device}"
assert "acc" in engine.state.metrics
res = engine.state.metrics["acc"]
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
true_res = accuracy_score(y_true.cpu().numpy(), torch.argmax(y_preds, dim=1).cpu().numpy())
assert pytest.approx(res) == true_res
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for _ in range(2):
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_multilabel_input_NHW(device)
_test_distrib_integration_multiclass(device)
_test_distrib_integration_multilabel(device)
_test_distrib_accumulator_device(device)
_test_distrib_integration_list_of_tensors_or_numbers(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_multilabel_input_NHW(device)
_test_distrib_integration_multiclass(device)
_test_distrib_integration_multilabel(device)
_test_distrib_accumulator_device(device)
_test_distrib_integration_list_of_tensors_or_numbers(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_multilabel_input_NHW, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration_multiclass, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration_multilabel, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_accumulator_device, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration_list_of_tensors_or_numbers, (device,), np=nproc, do_init=True)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_multilabel_input_NHW(device)
_test_distrib_integration_multiclass(device)
_test_distrib_integration_multilabel(device)
_test_distrib_accumulator_device(device)
_test_distrib_integration_list_of_tensors_or_numbers(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_multilabel_input_NHW(device)
_test_distrib_integration_multiclass(device)
_test_distrib_integration_multilabel(device)
_test_distrib_accumulator_device(device)
_test_distrib_integration_list_of_tensors_or_numbers(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_multilabel_input_NHW(device)
_test_distrib_integration_multiclass(device)
_test_distrib_integration_multilabel(device)
_test_distrib_accumulator_device(device)
_test_distrib_integration_list_of_tensors_or_numbers(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_multilabel_input_NHW(device)
_test_distrib_integration_multiclass(device)
_test_distrib_integration_multilabel(device)
_test_distrib_accumulator_device(device)
_test_distrib_integration_list_of_tensors_or_numbers(device)
|
import os
import sys
import time
import pytest
import torch
import ignite.distributed as idist
from ignite.engine import Engine, Events
from ignite.metrics import Frequency
if sys.platform.startswith("darwin"):
pytest.skip("Skip if on MacOS", allow_module_level=True)
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_nondistributed_average():
artificial_time = 1 # seconds
num_tokens = 100
average_upper_bound = num_tokens / artificial_time
average_lower_bound = average_upper_bound * 0.9
freq_metric = Frequency()
freq_metric.reset()
time.sleep(artificial_time)
freq_metric.update(num_tokens)
average = freq_metric.compute()
assert average_lower_bound < average < average_upper_bound
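# Frequency reports a rate ("wps" here) from the token counts returned by each
# iteration. With an artificial sleep per iteration, the expected throughput is
# roughly batch_size * workers / artificial_time, and the helper below asserts
# that the measured value stays within the given lower/upper bound factors.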
def _test_frequency_with_engine(workers=None, lower_bound_factor=0.8, upper_bound_factor=1.1, every=1):
if workers is None:
workers = idist.get_world_size()
artificial_time = 1.0 / workers # seconds
total_tokens = 400 // workers
batch_size = 128 // workers
estimated_wps = batch_size * workers / artificial_time
def update_fn(engine, batch):
time.sleep(artificial_time)
return {"ntokens": len(batch)}
engine = Engine(update_fn)
wps_metric = Frequency(output_transform=lambda x: x["ntokens"])
event = Events.ITERATION_COMPLETED(every=every)
wps_metric.attach(engine, "wps", event_name=event)
@engine.on(event)
def assert_wps(e):
wps = e.state.metrics["wps"]
# Skip iterations 2, 3 and 4 when the backend is Horovod on CUDA:
# wps is abnormally low for these iterations.
# All other iterations are checked against the expected bounds.
if idist.model_name() == "horovod-dist" and e.state.iteration in (2, 3, 4):
return
low_wps = estimated_wps * lower_bound_factor
high_wps = estimated_wps * upper_bound_factor
assert low_wps < wps <= high_wps, f"{e.state.iteration}: {low_wps} < {wps} <= {high_wps}"
data = [[i] * batch_size for i in range(0, total_tokens, batch_size)]
engine.run(data, max_epochs=2)
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_frequency_with_engine():
_test_frequency_with_engine(workers=1)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_frequency_with_engine_distributed(distributed_context_single_node_gloo):
_test_frequency_with_engine(workers=idist.get_world_size())
def test_frequency_with_engine_with_every():
_test_frequency_with_engine(workers=1, every=1)
_test_frequency_with_engine(workers=1, every=10)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_frequency_with_engine_distributed_with_every(distributed_context_single_node_gloo):
_test_frequency_with_engine(workers=idist.get_world_size(), every=1)
_test_frequency_with_engine(workers=idist.get_world_size(), every=10)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_frequency_with_engine, (None, 0.8, 1), np=nproc, do_init=True)
gloo_hvd_executor(_test_frequency_with_engine, (None, 0.8, 10), np=nproc, do_init=True)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
_test_frequency_with_engine(workers=idist.get_world_size(), every=10)
def _test_distrib_xla_nprocs(index):
_test_frequency_with_engine(workers=idist.get_world_size(), every=10)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
from torch.nn import Linear
from torch.optim import SGD
import ignite.distributed as idist
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
from ignite.metrics.accumulation import Average, GeometricAverage, VariableAccumulation
torch.manual_seed(15)
def test_variable_accumulation_wrong_inputs():
with pytest.raises(TypeError, match=r"Argument op should be a callable"):
VariableAccumulation(1)
with pytest.raises(TypeError, match=r"Output should be a number or torch.Tensor,"):
mean_acc = VariableAccumulation(lambda a, x: a + x)
mean_acc.update((1, 2))
with pytest.raises(TypeError, match=r"Output should be a number or torch.Tensor,"):
mean_acc = VariableAccumulation(lambda a, x: a + x)
mean_acc.update("a")
def test_variable_accumulation_mean_variable():
mean_var = VariableAccumulation(lambda a, x: a + x)
y_true = torch.rand(100)
for y in y_true:
mean_var.update(y)
a, n = mean_var.compute()
assert a.item() == pytest.approx(y_true.sum().item())
assert n == len(y_true)
mean_var = VariableAccumulation(lambda a, x: a + x)
y_true = torch.rand(100, 10)
for y in y_true:
mean_var.update(y)
a, n = mean_var.compute()
assert a.numpy() == pytest.approx(y_true.sum(dim=0).numpy())
assert n == len(y_true)
mean_var = VariableAccumulation(lambda a, x: a + x.sum(dim=0))
# iterate by batch of 16 samples
y_true = torch.rand(8, 16, 10)
for y in y_true:
mean_var.update(y)
a, n = mean_var.compute()
assert a.numpy() == pytest.approx(y_true.reshape(-1, 10).sum(dim=0).numpy())
assert n == y_true.shape[0] * y_true.shape[1]
def test_average():
with pytest.raises(NotComputableError):
v = Average()
v.compute()
mean_var = Average()
y_true = torch.rand(100) + torch.randint(0, 10, size=(100,)).float()
for y in y_true:
mean_var.update(y.item())
m = mean_var.compute()
assert m.item() == pytest.approx(y_true.mean().item())
mean_var = Average()
y_true = torch.rand(100, 10) + torch.randint(0, 10, size=(100, 10)).float()
for y in y_true:
mean_var.update(y)
m = mean_var.compute()
assert m.numpy() == pytest.approx(y_true.mean(dim=0).numpy())
mean_var = Average()
y_true = torch.rand(8, 16, 10) + torch.randint(0, 10, size=(8, 16, 10)).float()
for y in y_true:
mean_var.update(y)
m = mean_var.compute()
assert m.numpy() == pytest.approx(y_true.reshape(-1, 10).mean(dim=0).numpy())
def _geom_mean(t):
np_t = t.numpy()
return np.exp(np.mean(np.log(np_t), axis=0))
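# Geometric mean reference: gm(x_1..x_n) = exp(mean(log x_i)), computed with
# numpy and used below to check GeometricAverage.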
def _mean(y_true):
return y_true.mean(dim=0).numpy()
def test_geom_average():
with pytest.raises(NotComputableError):
v = GeometricAverage()
v.compute()
mean_var = GeometricAverage()
y_true = torch.rand(100) + torch.randint(0, 10, size=(100,)).float()
for y in y_true:
mean_var.update(y.item())
m = mean_var.compute()
assert m == pytest.approx(_geom_mean(y_true))
mean_var = GeometricAverage()
y_true = torch.rand(100, 10) + torch.randint(0, 10, size=(100, 10)).float()
for y in y_true:
mean_var.update(y)
m = mean_var.compute()
np.testing.assert_almost_equal(m.numpy(), _geom_mean(y_true), decimal=5)
mean_var = GeometricAverage()
y_true = torch.rand(8, 16, 10) + torch.randint(0, 10, size=(8, 16, 10)).float()
for y in y_true:
mean_var.update(y)
m = mean_var.compute()
np.testing.assert_almost_equal(m.numpy(), _geom_mean(y_true.reshape(-1, 10)), decimal=5)
@pytest.mark.parametrize("metric_cls, true_result_fn", [(Average, _mean), (GeometricAverage, _geom_mean)])
@pytest.mark.parametrize("shape", [[100, 12], [100]])
def test_integration(metric_cls, true_result_fn, shape):
assert len(shape) > 0 and len(shape) < 3
custom_variable = 10.0 + 5.0 * torch.rand(shape)
def update_fn(engine, batch):
output = custom_variable[engine.state.iteration - 1]
output = output.item() if output.ndimension() < 1 else output
return 0, output
engine = Engine(update_fn)
custom_var_mean = metric_cls(output_transform=lambda output: output[1])
custom_var_mean.attach(engine, "agg_custom_var")
state = engine.run([0] * shape[0])
np.testing.assert_almost_equal(
np.array(state.metrics["agg_custom_var"]), true_result_fn(custom_variable), decimal=5
)
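# Computing a dataset's per-channel mean/std with Average: accumulate per-batch
# E[x] and E[x^2] over the run, then recover std via Var[x] = E[x^2] - (E[x])^2.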
def test_compute_mean_std():
n = 8
b = 12
c = 3
w = h = 64
true_data = np.arange(0, n * b * h * w * c, dtype="float64").reshape(n * b, c, h, w) - (n * b * c * w * h * 0.75)
mean = true_data.transpose((0, 2, 3, 1)).reshape(-1, c).mean(axis=0)
std = true_data.transpose((0, 2, 3, 1)).reshape(-1, c).std(axis=0)
train_loader = torch.from_numpy(true_data).reshape(n, b, c, h, w)
def compute_mean_std(engine, batch):
_b, _c = batch.shape[:2]
data = batch.reshape(_b, _c, -1).to(dtype=torch.float64)
_mean = torch.mean(data, dim=-1)
_mean2 = torch.mean(data**2, dim=-1)
return {"mean": _mean, "mean^2": _mean2}
compute_engine = Engine(compute_mean_std)
img_mean = Average(output_transform=lambda output: output["mean"])
img_mean2 = Average(output_transform=lambda output: output["mean^2"])
img_mean.attach(compute_engine, "mean")
img_mean2.attach(compute_engine, "mean2")
state = compute_engine.run(train_loader)
state.metrics["std"] = torch.sqrt(state.metrics["mean2"] - state.metrics["mean"] ** 2)
np.testing.assert_almost_equal(state.metrics["mean"].numpy(), mean, decimal=7)
np.testing.assert_almost_equal(state.metrics["std"].numpy(), std, decimal=5)
def _test_distrib_variable_accumulation(device):
def _test(metric_device):
mean_var = VariableAccumulation(lambda a, x: a + x, device=metric_device)
y_true = torch.rand(100, device=device, dtype=torch.float64)
for y in y_true:
mean_var.update(y)
y_true = idist.all_reduce(y_true)
a, n = mean_var.compute()
assert a.item() == pytest.approx(y_true.sum().item())
assert n == len(y_true) * idist.get_world_size()
# check that calling compute a second time returns the same result
a, n = mean_var.compute()
assert a.item() == pytest.approx(y_true.sum().item())
assert n == len(y_true) * idist.get_world_size()
mean_var = VariableAccumulation(lambda a, x: a + x, device=metric_device)
y_true = torch.rand(50, 10, device=device, dtype=torch.float64)
for y in y_true:
mean_var.update(y)
y_true = idist.all_reduce(y_true)
a, n = mean_var.compute()
assert n == len(y_true) * idist.get_world_size()
np.testing.assert_almost_equal(a.cpu().numpy(), y_true.sum(dim=0).cpu().numpy(), decimal=4)
a, n = mean_var.compute()
assert n == len(y_true) * idist.get_world_size()
np.testing.assert_almost_equal(a.cpu().numpy(), y_true.sum(dim=0).cpu().numpy(), decimal=4)
# check multiple random inputs as coincidental exact matches are rare
for _ in range(3):
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_average(device):
def _test(metric_device):
with pytest.raises(NotComputableError):
v = Average(device=metric_device)
v.compute()
mean_var = Average(device=metric_device)
y_true = torch.rand(100, dtype=torch.float64) + torch.randint(0, 10, size=(100,)).double()
y_true = y_true.to(device)
for y in y_true:
mean_var.update(y)
m = mean_var.compute()
y_true = idist.all_reduce(y_true)
assert m.item() == pytest.approx(y_true.mean().item() / idist.get_world_size())
mean_var = Average(device=metric_device)
y_true = torch.rand(100, 10, dtype=torch.float64) + torch.randint(0, 10, size=(100, 10)).double()
y_true = y_true.to(device)
for y in y_true:
mean_var.update(y)
m = mean_var.compute()
y_true = idist.all_reduce(y_true)
np.testing.assert_almost_equal(
m.cpu().numpy(), y_true.mean(dim=0).cpu().numpy() / idist.get_world_size(), decimal=5
)
# check multiple random inputs as coincidental exact matches are rare
for _ in range(3):
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_geom_average(device):
def _test(metric_device):
with pytest.raises(NotComputableError):
v = GeometricAverage(device=metric_device)
v.compute()
decimal = 5 if device.type != "xla" else 4
mean_var = GeometricAverage(device=metric_device)
y_true = torch.rand(100, dtype=torch.float64) + torch.randint(0, 10, size=(100,)).double()
y_true = y_true.to(device)
for y in y_true:
mean_var.update(y)
m = mean_var.compute()
log_y_true = torch.log(y_true)
log_y_true = idist.all_reduce(log_y_true)
np.testing.assert_almost_equal(
m, torch.exp(log_y_true.mean(dim=0) / idist.get_world_size()).item(), decimal=decimal
)
mean_var = GeometricAverage(device=metric_device)
y_true = torch.rand(100, 10, dtype=torch.float64) + torch.randint(0, 10, size=(100, 10)).double()
y_true = y_true.to(device)
for y in y_true:
mean_var.update(y)
m = mean_var.compute()
log_y_true = torch.log(y_true)
log_y_true = idist.all_reduce(log_y_true)
np.testing.assert_almost_equal(
m.cpu().numpy(), torch.exp(log_y_true.mean(dim=0) / idist.get_world_size()).cpu().numpy(), decimal=decimal
)
# check multiple random inputs as coincidental exact matches are rare
for _ in range(3):
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _dist_mean(y_true):
y_true = idist.all_reduce(y_true) / idist.get_world_size()
if len(y_true.shape) > 2:
y_true = y_true.reshape(-1, y_true.shape[-1])
return y_true.mean(dim=0).cpu().numpy()
def _dist_geom_mean(y_true):
log_y_true = torch.log(y_true)
log_y_true = idist.all_reduce(log_y_true)
if len(log_y_true.shape) > 2:
log_y_true = log_y_true.reshape(-1, log_y_true.shape[-1])
np_t = log_y_true.cpu().numpy()
return np.exp(np.mean(np_t, axis=0) / idist.get_world_size())
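# In the distributed reference functions above, idist.all_reduce sums across
# ranks, so the result is divided by the world size to obtain the mean over all
# processes before comparing with the metric value.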
def _test_distrib_integration(device):
def _test(metric_cls, shape, true_result_fn, metric_device, tol=1e-5):
size = 100
custom_variable = 10.0 + 5.0 * torch.rand(size, *shape, dtype=torch.float64)
custom_variable = custom_variable.to(device)
def update_fn(engine, batch):
return 0, custom_variable[engine.state.iteration - 1]
engine = Engine(update_fn)
custom_var_mean = metric_cls(output_transform=lambda output: output[1], device=metric_device)
custom_var_mean.attach(engine, "agg_custom_var")
state = engine.run([0] * size)
true_val = true_result_fn(custom_variable)
assert len(true_val) == shape[-1]
np.testing.assert_almost_equal(
state.metrics["agg_custom_var"].cpu().numpy(), true_val, decimal=int(np.log10(1.0 / tol))
)
size = 100
custom_variable = 10.0 + 5.0 * torch.rand(size, dtype=torch.float64)
custom_variable = custom_variable.to(device)
def update_fn(engine, batch):
return 0, custom_variable[engine.state.iteration - 1].item()
engine = Engine(update_fn)
custom_var_mean = metric_cls(output_transform=lambda output: output[1], device=metric_device)
custom_var_mean.attach(engine, "agg_custom_var")
state = engine.run([0] * size)
assert state.metrics["agg_custom_var"] == pytest.approx(true_result_fn(custom_variable), abs=tol)
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
_test(Average, (12,), _dist_mean, metric_device)
_test(Average, (4, 12), _dist_mean, metric_device)
_test(GeometricAverage, (12,), _dist_geom_mean, metric_device, tol=1e-4)
def _test_distrib_accumulator_device(device):
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
m = VariableAccumulation(lambda a, x: x, device=metric_device)
assert m._device == metric_device
assert (
m.accumulator.device == metric_device
), f"{type(m.accumulator.device)}:{m.accumulator.device} vs {type(metric_device)}:{metric_device}"
m.update(torch.tensor(1, device=device))
assert (
m.accumulator.device == metric_device
), f"{type(m.accumulator.device)}:{m.accumulator.device} vs {type(metric_device)}:{metric_device}"
def _test_apex_average(device, amp_mode, opt_level):
assert amp_mode == "apex"
assert device == "cuda"
model = Linear(1, 1)
if device:
model.to(device)
model.weight.data.zero_()
model.bias.data.zero_()
optimizer = SGD(model.parameters(), 0.1)
from apex import amp
model, optimizer = amp.initialize(model, optimizer, opt_level=opt_level)
mean_var = VariableAccumulation(lambda a, x: a + x)
y_true = torch.rand(100).float().to(device)
for y in y_true:
mean_var.update(y)
a, n = mean_var.compute()
assert a.item() == pytest.approx(y_true.sum().item())
assert n == len(y_true)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_variable_accumulation(device)
_test_distrib_average(device)
_test_distrib_geom_average(device)
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_variable_accumulation(device)
_test_distrib_average(device)
_test_distrib_geom_average(device)
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = idist.device()
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_variable_accumulation, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_average, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_geom_average, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_accumulator_device, (device,), np=nproc, do_init=True)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_variable_accumulation(device)
_test_distrib_average(device)
_test_distrib_geom_average(device)
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_variable_accumulation(device)
_test_distrib_average(device)
_test_distrib_geom_average(device)
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
# Enable this test when apex issue is fixed
# @pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
# @pytest.mark.skipif(not find_spec("apex"), reason="Skip if no APEX")
@pytest.mark.skip(reason="Temporarily disabled, as it fails because of an issue from apex side")
def test_apex_average_on_cuda():
device = "cuda"
_test_apex_average(device, amp_mode="apex", opt_level="O0")
_test_apex_average(device, amp_mode="apex", opt_level="O1")
_test_apex_average(device, amp_mode="apex", opt_level="O2")
_test_apex_average(device, amp_mode="apex", opt_level="O3")
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_variable_accumulation(device)
_test_distrib_average(device)
_test_distrib_geom_average(device)
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_variable_accumulation(device)
_test_distrib_average(device)
_test_distrib_geom_average(device)
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
|
import os
from unittest.mock import MagicMock
import pytest
import torch
from numpy.testing import assert_almost_equal
from torch import nn
from torch.nn.functional import nll_loss
import ignite.distributed as idist
from ignite.engine import State
from ignite.exceptions import NotComputableError
from ignite.metrics import Loss, Precision
class DummyLoss1(Loss):
def __init__(self, loss_fn, true_output, output_transform=lambda x: x):
super(DummyLoss1, self).__init__(loss_fn, output_transform=output_transform)
print(true_output)
self.true_output = true_output
def reset(self):
pass
def compute(self):
pass
def update(self, output):
assert output == self.true_output
def test_output_as_mapping_without_criterion_kwargs():
y_pred = torch.tensor([[2.0], [-2.0]])
y = torch.zeros(2)
criterion_kwargs = {}
loss_metric = DummyLoss1(nll_loss, true_output=(y_pred, y, criterion_kwargs))
state = State(output=({"y_pred": y_pred, "y": y, "criterion_kwargs": {}}))
engine = MagicMock(state=state)
loss_metric.iteration_completed(engine)
def test_output_as_mapping_with_criterion_kwargs():
y_pred = torch.tensor([[2.0], [-2.0]])
y = torch.zeros(2)
criterion_kwargs = {"reduction": "sum"}
loss_metric = DummyLoss1(nll_loss, true_output=(y_pred, y, criterion_kwargs))
state = State(output=({"y_pred": y_pred, "y": y, "criterion_kwargs": {"reduction": "sum"}}))
engine = MagicMock(state=state)
loss_metric.iteration_completed(engine)
def y_test_1(requires_grad=False, device=None):
return (
torch.tensor([[0.1, 0.4, 0.5], [0.1, 0.7, 0.2]], device=device, requires_grad=requires_grad).log(),
torch.tensor([2, 2], device=device).long(),
1.1512925625,
)
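# For y_test_1: the expected loss 1.1512925625 equals -(log(0.5) + log(0.2)) / 2
# up to float32 precision, i.e. the mean negative log-likelihood of the target
# class under the log-probabilities above.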
def y_test_2():
return (
torch.tensor([[0.1, 0.3, 0.6], [0.6, 0.2, 0.2], [0.2, 0.7, 0.1]]).log(),
torch.tensor([2, 0, 2]).long(),
1.1253643036,
)
def y_test_3():
return torch.tensor([[0.1, 0.3, 0.6], [0.6, 0.2, 0.2]]).log(), torch.tensor([2, 0]).long()
def test_zero_div():
loss = Loss(nll_loss)
with pytest.raises(NotComputableError, match=r"Loss must have at least one example before it can be computed"):
loss.compute()
@pytest.mark.parametrize("criterion", [nll_loss, nn.NLLLoss()])
def test_compute(criterion):
loss = Loss(criterion)
y_pred, y, expected_loss = y_test_1()
loss.update((y_pred, y))
assert_almost_equal(loss.compute(), expected_loss)
y_pred, y, expected_loss = y_test_2()
loss.update((y_pred, y))
assert_almost_equal(loss.compute(), expected_loss) # average
def test_non_averaging_loss():
loss = Loss(nn.NLLLoss(reduction="none"))
y_pred, y, _ = y_test_1()
with pytest.raises(ValueError):
loss.update((y_pred, y))
def test_gradient_based_loss():
# Tests https://github.com/pytorch/ignite/issues/1674
x = torch.tensor([[0.1, 0.4, 0.5], [0.1, 0.7, 0.2]], requires_grad=True)
y_pred = x.mm(torch.randn(size=(3, 1)))
def loss_fn(y_pred, x):
gradients = torch.autograd.grad(
outputs=y_pred, inputs=x, grad_outputs=torch.ones_like(y_pred), create_graph=True
)[0]
gradients = gradients.flatten(start_dim=1)
return gradients.norm(2, dim=1).mean()
loss = Loss(loss_fn)
loss.update((y_pred, x))
def test_kwargs_loss():
loss = Loss(nll_loss)
y_pred, y, _ = y_test_1()
kwargs = {"weight": torch.tensor([0.1, 0.1, 0.1])}
loss.update((y_pred, y, kwargs))
expected_value = nll_loss(y_pred, y, **kwargs)
assert_almost_equal(loss.compute(), expected_value)
def test_reset():
loss = Loss(nll_loss)
y_pred, y = y_test_3()
loss.update((y_pred, y))
loss.compute()
loss.reset()
with pytest.raises(NotComputableError):
loss.compute()
def _test_distrib_compute_on_criterion(device, y_test_1, y_test_2, tol=None):
def _test(metric_device, y_test_1, y_test_2):
criterion = nn.NLLLoss().to(device)
loss = Loss(criterion, device=metric_device)
y_pred, y, _ = y_test_1
loss.update((y_pred, y))
n = loss._num_examples
assert n == len(y)
res = loss.compute()
assert n == loss._num_examples
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
true_loss_value = criterion(y_pred, y)
assert_almost_equal(res, true_loss_value.item())
loss.reset()
y_pred, y, _ = y_test_2
loss.update((y_pred, y))
n = loss._num_examples
res = loss.compute()
assert n == loss._num_examples
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
true_loss_value = criterion(y_pred, y)
if tol is None:
assert_almost_equal(res, true_loss_value.item())
else:
assert pytest.approx(res, rel=tol) == true_loss_value.item()
_test("cpu", y_test_1, y_test_2)
if device.type != "xla":
_test(idist.device(), y_test_1, y_test_2)
def _test_distrib_accumulator_device(device, y_test_1):
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
loss = Loss(nll_loss, device=metric_device)
assert loss._device == metric_device
assert (
loss._sum.device == metric_device
), f"{type(loss._sum.device)}:{loss._sum.device} vs {type(metric_device)}:{metric_device}"
y_pred, y, _ = y_test_1
loss.update((y_pred, y))
assert (
loss._sum.device == metric_device
), f"{type(loss._sum.device)}:{loss._sum.device} vs {type(metric_device)}:{metric_device}"
def test_sum_detached():
loss = Loss(nll_loss)
y_pred, y, _ = y_test_1(requires_grad=True)
loss.update((y_pred, y))
assert not loss._sum.requires_grad
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute_on_criterion(device, y_test_1(), y_test_2())
_test_distrib_accumulator_device(device, y_test_1())
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute_on_criterion(device, y_test_1(), y_test_2())
_test_distrib_accumulator_device(device, y_test_1())
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute_on_criterion, (device, y_test_1(), y_test_2()), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_accumulator_device, (device, y_test_1()), np=nproc, do_init=True)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute_on_criterion(device, y_test_1(), y_test_2())
_test_distrib_accumulator_device(device, y_test_1())
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute_on_criterion(device, y_test_1(), y_test_2())
_test_distrib_accumulator_device(device, y_test_1())
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute_on_criterion(device, y_test_1(), y_test_2(), tol=1e-6)
_test_distrib_accumulator_device(device, y_test_1())
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute_on_criterion(device, y_test_1(), y_test_2())
_test_distrib_accumulator_device(device, y_test_1())
def test_override_required_output_keys():
# https://github.com/pytorch/ignite/issues/1415
from ignite.engine import create_supervised_evaluator
counter = [0]
class DummyLoss2(Loss):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def update(self, output):
y_pred, y, criterion_kwargs = output
assert y_pred.shape == (4, 3)
assert y.shape == (4,)
assert criterion_kwargs == c_kwargs
assert y.equal(data[counter[0]][1])
counter[0] += 1
def reset(self):
pass
def compute(self):
pass
model = nn.Linear(10, 3)
metrics = {"Precision": Precision(), "DummyLoss2": DummyLoss2(nll_loss)}
# global criterion kwargs
c_kwargs = {"reduction": "sum"}
evaluator = create_supervised_evaluator(
model,
metrics=metrics,
output_transform=lambda x, y, y_pred: {"x": x, "y": y, "y_pred": y_pred, "criterion_kwargs": c_kwargs},
)
data = [
(torch.rand(4, 10), torch.randint(0, 3, size=(4,))),
(torch.rand(4, 10), torch.randint(0, 3, size=(4,))),
(torch.rand(4, 10), torch.randint(0, 3, size=(4,))),
]
evaluator.run(data)
|
import os
import warnings
import pytest
import torch
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.metrics import recall_score
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import Recall
torch.manual_seed(12)
def test_no_update():
recall = Recall()
assert recall._updated is False
with pytest.raises(NotComputableError, match=r"Recall must have at least one example before it can be computed"):
recall.compute()
assert recall._updated is False
recall = Recall(is_multilabel=True)
assert recall._updated is False
with pytest.raises(NotComputableError, match=r"Recall must have at least one example before it can be computed"):
recall.compute()
assert recall._updated is False
def test_average_parameter():
re = Recall(average="samples")
with pytest.raises(
ValueError, match=r"Argument average='samples' is incompatible with binary and multiclass input data."
):
re.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10,)).long()))
assert re._updated is False
re = Recall(average="samples")
with pytest.raises(
ValueError, match=r"Argument average='samples' is incompatible with binary and multiclass input data."
):
re.update((torch.rand(10, 3), torch.randint(0, 3, size=(10,)).long()))
assert re._updated is False
re = Recall(average=True)
assert re._average == "macro"
def test_binary_wrong_inputs():
re = Recall()
assert re._updated is False
with pytest.raises(ValueError, match=r"For binary cases, y must be comprised of 0's and 1's"):
# y contains values other than 0 and 1
re.update((torch.randint(0, 2, size=(10,)), torch.arange(0, 10).long()))
assert re._updated is False
with pytest.raises(ValueError, match=r"For binary cases, y_pred must be comprised of 0's and 1's"):
# y_pred values are not thresholded to 0 and 1
re.update((torch.rand(10, 1), torch.randint(0, 2, size=(10,)).long()))
assert re._updated is False
with pytest.raises(ValueError, match=r"y must have shape of"):
# incompatible shapes
re.update((torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10, 5)).long()))
assert re._updated is False
with pytest.raises(ValueError, match=r"y must have shape of"):
# incompatible shapes
re.update((torch.randint(0, 2, size=(10, 5, 6)), torch.randint(0, 2, size=(10,)).long()))
assert re._updated is False
with pytest.raises(ValueError, match=r"y must have shape of"):
# incompatible shapes
re.update((torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10, 5, 6)).long()))
assert re._updated is False
with pytest.warns(
RuntimeWarning,
match="`y` and `y_pred` should be of dtype long when entry type is binary and average!=False",
):
re = Recall(average=None)
re.update((torch.randint(0, 2, size=(10,)).float(), torch.randint(0, 2, size=(10,))))
with pytest.warns(
RuntimeWarning,
match="`y` and `y_pred` should be of dtype long when entry type is binary and average!=False",
):
re = Recall(average=None)
re.update((torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10,)).float()))
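# Translates ignite's `average` argument into the `average` parameter of
# scikit-learn's recall_score: string values pass through, False maps to
# "binary" for binary data (or None for per-class scores), and True maps to "macro".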
def ignite_average_to_scikit_average(average, data_type: str):
if average in [None, "micro", "samples", "weighted", "macro"]:
return average
if average is False:
if data_type == "binary":
return "binary"
else:
return None
elif average is True:
return "macro"
else:
raise ValueError(f"Wrong average parameter `{average}`")
@pytest.mark.parametrize("average", [None, False, "macro", "micro", "weighted"])
def test_binary_input(average):
re = Recall(average=average)
assert re._updated is False
def _test(y_pred, y, batch_size):
re.reset()
assert re._updated is False
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
re.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
re.update((y_pred, y))
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
assert re._type == "binary"
assert re._updated is True
assert isinstance(re.compute(), torch.Tensor if not average else float)
re_compute = re.compute().numpy() if not average else re.compute()
sk_average_parameter = ignite_average_to_scikit_average(average, "binary")
assert recall_score(np_y, np_y_pred, average=sk_average_parameter, labels=[0, 1]) == pytest.approx(re_compute)
def get_test_cases():
test_cases = [
# Binary input data of shape (N, 1) or (N, )
(torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10,)), 1),
(torch.randint(0, 2, size=(10, 1)), torch.randint(0, 2, size=(10, 1)), 1),
# updated batches
(torch.randint(0, 2, size=(50,)), torch.randint(0, 2, size=(50,)), 16),
(torch.randint(0, 2, size=(50, 1)), torch.randint(0, 2, size=(50, 1)), 16),
# Binary input data of shape (N, L)
(torch.randint(0, 2, size=(10, 5)), torch.randint(0, 2, size=(10, 5)), 1),
(torch.randint(0, 2, size=(10, 1, 5)), torch.randint(0, 2, size=(10, 1, 5)), 1),
# updated batches
(torch.randint(0, 2, size=(50, 5)), torch.randint(0, 2, size=(50, 5)), 16),
(torch.randint(0, 2, size=(50, 1, 5)), torch.randint(0, 2, size=(50, 1, 5)), 16),
# Binary input data of shape (N, H, W)
(torch.randint(0, 2, size=(10, 12, 10)), torch.randint(0, 2, size=(10, 12, 10)), 1),
(torch.randint(0, 2, size=(10, 1, 12, 10)), torch.randint(0, 2, size=(10, 1, 12, 10)), 1),
# updated batches
(torch.randint(0, 2, size=(50, 12, 10)), torch.randint(0, 2, size=(50, 12, 10)), 16),
(torch.randint(0, 2, size=(50, 1, 12, 10)), torch.randint(0, 2, size=(50, 1, 12, 10)), 16),
# Corner case with all zeros predictions
(torch.zeros(size=(10,), dtype=torch.long), torch.randint(0, 2, size=(10,)), 1),
(torch.zeros(size=(10, 1), dtype=torch.long), torch.randint(0, 2, size=(10, 1)), 1),
]
return test_cases
for _ in range(5):
# check multiple random inputs as coincidental exact matches are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def test_multiclass_wrong_inputs():
re = Recall()
assert re._updated is False
with pytest.raises(ValueError):
# incompatible shapes
re.update((torch.rand(10, 5, 4), torch.randint(0, 2, size=(10,)).long()))
assert re._updated is False
with pytest.raises(ValueError):
# incompatible shapes
re.update((torch.rand(10, 5, 6), torch.randint(0, 5, size=(10, 5)).long()))
assert re._updated is False
with pytest.raises(ValueError):
# incompatible shapes
re.update((torch.rand(10), torch.randint(0, 5, size=(10, 5, 6)).long()))
assert re._updated is False
re = Recall(average=True)
assert re._updated is False
with pytest.raises(ValueError):
# incompatible shapes between two updates
re.update((torch.rand(10, 5), torch.randint(0, 5, size=(10,)).long()))
re.update((torch.rand(10, 6), torch.randint(0, 5, size=(10,)).long()))
assert re._updated is True
with pytest.raises(ValueError):
# incompatible shapes between two updates
re.update((torch.rand(10, 5, 12, 14), torch.randint(0, 5, size=(10, 12, 14)).long()))
re.update((torch.rand(10, 6, 12, 14), torch.randint(0, 5, size=(10, 12, 14)).long()))
assert re._updated is True
re = Recall(average=False)
assert re._updated is False
with pytest.raises(ValueError):
# incompatible shapes between two updates
re.update((torch.rand(10, 5), torch.randint(0, 5, size=(10,)).long()))
re.update((torch.rand(10, 6), torch.randint(0, 5, size=(10,)).long()))
assert re._updated is True
with pytest.raises(ValueError):
# incompatible shapes between two updates
re.update((torch.rand(10, 5, 12, 14), torch.randint(0, 5, size=(10, 12, 14)).long()))
re.update((torch.rand(10, 6, 12, 14), torch.randint(0, 5, size=(10, 12, 14)).long()))
assert re._updated is True
with pytest.warns(
RuntimeWarning,
match="`y` should be of dtype long when entry type is multiclass",
):
re = Recall()
re.update((torch.rand(10, 5), torch.randint(0, 5, size=(10,)).float()))
@pytest.mark.parametrize("average", [None, False, "macro", "micro", "weighted"])
def test_multiclass_input(average):
re = Recall(average=average)
assert re._updated is False
def _test(y_pred, y, batch_size):
re.reset()
assert re._updated is False
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
re.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
re.update((y_pred, y))
num_classes = y_pred.shape[1]
np_y_pred = y_pred.argmax(dim=1).numpy().ravel()
np_y = y.numpy().ravel()
assert re._type == "multiclass"
assert re._updated is True
assert isinstance(re.compute(), torch.Tensor if not average else float)
re_compute = re.compute().numpy() if not average else re.compute()
sk_average_parameter = ignite_average_to_scikit_average(average, "multiclass")
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UndefinedMetricWarning)
sk_compute = recall_score(np_y, np_y_pred, labels=range(0, num_classes), average=sk_average_parameter)
assert sk_compute == pytest.approx(re_compute)
def get_test_cases():
test_cases = [
# Multiclass input data of shape (N, ) and (N, C)
(torch.rand(10, 6), torch.randint(0, 6, size=(10,)), 1),
(torch.rand(10, 4), torch.randint(0, 4, size=(10,)), 1),
# updated batches
(torch.rand(50, 6), torch.randint(0, 6, size=(50,)), 16),
(torch.rand(50, 4), torch.randint(0, 4, size=(50,)), 16),
# Multiclass input data of shape (N, L) and (N, C, L)
(torch.rand(10, 5, 8), torch.randint(0, 5, size=(10, 8)), 1),
(torch.rand(10, 8, 12), torch.randint(0, 8, size=(10, 12)), 1),
# updated batches
(torch.rand(50, 5, 8), torch.randint(0, 5, size=(50, 8)), 16),
(torch.rand(50, 8, 12), torch.randint(0, 8, size=(50, 12)), 16),
# Multiclass input data of shape (N, H, W, ...) and (N, C, H, W, ...)
(torch.rand(10, 5, 18, 16), torch.randint(0, 5, size=(10, 18, 16)), 1),
(torch.rand(10, 7, 20, 12), torch.randint(0, 7, size=(10, 20, 12)), 1),
# updated batches
(torch.rand(50, 5, 18, 16), torch.randint(0, 5, size=(50, 18, 16)), 16),
(torch.rand(50, 7, 20, 12), torch.randint(0, 7, size=(50, 20, 12)), 16),
# Corner case with all zeros predictions
(torch.zeros(size=(10, 6)), torch.randint(0, 6, size=(10,)), 1),
(torch.zeros(size=(10, 4)), torch.randint(0, 4, size=(10,)), 1),
]
return test_cases
for _ in range(5):
# check multiple random inputs as coincidental exact matches are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def test_multilabel_wrong_inputs():
re = Recall(is_multilabel=True)
assert re._updated is False
with pytest.raises(ValueError):
# incompatible shapes
re.update((torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10,)).long()))
assert re._updated is False
with pytest.raises(ValueError):
# incompatible y_pred
re.update((torch.rand(10, 5), torch.randint(0, 2, size=(10, 5)).long()))
assert re._updated is False
with pytest.raises(ValueError):
# incompatible y
re.update((torch.randint(0, 5, size=(10, 5, 6)), torch.rand(10)))
assert re._updated is False
with pytest.raises(ValueError):
# incompatible shapes between two updates
re.update((torch.randint(0, 2, size=(20, 5)), torch.randint(0, 2, size=(20, 5)).long()))
re.update((torch.randint(0, 2, size=(20, 6)), torch.randint(0, 2, size=(20, 6)).long()))
assert re._updated is True
def to_numpy_multilabel(y):
# reshapes input array to (N x ..., C)
y = y.transpose(1, 0).cpu().numpy()
num_classes = y.shape[0]
y = y.reshape((num_classes, -1)).transpose(1, 0)
return y
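# Illustrative note (an addition for clarity, not part of the original test file):
# to_numpy_multilabel moves the class dimension last and flattens every other
# dimension, so sklearn's multilabel recall_score can consume the result, e.g.
#
#     y = torch.randint(0, 2, size=(4, 3, 2))   # (N=4, C=3, L=2)
#     to_numpy_multilabel(y).shape              # -> (8, 3), i.e. (N * L, C)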
@pytest.mark.parametrize("average", [None, False, "macro", "micro", "samples"])
def test_multilabel_input(average):
re = Recall(average=average, is_multilabel=True)
assert re._updated is False
def _test(y_pred, y, batch_size):
re.reset()
assert re._updated is False
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
re.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
re.update((y_pred, y))
np_y_pred = to_numpy_multilabel(y_pred)
np_y = to_numpy_multilabel(y)
assert re._type == "multilabel"
assert re._updated is True
re_compute = re.compute().numpy() if not average else re.compute()
sk_average_parameter = ignite_average_to_scikit_average(average, "multilabel")
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UndefinedMetricWarning)
assert recall_score(np_y, np_y_pred, average=sk_average_parameter) == pytest.approx(re_compute)
def get_test_cases():
test_cases = [
# Multilabel input data of shape (N, C)
(torch.randint(0, 2, size=(10, 5)), torch.randint(0, 2, size=(10, 5)), 1),
(torch.randint(0, 2, size=(10, 4)), torch.randint(0, 2, size=(10, 4)), 1),
# updated batches
(torch.randint(0, 2, size=(50, 5)), torch.randint(0, 2, size=(50, 5)), 16),
(torch.randint(0, 2, size=(50, 4)), torch.randint(0, 2, size=(50, 4)), 16),
# Multilabel input data of shape (N, H, W)
(torch.randint(0, 2, size=(10, 5, 10)), torch.randint(0, 2, size=(10, 5, 10)), 1),
(torch.randint(0, 2, size=(10, 4, 10)), torch.randint(0, 2, size=(10, 4, 10)), 1),
# updated batches
(torch.randint(0, 2, size=(50, 5, 10)), torch.randint(0, 2, size=(50, 5, 10)), 16),
(torch.randint(0, 2, size=(50, 4, 10)), torch.randint(0, 2, size=(50, 4, 10)), 16),
# Multilabel input data of shape (N, C, H, W, ...)
(torch.randint(0, 2, size=(10, 5, 18, 16)), torch.randint(0, 2, size=(10, 5, 18, 16)), 1),
(torch.randint(0, 2, size=(10, 4, 20, 23)), torch.randint(0, 2, size=(10, 4, 20, 23)), 1),
# updated batches
(torch.randint(0, 2, size=(50, 5, 18, 16)), torch.randint(0, 2, size=(50, 5, 18, 16)), 16),
(torch.randint(0, 2, size=(50, 4, 20, 23)), torch.randint(0, 2, size=(50, 4, 20, 23)), 16),
# Corner case with all zeros predictions
(torch.zeros(size=(10, 5)), torch.randint(0, 2, size=(10, 5)), 1),
(torch.zeros(size=(10, 4)), torch.randint(0, 2, size=(10, 4)), 1),
]
return test_cases
for _ in range(5):
# check multiple random inputs as random exact occurrences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
@pytest.mark.parametrize("average", [None, False, "macro", "micro", "weighted"])
def test_incorrect_type(average):
# Tests that the detected input type cannot change between updates
re = Recall(average=average)
assert re._updated is False
y_pred = torch.softmax(torch.rand(4, 4), dim=1)
y = torch.ones(4).long()
re.update((y_pred, y))
assert re._updated is True
y_pred = torch.zeros(4)
y = torch.ones(4).long()
with pytest.raises(RuntimeError):
re.update((y_pred, y))
assert re._updated is True
@pytest.mark.parametrize("average", [None, False, "macro", "micro", "weighted"])
def test_incorrect_y_classes(average):
re = Recall(average=average)
assert re._updated is False
y_pred = torch.randint(0, 2, size=(10, 4)).float()
y = torch.randint(4, 5, size=(10,)).long()
with pytest.raises(ValueError):
re.update((y_pred, y))
assert re._updated is False
def _test_distrib_integration_multiclass(device):
from ignite.engine import Engine
def _test(average, n_epochs, metric_device):
n_iters = 60
batch_size = 16
n_classes = 7
y_true = torch.randint(0, n_classes, size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(n_iters * batch_size, n_classes).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size, :],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
re = Recall(average=average, device=metric_device)
re.attach(engine, "re")
assert re._updated is False
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "re" in engine.state.metrics
assert re._updated is True
res = engine.state.metrics["re"]
if isinstance(res, torch.Tensor):
# Fixes https://github.com/pytorch/ignite/issues/1635#issuecomment-863026919
assert res.device.type == "cpu"
res = res.cpu().numpy()
sk_average_parameter = ignite_average_to_scikit_average(average, "multiclass")
true_res = recall_score(
y_true.cpu().numpy(), torch.argmax(y_preds, dim=1).cpu().numpy(), average=sk_average_parameter
)
assert pytest.approx(res) == true_res
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
rank = idist.get_rank()
for i in range(2):
torch.manual_seed(12 + rank + i)
for metric_device in metric_devices:
_test(average=False, n_epochs=1, metric_device=metric_device)
_test(average=False, n_epochs=2, metric_device=metric_device)
_test(average="macro", n_epochs=1, metric_device=metric_device)
_test(average="macro", n_epochs=2, metric_device=metric_device)
_test(average="weighted", n_epochs=1, metric_device=metric_device)
_test(average="weighted", n_epochs=2, metric_device=metric_device)
_test(average="micro", n_epochs=1, metric_device=metric_device)
_test(average="micro", n_epochs=2, metric_device=metric_device)
def _test_distrib_integration_multilabel(device):
from ignite.engine import Engine
torch.manual_seed(12)
def _test(average, n_epochs, metric_device):
n_iters = 60
batch_size = 16
n_classes = 7
y_true = torch.randint(0, 2, size=(n_iters * batch_size, n_classes, 6, 8)).to(device)
y_preds = torch.randint(0, 2, size=(n_iters * batch_size, n_classes, 6, 8)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size, ...],
y_true[i * batch_size : (i + 1) * batch_size, ...],
)
engine = Engine(update)
re = Recall(average=average, is_multilabel=True, device=metric_device)
re.attach(engine, "re")
assert re._updated is False
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "re" in engine.state.metrics
assert re._updated is True
res = engine.state.metrics["re"]
res2 = re.compute()
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
res2 = res2.cpu().numpy()
assert (res == res2).all()
else:
assert res == res2
np_y_preds = to_numpy_multilabel(y_preds)
np_y_true = to_numpy_multilabel(y_true)
assert re._type == "multilabel"
sk_average_parameter = ignite_average_to_scikit_average(average, "multilabel")
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UndefinedMetricWarning)
assert recall_score(np_y_true, np_y_preds, average=sk_average_parameter) == pytest.approx(res)
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
rank = idist.get_rank()
for i in range(2):
torch.manual_seed(12 + rank + i)
for metric_device in metric_devices:
_test(average=False, n_epochs=1, metric_device=metric_device)
_test(average=False, n_epochs=2, metric_device=metric_device)
_test(average="macro", n_epochs=1, metric_device=metric_device)
_test(average="macro", n_epochs=2, metric_device=metric_device)
_test(average="micro", n_epochs=1, metric_device=metric_device)
_test(average="micro", n_epochs=2, metric_device=metric_device)
_test(average="weighted", n_epochs=1, metric_device=metric_device)
_test(average="weighted", n_epochs=2, metric_device=metric_device)
_test(average="samples", n_epochs=1, metric_device=metric_device)
_test(average="samples", n_epochs=2, metric_device=metric_device)
def _test_distrib_accumulator_device(device):
# Binary recall on input of shape (N, 1) or (N, )
def _test(average, metric_device):
re = Recall(average=average, device=metric_device)
assert re._device == metric_device
assert re._updated is False
# Since the shape of the accumulated amount isn't known before the first update
# call, the internal variables aren't tensors on the right device yet.
y_pred = torch.randint(0, 2, size=(10,))
y = torch.randint(0, 2, size=(10,)).long()
re.update((y_pred, y))
assert re._updated is True
assert (
re._numerator.device == metric_device
), f"{type(re._numerator.device)}:{re._numerator.device} vs {type(metric_device)}:{metric_device}"
if average != "samples":
# For average='samples', `_denominator` is of type `int` so it has no `device` member.
assert (
re._denominator.device == metric_device
), f"{type(re._denominator.device)}:{re._denominator.device} vs {type(metric_device)}:{metric_device}"
if average == "weighted":
assert (
re._weight.device == metric_device
), f"{type(re._weight.device)}:{re._weight.device} vs {type(metric_device)}:{metric_device}"
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
_test(False, metric_device=metric_device)
_test("macro", metric_device=metric_device)
_test("micro", metric_device=metric_device)
_test("weighted", metric_device=metric_device)
def _test_distrib_multilabel_accumulator_device(device):
# Multilabel input data of shape (N, C, H, W)
def _test(average, metric_device):
re = Recall(is_multilabel=True, average=average, device=metric_device)
assert re._updated is False
assert re._device == metric_device
y_pred = torch.randint(0, 2, size=(10, 4, 20, 23))
y = torch.randint(0, 2, size=(10, 4, 20, 23)).long()
re.update((y_pred, y))
assert re._updated is True
assert (
re._numerator.device == metric_device
), f"{type(re._numerator.device)}:{re._numerator.device} vs {type(metric_device)}:{metric_device}"
if average != "samples":
# For average='samples', `_denominator` is of type `int` so it has no `device` member.
assert (
re._denominator.device == metric_device
), f"{type(re._denominator.device)}:{re._denominator.device} vs {type(metric_device)}:{metric_device}"
if average == "weighted":
assert (
re._weight.device == metric_device
), f"{type(re._weight.device)}:{re._weight.device} vs {type(metric_device)}:{metric_device}"
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
_test(False, metric_device=metric_device)
_test("macro", metric_device=metric_device)
_test("micro", metric_device=metric_device)
_test("weighted", metric_device=metric_device)
_test("samples", metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_integration_multiclass(device)
_test_distrib_integration_multilabel(device)
_test_distrib_accumulator_device(device)
_test_distrib_multilabel_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_integration_multiclass(device)
_test_distrib_integration_multilabel(device)
_test_distrib_accumulator_device(device)
_test_distrib_multilabel_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_integration_multiclass, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration_multilabel, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_accumulator_device, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_multilabel_accumulator_device, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_integration_multiclass(device)
_test_distrib_integration_multilabel(device)
_test_distrib_accumulator_device(device)
_test_distrib_multilabel_accumulator_device(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_integration_multiclass(device)
_test_distrib_integration_multilabel(device)
_test_distrib_accumulator_device(device)
_test_distrib_multilabel_accumulator_device(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_integration_multiclass(device)
_test_distrib_integration_multilabel(device)
_test_distrib_accumulator_device(device)
_test_distrib_multilabel_accumulator_device(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_integration_multiclass(device)
_test_distrib_integration_multilabel(device)
_test_distrib_accumulator_device(device)
_test_distrib_multilabel_accumulator_device(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
from sklearn.metrics import fbeta_score
import ignite.distributed as idist
from ignite.engine import Engine
from ignite.metrics import Fbeta, Precision, Recall
torch.manual_seed(12)
def test_wrong_inputs():
with pytest.raises(ValueError, match=r"Beta should be a positive integer"):
Fbeta(0.0)
with pytest.raises(ValueError, match=r"Input precision metric should have average=False"):
p = Precision(average="micro")
Fbeta(1.0, precision=p)
with pytest.raises(ValueError, match=r"Input recall metric should have average=False"):
r = Recall(average="samples")
Fbeta(1.0, recall=r)
with pytest.raises(ValueError, match=r"If precision argument is provided, output_transform should be None"):
p = Precision(average=False)
Fbeta(1.0, precision=p, output_transform=lambda x: x)
with pytest.raises(ValueError, match=r"If recall argument is provided, output_transform should be None"):
r = Recall(average=False)
Fbeta(1.0, recall=r, output_transform=lambda x: x)
def _output_transform(output):
return output["y_pred"], output["y"]
@pytest.mark.parametrize(
"p, r, average, output_transform",
[
(None, None, False, None),
(None, None, True, None),
(None, None, False, _output_transform),
(None, None, True, _output_transform),
(Precision(average=False), Recall(average=False), False, None),
(Precision(average=False), Recall(average=False), True, None),
],
)
def test_integration(p, r, average, output_transform):
np.random.seed(1)
n_iters = 10
batch_size = 10
n_classes = 10
y_true = np.arange(0, n_iters * batch_size, dtype="int64") % n_classes
y_pred = 0.2 * np.random.rand(n_iters * batch_size, n_classes)
for i in range(n_iters * batch_size):
if np.random.rand() > 0.4:
y_pred[i, y_true[i]] = 1.0
else:
j = np.random.randint(0, n_classes)
y_pred[i, j] = 0.7
y_true_batch_values = iter(y_true.reshape(n_iters, batch_size))
y_pred_batch_values = iter(y_pred.reshape(n_iters, batch_size, n_classes))
def update_fn(engine, batch):
y_true_batch = next(y_true_batch_values)
y_pred_batch = next(y_pred_batch_values)
if output_transform is not None:
return {"y_pred": torch.from_numpy(y_pred_batch), "y": torch.from_numpy(y_true_batch)}
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
evaluator = Engine(update_fn)
f2 = Fbeta(beta=2.0, average=average, precision=p, recall=r, output_transform=output_transform)
f2.attach(evaluator, "f2")
data = list(range(n_iters))
state = evaluator.run(data, max_epochs=1)
f2_true = fbeta_score(y_true, np.argmax(y_pred, axis=-1), average="macro" if average else None, beta=2.0)
np.testing.assert_allclose(np.array(f2_true), np.array(state.metrics["f2"]))
def _test_distrib_integration(device):
rank = idist.get_rank()
def _test(p, r, average, n_epochs, metric_device):
n_iters = 60
batch_size = 16
n_classes = 7
torch.manual_seed(12 + rank)
y_true = torch.randint(0, n_classes, size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(n_iters * batch_size, n_classes).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size, :],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
fbeta = Fbeta(beta=2.5, average=average, device=metric_device)
fbeta.attach(engine, "f2.5")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "f2.5" in engine.state.metrics
res = engine.state.metrics["f2.5"]
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
true_res = fbeta_score(
y_true.cpu().numpy(),
torch.argmax(y_preds, dim=1).cpu().numpy(),
beta=2.5,
average="macro" if average else None,
)
assert pytest.approx(res) == true_res
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
_test(None, None, average=True, n_epochs=1, metric_device=metric_device)
_test(None, None, average=True, n_epochs=2, metric_device=metric_device)
precision = Precision(average=False, device=metric_device)
recall = Recall(average=False, device=metric_device)
_test(precision, recall, average=False, n_epochs=1, metric_device=metric_device)
_test(precision, recall, average=False, n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_integration(device)
|
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import MeanSquaredError
def test_zero_sample():
mse = MeanSquaredError()
with pytest.raises(
NotComputableError, match=r"MeanSquaredError must have at least one example before it can be computed"
):
mse.compute()
@pytest.fixture(params=[item for item in range(4)])
def test_case(request):
return [
(torch.randint(0, 10, size=(100, 1)), torch.randint(0, 10, size=(100, 1)), 1),
(torch.randint(-20, 20, size=(100, 5)), torch.randint(-20, 20, size=(100, 5)), 1),
# updated batches
(torch.randint(0, 10, size=(100, 1)), torch.randint(0, 10, size=(100, 1)), 16),
(torch.randint(-20, 20, size=(100, 5)), torch.randint(-20, 20, size=(100, 5)), 16),
][request.param]
@pytest.mark.parametrize("n_times", range(5))
def test_compute(n_times, test_case):
mse = MeanSquaredError()
y_pred, y, batch_size = test_case
mse.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
mse.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
mse.update((y_pred, y))
np_y = y.numpy()
np_y_pred = y_pred.numpy()
np_res = np.power((np_y - np_y_pred), 2.0).sum() / np_y.shape[0]
assert isinstance(mse.compute(), float)
assert mse.compute() == np_res
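# Illustrative note (an addition for clarity, not part of the original test file):
# the reference value above is the sum of squared errors divided by the number of
# examples (the first dimension), not by the number of elements. For instance with
# y = [0, 2] and y_pred = [1, 1]:
#
#     ((0 - 1) ** 2 + (2 - 1) ** 2) / 2 == 1.0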
def _test_distrib_integration(device, tol=1e-6):
from ignite.engine import Engine
rank = idist.get_rank()
torch.manual_seed(12 + rank)
def _test(metric_device):
n_iters = 100
batch_size = 10
y_true = torch.arange(0, n_iters * batch_size, dtype=torch.float).to(device)
y_preds = torch.ones(n_iters * batch_size, dtype=torch.float).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
m = MeanSquaredError(device=metric_device)
m.attach(engine, "mse")
data = list(range(n_iters))
engine.run(data=data, max_epochs=1)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "mse" in engine.state.metrics
res = engine.state.metrics["mse"]
true_res = np.mean(np.power((y_true - y_preds).cpu().numpy(), 2.0))
assert pytest.approx(res, rel=tol) == true_res
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_accumulator_device(device):
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
device = torch.device(device)
mse = MeanSquaredError(device=metric_device)
for dev in [mse._device, mse._sum_of_squared_errors.device]:
assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
y_pred = torch.tensor([[2.0], [-2.0]])
y = torch.zeros(2)
mse.update((y_pred, y))
for dev in [mse._device, mse._sum_of_squared_errors.device]:
assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
def test_accumulator_detached():
mse = MeanSquaredError()
y_pred = torch.tensor([[2.0], [-2.0]], requires_grad=True)
y = torch.zeros(2)
mse.update((y_pred, y))
assert not mse._sum_of_squared_errors.requires_grad
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_accumulator_device, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_integration(device, tol=1e-4)
_test_distrib_accumulator_device(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_integration(device, tol=1e-4)
_test_distrib_accumulator_device(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import pytest
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import TopKCategoricalAccuracy
def test_zero_div():
acc = TopKCategoricalAccuracy(2)
with pytest.raises(
NotComputableError, match=r"TopKCategoricalAccuracy must have at least one example before it can be computed"
):
acc.compute()
def test_compute():
acc = TopKCategoricalAccuracy(2)
y_pred = torch.FloatTensor([[0.2, 0.4, 0.6, 0.8], [0.8, 0.6, 0.4, 0.2]])
y = torch.ones(2).long()
acc.update((y_pred, y))
assert isinstance(acc.compute(), float)
assert acc.compute() == 0.5
acc.reset()
y_pred = torch.FloatTensor([[0.4, 0.8, 0.2, 0.6], [0.8, 0.6, 0.4, 0.2]])
y = torch.ones(2).long()
acc.update((y_pred, y))
assert isinstance(acc.compute(), float)
assert acc.compute() == 1.0
def top_k_accuracy(y_true, y_pred, k=5, normalize=True):
import numpy as np
# Taken from
# https://github.com/scikit-learn/scikit-learn/blob/4685cb5c50629aba4429f6701585f82fc3eee5f7/
# sklearn/metrics/classification.py#L187
if len(y_true.shape) == 2:
y_true = np.argmax(y_true, axis=1)
num_obs, num_labels = y_pred.shape
idx = num_labels - k - 1
counter = 0.0
argsorted = np.argsort(y_pred, axis=1)
for i in range(num_obs):
if y_true[i] in argsorted[i, idx + 1 :]:
counter += 1.0
if normalize:
return counter * 1.0 / num_obs
else:
return counter
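# Illustrative note (an addition for clarity, not part of the original test file):
# top_k_accuracy counts a sample as correct when its true label is among the k
# highest-scored classes, e.g.
#
#     y_true = np.array([1, 2])
#     y_pred = np.array([[0.1, 0.5, 0.4], [0.6, 0.1, 0.3]])
#     top_k_accuracy(y_true, y_pred, k=2)   # -> 1.0, both labels fall in the top-2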
def _test_distrib_integration(device):
from ignite.engine import Engine
def _test(n_epochs, metric_device):
n_iters = 100
batch_size = 16
n_classes = 10
y_true = torch.randint(0, n_classes, size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(n_iters * batch_size, n_classes).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size, :],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
k = 5
acc = TopKCategoricalAccuracy(k=k, device=metric_device)
acc.attach(engine, "acc")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "acc" in engine.state.metrics
res = engine.state.metrics["acc"]
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
true_res = top_k_accuracy(y_true.cpu().numpy(), y_preds.cpu().numpy(), k=k)
assert pytest.approx(res) == true_res
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
rank = idist.get_rank()
for i in range(3):
torch.manual_seed(12 + rank + i)
for metric_device in metric_devices:
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
def _test_distrib_accumulator_device(device):
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
acc = TopKCategoricalAccuracy(2, device=metric_device)
assert acc._device == metric_device
assert (
acc._num_correct.device == metric_device
), f"{type(acc._num_correct.device)}:{acc._num_correct.device} vs {type(metric_device)}:{metric_device}"
y_pred = torch.tensor([[0.2, 0.4, 0.6, 0.8], [0.8, 0.6, 0.4, 0.2]])
y = torch.ones(2).long()
acc.update((y_pred, y))
assert (
acc._num_correct.device == metric_device
), f"{type(acc._num_correct.device)}:{acc._num_correct.device} vs {type(metric_device)}:{metric_device}"
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_accumulator_device, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
from typing import Callable, Optional, Union
from unittest.mock import patch
import pytest
import torch
import torchvision
from ignite.metrics.gan.utils import _BaseInceptionMetric, InceptionModel
class DummyInceptionMetric(_BaseInceptionMetric):
def __init__(
self,
num_features: Optional[int] = None,
feature_extractor: Optional[torch.nn.Module] = None,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
) -> None:
super(DummyInceptionMetric, self).__init__(
num_features=num_features,
feature_extractor=feature_extractor,
output_transform=output_transform,
device=device,
)
def reset(self):
pass
def compute(self):
pass
def update(self, output):
self._extract_features(output)
def test_dummy_metric():
with pytest.raises(ValueError, match=r"Argument num_features must be greater to zero, got:"):
DummyInceptionMetric(num_features=-1, feature_extractor=torch.nn.Identity()).update(torch.rand(2, 0))
with pytest.raises(ValueError, match=r"feature_extractor output must be a tensor of dim 2, got: 1"):
DummyInceptionMetric(num_features=1000, feature_extractor=torch.nn.Identity()).update(torch.rand(3))
with pytest.raises(ValueError, match=r"Batch size should be greater than one, got: 0"):
DummyInceptionMetric(num_features=1000, feature_extractor=torch.nn.Identity()).update(torch.rand(0, 0))
with pytest.raises(ValueError, match=r"num_features returned by feature_extractor should be 1000, got: 0"):
DummyInceptionMetric(num_features=1000, feature_extractor=torch.nn.Identity()).update(torch.rand(2, 0))
with pytest.raises(ValueError, match=r"Argument num_features must be provided, if feature_extractor is specified."):
DummyInceptionMetric(feature_extractor=torch.nn.Identity())
with pytest.raises(TypeError, match=r"Argument feature_extractor must be of type torch.nn.Module, got"):
DummyInceptionMetric(num_features=1000, feature_extractor=lambda x: x)
assert isinstance(DummyInceptionMetric(num_features=10)._feature_extractor, torch.nn.Identity)
def test_inception_extractor_wrong_inputs():
with pytest.raises(ValueError, match=r"Inputs should be a tensor of dim 4"):
InceptionModel(return_features=True)(torch.rand(2))
with pytest.raises(ValueError, match=r"Inputs should be a tensor with 3 channels"):
InceptionModel(return_features=True)(torch.rand(2, 2, 2, 0))
def test_inception_model_probability():
x = torch.rand(2, 3, 299, 299)
y = InceptionModel(return_features=False)(x)
assert pytest.approx(torch.sum(y[0]).item()) == 1.0
assert pytest.approx(torch.sum(y[1]).item()) == 1.0
assert torch.all(0 <= y)
@pytest.fixture()
def mock_no_torchvision():
with patch.dict("sys.modules", {"torchvision": None}):
yield torchvision
def test_no_torchvision(mock_no_torchvision):
with pytest.raises(ModuleNotFoundError, match=r"This module requires torchvision to be installed."):
InceptionModel(return_features=True)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_device_mismatch_cuda():
images = torch.rand(10, 3, 299, 299)
result = InceptionModel(return_features=False, device="cuda")(images)
assert result.is_cuda
assert result.shape == torch.Size([10, 1000])
result = InceptionModel(return_features=False)(images.cuda())
assert not result.is_cuda
assert result.shape == torch.Size([10, 1000])
images = torch.rand(10, 5)
result = DummyInceptionMetric(num_features=5, device="cuda")._extract_features(images)
assert result.is_cuda
assert result.shape == torch.Size([10, 5])
result = DummyInceptionMetric(num_features=5)._extract_features(images.cuda())
assert not result.is_cuda
assert result.shape == torch.Size([10, 5])
|
import os
import pytest
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics.gan.inception_score import InceptionScore
def calculate_inception_score(p_yx):
p_y = torch.unsqueeze(p_yx.mean(axis=0), 0)
kl_d = torch.kl_div(torch.log(p_y), p_yx)
sum_kl_d = kl_d.sum(axis=1)
avg_kl_d = torch.mean(sum_kl_d)
is_score = torch.exp(avg_kl_d)
return is_score
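# Illustrative note (an addition for clarity, not part of the original test file):
# calculate_inception_score implements IS = exp(E_x[KL(p(y|x) || p(y))]); with a
# perfectly uniform p(y|x) every KL term vanishes and the score is exp(0) == 1:
#
#     p_yx = torch.full((4, 10), 0.1)
#     calculate_inception_score(p_yx)   # -> tensor(1.)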
def test_inception_score():
p_yx = torch.rand(20, 10)
m = InceptionScore(num_features=10, feature_extractor=torch.nn.Identity())
m.update(p_yx)
assert pytest.approx(calculate_inception_score(p_yx)) == m.compute()
p_yx = torch.rand(20, 3, 299, 299)
m = InceptionScore()
m.update(p_yx)
assert isinstance(m.compute(), float)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_device_mismatch_cuda():
p_yx = torch.rand(20, 10).to("cpu")
m = InceptionScore(num_features=10, feature_extractor=torch.nn.Identity().to("cpu"), device="cuda")
m.update(p_yx)
assert pytest.approx(calculate_inception_score(p_yx)) == m.compute()
def test_wrong_inputs():
with pytest.raises(ValueError, match=r"Argument num_features must be greater to zero, got:"):
InceptionScore(num_features=-1, feature_extractor=torch.nn.Identity()).update(torch.rand(2, 0))
with pytest.raises(ValueError, match=r"feature_extractor output must be a tensor of dim 2, got: 1"):
InceptionScore(num_features=1000, feature_extractor=torch.nn.Identity()).update(torch.rand(3))
with pytest.raises(ValueError, match=r"Batch size should be greater than one, got: 0"):
InceptionScore(num_features=1000, feature_extractor=torch.nn.Identity()).update(torch.rand(0, 0))
with pytest.raises(ValueError, match=r"num_features returned by feature_extractor should be 1000, got: 0"):
InceptionScore(num_features=1000, feature_extractor=torch.nn.Identity()).update(torch.rand(2, 0))
with pytest.raises(
NotComputableError, match=r"InceptionScore must have at least one example before it can be computed."
):
InceptionScore(num_features=1000, feature_extractor=torch.nn.Identity()).compute()
with pytest.raises(ValueError, match=r"Argument num_features must be provided, if feature_extractor is specified."):
InceptionScore(feature_extractor=torch.nn.Identity())
def _test_distrib_integration(device):
from ignite.engine import Engine
rank = idist.get_rank()
torch.manual_seed(12)
def _test(metric_device):
n_iters = 60
s = 16
offset = n_iters * s
n_probabilities = 10
y = torch.rand(offset * idist.get_world_size(), n_probabilities)
def update(_, i):
return y[i * s + rank * offset : (i + 1) * s + rank * offset, :]
engine = Engine(update)
m = InceptionScore(num_features=n_probabilities, feature_extractor=torch.nn.Identity(), device=metric_device)
m.attach(engine, "InceptionScore")
engine.run(data=list(range(n_iters)), max_epochs=1)
assert "InceptionScore" in engine.state.metrics
assert pytest.approx(calculate_inception_score(y), rel=1e-5) == m.compute()
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
_test(metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_gpu(local_rank, distributed_context_single_node_nccl):
device = torch.device(f"cuda:{local_rank}")
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_cpu(distributed_context_single_node_gloo):
device = torch.device("cpu")
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_cpu(distributed_context_multi_node_gloo):
device = torch.device("cpu")
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gpu(distributed_context_multi_node_nccl):
device = torch.device(f"cuda:{distributed_context_multi_node_nccl['local_rank']}")
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import re
from unittest.mock import patch
import pytest
import pytorch_fid.fid_score as pytorch_fid_score
import scipy
import torch
from numpy import cov
import ignite.distributed as idist
from ignite.metrics.gan.fid import FID, fid_score
@pytest.fixture()
def mock_no_scipy():
with patch.dict("sys.modules", {"scipy": None}):
yield scipy
def test_no_scipy(mock_no_scipy):
with pytest.raises(ModuleNotFoundError, match=r"This module requires scipy to be installed."):
FID()
with pytest.raises(ModuleNotFoundError, match=r"fid_score requires scipy to be installed."):
fid_score(0, 0, 0, 0)
@pytest.fixture()
def mock_no_numpy():
with patch.dict("sys.modules", {"numpy": None}):
yield scipy
def test_no_numpy(mock_no_numpy):
with pytest.raises(ModuleNotFoundError, match=r"This module requires numpy to be installed."):
FID()
with pytest.raises(ModuleNotFoundError, match=r"fid_score requires numpy to be installed."):
fid_score(0, 0, 0, 0)
def test_fid_function():
train_samples, test_samples = torch.rand(10, 10), torch.rand(10, 10)
mu1, sigma1 = train_samples.mean(axis=0), cov(train_samples, rowvar=False)
mu2, sigma2 = test_samples.mean(axis=0), cov(test_samples, rowvar=False)
sigma1 = torch.tensor(sigma1, dtype=torch.float64)
sigma2 = torch.tensor(sigma2, dtype=torch.float64)
assert pytest.approx(fid_score(mu1, mu2, sigma1, sigma2), rel=1e-5) == pytorch_fid_score.calculate_frechet_distance(
mu1, sigma1, mu2, sigma2
)
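# Illustrative note (an addition for clarity, not part of the original test file):
# fid_score computes the Frechet distance between two Gaussians fitted to the
# features, FID = ||mu1 - mu2||^2 + Tr(sigma1 + sigma2 - 2 * (sigma1 @ sigma2)^(1/2)),
# which is why the test above only needs the per-feature means and covariance
# matrices of the two sample sets.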
def test_compute_fid_from_features():
train_samples, test_samples = torch.rand(10, 10), torch.rand(10, 10)
fid_scorer = FID(num_features=10, feature_extractor=torch.nn.Identity())
fid_scorer.update([train_samples[:5], test_samples[:5]])
fid_scorer.update([train_samples[5:], test_samples[5:]])
mu1, sigma1 = train_samples.mean(axis=0), cov(train_samples, rowvar=False)
mu2, sigma2 = test_samples.mean(axis=0), cov(test_samples, rowvar=False)
assert (
pytest.approx(pytorch_fid_score.calculate_frechet_distance(mu1, sigma1, mu2, sigma2), rel=1e-5)
== fid_scorer.compute()
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_device_mismatch_cuda():
train_samples, test_samples = torch.rand(10, 10).to("cpu"), torch.rand(10, 10).to("cpu")
fid_scorer = FID(num_features=10, feature_extractor=torch.nn.Identity().to("cpu"), device="cuda")
fid_scorer.update([train_samples[:5], test_samples[:5]])
fid_scorer.update([train_samples[5:], test_samples[5:]])
mu1, sigma1 = train_samples.mean(axis=0), cov(train_samples, rowvar=False)
mu2, sigma2 = test_samples.mean(axis=0), cov(test_samples, rowvar=False)
assert (
pytest.approx(pytorch_fid_score.calculate_frechet_distance(mu1, sigma1, mu2, sigma2), rel=1e-4)
== fid_scorer.compute()
)
def test_compute_fid_sqrtm():
mu1 = torch.tensor([0, 0])
mu2 = torch.tensor([0, 0])
sigma1 = torch.tensor([[-1, 1], [1, 1]], dtype=torch.float64)
sigma2 = torch.tensor([[1, 0], [0, 1]], dtype=torch.float64)
with pytest.raises(ValueError, match=r"Imaginary component "):
fid_score(mu1, mu2, sigma1, sigma2)
sigma1 = torch.ones((2, 2), dtype=torch.float64) * torch.finfo(torch.float64).max
sigma2 = torch.tensor([[1, 0.5], [0, 0.5]], dtype=torch.float64)
assert torch.isinf(torch.tensor(fid_score(mu1, mu2, sigma1, sigma2)))
def test_wrong_inputs():
with pytest.raises(ValueError, match=r"Argument num_features must be greater to zero"):
FID(num_features=-1, feature_extractor=torch.nn.Identity())
with pytest.raises(ValueError, match=r"feature_extractor output must be a tensor of dim 2, got: 1"):
FID(num_features=1, feature_extractor=torch.nn.Identity()).update(torch.tensor([[], []]))
with pytest.raises(ValueError, match=r"Batch size should be greater than one, got: 0"):
FID(num_features=1, feature_extractor=torch.nn.Identity()).update(torch.rand(2, 0, 0))
with pytest.raises(ValueError, match=r"num_features returned by feature_extractor should be 1, got: 0"):
FID(num_features=1, feature_extractor=torch.nn.Identity()).update(torch.rand(2, 2, 0))
err_str = (
"Number of Training Features and Testing Features should be equal (torch.Size([9, 2]) != torch.Size([5, 2]))"
)
with pytest.raises(ValueError, match=re.escape(err_str)):
FID(num_features=2, feature_extractor=torch.nn.Identity()).update((torch.rand(9, 2), torch.rand(5, 2)))
with pytest.raises(TypeError, match=r"Argument feature_extractor must be of type torch.nn.Module"):
FID(num_features=1, feature_extractor=lambda x: x)
with pytest.raises(ValueError, match=r"Argument num_features must be provided, if feature_extractor is specified."):
FID(feature_extractor=torch.nn.Identity())
def test_statistics():
train_samples, test_samples = torch.rand(10, 10), torch.rand(10, 10)
fid_scorer = FID(num_features=10, feature_extractor=torch.nn.Identity())
fid_scorer.update([train_samples[:5], test_samples[:5]])
fid_scorer.update([train_samples[5:], test_samples[5:]])
mu1, sigma1 = train_samples.mean(axis=0), torch.tensor(cov(train_samples, rowvar=False))
mu2, sigma2 = test_samples.mean(axis=0), torch.tensor(cov(test_samples, rowvar=False))
fid_mu1 = fid_scorer._train_total / fid_scorer._num_examples
fid_sigma1 = fid_scorer._get_covariance(fid_scorer._train_sigma, fid_scorer._train_total)
fid_mu2 = fid_scorer._test_total / fid_scorer._num_examples
fid_sigma2 = fid_scorer._get_covariance(fid_scorer._test_sigma, fid_scorer._test_total)
assert torch.isclose(mu1.double(), fid_mu1).all()
for cov1, cov2 in zip(sigma1, fid_sigma1):
assert torch.isclose(cov1.double(), cov2, rtol=1e-04, atol=1e-04).all()
assert torch.isclose(mu2.double(), fid_mu2).all()
for cov1, cov2 in zip(sigma2, fid_sigma2):
assert torch.isclose(cov1.double(), cov2, rtol=1e-04, atol=1e-04).all()
def _test_distrib_integration(device):
from ignite.engine import Engine
rank = idist.get_rank()
torch.manual_seed(12)
def _test(metric_device):
n_iters = 60
s = 16
offset = n_iters * s
n_features = 10
y_pred = torch.rand(offset * idist.get_world_size(), n_features)
y_true = torch.rand(offset * idist.get_world_size(), n_features)
def update(_, i):
return (
y_pred[i * s + rank * offset : (i + 1) * s + rank * offset, :],
y_true[i * s + rank * offset : (i + 1) * s + rank * offset, :],
)
engine = Engine(update)
m = FID(num_features=n_features, feature_extractor=torch.nn.Identity(), device=metric_device)
m.attach(engine, "fid")
engine.run(data=list(range(n_iters)), max_epochs=1)
assert "fid" in engine.state.metrics
evaluator = pytorch_fid_score.calculate_frechet_distance
mu1, sigma1 = y_pred.mean(axis=0).to("cpu"), cov(y_pred.to("cpu"), rowvar=False)
mu2, sigma2 = y_true.mean(axis=0).to("cpu"), cov(y_true.to("cpu"), rowvar=False)
assert pytest.approx(evaluator(mu1, sigma1, mu2, sigma2), rel=1e-5) == m.compute()
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
_test(metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_gpu(local_rank, distributed_context_single_node_nccl):
device = torch.device(f"cuda:{local_rank}")
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_cpu(distributed_context_single_node_gloo):
device = torch.device("cpu")
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_cpu(distributed_context_multi_node_gloo):
device = torch.device("cpu")
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gpu(distributed_context_multi_node_nccl):
device = torch.device(f"cuda:{distributed_context_multi_node_nccl['local_rank']}")
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import pytest
from ignite.metrics.nlp.utils import lcs, modified_precision, ngrams
@pytest.mark.parametrize(
"sequence, n, expected_keys, expected_values",
[
([], 1, [], []),
([0, 1, 2], 1, [(0,), (1,), (2,)], [1, 1, 1]),
([0, 1, 2], 2, [(0, 1), (1, 2)], [1, 1]),
([0, 1, 2], 3, [(0, 1, 2)], [1]),
([0, 0, 0], 1, [(0,)], [3]),
([0, 0, 0], 2, [(0, 0)], [2]),
("abcde", 4, [("a", "b", "c", "d"), ("b", "c", "d", "e")], [1, 1]),
],
)
def test_ngrams(sequence, n, expected_keys, expected_values):
ngrams_counter = ngrams(sequence=sequence, n=n)
assert list(ngrams_counter.values()) == expected_values
assert list(ngrams_counter.keys()) == expected_keys
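# Illustrative note (an addition for clarity, not part of the original test file):
# ngrams returns a Counter of overlapping n-grams, mirroring the table above, e.g.
#
#     ngrams([0, 0, 0], n=2)   # -> Counter({(0, 0): 2})
#     ngrams("abcde", n=2)     # -> Counter({('a','b'): 1, ('b','c'): 1, ('c','d'): 1, ('d','e'): 1})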
@pytest.mark.parametrize(
"seq_a, seq_b, expected",
[([], [], 0), ([0, 1, 2], [0, 1, 2], 3), ([0, 1, 2], [0, 3, 2], 2), ("academy", "abracadabra", 4)],
)
def test_lcs(seq_a, seq_b, expected):
assert lcs(seq_a, seq_b) == expected
def test_modified_precision_empty():
for k in range(1, 5):
n, d = modified_precision([[]], [], k)
assert n == 0 and d == 0
n, d = modified_precision([[]], [0], k)
assert n == 0 and d == (k == 1)
n, d = modified_precision([[0]], [], k)
assert n == 0 and d == 0
n, d = modified_precision([[]], list(range(k)), k)
assert n == 0 and d == 1
n, d = modified_precision([list(range(k))], [], k)
assert n == 0 and d == 0
@pytest.mark.parametrize(
"references, candidate, expected",
[
([[0, 0, 0], [1, 2]], [1, 2, 3, 4], ((2, 4), (1, 3), (0, 2))),
([[0, 1, 2], [0, 0, 3]], [0, 0, 0, 1, 2], ((4, 5), (3, 4), (1, 3))),
([[0, 1, 2], [3, 0, 3]], [3, 0, 0, 1, 2], ((4, 5), (3, 4), (1, 3))),
],
)
def test_modified_precision(references, candidate, expected):
for n, (e_n, e_d) in enumerate(expected, start=1):
n, d = modified_precision(references, candidate, n)
assert n == e_n and d == e_d
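# Illustrative note (an addition for clarity, not part of the original test file):
# modified_precision returns (clipped matches, total candidate n-grams). In the
# first case above, the candidate [1, 2, 3, 4] has 4 unigrams, of which only 1 and
# 2 occur in a reference, hence the expected pair (2, 4).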
|
import os
import warnings
from collections import Counter
import pytest
import torch
from nltk.translate.bleu_score import corpus_bleu, sentence_bleu, SmoothingFunction
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics.nlp import Bleu
from . import CorpusForTest
corpus = CorpusForTest(lower_split=True)
def test_wrong_inputs():
with pytest.raises(ValueError, match=r"ngram order must be greater than zero"):
Bleu(ngram=0)
with pytest.raises(ValueError, match=r"Smooth is not valid"):
Bleu(smooth="fake")
with pytest.raises(ValueError, match=r"nb of candidates should be equal to nb of reference lists"):
Bleu()._corpus_bleu(references=[[0], [0]], candidates=[[0]])
with pytest.raises(NotComputableError):
Bleu().compute()
with pytest.raises(ValueError, match='Average must be either "macro" or "micro"'):
Bleu(average="macros")
parametrize_args = (
"candidates, references",
[
([["a", "a", "a", "b", "c"]], [[["a", "b", "c"], ["a", "a", "d"]]]),
corpus.sample_1,
corpus.sample_2,
corpus.sample_3,
corpus.sample_4,
],
)
def _test(candidates, references, average, smooth="no_smooth", smooth_nltk_fn=None, ngram_range=8):
for i in range(1, ngram_range):
weights = tuple([1 / i] * i)
bleu = Bleu(ngram=i, average=average, smooth=smooth)
if average == "macro":
with warnings.catch_warnings():
warnings.simplefilter("ignore")
reference = sentence_bleu(
references[0], candidates[0], weights=weights, smoothing_function=smooth_nltk_fn
)
assert pytest.approx(reference) == bleu._sentence_bleu(references[0], candidates[0])
elif average == "micro":
with warnings.catch_warnings():
warnings.simplefilter("ignore")
reference = corpus_bleu(references, candidates, weights=weights, smoothing_function=smooth_nltk_fn)
assert pytest.approx(reference) == bleu._corpus_bleu(references, candidates)
bleu.update((candidates, references))
assert pytest.approx(reference) == bleu.compute()
@pytest.mark.parametrize(*parametrize_args)
def test_macro_bleu(candidates, references):
_test(candidates, references, "macro")
@pytest.mark.parametrize(*parametrize_args)
def test_micro_bleu(candidates, references):
_test(candidates, references, "micro")
@pytest.mark.parametrize(*parametrize_args)
def test_macro_bleu_smooth1(candidates, references):
_test(candidates, references, "macro", "smooth1", SmoothingFunction().method1)
@pytest.mark.parametrize(*parametrize_args)
def test_micro_bleu_smooth1(candidates, references):
_test(candidates, references, "micro", "smooth1", SmoothingFunction().method1)
@pytest.mark.parametrize(*parametrize_args)
def test_macro_bleu_nltk_smooth2(candidates, references):
_test(candidates, references, "macro", "nltk_smooth2", SmoothingFunction().method2)
@pytest.mark.parametrize(*parametrize_args)
def test_micro_bleu_nltk_smooth2(candidates, references):
_test(candidates, references, "micro", "nltk_smooth2", SmoothingFunction().method2)
@pytest.mark.parametrize(*parametrize_args)
def test_macro_bleu_smooth2(candidates, references):
_test(candidates, references, "macro", "smooth2", SmoothingFunction().method2, 3)
@pytest.mark.parametrize(*parametrize_args)
def test_micro_bleu_smooth2(candidates, references):
_test(candidates, references, "micro", "smooth2", SmoothingFunction().method2, 3)
def test_accumulation_macro_bleu():
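    # Macro average: compute() must equal the arithmetic mean of the per-sentence BLEU
    # scores accumulated over the four updates below.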
bleu = Bleu(ngram=4, smooth="smooth2")
bleu.update(([corpus.cand_1], [corpus.references_1]))
bleu.update(([corpus.cand_2a], [corpus.references_2]))
bleu.update(([corpus.cand_2b], [corpus.references_2]))
bleu.update(([corpus.cand_3], [corpus.references_2]))
value = bleu._sentence_bleu(corpus.references_1, corpus.cand_1)
value += bleu._sentence_bleu(corpus.references_2, corpus.cand_2a)
value += bleu._sentence_bleu(corpus.references_2, corpus.cand_2b)
value += bleu._sentence_bleu(corpus.references_2, corpus.cand_3)
assert bleu.compute() == value / 4
def test_accumulation_micro_bleu():
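    # Micro average: n-gram statistics are pooled across updates, so compute() must match
    # a single corpus-level BLEU over all four candidates and their references.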
bleu = Bleu(ngram=4, smooth="smooth2", average="micro")
bleu.update(([corpus.cand_1], [corpus.references_1]))
bleu.update(([corpus.cand_2a], [corpus.references_2]))
bleu.update(([corpus.cand_2b], [corpus.references_2]))
bleu.update(([corpus.cand_3], [corpus.references_2]))
value = bleu._corpus_bleu(
[corpus.references_1, corpus.references_2, corpus.references_2, corpus.references_2],
[corpus.cand_1, corpus.cand_2a, corpus.cand_2b, corpus.cand_3],
)
assert bleu.compute() == value
def test_bleu_batch_macro():
bleu = Bleu(ngram=4)
# Batch size 3
hypotheses = [corpus.cand_1, corpus.cand_2a, corpus.cand_2b]
refs = [corpus.references_1, corpus.references_2, corpus.references_2]
bleu.update((hypotheses, refs))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
reference_bleu_score = (
sentence_bleu(refs[0], hypotheses[0])
+ sentence_bleu(refs[1], hypotheses[1])
+ sentence_bleu(refs[2], hypotheses[2])
) / 3
assert pytest.approx(bleu.compute()) == reference_bleu_score
value = 0
for _hypotheses, _refs in zip(hypotheses, refs):
value += bleu._sentence_bleu(_refs, _hypotheses)
bleu.update(([_hypotheses], [_refs]))
ref_1 = value / len(refs)
ref_2 = bleu.compute()
assert pytest.approx(ref_1) == reference_bleu_score
assert pytest.approx(ref_2) == reference_bleu_score
def test_bleu_batch_micro():
bleu = Bleu(ngram=4, average="micro")
# Batch size 3
hypotheses = [corpus.cand_1, corpus.cand_2a, corpus.cand_2b]
refs = [corpus.references_1, corpus.references_2, corpus.references_2]
bleu.update((hypotheses, refs))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
reference_bleu_score = corpus_bleu(refs, hypotheses)
assert pytest.approx(bleu.compute()) == reference_bleu_score
assert pytest.approx(bleu._corpus_bleu(refs, hypotheses)) == reference_bleu_score
@pytest.mark.parametrize(
"candidates, references",
[
(corpus.cand_1, corpus.references_1),
(corpus.cand_2a, corpus.references_2),
(corpus.cand_2b, corpus.references_2),
(corpus.cand_1, corpus.references_1),
],
)
def test_n_gram_counter(candidates, references):
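    # _n_gram_counter returns the total hypothesis length and the length of the reference
    # closest in length to the candidate (the quantity used for the brevity penalty).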
bleu = Bleu(ngram=4)
hyp_length, ref_length = bleu._n_gram_counter([references], [candidates], Counter(), Counter())
assert hyp_length == len(candidates)
ref_lens = (len(reference) for reference in references)
closest_ref_len = min(ref_lens, key=lambda ref_len: (abs(ref_len - len(candidates)), ref_len))
assert ref_length == closest_ref_len
def _test_macro_distrib_integration(device):
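    # Each rank consumes its own slice of the replicated corpus; the metric aggregated
    # across processes is compared against a single-process NLTK macro average over the
    # whole data list.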
from ignite.engine import Engine
rank = idist.get_rank()
size = len(corpus.chunks)
data = []
for c in corpus.chunks:
data += idist.get_world_size() * [c]
def update(_, i):
return data[i + size * rank]
def _test(metric_device):
engine = Engine(update)
m = Bleu(ngram=4, smooth="smooth2")
m.attach(engine, "bleu")
engine.run(data=list(range(size)), max_epochs=1)
assert "bleu" in engine.state.metrics
ref_bleu = 0
for candidates, references in data:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ref_bleu += sentence_bleu(
references[0],
candidates[0],
weights=[0.25, 0.25, 0.25, 0.25],
smoothing_function=SmoothingFunction().method2,
)
assert pytest.approx(engine.state.metrics["bleu"]) == ref_bleu / len(data)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_micro_distrib_integration(device):
from ignite.engine import Engine
rank = idist.get_rank()
size = len(corpus.chunks)
data = []
for c in corpus.chunks:
data += idist.get_world_size() * [c]
def update(_, i):
return data[i + size * rank]
def _test(metric_device):
engine = Engine(update)
m = Bleu(ngram=4, smooth="smooth2", average="micro")
m.attach(engine, "bleu")
engine.run(data=list(range(size)), max_epochs=1)
assert "bleu" in engine.state.metrics
ref_bleu = 0
references = []
candidates = []
for _candidates, _references in data:
references.append(_references[0])
candidates.append(_candidates[0])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ref_bleu += corpus_bleu(
references,
candidates,
weights=[0.25, 0.25, 0.25, 0.25],
smoothing_function=SmoothingFunction().method2,
)
assert pytest.approx(engine.state.metrics["bleu"]) == ref_bleu
_test("cpu")
if device.type != "xla":
_test(idist.device())
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_macro_distrib_integration(device)
_test_micro_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_macro_distrib_integration(device)
_test_micro_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_macro_distrib_integration, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_micro_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_macro_distrib_integration(device)
_test_micro_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_macro_distrib_integration(device)
_test_micro_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_macro_distrib_integration(device)
_test_micro_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_macro_distrib_integration(device)
_test_micro_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
__all__ = ["CorpusForTest"]
class CorpusForTest:
def __init__(self, lower_split=False):
def preproc(text):
if lower_split:
return text.lower().split()
else:
return text
# BLEU Paper examples
self.cand_1 = preproc("the the the the the the the")
self.ref_1a = preproc("The cat is on the mat")
self.ref_1b = preproc("There is a cat on the mat")
self.cand_2a = preproc(
"It is a guide to action which ensures that the military always obeys the commands of the party"
)
self.cand_2b = preproc("It is to insure the troops forever hearing the activity guidebook that " "party direct")
self.ref_2a = preproc(
"It is a guide to action that ensures that the military will forever heed " "Party commands"
)
self.ref_2b = preproc(
"It is the guiding principle which guarantees the military forces always being under the command of "
"the Party"
)
self.ref_2c = preproc("It is the practical guide for the army always to heed the directions of the party")
self.cand_3 = preproc("of the")
self.references_1 = [self.ref_1a, self.ref_1b]
self.references_2 = [self.ref_2a, self.ref_2b, self.ref_2c]
self.sample_1 = ([self.cand_1], [self.references_1])
self.sample_2 = ([self.cand_3], [self.references_2])
self.sample_3 = ([self.cand_2a], [self.references_2])
self.sample_4 = ([self.cand_2b], [self.references_2])
self.sample_5 = ([self.cand_2a, self.cand_2b], [self.references_2, self.references_2])
self.references_3 = [self.ref_2a, self.ref_2b]
self.references_4 = [self.ref_2b, self.ref_2c]
self.references_5 = [self.ref_2a, self.ref_2c]
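        # Small (candidates, references) pairs consumed one chunk at a time by the
        # distributed integration tests; the reference sets are varied on purpose.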
self.chunks = [
([self.cand_1], [self.references_1]),
([self.cand_2a], [self.references_2]),
([self.cand_2b], [self.references_2]),
([self.cand_1], [[self.ref_1a]]),
([self.cand_2a], [self.references_3]),
([self.cand_2b], [self.references_3]),
([self.cand_1], [[self.ref_1b]]),
([self.cand_2a], [self.references_4]),
([self.cand_2b], [self.references_4]),
([self.cand_1], [self.references_1]),
([self.cand_2a], [self.references_5]),
([self.cand_2b], [self.references_5]),
([self.cand_1], [[self.ref_1a]]),
([self.cand_2a], [[self.ref_2a]]),
([self.cand_2b], [[self.ref_2c]]),
]
|
import os
import nltk
import pytest
import rouge as pyrouge
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics.nlp import Rouge
from ignite.metrics.nlp.rouge import compute_ngram_scores, RougeL, RougeN
from . import CorpusForTest
nltk.download("punkt")
corpus = CorpusForTest()
@pytest.mark.parametrize(
"candidate, reference, n, expected_precision, expected_recall",
[
([], [], 1, 0, 0),
("abc", "ab", 1, 2 / 3, 2 / 2),
("abc", "ab", 2, 1 / 2, 1 / 1),
("abc", "ab", 3, 0, 0),
("ab", "abc", 1, 2 / 2, 2 / 3),
("ab", "cde", 1, 0 / 2, 0 / 3),
("aab", "aace", 1, 2 / 3, 2 / 4),
("aa", "aaa", 1, 2 / 2, 2 / 3),
("aaa", "aa", 1, 2 / 3, 2 / 2),
],
)
def test_compute_ngram_scores(candidate, reference, n, expected_precision, expected_recall):
scores = compute_ngram_scores(candidate, reference, n=n)
assert pytest.approx(scores.precision()) == expected_precision
assert pytest.approx(scores.recall()) == expected_recall
def test_wrong_inputs():
with pytest.raises(ValueError, match=r"ngram order must be greater than zero"):
RougeN(ngram=0)
with pytest.raises(ValueError, match=r"alpha must be in interval \[0, 1\]"):
RougeN(alpha=-1)
with pytest.raises(ValueError, match=r"alpha must be in interval \[0, 1\]"):
RougeN(alpha=2)
with pytest.raises(ValueError, match=r"multiref : valid values are \['best', 'average'\] "):
RougeN(multiref="")
with pytest.raises(ValueError, match=r"variant must be 'L' or integer greater to zero"):
Rouge(variants=["error"])
with pytest.raises(NotComputableError):
RougeL().compute()
with pytest.raises(ValueError):
Rouge(multiref="unknown")
@pytest.mark.parametrize(
"ngram, candidate, reference, expected",
[
(1, [1, 2, 3], [1, 2], (2 / 3, 2 / 2)),
(2, [1, 2, 3], [1, 2], (1 / 2, 1 / 1)),
(1, "abcdef", "zbdfz", (3 / 6, 3 / 5)),
(2, "abcdef", "zbdfz", (0, 0)),
],
)
def test_rouge_n_alpha(ngram, candidate, reference, expected):
for alpha in [0, 1, 0.3, 0.5, 0.8]:
rouge = RougeN(ngram=ngram, alpha=alpha)
rouge.update(([candidate], [[reference]]))
results = rouge.compute()
assert results[f"Rouge-{ngram}-P"] == expected[0]
assert results[f"Rouge-{ngram}-R"] == expected[1]
try:
F = expected[0] * expected[1] / ((1 - alpha) * expected[0] + alpha * expected[1])
except ZeroDivisionError:
F = 0
assert results[f"Rouge-{ngram}-F"] == F
@pytest.mark.parametrize(
"candidates, references", [corpus.sample_1, corpus.sample_2, corpus.sample_3, corpus.sample_4, corpus.sample_5]
)
def test_rouge_metrics(candidates, references):
for multiref in ["average", "best"]:
# PERL 1.5.5 reference
apply_avg = multiref == "average"
apply_best = multiref == "best"
evaluator = pyrouge.Rouge(
metrics=["rouge-n", "rouge-l"],
max_n=4,
apply_avg=apply_avg,
apply_best=apply_best,
alpha=0.5,
stemming=False,
ensure_compatibility=False,
)
scores = evaluator.get_scores(candidates, references)
lower_split_references = [
[ref.lower().split() for ref in refs_per_candidate] for refs_per_candidate in references
]
lower_split_candidates = [candidate.lower().split() for candidate in candidates]
m = Rouge(variants=[1, 2, 4, "L"], multiref=multiref, alpha=0.5)
m.update((lower_split_candidates, lower_split_references))
results = m.compute()
for key in ["1", "2", "4", "L"]:
assert pytest.approx(results[f"Rouge-{key}-R"], abs=1e-4) == scores[f"rouge-{key.lower()}"]["r"]
assert pytest.approx(results[f"Rouge-{key}-P"], abs=1e-4) == scores[f"rouge-{key.lower()}"]["p"]
assert pytest.approx(results[f"Rouge-{key}-F"], abs=1e-4) == scores[f"rouge-{key.lower()}"]["f"]
def _test_distrib_integration(device):
from ignite.engine import Engine
rank = idist.get_rank()
size = len(corpus.chunks)
data = []
for c in corpus.chunks:
data += idist.get_world_size() * [c]
def update(_, i):
candidate, references = data[i + size * rank]
lower_split_references = [reference.lower().split() for reference in references[0]]
lower_split_candidate = candidate[0].lower().split()
return [lower_split_candidate], [lower_split_references]
def _test(metric_device):
engine = Engine(update)
m = Rouge(variants=[1, 2, "L"], alpha=0.5, device=metric_device)
m.attach(engine, "rouge")
engine.run(data=list(range(size)), max_epochs=1)
assert "rouge" in engine.state.metrics
evaluator = pyrouge.Rouge(
metrics=["rouge-n", "rouge-l"],
max_n=4,
apply_avg=True,
apply_best=False,
alpha=0.5,
stemming=False,
ensure_compatibility=False,
)
rouge_1_f, rouge_2_f, rouge_l_f = (0, 0, 0)
for candidate, references in data:
scores = evaluator.get_scores(candidate, references)
rouge_1_f += scores["rouge-1"]["f"]
rouge_2_f += scores["rouge-2"]["f"]
rouge_l_f += scores["rouge-l"]["f"]
assert pytest.approx(engine.state.metrics["Rouge-1-F"], abs=1e-4) == rouge_1_f / len(data)
assert pytest.approx(engine.state.metrics["Rouge-2-F"], abs=1e-4) == rouge_2_f / len(data)
assert pytest.approx(engine.state.metrics["Rouge-L-F"], abs=1e-4) == rouge_l_f / len(data)
_test("cpu")
if device.type != "xla":
_test(idist.device())
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import argparse
import torch
import ignite.distributed as idist
def training(local_rank, config, **kwargs):
import time
time.sleep(idist.get_rank() * 0.1)
print(idist.get_rank(), ": run with config:", config, "- kwargs:", kwargs, f"- backend={idist.backend()}")
t = torch.tensor([idist.get_rank()], device=idist.device())
t = idist.all_reduce(t)
t = t.item()
ws = idist.get_world_size()
assert t == ws * (ws - 1) / 2, f"{t} vs {ws}"
assert local_rank == idist.get_local_rank()
# Test init method:
if idist.model_name() == "native-dist":
from ignite.distributed.utils import _model
true_init_method = config.get("true_init_method", None)
assert true_init_method is not None, true_init_method
assert _model._init_method == true_init_method
if __name__ == "__main__":
"""
Usage:
- No distributed configuration:
```
python tests/ignite/distributed/check_idist_parallel.py
```
- Launch 4 procs using gloo backend with `torchrun`:
```
torchrun --nproc_per_node=4 tests/ignite/distributed/check_idist_parallel.py --backend=gloo
```
- Launch 2 procs in 2 nodes using gloo backend with `torchrun` or `torch.distributed.launch`:
```
bash -c "torchrun --nnodes=2 --node_rank=0 \
--master_addr=localhost --master_port=3344 --nproc_per_node=2 \
tests/ignite/distributed/check_idist_parallel.py --backend=gloo &" \
&& bash -c "torchrun --nnodes=2 --node_rank=1 \
--master_addr=localhost --master_port=3344 --nproc_per_node=2 \
tests/ignite/distributed/check_idist_parallel.py --backend=gloo &"
```
- Spawn 4 procs in single node using gloo backend:
```
python tests/ignite/distributed/check_idist_parallel.py --backend=gloo --nproc_per_node=4
```
- Spawn 2 procs in 2 nodes using gloo backend:
```
bash -c "python tests/ignite/distributed/check_idist_parallel.py --backend=gloo \
--nproc_per_node=2 --nnodes=2 --node_rank=0 --master_addr=localhost --master_port=3344 &" \
&& bash -c "python tests/ignite/distributed/check_idist_parallel.py --backend=gloo \
--nproc_per_node=2 --nnodes=2 --node_rank=1 --master_addr=localhost --master_port=3344 &"
```
- Spawn 8 procs in single node using xla-tpu backend:
```
python tests/ignite/distributed/check_idist_parallel.py --backend=xla-tpu --nproc_per_node=8
```
"""
parser = argparse.ArgumentParser("Check idist.Parallel")
parser.add_argument("--backend", type=str, default=None)
parser.add_argument("--nproc_per_node", type=int, default=None)
parser.add_argument("--nnodes", type=int, default=None)
parser.add_argument("--node_rank", type=int, default=None)
parser.add_argument("--master_addr", type=str, default=None)
parser.add_argument("--master_port", type=str, default=None)
parser.add_argument("--init_method", type=str, default=None)
args = parser.parse_args()
config = {
"model": "resnet18",
"lr": 0.01,
}
if args.backend in ["gloo", "nccl"]:
config["true_init_method"] = args.init_method if args.init_method is not None else "env://"
dist_config = dict(
nproc_per_node=args.nproc_per_node,
nnodes=args.nnodes,
node_rank=args.node_rank,
master_addr=args.master_addr,
master_port=args.master_port,
)
if args.init_method is not None:
dist_config["init_method"] = args.init_method
with idist.Parallel(backend=args.backend, **dist_config) as parallel:
parallel.run(training, config, a=1, b=2)
|
import os
import pytest
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import _InfiniteConstantSampler
from torch.utils.data.dataset import Dataset, IterableDataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import BatchSampler, RandomSampler, Sampler, SequentialSampler, WeightedRandomSampler
import ignite.distributed as idist
from ignite.distributed.auto import auto_dataloader, auto_model, auto_optim, DistributedProxySampler
class DummyDS(Dataset):
def __init__(self, length=10):
self.length = length
def __len__(self):
return self.length
def __getitem__(self, index):
return index
class DummyIterableDataset(IterableDataset):
def __init__(self, start, end):
        super().__init__()
self.start = start
self.end = end
def __iter__(self):
return iter(range(self.start, self.end))
def __len__(self):
return self.end - self.start
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("WORLD_SIZE" not in os.environ, reason="Skip if WORLD_SIZE not in env vars")
def test_auto_dataloader_warning(distributed_context_single_node_gloo):
with pytest.warns(UserWarning, match=r"Found batch_sampler in provided kwargs"):
auto_dataloader(
DummyDS(), batch_sampler=BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=False)
)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("WORLD_SIZE" not in os.environ, reason="Skip if WORLD_SIZE not in env vars")
def test_auto_dataloader_warning_distributed_sampler(distributed_context_single_node_gloo):
dataset = DummyDS()
rank = idist.get_rank()
world_size = idist.get_world_size()
auto_dataloader(dataset, sampler=DistributedSampler(dataset, num_replicas=world_size, rank=rank))
if world_size > 1:
wrong_rank = (rank + 1) % world_size
expected_warning = f"Found distributed sampler with rank={wrong_rank}, but process rank is {rank}"
with pytest.warns(UserWarning, match=expected_warning):
auto_dataloader(dataset, sampler=DistributedSampler(dataset, num_replicas=world_size, rank=wrong_rank))
expected_warning = f"Found distributed sampler with num_replicas={world_size + 1}, but world size is {world_size}"
with pytest.warns(UserWarning, match=expected_warning):
auto_dataloader(dataset, sampler=DistributedSampler(dataset, num_replicas=world_size + 1, rank=rank))
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_auto_dataloader_warning_tpu():
with pytest.warns(UserWarning, match=r"Found incompatible options: xla support and pin_memory"):
auto_dataloader(DummyDS(), pin_memory=True)
def _test_auto_dataloader(ws, nproc, batch_size, num_workers=1, sampler_name=None, dl_type=DataLoader):
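    # auto_dataloader is expected (when applicable) to divide batch_size by the world size,
    # split num_workers across processes, and wrap any user sampler into a distributed
    # (or distributed-proxy) sampler.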
def _test(data):
if sampler_name is None:
sampler = None
elif sampler_name == "WeightedRandomSampler":
sampler = WeightedRandomSampler(weights=torch.ones(100), num_samples=100)
elif sampler_name == "DistributedSampler":
sampler = DistributedSampler(data, num_replicas=ws, rank=idist.get_rank())
else:
raise RuntimeError(f"Unknown sampler name: {sampler_name}")
# Test auto_dataloader
assert idist.get_world_size() == ws, f"{idist.get_world_size()} vs {ws}"
        shuffle = (sampler is None) and not isinstance(data, IterableDataset)
dataloader = auto_dataloader(
data, batch_size=batch_size, num_workers=num_workers, sampler=sampler, shuffle=shuffle
)
assert isinstance(dataloader, dl_type)
if hasattr(dataloader, "_loader"):
dataloader = dataloader._loader
if ws < batch_size:
assert dataloader.batch_size == batch_size // ws
else:
assert dataloader.batch_size == batch_size
if ws <= num_workers:
assert dataloader.num_workers == (num_workers + nproc - 1) // nproc
else:
assert dataloader.num_workers == num_workers
if isinstance(data, IterableDataset):
sampler_type = _InfiniteConstantSampler
elif ws > 1:
if sampler is None or isinstance(sampler, DistributedSampler):
sampler_type = DistributedSampler
else:
sampler_type = DistributedProxySampler
else:
sampler_type = RandomSampler if sampler is None else type(sampler)
assert isinstance(dataloader.sampler, sampler_type)
if isinstance(dataloader, DataLoader):
assert dataloader.pin_memory == ("cuda" in idist.device().type)
data = torch.rand(100, 3, 12, 12)
_test(data)
if sampler_name is None:
data = DummyIterableDataset(0, 100)
_test(data)
def _test_auto_model(model, ws, device, sync_bn=False, **kwargs):
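    # auto_model is expected to wrap the model in DistributedDataParallel for native
    # nccl/gloo backends, keep a plain nn.Module for Horovod, fall back to DataParallel
    # on a multi-GPU non-distributed setup, and move all parameters to the target device.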
model = auto_model(model, sync_bn=sync_bn, **kwargs)
bnd = idist.backend()
if ws > 1 and torch.device(device).type in ("cuda", "cpu"):
if idist.has_native_dist_support and bnd in ("nccl", "gloo"):
assert isinstance(model, nn.parallel.DistributedDataParallel)
if sync_bn:
assert any([isinstance(m, nn.SyncBatchNorm) for m in model.modules()])
if "find_unused_parameters" in kwargs:
assert model.find_unused_parameters == kwargs["find_unused_parameters"]
elif idist.has_hvd_support and bnd in ("horovod",):
assert isinstance(model, nn.Module)
elif device != "cpu" and torch.cuda.is_available() and torch.cuda.device_count() > 1:
assert isinstance(model, nn.parallel.DataParallel)
else:
assert isinstance(model, nn.Module)
assert all(
[p.device.type == torch.device(device).type for p in model.parameters()]
), f"{[p.device.type for p in model.parameters()]} vs {torch.device(device).type}"
def _test_auto_model_optimizer(ws, device):
# Test auto_model
model = nn.Linear(10, 10)
_test_auto_model(model, ws, device)
model = nn.Sequential(nn.Linear(20, 100), nn.BatchNorm1d(100))
_test_auto_model(model, ws, device, sync_bn="cuda" in torch.device(device).type)
if ws > 1:
_test_auto_model(model, ws, device, find_unused_parameters=True)
_test_auto_model(model, ws, device, find_unused_parameters=False)
# Test auto_optim
bnd = idist.backend()
optimizer = optim.SGD(model.parameters(), lr=0.01)
optimizer = auto_optim(optimizer)
if idist.has_xla_support and "xla" in device:
assert isinstance(optimizer, optim.SGD) and hasattr(optimizer, "wrapped_optimizer")
elif idist.has_hvd_support and bnd in ("horovod",):
assert isinstance(optimizer, optim.SGD) and hasattr(optimizer, "_allreduce_grad_async")
else:
assert isinstance(optimizer, optim.SGD) and not hasattr(optimizer, "wrapped_optimizer")
if idist.has_hvd_support and bnd in ("horovod",):
backward_passes_per_step = 2
optimizer = optim.SGD(model.parameters(), lr=0.01)
optimizer = auto_optim(optimizer, backward_passes_per_step=backward_passes_per_step)
assert isinstance(optimizer, optim.SGD) and hasattr(optimizer, "backward_passes_per_step")
assert optimizer.backward_passes_per_step == backward_passes_per_step
def test_auto_methods_no_dist():
_test_auto_dataloader(1, 1, batch_size=1)
_test_auto_dataloader(1, 1, batch_size=10, num_workers=2)
_test_auto_dataloader(1, 1, batch_size=10, sampler_name="WeightedRandomSampler")
_test_auto_dataloader(1, 1, batch_size=10, sampler_name="DistributedSampler")
_test_auto_model_optimizer(1, "cuda" if torch.cuda.is_available() else "cpu")
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_auto_methods_gloo(distributed_context_single_node_gloo):
ws = distributed_context_single_node_gloo["world_size"]
_test_auto_dataloader(ws=ws, nproc=ws, batch_size=1)
_test_auto_dataloader(ws=ws, nproc=ws, batch_size=10, num_workers=2)
_test_auto_dataloader(ws=ws, nproc=ws, batch_size=10, sampler_name="WeightedRandomSampler")
_test_auto_dataloader(ws=ws, nproc=ws, batch_size=10, sampler_name="DistributedSampler")
device = idist.device()
_test_auto_model_optimizer(ws, device)
if ws > 1 and device.type == "cpu":
# Pytorch <= 1.9.0 => AssertionError
# Pytorch > 1.9 => ValueError
# https://github.com/pytorch/pytorch/blob/master/torch/nn/parallel/distributed.py#L1498
with pytest.raises(
(AssertionError, ValueError), match=r"SyncBatchNorm layers only work with (GPU|CUDA) modules"
):
model = nn.Sequential(nn.Linear(20, 100), nn.BatchNorm1d(100))
auto_model(model, sync_bn=True)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_auto_methods_nccl(distributed_context_single_node_nccl):
ws = distributed_context_single_node_nccl["world_size"]
_test_auto_dataloader(ws=ws, nproc=ws, batch_size=1)
_test_auto_dataloader(ws=ws, nproc=ws, batch_size=10, num_workers=10)
_test_auto_dataloader(ws=ws, nproc=ws, batch_size=1, sampler_name="WeightedRandomSampler")
_test_auto_dataloader(ws=ws, nproc=ws, batch_size=1, sampler_name="DistributedSampler")
device = idist.device()
_test_auto_model_optimizer(ws, device)
if ws > 1:
with pytest.raises(ValueError, match=r"Argument kwargs should not contain 'device_ids'"):
auto_model(nn.Linear(1, 1), device_ids=[0])
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_auto_methods_hvd(gloo_hvd_executor):
device = "cpu" if not torch.cuda.is_available() else "cuda"
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_auto_dataloader, args=(np, np, 1), np=np, do_init=True)
gloo_hvd_executor(_test_auto_dataloader, args=(np, np, 10, 10), np=np, do_init=True)
gloo_hvd_executor(_test_auto_dataloader, args=(np, np, 1, 1, "WeightedRandomSampler"), np=np, do_init=True)
gloo_hvd_executor(_test_auto_dataloader, args=(np, np, 1, 1, "DistributedSampler"), np=np, do_init=True)
gloo_hvd_executor(_test_auto_model_optimizer, args=(np, device), np=np, do_init=True)
def _test_auto_methods_xla(index, ws):
dl_type = DataLoader
if ws > 1:
from ignite.distributed.auto import _MpDeviceLoader
dl_type = _MpDeviceLoader
try:
from torch_xla.distributed.parallel_loader import MpDeviceLoader
dl_type = MpDeviceLoader
except ImportError:
pass
_test_auto_dataloader(ws=ws, nproc=ws, batch_size=1, dl_type=dl_type)
_test_auto_dataloader(ws=ws, nproc=ws, batch_size=10, num_workers=2, dl_type=dl_type)
_test_auto_dataloader(ws=ws, nproc=ws, batch_size=1, sampler_name="WeightedRandomSampler", dl_type=dl_type)
_test_auto_dataloader(ws=ws, nproc=ws, batch_size=1, sampler_name="DistributedSampler", dl_type=dl_type)
device = "xla"
_test_auto_model_optimizer(ws, device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_auto_methods_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_auto_methods_xla, args=(n,), nprocs=n)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_auto_methods_xla():
_test_auto_methods_xla(index=0, ws=1)
def test_dist_proxy_sampler():
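    # Split a WeightedRandomSampler across 8 ranks and check that, for every seed, the
    # union of per-rank indices equals the indices produced by the original sampler.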
weights = torch.ones(100)
weights[:50] += 1
num_samples = 200
sampler = WeightedRandomSampler(weights, num_samples)
num_replicas = 8
dist_samplers = [DistributedProxySampler(sampler, num_replicas=num_replicas, rank=i) for i in range(num_replicas)]
for seed in range(100):
torch.manual_seed(seed)
true_indices = list(sampler)
indices_per_rank = []
for s in dist_samplers:
s.set_epoch(seed)
indices_per_rank += list(s)
set_indices_per_rank = set(indices_per_rank)
set_true_indices = set(true_indices)
assert (
set_indices_per_rank == set_true_indices
), f"{set_true_indices - set_indices_per_rank} | {set_indices_per_rank - set_true_indices}"
with pytest.raises(TypeError, match=r"Argument sampler should be instance of torch Sampler"):
DistributedProxySampler(None)
with pytest.raises(TypeError, match=r"Argument sampler should have length"):
DistributedProxySampler(Sampler([1]))
with pytest.raises(TypeError, match=r"Argument sampler must not be a distributed sampler already"):
DistributedProxySampler(DistributedSampler(sampler, num_replicas=num_replicas, rank=0))
|
import os
import subprocess
import sys
from pathlib import Path
import pytest
import torch
from packaging.version import Version
import ignite.distributed as idist
from ignite.distributed.utils import has_hvd_support, has_native_dist_support, has_xla_support
def test_parallel_wrong_inputs():
with pytest.raises(ValueError, match=r"Unknown backend 'abc'. Available backends:"):
idist.Parallel(backend="abc")
with pytest.raises(ValueError, match=r"If backend is None, argument 'nnodes' should be also None"):
idist.Parallel(nnodes=2)
with pytest.raises(ValueError, match=r"Argument nproc_per_node should positive"):
idist.Parallel(backend="gloo", nproc_per_node=-1)
with pytest.raises(ValueError, match=r"Argument nnodes should positive"):
idist.Parallel(backend="gloo", nproc_per_node=1, nnodes=-1)
with pytest.raises(ValueError, match=r"If number of nodes larger than one"):
idist.Parallel(backend="gloo", nproc_per_node=1, nnodes=2)
with pytest.raises(ValueError, match=r"Argument node_rank should be between 0 and"):
idist.Parallel(backend="gloo", nproc_per_node=1, nnodes=2, node_rank=2)
with pytest.raises(ValueError, match=r"If number of nodes larger than one, arguments master_addr and master_port"):
idist.Parallel(backend="gloo", nproc_per_node=1, nnodes=2, node_rank=1)
@pytest.fixture()
def exec_filepath():
fp = Path(__file__).parent / "check_idist_parallel.py"
assert fp.exists()
yield fp.as_posix()
def execute(cmd, env=None):
import ignite
env = dict(os.environ) if env is None else env
env["PYTHONPATH"] = f"{os.path.dirname(ignite.__path__[0])}"
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
    process.wait()
    # Read each pipe only once; a second read() on the same pipe would return empty bytes.
    out = str(process.stdout.read()) + str(process.stderr.read())
    if process.returncode != 0:
        print(out)
        raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd, stderr=out)
    return out
def test_check_idist_parallel_no_dist(exec_filepath):
cmd = [sys.executable, "-u", exec_filepath]
out = execute(cmd)
assert "backend=None" in out
assert "in 1 processes" in out
assert "End of run" in out
def _test_check_idist_parallel_torch_launch(init_method, fp, backend, nprocs):
# torchrun --nproc_per_node=nprocs tests/ignite/distributed/check_idist_parallel.py --backend=backend
cmd = []
if Version(torch.__version__) >= Version("1.10.0"):
cmd += ["torchrun"]
else:
cmd += [
sys.executable,
"-m",
"torch.distributed.launch",
"--use_env",
]
cmd += [
f"--nproc_per_node={nprocs}",
fp,
f"--backend={backend}",
]
if init_method is not None:
cmd.append(f"--init_method={init_method}")
out = execute(cmd)
assert f"backend={backend}" in out
assert f"in {nprocs} processes" in out
assert "End of run" in out
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip because test uses torch launch")
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:29500", "FILE"])
@pytest.mark.parametrize(
"backend",
["gloo", pytest.param("nccl", marks=pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU"))],
)
def test_check_idist_parallel_torch_launch_n_procs_native(init_method, dirname, exec_filepath, backend):
if init_method == "FILE":
init_method = f"file://{dirname}/shared"
np = torch.cuda.device_count() if torch.cuda.is_available() else 4
_test_check_idist_parallel_torch_launch(init_method, exec_filepath, backend, np)
def _test_check_idist_parallel_hvdrun(fp, backend, nprocs):
# horovodrun -np=nprocs python tests/ignite/distributed/check_idist_parallel.py --backend=backend
cmd = [
"horovodrun",
"-np",
f"{nprocs}",
sys.executable,
fp,
f"--backend={backend}",
]
out = execute(cmd)
assert f"backend={backend}" in out
assert f"in {nprocs} processes" in out
assert "End of run" in out
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip because test uses horovodrun")
def test_check_idist_parallel_hvdrun_launch_n_procs(exec_filepath):
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
_test_check_idist_parallel_hvdrun(exec_filepath, "horovod", np)
def _test_check_idist_parallel_spawn(fp, backend, nprocs):
# python tests/ignite/distributed/check_idist_parallel.py --backend=backend --nproc_per_node=nprocs
cmd = [sys.executable, fp, f"--backend={backend}", f"--nproc_per_node={nprocs}"]
out = execute(cmd)
assert f"backend={backend}" in out
assert "Spawn function" in out
assert f"in {nprocs} processes" in out
if "xla" not in backend:
assert "End of run" in out
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.parametrize(
"backend",
["gloo", pytest.param("nccl", marks=pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU"))],
)
def test_check_idist_parallel_spawn_n_procs_native(exec_filepath, backend):
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
_test_check_idist_parallel_spawn(exec_filepath, backend, np)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_smoke_test_check_idist_parallel_spawn_multinode_n_procs_gloo(exec_filepath):
# Just a smoke test from check_idist_parallel.py for an emulated multi-node configuration
cmd1 = "export CUDA_VISIBLE_DEVICES= && "
cmd1 += f'bash -c "{sys.executable} {exec_filepath} --backend=gloo --nproc_per_node=2 '
cmd1 += '--nnodes=2 --node_rank=0 --master_addr=localhost --master_port=3344 &"'
os.system(cmd1)
cmd2 = [
sys.executable,
exec_filepath,
"--backend=gloo",
"--nproc_per_node=2",
"--nnodes=2",
"--node_rank=1",
"--master_addr=localhost",
"--master_port=3344",
]
env = dict(os.environ)
env["CUDA_VISIBLE_DEVICES"] = ""
out = execute(cmd2, env=env)
assert "backend=gloo" in out
assert "nproc_per_node: 2" in out
assert "nnodes: 2" in out
assert "master_addr: localhost" in out
assert "master_port: 3344" in out
assert "End of run" in out
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_check_idist_parallel_spawn_n_procs_xla(exec_filepath):
n = int(os.environ["NUM_TPU_WORKERS"])
if n > 1:
_test_check_idist_parallel_spawn(exec_filepath, "xla-tpu", n)
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_check_idist_parallel_spawn_n_procs_hvd(exec_filepath):
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
_test_check_idist_parallel_spawn(exec_filepath, "horovod", np)
def _test_func(index, ws, device, backend, true_init_method):
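    # Runs inside each spawned process: check the rank, world size, device and backend as
    # seen through idist, plus the init_method actually used by the native dist model.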
assert 0 <= index < ws
assert index == idist.get_local_rank()
assert ws == idist.get_world_size()
assert torch.device(device).type == idist.device().type
assert backend == idist.backend()
if idist.model_name() == "native-dist":
from ignite.distributed.utils import _model
assert _model._init_method == true_init_method
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.parametrize("init_method", ["env://", "tcp://0.0.0.0:29500", "FILE"])
@pytest.mark.parametrize(
"backend",
["gloo", pytest.param("nccl", marks=pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU"))],
)
def test_idist_parallel_spawn_n_procs_native(init_method, backend, dirname):
if init_method == "FILE":
init_method = f"file://{dirname}/shared"
nproc_per_node = torch.cuda.device_count() if torch.cuda.is_available() else 4
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
with idist.Parallel(backend=backend, nproc_per_node=nproc_per_node, init_method=init_method) as parallel:
parallel.run(_test_func, ws=nproc_per_node, device=device, backend=backend, true_init_method=init_method)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" not in os.environ, reason="Skip if not launched as multiproc")
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.parametrize("init_method", ["env://", "tcp://0.0.0.0:29500", "FILE"])
@pytest.mark.parametrize(
"backend",
["gloo", pytest.param("nccl", marks=pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU"))],
)
def test_idist_parallel_n_procs_native(init_method, backend, get_fixed_dirname, local_rank, world_size):
if init_method == "FILE":
init_method = f"file://{get_fixed_dirname('idist_parallel_n_procs_native')}/shared"
os.environ["RANK"] = str(local_rank)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
with idist.Parallel(backend=backend, init_method=init_method) as parallel:
parallel.run(_test_func, ws=world_size, device=device, backend=backend, true_init_method=init_method)
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_idist_parallel_no_dist():
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
with idist.Parallel(backend=None) as parallel:
parallel.run(_test_func, ws=1, device=device, backend=None, true_init_method=None)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_parallel_spawn_params_xla():
res = idist.Parallel._setup_spawn_params(
nproc_per_node=8, nnodes=None, node_rank=None, master_addr=None, master_port=None, start_method="fork"
)
assert "nproc_per_node" in res and res["nproc_per_node"] == 8
assert "start_method" in res and res["start_method"] == "fork"
with idist.Parallel(backend="xla-tpu", nproc_per_node=8, start_method="fork") as parallel:
assert parallel.backend == "xla-tpu"
res = parallel._spawn_params
assert "nproc_per_node" in res and res["nproc_per_node"] == 8
assert "start_method" in res and res["start_method"] == "fork"
|
import os
import pytest
import torch
import ignite.distributed as idist
from ignite.distributed.utils import has_hvd_support
from tests.ignite.distributed.utils import (
_test_distrib__get_max_length,
_test_distrib_all_gather,
_test_distrib_all_gather_group,
_test_distrib_all_reduce,
_test_distrib_all_reduce_group,
_test_distrib_barrier,
_test_distrib_broadcast,
_test_distrib_config,
_test_distrib_new_group,
_test_distrib_one_rank_only,
_test_distrib_one_rank_only_with_engine,
_test_sync,
)
@pytest.mark.skipif(has_hvd_support, reason="Skip if has Horovod package")
def test_hvd_distrib_spawn_no_hvd_support():
with pytest.raises(ValueError, match=r"Backend should be one of"):
idist.spawn("horovod", _test_distrib_config, args=("horovod", 1, "cpu"), nproc_per_node=1)
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
def test_hvd_distrib_single_node_single_device():
import horovod.torch as hvd
idist.initialize("horovod")
device = "cpu" if torch.cuda.device_count() < 1 else "cuda"
local_rank = hvd.local_rank()
world_size = hvd.size()
rank = hvd.rank()
_test_distrib_config(local_rank, "horovod", world_size, device, rank)
idist.finalize()
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(torch.cuda.device_count() > 0, reason="Skip if has GPU")
def test_hvd_distrib_single_node_spawn():
world_size = 4
idist.spawn("horovod", _test_distrib_config, args=("horovod", world_size, "cpu"), nproc_per_node=world_size)
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_hvd_distrib_multi_node_spawn_raise_error():
world_size = 4
with pytest.raises(RuntimeError, match=r"For multi-node configuration, please set 'hosts' argument instead"):
idist.spawn(
"horovod", _test_distrib_config, args=("horovod", world_size, "cpu"), nproc_per_node=world_size, nnodes=2
)
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_hvd_distrib_single_node_spawn_cuda():
world_size = torch.cuda.device_count()
idist.spawn("horovod", _test_distrib_config, args=("horovod", world_size, "cuda"), nproc_per_node=world_size)
def _test_sync_as_hvd():
import horovod.torch as hvd
from ignite.distributed.comp_models.horovod import _HorovodDistModel
hvd.init()
lrank = hvd.local_rank()
if torch.cuda.is_available():
torch.cuda.set_device(lrank)
_test_sync(_HorovodDistModel)
hvd.shutdown()
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif(os.getenv("HOROVOD_RANK", -1) == -1, reason="Skip as controller is not Gloo")
def test_sync_as_hvd():
_test_sync_as_hvd()
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_sync_as_hvd_inside_gloo_executor(gloo_hvd_executor):
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_sync_as_hvd, (), np=np)
def _test_idist_methods_in_hvd_context(backend, device):
# We explicitly set _model as _SerialModel
# then call idist.* methods and check that they give correct values
import horovod.torch as hvd
from ignite.distributed.utils import _SerialModel, _set_model
hvd.init()
_set_model(_SerialModel())
ws = hvd.size()
rank = hvd.rank()
local_rank = hvd.local_rank()
if torch.cuda.is_available():
torch.cuda.set_device(local_rank)
_test_distrib_config(local_rank, backend=backend, ws=ws, true_device=device, rank=rank)
hvd.shutdown()
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_idist_methods_in_hvd_context(gloo_hvd_executor):
device = "cpu" if not torch.cuda.is_available() else "cuda"
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_idist_methods_in_hvd_context, ("horovod", device), np=np)
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_idist_all_reduce_hvd(gloo_hvd_executor):
device = "cpu" if not torch.cuda.is_available() else "cuda"
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_all_reduce, (device,), np=np, do_init=True)
gloo_hvd_executor(_test_distrib_all_reduce_group, (device,), np=np, do_init=True)
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_idist__model_methods_hvd(gloo_hvd_executor):
device = "cpu" if not torch.cuda.is_available() else "cuda"
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib__get_max_length, (device,), np=np, do_init=True)
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_idist_all_gather_hvd(gloo_hvd_executor):
device = "cpu" if not torch.cuda.is_available() else "cuda"
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_all_gather, (device,), np=np, do_init=True)
gloo_hvd_executor(_test_distrib_all_gather_group, (device,), np=np, do_init=True)
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_idist_broadcast_hvd(gloo_hvd_executor):
device = "cpu" if not torch.cuda.is_available() else "cuda"
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_broadcast, (device,), np=np, do_init=True)
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_idist_barrier_hvd(gloo_hvd_executor):
device = "cpu" if not torch.cuda.is_available() else "cuda"
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_barrier, (device,), np=np, do_init=True)
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_idist_new_group_hvd(gloo_hvd_executor):
device = "cpu" if not torch.cuda.is_available() else "cuda"
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_new_group, (device,), np=np, do_init=True)
def _test_idist_methods_overhead(ok_factor, sync_model):
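    # Compare the average per-call time of idist.get_world_size()/get_rank() against raw
    # hvd.size()/hvd.rank(); the wrapper overhead must stay below ok_factor.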
import time
import horovod.torch as hvd
if sync_model:
idist.sync()
from ignite.distributed.comp_models.horovod import _HorovodDistModel
from ignite.distributed.utils import _model
assert isinstance(_model, _HorovodDistModel)
n = 100000
m = 5
t2 = 0.0
t1 = 0.0
for _ in range(m):
start = time.time()
for _ in range(n):
_ = hvd.size()
_ = hvd.rank()
elapsed = time.time() - start
t2 += elapsed / n / m
start = time.time()
for _ in range(n):
_ = idist.get_world_size()
_ = idist.get_rank()
elapsed = time.time() - start
t1 += elapsed / n / m
overhead_factor = t1 / t2
assert overhead_factor < ok_factor, f"{overhead_factor} vs {ok_factor} | {t2} vs {t1}"
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_idist_methods_overhead_hvd(gloo_hvd_executor):
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
ok_factor = 6.0
sync_model = False
gloo_hvd_executor(_test_idist_methods_overhead, (ok_factor, sync_model), np=np, do_init=True)
ok_factor = 2.5
sync_model = True
gloo_hvd_executor(_test_idist_methods_overhead, (ok_factor, sync_model), np=np, do_init=True)
@pytest.mark.distributed
@pytest.mark.skipif(not has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_idist_one_rank_only(gloo_hvd_executor):
device = "cpu" if not torch.cuda.is_available() else "cuda"
np = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_one_rank_only, (device,), np=np, do_init=True)
gloo_hvd_executor(_test_distrib_one_rank_only_with_engine, (device,), np=np, do_init=True)
|
import torch
import ignite.distributed as idist
from tests.ignite.distributed.utils import (
_sanity_check,
_test_distrib__get_max_length,
_test_distrib_all_gather,
_test_distrib_all_reduce,
_test_distrib_barrier,
_test_distrib_broadcast,
_test_distrib_new_group,
_test_sync,
)
def test_no_distrib(capsys):
assert idist.backend() is None
if torch.cuda.is_available():
assert idist.device().type == "cuda"
else:
assert idist.device().type == "cpu"
assert idist.get_rank() == 0
assert idist.get_world_size() == 1
assert idist.get_local_rank() == 0
assert idist.model_name() == "serial"
from ignite.distributed.utils import _model, _SerialModel
_sanity_check()
assert isinstance(_model, _SerialModel)
idist.show_config()
captured = capsys.readouterr()
out = captured.err.split("\r")
out = list(map(lambda x: x.strip(), out))
out = list(filter(None, out))
assert "ignite.distributed.utils INFO: distributed configuration: serial" in out[-1]
assert "ignite.distributed.utils INFO: backend: None" in out[-1]
if torch.cuda.is_available():
assert "ignite.distributed.utils INFO: device: cuda" in out[-1]
else:
assert "ignite.distributed.utils INFO: device: cpu" in out[-1]
assert "ignite.distributed.utils INFO: rank: 0" in out[-1]
assert "ignite.distributed.utils INFO: local rank: 0" in out[-1]
assert "ignite.distributed.utils INFO: world size: 1" in out[-1]
def test_sync_no_dist():
from ignite.distributed.comp_models import _SerialModel
_test_sync(_SerialModel)
def test_idist_methods_no_dist():
assert idist.get_world_size() < 2
assert idist.backend() is None, f"{idist.backend()}"
def test_idist__model_methods_no_dist():
_test_distrib__get_max_length("cpu")
if torch.cuda.device_count() > 1:
_test_distrib__get_max_length("cuda")
def test_idist_collective_ops_no_dist():
_test_distrib_all_reduce("cpu")
_test_distrib_all_gather("cpu")
_test_distrib_barrier("cpu")
_test_distrib_broadcast("cpu")
_test_distrib_new_group("cpu")
if torch.cuda.device_count() > 1:
_test_distrib_all_reduce("cuda")
_test_distrib_all_gather("cuda")
_test_distrib_barrier("cuda")
_test_distrib_broadcast("cuda")
_test_distrib_new_group("cuda")
|
import pytest
import torch
import torch.distributed as dist
import ignite.distributed as idist
from ignite.distributed.utils import sync
from ignite.engine import Engine, Events
def _sanity_check():
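    # Basic invariants of the distributed configuration: the world size factorizes into
    # nnodes x nproc_per_node, and every rank index stays within its bounds.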
from ignite.distributed.utils import _model
assert _model.get_world_size() == _model.get_nnodes() * _model.get_nproc_per_node()
assert _model.get_local_rank() < _model.get_nproc_per_node()
assert _model.get_rank() < _model.get_world_size()
assert _model.get_node_rank() < _model.get_nnodes()
def _test_distrib_config(local_rank, backend, ws, true_device, rank=None, true_init_method=None):
assert idist.backend() == backend, f"{idist.backend()} vs {backend}"
this_device = idist.device()
assert isinstance(this_device, torch.device)
if backend in ("nccl", "gloo", "horovod") and "cuda" in this_device.type:
assert this_device.type == torch.device(true_device).type, f"{this_device} vs {true_device}"
elif backend in ("gloo", "horovod"):
assert this_device.type == torch.device(true_device).type
elif backend == "xla-tpu":
assert true_device in this_device.type
if rank is None:
if idist.model_name() == "native-dist":
rank = dist.get_rank()
if rank is not None:
assert idist.get_rank() == rank
assert idist.get_world_size() == ws
assert idist.get_local_rank() == local_rank
assert idist.model_name() in ("native-dist", "xla-dist", "horovod-dist")
_sanity_check()
if idist.model_name() == "native-dist":
from ignite.distributed.utils import _model
if true_init_method is not None:
assert _model._init_method == true_init_method
def _test_sync(cls):
from ignite.distributed.utils import _SerialModel, _set_model
_set_model(_SerialModel())
sync()
from ignite.distributed.utils import _model
assert isinstance(_model, cls), f"{type(_model)} vs {cls}"
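# _model._get_max_length should return the length of the longest string across all ranks;
# here rank r contributes a string repeated (r + 2) times, so the maximum comes from the last rank.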
def _test_distrib__get_max_length(device):
ws = idist.get_world_size()
x = "_test_distrib__get_max_length" * (idist.get_rank() + 2)
from ignite.distributed.utils import _model
res = _model._get_max_length(x, device)
assert res == len("_test_distrib__get_max_length" * (ws + 1))
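# Exercises idist.all_reduce on numbers and tensors with the default SUM as well as
# MIN, MAX and PRODUCT reductions, and checks error handling for unsupported inputs and ops.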
def _test_distrib_all_reduce(device):
res = idist.all_reduce(10)
assert res == 10 * idist.get_world_size()
t = torch.tensor(10, device=device)
res = idist.all_reduce(t)
assert res.item() == 10 * idist.get_world_size()
rank = idist.get_rank()
t = torch.tensor(rank * 2.0 + 1.0, device=device)
res = idist.all_reduce(t)
assert res.item() == sum([i * 2.0 + 1.0 for i in range(idist.get_world_size())])
t = torch.tensor(rank * 2.0 + 1.0, device=device)
res = idist.all_reduce(t, "MIN").item()
true_val = min([i * 2 + 1 for i in range(idist.get_world_size())])
assert res == true_val, f"{res} vs {true_val}"
t = torch.tensor(rank * 2.0 + 1.0, device=device)
res = idist.all_reduce(t, "MAX").item()
true_val = max([i * 2.0 + 1.0 for i in range(idist.get_world_size())])
assert res == true_val, f"{res} vs {true_val}"
t = torch.ones(4, 4, device=device) * (rank * 2.0 + 1.0)
res = idist.all_reduce(t, "MAX")
true_val = torch.ones(4, 4, device=device) * ((idist.get_world_size() - 1) * 2.0 + 1.0)
assert res.equal(true_val), f"{res} vs {true_val}"
t = torch.tensor(rank * 2.0 + 1.0, device=device)
res = idist.all_reduce(t, "PRODUCT").item()
true_val = 1
for v in [i * 2.0 + 1.0 for i in range(idist.get_world_size())]:
true_val *= v
assert res == true_val, f"{res} vs {true_val}"
if idist.get_world_size() > 1:
with pytest.raises(TypeError, match=r"Unhandled input type"):
idist.all_reduce("abc")
with pytest.raises(ValueError, match=r"Unsupported reduction operation"):
idist.all_reduce(10, op="ABC")
t = torch.tensor([0, 1, 2])
res = idist.all_reduce(t)
assert res.device == t.device, f"{res.device} vs {t.device}"
def _test_distrib_all_reduce_group(device):
if idist.get_world_size() > 1 and idist.backend() is not None:
ranks = [0, 1]
rank = idist.get_rank()
t = torch.tensor([rank], device=device)
bnd = idist.backend()
group = idist.new_group(ranks)
if bnd in ("horovod"):
with pytest.raises(NotImplementedError, match=r"all_reduce with group for horovod is not implemented"):
res = idist.all_reduce(t, group=group)
else:
res = idist.all_reduce(t, group=group)
assert res == torch.tensor([sum(ranks)], device=device)
t = torch.tensor([rank], device=device)
if bnd in ("horovod"):
with pytest.raises(NotImplementedError, match=r"all_reduce with group for horovod is not implemented"):
res = idist.all_reduce(t, group=ranks)
else:
res = idist.all_reduce(t, group=ranks)
assert res == torch.tensor([sum(ranks)], device=device)
ranks = "abc"
if bnd in ("nccl", "gloo", "mpi"):
with pytest.raises(ValueError, match=r"Argument group should be list of int or ProcessGroup"):
res = idist.all_reduce(t, group="abc")
elif bnd in ("xla-tpu"):
with pytest.raises(ValueError, match=r"Argument group should be list of int"):
res = idist.all_reduce(t, group="abc")
elif bnd in ("horovod"):
with pytest.raises(NotImplementedError, match=r"all_reduce with group for horovod is not implemented"):
res = idist.all_reduce(t, group="abc")
def _test_distrib_all_gather(device):
rank = idist.get_rank()
ws = idist.get_world_size()
res = torch.tensor(idist.all_gather(10), device=device)
true_res = torch.tensor([10] * ws, device=device)
assert (res == true_res).all()
t = torch.tensor(rank, device=device)
res = idist.all_gather(t)
true_res = torch.tensor([i for i in range(ws)], device=device)
assert (res == true_res).all()
x = "test-test"
if rank == 0:
x = "abc"
res = idist.all_gather(x)
true_res = ["abc"] + ["test-test"] * (ws - 1)
assert res == true_res
base_x = "tests/ignite/distributed/utils/test_native.py" * 2000
x = base_x
if rank == 0:
x = "abc"
res = idist.all_gather(x)
true_res = ["abc"] + [base_x] * (ws - 1)
assert res == true_res
t = torch.arange(100, device=device).reshape(4, 25) * (rank + 1)
in_dtype = t.dtype
res = idist.all_gather(t)
assert res.shape == (ws * 4, 25)
assert res.dtype == in_dtype
true_res = torch.zeros(ws * 4, 25, device=device)
for i in range(ws):
true_res[i * 4 : (i + 1) * 4, ...] = torch.arange(100, device=device).reshape(4, 25) * (i + 1)
assert (res == true_res).all()
if ws > 1 and idist.backend() != "xla-tpu":
t = {
"a": [rank + 1, rank + 2, torch.tensor(rank + 3, device=device)],
"b": torch.tensor([[rank + 1, rank + 2, rank + 3]], device=device),
"c": {"abcd": rank, "cdfg": torch.tensor(rank, dtype=torch.uint8, device=device)},
}
res = idist.all_gather(t)
assert isinstance(res, list) and len(res) == ws
for i, obj in enumerate(res):
assert isinstance(obj, dict)
assert list(obj.keys()) == ["a", "b", "c"], obj
expected_device = (
device if torch.device(device).type == "cpu" else torch.device(f"{torch.device(device).type}:{i}")
)
expected = {
"a": [i + 1, i + 2, torch.tensor(i + 3, device=expected_device)],
"b": torch.tensor([[i + 1, i + 2, i + 3]], device=expected_device),
"c": {"abcd": i, "cdfg": torch.tensor(i, dtype=torch.uint8, device=expected_device)},
}
assert obj["a"] == expected["a"]
assert (obj["b"] == expected["b"]).all()
assert obj["c"] == expected["c"]
def _test_distrib_all_gather_group(device):
if idist.get_world_size() > 1:
ranks = list(range(idist.get_world_size() - 1, 0, -1)) # [0, 1, 2, 3] -> [3, 2, 1]
rank = idist.get_rank()
bnd = idist.backend()
t = torch.tensor([rank], device=device)
group = idist.new_group(ranks)
if bnd in ("horovod"):
with pytest.raises(NotImplementedError, match=r"all_gather with group for horovod is not implemented"):
res = idist.all_gather(t, group=group)
else:
res = idist.all_gather(t, group=group)
if rank in ranks:
assert torch.equal(res, torch.tensor(ranks, device=device))
else:
assert res == t
t = torch.tensor([rank], device=device)
if bnd in ("horovod"):
with pytest.raises(NotImplementedError, match=r"all_gather with group for horovod is not implemented"):
res = idist.all_gather(t, group=ranks)
else:
res = idist.all_gather(t, group=ranks)
if rank in ranks:
assert torch.equal(res, torch.tensor(ranks, device=device))
else:
assert res == t
t = {
"a": [rank + 1, rank + 2, torch.tensor(rank + 3, device=device)],
"b": torch.tensor([[rank + 1, rank + 2, rank + 3]], device=device),
"c": {"abcd": rank, "cdfg": torch.tensor(rank, dtype=torch.uint8, device=device)},
}
if bnd in ("xla-tpu"):
with pytest.raises(NotImplementedError, match=r"all_gather on object is not implemented for xla"):
res = idist.all_gather(t, group=ranks)
elif bnd in ("horovod"):
with pytest.raises(NotImplementedError, match=r"all_gather with group for horovod is not implemented"):
res = idist.all_gather(t, group=ranks)
else:
res = idist.all_gather(t, group=ranks)
if rank in ranks:
assert isinstance(res, list) and len(res) == len(ranks)
for i, obj in zip(ranks, res):
assert isinstance(obj, dict)
assert list(obj.keys()) == ["a", "b", "c"], obj
expected_device = (
device
if torch.device(device).type == "cpu"
else torch.device(f"{torch.device(device).type}:{i}")
)
expected = {
"a": [i + 1, i + 2, torch.tensor(i + 3, device=expected_device)],
"b": torch.tensor([[i + 1, i + 2, i + 3]], device=expected_device),
"c": {"abcd": i, "cdfg": torch.tensor(i, dtype=torch.uint8, device=expected_device)},
}
assert obj["a"] == expected["a"], (obj, expected)
assert (obj["b"] == expected["b"]).all(), (obj, expected)
assert obj["c"] == expected["c"], (obj, expected)
else:
assert res == t
if bnd in ("nccl", "gloo", "mpi"):
with pytest.raises(ValueError, match=r"Argument group should be list of int or ProcessGroup"):
res = idist.all_gather(t, group="abc")
elif bnd in ("xla-tpu"):
with pytest.raises(ValueError, match=r"Argument group should be list of int"):
res = idist.all_gather(t, group="abc")
elif bnd in ("horovod"):
with pytest.raises(NotImplementedError, match=r"all_gather with group for horovod is not implemented"):
res = idist.all_gather(t, group="abc")
def _test_distrib_broadcast(device):
rank = idist.get_rank()
ws = idist.get_world_size()
def _test(data_src, data_others, safe_mode):
for src in range(ws):
data = data_src if rank == src else data_others
res = idist.broadcast(data, src=src, safe_mode=safe_mode)
if isinstance(res, torch.Tensor):
assert (res == data_src).all(), f"{res} vs {data_src}"
assert data_src.dtype == res.dtype
else:
assert res == data_src, f"{res} vs {data_src}"
_test(10, 0, safe_mode=False)
_test(10, None, safe_mode=True)
t = torch.tensor([1.2345, 2.3456], dtype=torch.float, device=device)
_test(t, torch.empty_like(t), safe_mode=False)
_test(t, None, safe_mode=True)
_test(t, "abc", safe_mode=True)
_test("test-abcdefg", "", safe_mode=False)
_test("test-abcdefg", None, safe_mode=True)
_test("test-abcdefg", 1.2, safe_mode=True)
s = "tests/ignite/distributed/utils/test_horovod.py::test_idist_broadcast_hvd" * 200
_test(s, "", safe_mode=False)
_test(s, None, safe_mode=True)
_test(s, 123.0, safe_mode=True)
t = torch.arange(100, device=device).reshape(4, 25) * 2
_test(t, torch.empty_like(t), safe_mode=False)
_test(t, None, safe_mode=True)
_test(t, "None", safe_mode=True)
t = torch.tensor(12)
_test(t, torch.empty_like(t), safe_mode=False)
_test(t, None, safe_mode=True)
_test(t, 123.4, safe_mode=True)
if idist.get_world_size() > 1:
with pytest.raises(TypeError, match=r"Unhandled input type"):
idist.broadcast([0, 1, 2], src=0)
if idist.get_world_size() > 1:
msg = "Source data can not be None" if rank == 0 else "Argument safe_mode should be True"
with pytest.raises(ValueError, match=msg):
idist.broadcast(None, src=0)
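# Smoke test for idist.barrier: rank 0 increments its tensor before the barrier and the
# following all_reduce checks the combined result.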
def _test_distrib_barrier(device):
t = torch.tensor([idist.get_rank()], device=device, dtype=torch.float)
true_res = sum([i for i in range(idist.get_world_size())])
if idist.get_rank() == 0:
t += 10.0
idist.barrier()
tt = idist.all_reduce(t)
assert tt.item() == true_res + 10.0
def _test_distrib_new_group(device):
if idist.get_world_size() > 1 and idist.backend() is not None:
bnd = idist.backend()
ranks = [0, 1]
if idist.has_native_dist_support and bnd in ("nccl", "gloo", "mpi"):
g1 = idist.new_group(ranks)
g2 = dist.new_group(ranks)
rank = idist.get_rank()
if rank in ranks:
assert g1.rank() == g2.rank()
        elif idist.has_xla_support and bnd == "xla-tpu":
assert idist.new_group(ranks) == [ranks]
        elif idist.has_hvd_support and bnd == "horovod":
from horovod.common.process_sets import ProcessSet
g1 = idist.new_group(ranks)
g2 = ProcessSet(ranks)
rank = idist.get_rank()
if rank in ranks:
assert g1.ranks == g2.ranks
elif idist.backend() is None:
ranks = [0, 1]
assert idist.new_group(ranks) == ranks
with pytest.raises(ValueError, match="Argument ranks should be list of int"):
ranks = ["a", "b", "c"]
idist.new_group(ranks)
with pytest.raises(ValueError, match="Argument ranks should be list of int"):
ranks = 1
idist.new_group(ranks)
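# idist.one_rank_only should run the decorated function only on the given rank (the last rank
# here); the all_gather afterwards verifies that only that rank's tensor was modified.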
def _test_distrib_one_rank_only(device):
def _test(barrier):
# last rank
rank = idist.get_world_size() - 1
value = torch.tensor(0).to(device)
@idist.one_rank_only(rank=rank, with_barrier=barrier)
def initialize():
value.add_(torch.tensor(100).to(device))
initialize()
value_list = idist.all_gather(tensor=value)
for r in range(idist.get_world_size()):
if r == rank:
assert value_list[r].item() == 100
else:
assert value_list[r].item() == 0
_test(barrier=True)
_test(barrier=False)
def _test_distrib_one_rank_only_with_engine(device):
def _test(barrier):
engine = Engine(lambda e, b: b)
batch_sum = torch.tensor(0).to(device)
@engine.on(Events.ITERATION_COMPLETED)
        @idist.one_rank_only(with_barrier=barrier)  # i.e. rank == 0 by default
def _(_):
batch_sum.data += torch.tensor(engine.state.batch).to(device)
engine.run([1, 2, 3], max_epochs=2)
value_list = idist.all_gather(tensor=batch_sum)
for r in range(idist.get_world_size()):
if r == 0:
assert value_list[r].item() == 12
else:
assert value_list[r].item() == 0
_test(barrier=True)
_test(barrier=False)
|
import os
import pytest
import ignite.distributed as idist
from ignite.distributed.utils import has_xla_support
from tests.ignite.distributed.utils import (
_test_distrib_all_gather,
_test_distrib_all_gather_group,
_test_distrib_all_reduce,
_test_distrib_all_reduce_group,
_test_distrib_barrier,
_test_distrib_broadcast,
_test_distrib_config,
_test_distrib_new_group,
_test_distrib_one_rank_only,
_test_distrib_one_rank_only_with_engine,
_test_sync,
)
@pytest.mark.skipif(has_xla_support, reason="Skip if has PyTorch XLA package")
def test_xla_distrib_spawn_no_xla_support():
with pytest.raises(ValueError, match=r"Backend should be one of"):
idist.spawn("xla-tpu", _test_distrib_config, args=("xla-tpu", 1, "xla"), nproc_per_node=1)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_xla_distrib_single_node_no_spawn():
idist.initialize("xla-tpu")
_test_distrib_config(local_rank=0, backend="xla-tpu", ws=1, true_device="xla")
idist.finalize()
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_xla_distrib_single_node_spawn_one_proc():
try:
idist.spawn("xla-tpu", _test_distrib_config, args=("xla-tpu", 1, "xla"), nproc_per_node=1)
except SystemExit:
pass
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_xla_distrib_single_node_spawn_n_procs():
n = int(os.environ["NUM_TPU_WORKERS"])
try:
idist.spawn("xla-tpu", _test_distrib_config, args=("xla-tpu", n, "xla"), nproc_per_node=n)
except SystemExit:
pass
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_sync_as_xla():
from ignite.distributed.comp_models.xla import _XlaDistModel
_test_sync(_XlaDistModel)
def _test_sync_as_xla_in_child_proc(index):
from ignite.distributed.comp_models.xla import _XlaDistModel
_test_sync(_XlaDistModel)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_sync_as_xla_in_child_proc(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_sync_as_xla_in_child_proc, args=(), nprocs=n)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_methods_in_xla_context():
    # We explicitly set _model to _SerialModel,
    # then call idist.* methods and check that they give correct values
from ignite.distributed.utils import _SerialModel, _set_model
_set_model(_SerialModel())
_test_distrib_config(local_rank=0, backend="xla-tpu", ws=1, true_device="xla", rank=0)
def _test_idist_methods_in_xla_context_in_child_proc(index):
    # We explicitly set _model to _SerialModel,
    # then call idist.* methods and check that they give correct values
from ignite.distributed.utils import _SerialModel, _set_model
_set_model(_SerialModel())
import torch_xla.core.xla_model as xm
_test_distrib_config(
local_rank=index, backend="xla-tpu", ws=xm.xrt_world_size(), true_device="xla", rank=xm.get_ordinal()
)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_methods_in_xla_context_in_child_proc(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_idist_methods_in_xla_context_in_child_proc, args=(), nprocs=n)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_all_reduce_xla():
device = idist.device()
_test_distrib_all_reduce(device)
_test_distrib_all_reduce_group(device)
def _test_idist_all_reduce_xla_in_child_proc(index):
device = idist.device()
_test_distrib_all_reduce(device)
_test_distrib_all_reduce_group(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_all_reduce_xla_in_child_proc(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_idist_all_reduce_xla_in_child_proc, args=(), nprocs=n)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_new_group_xla():
device = idist.device()
_test_distrib_new_group(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_all_gather_xla():
device = idist.device()
_test_distrib_all_gather(device)
_test_distrib_all_gather_group(device)
def _test_idist_all_gather_xla_in_child_proc(index):
device = idist.device()
_test_distrib_all_gather(device)
_test_distrib_all_gather_group(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_all_gather_xla_in_child_proc(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_idist_all_gather_xla_in_child_proc, args=(), nprocs=n)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_broadcast_xla():
device = idist.device()
_test_distrib_broadcast(device)
def _test_idist_broadcast_xla_in_child_proc(index):
device = idist.device()
_test_distrib_broadcast(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_broadcast_xla_in_child_proc(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_idist_broadcast_xla_in_child_proc, args=(), nprocs=n)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_barrier_xla():
device = idist.device()
_test_distrib_barrier(device)
def _test_idist_barrier_xla_in_child_proc(index):
device = idist.device()
_test_distrib_barrier(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_barrier_xla_in_child_proc(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_idist_barrier_xla_in_child_proc, args=(), nprocs=n)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_one_rank_only_xla():
device = idist.device()
_test_distrib_one_rank_only(device=device)
_test_distrib_one_rank_only_with_engine(device=device)
def _test_idist_one_rank_only_xla_nprocs(index):
device = idist.device()
_test_distrib_one_rank_only(device=device)
_test_distrib_one_rank_only_with_engine(device=device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test_idist_one_rank_only_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_idist_one_rank_only_xla_nprocs, args=(), nprocs=n)
|
import os
import pytest
import torch
import torch.distributed as dist
from packaging.version import Version
import ignite.distributed as idist
from ignite.distributed.utils import has_native_dist_support
from tests.ignite.distributed.utils import (
_test_distrib__get_max_length,
_test_distrib_all_gather,
_test_distrib_all_gather_group,
_test_distrib_all_reduce,
_test_distrib_all_reduce_group,
_test_distrib_barrier,
_test_distrib_broadcast,
_test_distrib_config,
_test_distrib_new_group,
_test_distrib_one_rank_only,
_test_distrib_one_rank_only_with_engine,
_test_sync,
)
def _test_native_distrib_single_node_launch_tool(backend, device, local_rank, world_size, init_method=None, **kwargs):
import os
rank = local_rank
os.environ["RANK"] = f"{rank}"
idist.initialize(backend, init_method=init_method, **kwargs)
_test_distrib_config(local_rank, backend, world_size, device, rank, true_init_method=init_method)
idist.finalize()
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:22334", "FILE"])
def test_native_distrib_single_node_launch_tool_gloo(init_method, get_fixed_dirname, local_rank, world_size):
from datetime import timedelta
timeout = timedelta(seconds=20)
if init_method == "FILE":
init_method = f"file://{get_fixed_dirname('native_distrib_single_node_launch_tool_gloo')}/shared"
device = torch.device(f"cuda:{local_rank}" if torch.cuda.is_available() else "cpu")
_test_native_distrib_single_node_launch_tool(
"gloo", device, local_rank, world_size, timeout=timeout, init_method=init_method
)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:22334", "FILE"])
def test_native_distrib_single_node_launch_tool_nccl(init_method, get_fixed_dirname, local_rank, world_size):
if init_method == "FILE":
init_method = f"file://{get_fixed_dirname('native_distrib_single_node_launch_tool_nccl')}/shared"
device = torch.device(f"cuda:{local_rank}")
_test_native_distrib_single_node_launch_tool("nccl", device, local_rank, world_size, init_method=init_method)
def _test_native_distrib_single_node_spawn(init_method, backend, device, **kwargs):
world_size = 4 if torch.device(device).type == "cpu" else torch.cuda.device_count()
idist.spawn(
backend,
_test_distrib_config,
args=(backend, world_size, device),
nproc_per_node=world_size,
init_method=init_method,
**kwargs,
)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:22334", "FILE"])
def test_native_distrib_single_node_spawn_gloo(init_method, dirname):
from datetime import timedelta
timeout = timedelta(seconds=20)
if init_method == "FILE":
init_method = f"file://{dirname}/shared"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
_test_native_distrib_single_node_spawn(init_method, "gloo", device, timeout=timeout)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:22334", "FILE"])
def test_native_distrib_single_node_spawn_nccl(init_method, dirname):
if init_method == "FILE":
init_method = f"file://{dirname}/shared"
device = torch.device("cuda")
_test_native_distrib_single_node_spawn(init_method, "nccl", device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_sync_as_native_gloo(distributed_context_single_node_gloo):
from ignite.distributed.comp_models.native import _NativeDistModel
_test_sync(_NativeDistModel)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_sync_as_native_nccl(distributed_context_single_node_nccl):
from ignite.distributed.comp_models.native import _NativeDistModel
_test_sync(_NativeDistModel)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_new_group_native_nccl(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_new_group(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_new_group_native_gloo(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_new_group(device)
def _test_idist_methods_in_native_context(backend, device, local_rank):
    # We explicitly set _model to _SerialModel,
    # then call idist.* methods and check that they give correct values
from ignite.distributed.utils import _SerialModel, _set_model
_set_model(_SerialModel())
ws = dist.get_world_size()
rank = dist.get_rank()
_test_distrib_config(local_rank, backend=backend, ws=ws, true_device=device, rank=rank)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_methods_in_native_gloo_context(distributed_context_single_node_gloo):
local_rank = distributed_context_single_node_gloo["local_rank"]
device = torch.device(f"cuda:{local_rank}" if torch.cuda.is_available() else "cpu")
_test_idist_methods_in_native_context("gloo", device, local_rank)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_methods_in_native_nccl_context(distributed_context_single_node_nccl):
local_rank = distributed_context_single_node_nccl["local_rank"]
device = torch.device(f"cuda:{local_rank}")
_test_idist_methods_in_native_context("nccl", device, local_rank)
def _test_idist_methods_in_native_context_set_local_rank(backend, device, local_rank):
    # We explicitly set _model to _SerialModel,
    # then call idist.* methods and check that they give correct values
from ignite.distributed.utils import _SerialModel, _set_model
_set_model(_SerialModel())
lrank = int(os.environ["LOCAL_RANK"])
del os.environ["LOCAL_RANK"]
ws = dist.get_world_size()
rank = dist.get_rank()
idist.set_local_rank(local_rank)
_test_distrib_config(local_rank=local_rank, backend=backend, ws=ws, true_device=device, rank=rank)
os.environ["LOCAL_RANK"] = str(lrank)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_methods_in_native_gloo_context_set_local_rank(distributed_context_single_node_gloo):
local_rank = distributed_context_single_node_gloo["local_rank"]
device = idist.device()
_test_idist_methods_in_native_context_set_local_rank("gloo", device, local_rank)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_methods_in_native_nccl_context_set_local_rank(distributed_context_single_node_nccl):
local_rank = distributed_context_single_node_nccl["local_rank"]
device = idist.device()
_test_idist_methods_in_native_context_set_local_rank("nccl", device, local_rank)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist__model_methods_nccl(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib__get_max_length(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist__model_methods_gloo(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib__get_max_length(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_all_reduce_nccl(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_all_reduce(device)
_test_distrib_all_reduce_group(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_all_reduce_gloo(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_all_reduce(device)
_test_distrib_all_reduce_group(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
@pytest.mark.skipif(Version(torch.__version__) < Version("1.7.0"), reason="dist.all_gather_object is not implemented")
def test_idist_all_gather_nccl(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_all_gather(device)
_test_distrib_all_gather_group(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(Version(torch.__version__) < Version("1.7.0"), reason="dist.all_gather_object is not implemented")
def test_idist_all_gather_gloo(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_all_gather(device)
_test_distrib_all_gather_group(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_broadcast_nccl(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_broadcast(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_broadcast_gloo(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_broadcast(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_barrier_nccl(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_barrier(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_barrier_gloo(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_barrier(device)
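# Compares the per-call overhead of idist.get_world_size/get_rank against the raw
# torch.distributed equivalents; their ratio should stay below the given factor.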
def _test_idist_methods_overhead(ok_factor):
import time
n = 100000
m = 5
t2 = 0.0
t1 = 0.0
for _ in range(m):
start = time.time()
for _ in range(n):
_ = dist.get_world_size()
_ = dist.get_rank()
elapsed = time.time() - start
t2 += elapsed / n / m
start = time.time()
for _ in range(n):
_ = idist.get_world_size()
_ = idist.get_rank()
elapsed = time.time() - start
t1 += elapsed / n / m
overhead_factor = t1 / t2
assert overhead_factor < ok_factor, f"{overhead_factor} vs {ok_factor} | {t2} vs {t1}"
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(
not torch.cuda.is_available(), reason="Do not want to run this test on Github or Travis, but CircleCI"
)
def test_idist_methods_overhead_gloo(distributed_context_single_node_gloo):
_test_idist_methods_overhead(2.5)
idist.sync()
from ignite.distributed.comp_models.native import _NativeDistModel
from ignite.distributed.utils import _model
assert isinstance(_model, _NativeDistModel)
_test_idist_methods_overhead(1.7)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_methods_overhead_nccl(distributed_context_single_node_nccl):
_test_idist_methods_overhead(2.5)
idist.sync()
from ignite.distributed.comp_models.native import _NativeDistModel
from ignite.distributed.utils import _model
assert isinstance(_model, _NativeDistModel)
_test_idist_methods_overhead(1.7)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_one_rank_only_gloo(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_one_rank_only(device=device)
_test_distrib_one_rank_only_with_engine(device=device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_one_rank_only_nccl(local_rank, distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_one_rank_only(device=device)
_test_distrib_one_rank_only_with_engine(device=device)
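# idist.one_rank_first lets the given rank (interpreted as local or global depending on `local`)
# enter the context first: that rank creates the marker file, the remaining ranks find it already present.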
@pytest.mark.distributed
@pytest.mark.parametrize("rank", range(int(os.environ.get("WORLD_SIZE", 1))))
@pytest.mark.parametrize("local", [True, False])
def test_one_rank_first(distributed, get_rank_zero_dirname, rank, local):
def get_ds(file_path):
rank = idist.get_local_rank() if local else idist.get_rank()
if not file_path.exists():
with open(file_path, "w") as f:
f.write("readed")
return f"{rank} not readed"
else:
return f"{rank} readed"
folder = get_rank_zero_dirname()
file_path = folder / "res.txt"
with idist.one_rank_first(rank, local=local):
x = get_ds(file_path)
output = idist.all_gather(x)
if local:
expected = [
f"{x} not readed" if x == rank else f"{x} readed" for x in range(idist.get_nproc_per_node())
] * idist.get_nnodes()
else:
expected = [f"{x} not readed" if x == rank else f"{x} readed" for x in range(idist.get_world_size())]
print("expected:", expected, idist.get_nnodes())
assert set(expected) == set(output)
@pytest.mark.distributed
def test_one_rank_first_asserts():
rank = 100
with pytest.raises(
ValueError, match=f"rank should be between 0 and {idist.get_world_size() - 1}, but given {rank}"
):
with idist.one_rank_first(rank):
pass
|
import pytest
import torch
from ignite.distributed.comp_models import has_hvd_support
if not has_hvd_support:
pytest.skip("Skip if no Horovod package", allow_module_level=True)
else:
import horovod.torch as hvd
from ignite.distributed.comp_models.horovod import _HorovodDistModel
@pytest.mark.distributed
def test__hvd_dist_model():
with pytest.raises(ValueError, match=r"Backend should be one of"):
_HorovodDistModel.create_from_backend("abc")
def _assert_model(model, true_conf):
if "cuda" in true_conf["device"]:
assert model.device() == torch.device(f"{true_conf['device']}:{true_conf['local_rank']}")
else:
assert model.device() == torch.device(true_conf["device"])
assert model.get_local_rank() == true_conf["local_rank"]
assert model.get_rank() == true_conf["rank"]
assert model.get_world_size() == true_conf["world_size"]
assert model.get_node_rank() == true_conf["node_index"]
assert model.get_nnodes() == true_conf["nnodes"]
assert model.get_nproc_per_node() == true_conf["nproc_per_node"]
def _test__hvd_dist_model_create_from_backend_no_dist(backend, true_device):
model = _HorovodDistModel.create_from_backend(backend=backend)
assert hvd.rank() > -1
_assert_model(
model,
{
"device": true_device,
"local_rank": 0,
"rank": 0,
"world_size": 1,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": 1,
},
)
model.finalize()
def _test__hvd_dist_model_create_from_backend_dist(backend, true_device):
model = _HorovodDistModel.create_from_backend(backend=backend)
assert hvd.rank() > -1
with pytest.raises(RuntimeError, match=r"Can not re-initialize Horovod if it is already initialized"):
_HorovodDistModel.create_from_backend(backend=backend)
_assert_model(
model,
{
"device": true_device,
"local_rank": hvd.local_rank(),
"rank": hvd.rank(),
"world_size": hvd.size(),
"node_index": 0,
"nnodes": 1,
"nproc_per_node": hvd.local_size(),
},
)
model.finalize()
def _test__hvd_dist_model_create_from_context_no_dist(true_backend, true_device):
with pytest.raises(ValueError, match=r"Horovod has not been initialized"):
hvd.rank()
assert _HorovodDistModel.create_from_context() is None
hvd.init()
true_conf = {
"device": true_device,
"local_rank": 0,
"rank": 0,
"world_size": 1,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": 1,
}
model = _HorovodDistModel.create_from_context()
assert model.backend() == true_backend
_assert_model(model, true_conf)
hvd.shutdown()
def _test__hvd_dist_model_create_from_context_dist(true_backend, true_device):
assert _HorovodDistModel.create_from_context() is None
hvd.init()
lrank = hvd.local_rank()
if torch.cuda.is_available():
torch.cuda.set_device(lrank)
true_conf = {
"device": true_device,
"local_rank": lrank,
"rank": hvd.rank(),
"world_size": hvd.size(),
"node_index": 0,
"nnodes": 1,
"nproc_per_node": hvd.local_size(),
}
model = _HorovodDistModel.create_from_context()
assert model.backend() == true_backend
_assert_model(model, true_conf)
hvd.shutdown()
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() > 0, reason="Skip if has GPU")
def test__hvd_dist_model_create_no_dist(gloo_hvd_executor):
gloo_hvd_executor(_test__hvd_dist_model_create_from_backend_no_dist, ("horovod", "cpu"), np=1)
gloo_hvd_executor(_test__hvd_dist_model_create_from_context_no_dist, ("horovod", "cpu"), np=1)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test__hvd_dist_model_create_no_dist_cuda(gloo_hvd_executor):
gloo_hvd_executor(_test__hvd_dist_model_create_from_backend_no_dist, ("horovod", "cuda"), np=1)
gloo_hvd_executor(_test__hvd_dist_model_create_from_context_no_dist, ("horovod", "cuda"), np=1)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() > 0, reason="Skip if has GPU")
def test__hvd_dist_model_create_dist_1(gloo_hvd_executor):
gloo_hvd_executor(_test__hvd_dist_model_create_from_backend_dist, ("horovod", "cpu"), np=4)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() > 0, reason="Skip if has GPU")
def test__hvd_dist_model_create_dist_2(gloo_hvd_executor):
gloo_hvd_executor(_test__hvd_dist_model_create_from_context_dist, ("horovod", "cpu"), np=4)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test__hvd_dist_model_create_dist_cuda_1(gloo_hvd_executor):
gloo_hvd_executor(_test__hvd_dist_model_create_from_backend_dist, ("horovod", "cuda"), np=torch.cuda.device_count())
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test__hvd_dist_model_create_dist_cuda_2(gloo_hvd_executor):
gloo_hvd_executor(_test__hvd_dist_model_create_from_context_dist, ("horovod", "cuda"), np=torch.cuda.device_count())
def _test__hvd_dist_model_warning_index_less_localrank():
assert torch.cuda.is_available()
assert _HorovodDistModel.create_from_context() is None
hvd.init()
    # We deliberately set the cuda device to 0, which is wrong for local ranks other than 0
torch.cuda.set_device(0)
model = _HorovodDistModel.create_from_context()
assert isinstance(model, _HorovodDistModel), f"{type(model)} vs _HorovodDistModel"
if hvd.local_rank() == 1:
with pytest.warns(UserWarning, match=r"Current device index is less than current local rank."):
model.device()
hvd.shutdown()
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Skip if less than 2 GPUs")
def test__hvd_dist_model_warning_index_less_localrank(gloo_hvd_executor):
gloo_hvd_executor(_test__hvd_dist_model_warning_index_less_localrank, (), np=torch.cuda.device_count())
def _test_dist_spawn_fn(local_rank, backend, world_size, device):
from ignite.distributed.utils import _model
assert hvd.rank() > -1
assert isinstance(_model, _HorovodDistModel), f"{type(_model)} vs _HorovodDistModel"
assert _model.get_local_rank() == local_rank
assert _model.get_world_size() == world_size
assert _model.backend() == backend
if "cuda" in device:
assert _model.device() == torch.device(f"{device}:{local_rank}")
else:
assert _model.device() == torch.device(device)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() > 0, reason="Skip if has GPU")
def test__hvd_dist_model_spawn():
num_workers_per_machine = 4
_HorovodDistModel.spawn(
_test_dist_spawn_fn,
args=("horovod", num_workers_per_machine, "cpu"),
kwargs_dict={},
nproc_per_node=num_workers_per_machine,
use_gloo=True,
)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test__hvd_dist_model_spawn_cuda():
num_workers_per_machine = torch.cuda.device_count()
_HorovodDistModel.spawn(
_test_dist_spawn_fn,
args=("horovod", num_workers_per_machine, "cuda"),
kwargs_dict={},
nproc_per_node=num_workers_per_machine,
use_gloo=True,
)
|
import os
import pytest
import torch
from ignite.distributed.comp_models import has_xla_support
if not has_xla_support:
pytest.skip("Skip if no XLA support", allow_module_level=True)
else:
from ignite.distributed.comp_models.xla import _XlaDistModel
@pytest.mark.tpu
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test__xla_model():
available_backends = _XlaDistModel.available_backends
assert "xla-tpu" in available_backends
with pytest.raises(ValueError, match=r"Backend should be one of"):
_XlaDistModel.create_from_backend("abc")
def _test_xla_spawn_fn(local_rank, world_size, device):
from ignite.distributed.utils import _model
assert isinstance(_model, _XlaDistModel), f"{type(_model)} vs _XlaDistModel"
assert _model.get_local_rank() == local_rank
assert _model.get_world_size() == world_size
d = _model.device()
assert isinstance(d, torch.device) and d.type == device
assert _model.get_rank() == local_rank
assert _model.get_nproc_per_node() == world_size
assert _model.get_node_rank() == 0
assert _model.get_nnodes() == 1
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test__xla_dist_model_spawn_one_proc():
try:
_XlaDistModel.spawn(_test_xla_spawn_fn, args=(1, "xla"), kwargs_dict={}, nproc_per_node=1)
except SystemExit:
pass
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test__xla_dist_model_spawn_n_procs():
n = int(os.environ["NUM_TPU_WORKERS"])
try:
_XlaDistModel.spawn(_test_xla_spawn_fn, args=(n, "xla"), kwargs_dict={}, nproc_per_node=n)
except SystemExit:
pass
def _assert_model(model, true_conf):
assert model.device() == true_conf["device"]
assert model.get_local_rank() == true_conf["local_rank"]
assert model.get_rank() == true_conf["rank"]
assert model.get_world_size() == true_conf["world_size"]
assert model.get_node_rank() == true_conf["node_index"]
assert model.get_nnodes() == true_conf["nnodes"]
assert model.get_nproc_per_node() == true_conf["nproc_per_node"]
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test__xla_dist_model_create_from_backend():
# without spawn
model = _XlaDistModel.create_from_backend("xla-tpu")
import torch_xla.core.xla_model as xm
_assert_model(
model,
{
"device": xm.xla_device(),
"local_rank": 0,
"rank": 0,
"world_size": 1,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": 1,
},
)
model.finalize()
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test__xla_dist_model_create_from_context():
# without spawn
model = _XlaDistModel.create_from_context()
assert model.backend() == "xla-tpu"
import torch_xla.core.xla_model as xm
_assert_model(
model,
{
"device": xm.xla_device(),
"local_rank": 0,
"rank": 0,
"world_size": 1,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": 1,
},
)
def _test__xla_dist_model_create_from_context_in_child_proc(index):
model = _XlaDistModel.create_from_context()
assert model.backend() == "xla-tpu"
import torch_xla.core.xla_model as xm
_assert_model(
model,
{
"device": xm.xla_device(),
"local_rank": index,
"rank": xm.get_ordinal(),
"world_size": xm.xrt_world_size(),
"node_index": 0,
"nnodes": 1,
"nproc_per_node": xm.xrt_world_size(),
},
)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test__xla_dist_model_create_from_context_in_child_proc(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test__xla_dist_model_create_from_context_in_child_proc, args=(), nprocs=n)
def main_fold(fold):
import time
import torch.nn as nn
import torch.optim as optim
import torch_xla.core.xla_model as xm
from ignite.engine import Engine
device = xm.xla_device(fold)
comp_model = _XlaDistModel.create_from_context()
assert comp_model.device() == device
model = nn.Linear(100, 10)
model.to(device) # Move model before creating optimizer
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
def training_step(engine, _):
data = torch.rand(4, 100, device=device)
model.train()
data = data.to(device)
optimizer.zero_grad()
output = model(data)
loss = output.sum()
loss.backward()
xm.optimizer_step(optimizer, barrier=True)
return loss.item()
trainer = Engine(training_step)
    # This can cause a crash if the tensor device differs from this fold's xla device
tensor = torch.tensor([fold + 1.0], dtype=torch.float).to(comp_model.device())
xm.all_reduce("max", [tensor])
time.sleep(0.01 * fold)
trainer.run([0] * 100, max_epochs=2)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test__xla_dist_model_run_parallel_n_threads_without_sync():
# tests issue : https://github.com/pytorch/ignite/issues/1096
import torch_xla.core.xla_model as xm
from joblib import delayed, Parallel
devices = xm.get_xla_supported_devices()
folds = 1
d = 0
if len(devices) > 5:
folds = 5
d = 1
Parallel(n_jobs=folds, backend="threading")(delayed(main_fold)(i + d) for i in range(folds))
|
import pytest
import torch
from ignite.distributed.comp_models.base import _SerialModel, ComputationModel
def test_serial_model():
_SerialModel.create_from_backend()
model = _SerialModel.create_from_context()
assert model.get_local_rank() == 0
assert model.get_rank() == 0
assert model.get_world_size() == 1
assert model.get_nproc_per_node() == 1
assert model.get_nnodes() == 1
assert model.get_node_rank() == 0
if torch.cuda.is_available():
assert model.device().type == "cuda"
else:
assert model.device().type == "cpu"
assert model.backend() is None
model.finalize()
with pytest.raises(NotImplementedError, match=r"Serial computation model does not implement spawn method"):
model.spawn()
model.all_reduce(1)
model.all_gather(1)
model.broadcast(1)
assert model._do_all_reduce(torch.tensor(1)) == torch.tensor(1)
assert model._do_all_gather(torch.tensor(1)) == torch.tensor(1)
assert model._do_broadcast(torch.tensor(1), src=0) == torch.tensor(1)
model.barrier()
def test__encode_str__decode_str():
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
s = "test-abcedfg"
encoded_s = ComputationModel._encode_str(s, device, 1024)
assert isinstance(encoded_s, torch.Tensor) and encoded_s.shape == (1, 1025)
decoded_s = ComputationModel._decode_str(encoded_s)
assert isinstance(decoded_s, list) and len(decoded_s) == 1
assert decoded_s[0] == s
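# ComputationModel._encode_input_data packs a description of the payload (kind marker,
# tensor ndim/shape and dtype string) into a fixed-size list of 512 ints padded with -1;
# non-source ranks always encode an empty (-1) message.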
def test__encode_input_data():
encoded_msg = ComputationModel._encode_input_data(None, is_src=True)
assert encoded_msg == [-1] * 512
encoded_msg = ComputationModel._encode_input_data(12.0, is_src=True)
assert encoded_msg == [1] + [-1] * 511
encoded_msg = ComputationModel._encode_input_data("abc", is_src=True)
assert encoded_msg == [2] + [-1] * 511
t = torch.rand(2, 512, 32, 32, 64)
encoded_msg = ComputationModel._encode_input_data(t, is_src=True)
dtype_str = str(t.dtype)
true_msg = [0, 5, 2, 512, 32, 32, 64, len(dtype_str), *list(bytearray(dtype_str, "utf-8"))]
assert encoded_msg == true_msg + [-1] * (512 - len(true_msg))
t = torch.randint(-1235, 1233, size=(2, 512, 32, 32, 64))
encoded_msg = ComputationModel._encode_input_data(t, is_src=True)
dtype_str = str(t.dtype)
true_msg = [0, 5, 2, 512, 32, 32, 64, len(dtype_str), *list(bytearray(dtype_str, "utf-8"))]
assert encoded_msg == true_msg + [-1] * (512 - len(true_msg))
t = torch.tensor(12)
encoded_msg = ComputationModel._encode_input_data(t, is_src=True)
dtype_str = str(t.dtype)
true_msg = [0, 0, len(dtype_str), *list(bytearray(dtype_str, "utf-8"))]
assert encoded_msg == true_msg + [-1] * (512 - len(true_msg))
for t in [None, "abc", torch.rand(2, 512, 32, 32, 64), 12.34, object()]:
encoded_msg = ComputationModel._encode_input_data(t, is_src=False)
assert encoded_msg == [-1] * 512
def test__decode_as_placeholder():
device = torch.device("cpu")
encoded_msg = [-1] * 512
encoded_msg[0] = 1
res = ComputationModel._decode_as_placeholder(encoded_msg, device)
assert isinstance(res, float) and res == 0.0
encoded_msg = [-1] * 512
encoded_msg[0] = 2
res = ComputationModel._decode_as_placeholder(encoded_msg, device)
assert isinstance(res, str) and res == ""
encoded_msg = [-1] * 512
encoded_msg[0] = 0
encoded_msg[1 : 1 + 7] = [6, 2, 3, 4, 5, 6, 7]
dtype_str = "torch.int64"
payload = [len(dtype_str), *list(bytearray(dtype_str, "utf-8"))]
encoded_msg[1 + 7 : 1 + 7 + len(payload)] = payload
res = ComputationModel._decode_as_placeholder(encoded_msg, device)
assert isinstance(res, torch.Tensor) and res.dtype == torch.int64 and res.shape == (2, 3, 4, 5, 6, 7)
encoded_msg = [-1] * 512
with pytest.raises(RuntimeError, match="Internal error: unhandled dtype"):
ComputationModel._decode_as_placeholder(encoded_msg, device)
t = torch.rand(2, 512, 32, 32, 64)
encoded_msg = ComputationModel._encode_input_data(t, True)
res = ComputationModel._decode_as_placeholder(encoded_msg, device)
assert isinstance(res, torch.Tensor) and res.dtype == t.dtype and res.shape == t.shape
t = torch.tensor(12)
encoded_msg = ComputationModel._encode_input_data(t, True)
res = ComputationModel._decode_as_placeholder(encoded_msg, device)
assert isinstance(res, torch.Tensor) and res.dtype == t.dtype and res.shape == t.shape
def test__setup_placeholder():
device = torch.device("cpu")
from ignite.distributed.utils import _model
for t in [torch.rand(2, 3, 4), "abc", 123.45]:
data = _model._setup_placeholder(t, device, True)
assert isinstance(data, type(t))
if isinstance(data, torch.Tensor):
assert (data == t).all()
else:
assert data == t
|
import os
import pytest
import torch
import torch.distributed as dist
from ignite.distributed.comp_models import has_native_dist_support
if not has_native_dist_support:
pytest.skip("Skip if no native dist support", allow_module_level=True)
else:
from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env
# tests from https://github.com/LLNL/py-hostlist/blob/master/hostlist/unittest_hostlist.py
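# _expand_hostlist expands SLURM-style compressed host lists such as "node[1-3,7]"
# into explicit host names.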
@pytest.mark.parametrize(
"hostlist, expected",
[
("localhost", "localhost"),
("compute!:b24_[1-2].r", "compute!:b24_1.r,compute!:b24_2.r"),
("quartz[4-8]", "quartz4,quartz5,quartz6,quartz7,quartz8"),
("c1001a-[11,17]", "c1001a-11,c1001a-17"),
("c1001a-s[11,17]", "c1001a-s11,c1001a-s17"),
("c1009a-s17,c1010a-s11", "c1009a-s17,c1010a-s11"),
(
"gpu-compute-on-demand-dy-g4dnxlarge-[1-4]",
"gpu-compute-on-demand-dy-g4dnxlarge-1,"
"gpu-compute-on-demand-dy-g4dnxlarge-2,"
"gpu-compute-on-demand-dy-g4dnxlarge-3,"
"gpu-compute-on-demand-dy-g4dnxlarge-4",
),
(
"node[18-19,1-16,21-22]",
"node1,node2,node3,node4,node5,"
"node6,node7,node8,node9,node10,"
"node11,node12,node13,node14,node15,"
"node16,node18,node19,node21,node22",
),
(
"node[4-8,12,16-20,22,24-26]",
"node4,node5,node6,node7,node8,"
"node12,node16,node17,node18,"
"node19,node20,node22,node24,"
"node25,node26",
),
("machine2-[02-4]vm1", "machine2-02vm1,machine2-03vm1,machine2-04vm1"),
(
"machine2-[02-3]vm1, machine4-[0003-5].vml2",
"machine2-02vm1,machine2-03vm1,machine4-0003.vml2,machine4-0004.vml2,machine4-0005.vml2",
),
("machine2-[009-11]vm1", "machine2-009vm1,machine2-010vm1,machine2-011vm1"),
("node[1,2,3]", "node1,node2,node3"),
(
"compute-b24-[1-3,5-9], compute-b25-[1,4,8],compute-b25-[2-9,13]",
"compute-b24-1,compute-b24-2,compute-b24-3,compute-b24-5,compute-b24-6,"
"compute-b24-7,compute-b24-8,compute-b24-9,compute-b25-1,compute-b25-4,"
"compute-b25-8,compute-b25-2,compute-b25-3,compute-b25-4,compute-b25-5,"
"compute-b25-6,compute-b25-7,compute-b25-8,compute-b25-9,compute-b25-13",
),
],
)
def test_expand_hostlist(hostlist, expected):
assert _expand_hostlist(hostlist) == expected.split(",")
def test_expand_hostlist_invalid():
with pytest.raises(ValueError, match=r"hostlist invalid"):
_expand_hostlist("invalid[]")
@pytest.mark.distributed
def test__native_dist_model():
available_backends = _NativeDistModel.available_backends
if dist.is_nccl_available():
assert "nccl" in available_backends
else:
assert "nccl" not in available_backends
if dist.is_gloo_available():
assert "gloo" in available_backends
else:
assert "gloo" not in available_backends
if dist.is_mpi_available():
assert "mpi" in available_backends
else:
assert "mpi" not in available_backends
with pytest.raises(ValueError, match=r"Backend should be one of"):
_NativeDistModel.create_from_backend("abc")
@pytest.mark.distributed
@pytest.mark.skipif(not dist.is_nccl_available(), reason="Skip if nccl not available")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test__native_nccl_but_no_gpu(mock_gpu_is_not_available):
with pytest.raises(RuntimeError, match=r"Nccl backend is required but no cuda capable devices"):
_NativeDistModel(backend="nccl")
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test__native_dist_model_create_from_backend_bad_config():
import os
from datetime import timedelta
os.environ["RANK"] = "1"
with pytest.raises(RuntimeError, match=r"PyTorch distributed configuration should define env variables"):
_NativeDistModel.create_from_backend(backend="gloo", timeout=timedelta(seconds=10))
del os.environ["RANK"]
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test__native_dist_model_create_from_backend_bad_slurm_config():
import os
from datetime import timedelta
os.environ["SLURM_JOB_ID"] = "1"
with pytest.raises(RuntimeError, match=r"SLURM distributed configuration is missing"):
_NativeDistModel.create_from_backend(backend="gloo", timeout=timedelta(seconds=10))
with pytest.raises(ValueError, match=r"Arguments rank and world_size should not be specified with SLURM"):
_NativeDistModel.create_from_backend(
backend="gloo", timeout=timedelta(seconds=10), rank=1, init_method="", world_size=1
)
os.environ["SLURM_PROCID"] = "0"
os.environ["SLURM_LOCALID"] = "0"
os.environ["SLURM_NTASKS"] = "1"
os.environ["SLURM_JOB_NODELIST"] = "localhost"
os.environ["SLURM_JOB_NUM_NODES"] = "1"
os.environ["RANK"] = "1"
with pytest.warns(UserWarning, match=r"We detected the following env variables"):
model = _NativeDistModel.create_from_backend(backend="gloo", timeout=timedelta(seconds=10))
model.finalize()
del os.environ["SLURM_JOB_ID"]
del os.environ["SLURM_PROCID"]
del os.environ["SLURM_LOCALID"]
del os.environ["SLURM_NTASKS"]
del os.environ["SLURM_JOB_NODELIST"]
del os.environ["SLURM_JOB_NUM_NODES"]
del os.environ["RANK"]
def _assert_model(model, true_conf):
assert model.device() == torch.device(true_conf["device"])
assert model.get_local_rank() == true_conf["local_rank"]
assert model.get_rank() == true_conf["rank"]
assert model.get_world_size() == true_conf["world_size"]
assert model.get_node_rank() == true_conf["node_index"]
assert model.get_nnodes() == true_conf["nnodes"]
assert model.get_nproc_per_node() == true_conf["nproc_per_node"]
def _test__native_dist_model_create_from_backend_no_dist(backend, true_device):
from datetime import timedelta
model = _NativeDistModel.create_from_backend(backend=backend, timeout=timedelta(seconds=20))
assert dist.is_available() and dist.is_initialized()
assert dist.get_backend() == backend
_assert_model(
model,
{
"device": true_device,
"local_rank": 0,
"rank": 0,
"world_size": 1,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": 1,
},
)
model.finalize()
def _test__native_dist_model_create_from_backend_dist(init_method, local_rank, rank, world_size, backend, true_device):
import os
from datetime import timedelta
timeout = timedelta(seconds=20)
os.environ["RANK"] = f"{rank}"
assert "MASTER_ADDR" not in os.environ
assert "MASTER_PORT" not in os.environ
model = _NativeDistModel.create_from_backend(backend=backend, timeout=timeout, init_method=init_method)
assert dist.is_available() and dist.is_initialized()
assert dist.get_backend() == backend
with pytest.raises(RuntimeError, match=r"Can not create new distributed process group if default one is"):
_NativeDistModel.create_from_backend(backend=backend, timeout=timeout)
_assert_model(
model,
{
"device": true_device,
"local_rank": local_rank,
"rank": rank,
"world_size": world_size,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": world_size,
},
)
if init_method is None:
assert model._init_method == "env://"
else:
assert model._init_method == init_method
model.finalize()
del os.environ["RANK"]
assert "MASTER_ADDR" not in os.environ
assert "MASTER_PORT" not in os.environ
assert "RANK" not in os.environ
def _test__native_dist_model_create_from_backend_slurm(local_rank, rank, world_size, backend, true_device):
import os
from datetime import timedelta
timeout = timedelta(seconds=20)
assert "MASTER_ADDR" not in os.environ
assert "MASTER_PORT" not in os.environ
del os.environ["WORLD_SIZE"]
del os.environ["LOCAL_RANK"]
os.environ["SLURM_JOB_ID"] = "15000"
os.environ["SLURM_PROCID"] = str(rank)
os.environ["SLURM_LOCALID"] = str(local_rank)
os.environ["SLURM_NTASKS"] = str(world_size)
os.environ["SLURM_JOB_NODELIST"] = "localhost"
os.environ["SLURM_JOB_NUM_NODES"] = "1"
model = _NativeDistModel.create_from_backend(backend=backend, timeout=timeout)
assert dist.is_available() and dist.is_initialized()
assert dist.get_backend() == backend
with pytest.raises(RuntimeError, match=r"Can not create new distributed process group if default one is"):
_NativeDistModel.create_from_backend(backend=backend, timeout=timeout)
_assert_model(
model,
{
"device": true_device,
"local_rank": local_rank,
"rank": rank,
"world_size": world_size,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": world_size,
},
)
model.finalize()
del os.environ["SLURM_JOB_ID"]
del os.environ["SLURM_PROCID"]
del os.environ["SLURM_LOCALID"]
del os.environ["SLURM_NTASKS"]
del os.environ["SLURM_JOB_NODELIST"]
del os.environ["SLURM_JOB_NUM_NODES"]
assert "MASTER_ADDR" not in os.environ
assert "MASTER_PORT" not in os.environ
assert "RANK" not in os.environ
os.environ["WORLD_SIZE"] = str(world_size)
os.environ["LOCAL_RANK"] = str(local_rank)
def _test__native_dist_model_create_from_context_no_local_rank():
if "LOCAL_RANK" in os.environ:
del os.environ["LOCAL_RANK"]
from ignite.distributed.comp_models.base import ComputationModel
if ComputationModel._ext_local_rank is not None:
ComputationModel._ext_local_rank = None
with pytest.warns(UserWarning, match=r"Local rank information for native distributed setting will be initialized"):
_NativeDistModel.create_from_context()
def _test__native_dist_model_create_from_context_env_local_rank(true_conf):
import os
remove_lrank = False
if "LOCAL_RANK" not in os.environ:
os.environ["LOCAL_RANK"] = str(true_conf["local_rank"])
remove_lrank = True
model = _NativeDistModel.create_from_context()
_assert_model(model, true_conf)
if remove_lrank:
del os.environ["LOCAL_RANK"]
def _test__native_dist_model_create_from_context_set_local_rank(true_conf):
from ignite.distributed.comp_models.base import ComputationModel
lrank = None
if "LOCAL_RANK" in os.environ:
lrank = os.environ["LOCAL_RANK"]
del os.environ["LOCAL_RANK"]
ComputationModel._ext_local_rank = true_conf["local_rank"]
model = _NativeDistModel.create_from_context()
_assert_model(model, true_conf)
ComputationModel._ext_local_rank = None
if lrank is not None:
os.environ["LOCAL_RANK"] = lrank
def _test__native_dist_model_create_from_context_no_dist(true_backend, true_device):
assert _NativeDistModel.create_from_context() is None
dist.init_process_group(true_backend, "tcp://0.0.0.0:2222", world_size=1, rank=0)
dist.barrier()
_test__native_dist_model_create_from_context_no_local_rank()
true_conf = {
"device": true_device,
"local_rank": 0,
"rank": 0,
"world_size": 1,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": 1,
}
_test__native_dist_model_create_from_context_env_local_rank(true_conf)
_test__native_dist_model_create_from_context_set_local_rank(true_conf)
dist.destroy_process_group()
def _test__native_dist_model_create_from_context_dist(local_rank, rank, world_size, true_backend, true_device):
assert _NativeDistModel.create_from_context() is None
dist.init_process_group(true_backend, "tcp://0.0.0.0:2222", world_size=world_size, rank=rank)
dist.barrier()
if torch.cuda.is_available():
torch.cuda.set_device(local_rank)
true_conf = {
"device": true_device,
"local_rank": local_rank,
"rank": rank,
"world_size": world_size,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": world_size,
}
_test__native_dist_model_create_from_context_env_local_rank(true_conf)
_test__native_dist_model_create_from_context_set_local_rank(true_conf)
dist.destroy_process_group()
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Should be no-dist config")
def test__native_dist_model_create_no_dist_gloo(clean_env):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
_test__native_dist_model_create_from_backend_no_dist("gloo", device)
_test__native_dist_model_create_from_context_no_dist("gloo", device)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Should be no-dist config")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test__native_dist_model_create_no_dist_nccl(clean_env):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
_test__native_dist_model_create_from_backend_no_dist("nccl", device)
_test__native_dist_model_create_from_context_no_dist("nccl", device)
@pytest.mark.distributed
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:22334", "FILE"])
def test__native_dist_model_create_dist_gloo_1(init_method, get_fixed_dirname, local_rank, world_size):
if init_method == "FILE":
init_method = f"file://{get_fixed_dirname('native_dist_model_create_dist_gloo_1')}/shared"
device = torch.device(f"cuda:{local_rank}" if torch.cuda.is_available() else "cpu")
_test__native_dist_model_create_from_backend_dist(init_method, local_rank, local_rank, world_size, "gloo", device)
if init_method is None:
_test__native_dist_model_create_from_backend_slurm(local_rank, local_rank, world_size, "gloo", device)
@pytest.mark.distributed
def test__native_dist_model_create_dist_gloo_2(local_rank, world_size):
device = torch.device(f"cuda:{local_rank}" if torch.cuda.is_available() else "cpu")
_test__native_dist_model_create_from_context_dist(local_rank, local_rank, world_size, "gloo", device)
_test__native_dist_model_create_from_backend_slurm(local_rank, local_rank, world_size, "gloo", device)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:22334", "FILE"])
def test__native_dist_model_create_dist_nccl_1(init_method, get_fixed_dirname, local_rank, world_size):
if init_method == "FILE":
init_method = f"file://{get_fixed_dirname('native_dist_model_create_dist_nccl_1')}/shared"
_test__native_dist_model_create_from_backend_dist(
init_method, local_rank, local_rank, world_size, "nccl", f"cuda:{local_rank}"
)
if init_method is None:
_test__native_dist_model_create_from_backend_slurm(
local_rank, local_rank, world_size, "nccl", f"cuda:{local_rank}"
)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test__native_dist_model_create_dist_nccl_2(local_rank, world_size):
_test__native_dist_model_create_from_context_dist(local_rank, local_rank, world_size, "nccl", f"cuda:{local_rank}")
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Skip if less than 2 GPUs")
def test__native_dist_model_warning_index_less_localrank(local_rank, world_size):
assert _NativeDistModel.create_from_context() is None
dist.init_process_group("nccl", "tcp://0.0.0.0:2222", world_size=world_size, rank=local_rank)
dist.barrier()
    # Deliberately set the CUDA device to 0, which is incorrect for non-zero local ranks
torch.cuda.set_device(0)
model = _NativeDistModel.create_from_context()
assert isinstance(model, _NativeDistModel), f"{type(model)} vs _NativeDistModel"
if local_rank == 1:
with pytest.warns(UserWarning, match=r"Current device index is less than current local rank."):
model.device()
dist.destroy_process_group()
def _test_dist_spawn_fn(local_rank, backend, world_size, device, **kwargs):
from ignite.distributed.utils import _model
assert dist.is_available() and dist.is_initialized()
assert dist.get_backend() == backend
assert isinstance(_model, _NativeDistModel), f"{type(_model)} vs _NativeDistModel"
assert _model.get_local_rank() == local_rank
assert _model.get_world_size() == world_size
assert _model.device().type == torch.device(device).type
if "master_addr" in kwargs:
assert os.environ["MASTER_ADDR"] == kwargs["master_addr"]
if "master_port" in kwargs:
assert os.environ["MASTER_PORT"] == str(kwargs["master_port"])
def _test__native_dist_model_spawn(backend, num_workers_per_machine, device, init_method=None, **spawn_kwargs):
kwargs_dict = {}
for key in ["master_addr", "master_port"]:
if key in spawn_kwargs:
kwargs_dict[key] = spawn_kwargs[key]
_NativeDistModel.spawn(
_test_dist_spawn_fn,
args=(backend, num_workers_per_machine, device),
kwargs_dict=kwargs_dict,
backend=backend,
nproc_per_node=num_workers_per_machine,
init_method=init_method,
**spawn_kwargs,
)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.parametrize("init_method", [None, "CUSTOM_ADDR_PORT", "env://", "tcp://0.0.0.0:22334", "FILE"])
def test__native_dist_model_spawn_gloo(init_method, dirname):
spawn_kwargs = {}
if init_method == "FILE":
init_method = f"file://{dirname}/shared"
elif init_method == "CUSTOM_ADDR_PORT":
init_method = None
spawn_kwargs["master_addr"] = "0.0.0.0"
spawn_kwargs["master_port"] = 2345
nproc = torch.cuda.device_count() if torch.cuda.is_available() else 4
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
_test__native_dist_model_spawn(
"gloo", num_workers_per_machine=nproc, device=device, init_method=init_method, **spawn_kwargs
)
if device.type == "cpu":
spawn_kwargs["start_method"] = "fork"
_test__native_dist_model_spawn(
"gloo", num_workers_per_machine=nproc, device=device, init_method=init_method, **spawn_kwargs
)
if init_method not in [None, "env://"]:
with pytest.raises(ValueError, match=r"master_addr should be None if init_method is provided"):
_test__native_dist_model_spawn(
"gloo", num_workers_per_machine=nproc, device=device, init_method=init_method, master_addr="abc"
)
with pytest.raises(ValueError, match=r"master_port should be None if init_method is provided"):
_test__native_dist_model_spawn(
"gloo", num_workers_per_machine=nproc, device=device, init_method=init_method, master_port=123
)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
@pytest.mark.parametrize("init_method", [None, "CUSTOM_ADDR_PORT", "tcp://0.0.0.0:22334", "FILE"])
def test__native_dist_model_spawn_nccl(init_method, dirname):
spawn_kwargs = {}
if init_method == "FILE":
init_method = f"file://{dirname}/shared"
elif init_method == "CUSTOM_ADDR_PORT":
init_method = None
spawn_kwargs["master_addr"] = "0.0.0.0"
spawn_kwargs["master_port"] = 2345
nproc = torch.cuda.device_count()
_test__native_dist_model_spawn(
"nccl", num_workers_per_machine=nproc, device="cuda", init_method=init_method, **spawn_kwargs
)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test__native_dist_model_init_method_is_none(world_size):
with pytest.raises(ValueError, match=r"Arguments rank and world_size should be None"):
_NativeDistModel.create_from_backend(backend="gloo", world_size=world_size)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test__native_dist_model_init_method_is_not_none(world_size, local_rank, get_fixed_dirname):
init_method = f"file://{get_fixed_dirname('native_dist_model_init_method_is_not_none')}/shared"
with pytest.raises(ValueError, match=r"Both rank and world_size should be provided"):
_NativeDistModel.create_from_backend(backend="gloo", world_size=world_size, init_method=init_method)
with pytest.raises(ValueError, match=r"Both rank and world_size should be provided"):
_NativeDistModel.create_from_backend(backend="gloo", rank=local_rank, init_method=init_method)
@pytest.mark.parametrize(
"environ, expected",
[
# fmt: off
# usual SLURM env
(
{
"SLURM_PROCID": "1", "SLURM_LOCALID": "1", "SLURM_NTASKS": "2", "SLURM_JOB_NUM_NODES": "1",
"SLURM_JOB_NODELIST": "c1", "SLURM_JOB_ID": "12345",
},
[1, 1, 2, "c1", 17345]
),
# usual SLURM env mnode
(
{
"SLURM_PROCID": "5", "SLURM_LOCALID": "1", "SLURM_NTASKS": "8", "SLURM_JOB_NUM_NODES": "2",
"SLURM_JOB_NODELIST": "c1, c2", "SLURM_JOB_ID": "12345",
},
[5, 1, 8, "c1", 17345]
),
# usual SLURM env 1 node, 1 task + torch.distributed.launch
(
{
"SLURM_PROCID": "0", "SLURM_LOCALID": "0", "SLURM_NTASKS": "1", "SLURM_JOB_NUM_NODES": "1",
"SLURM_JOB_NODELIST": "c1", "SLURM_JOB_ID": "12345",
"MASTER_ADDR": "127.0.0.1", "MASTER_PORT": "2233", "RANK": "2", "LOCAL_RANK": "2", "WORLD_SIZE": "8",
},
[2, 2, 8, "127.0.0.1", 2233]
),
# usual SLURM env + enroot's pytorch hook
(
{
"SLURM_PROCID": "3", "SLURM_LOCALID": "3", "SLURM_NTASKS": "4", "SLURM_JOB_NUM_NODES": "1",
"SLURM_JOB_NODELIST": "c1", "SLURM_JOB_ID": "12345",
"MASTER_ADDR": "c1", "MASTER_PORT": "12233", "RANK": "3", "LOCAL_RANK": "3", "WORLD_SIZE": "4",
},
[3, 3, 4, "c1", 12233]
),
# usual SLURM env mnode + enroot's pytorch hook
(
{
"SLURM_PROCID": "3", "SLURM_LOCALID": "1", "SLURM_NTASKS": "4", "SLURM_JOB_NUM_NODES": "2",
"SLURM_JOB_NODELIST": "c1, c2", "SLURM_JOB_ID": "12345",
"MASTER_ADDR": "c1", "MASTER_PORT": "12233", "RANK": "3", "LOCAL_RANK": "1", "WORLD_SIZE": "4"
},
[3, 1, 4, "c1", 12233]
),
# fmt: on
],
)
def test__setup_ddp_vars_from_slurm_env(environ, expected):
ddp_keys = ["RANK", "LOCAL_RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT"]
ddp_vars = _setup_ddp_vars_from_slurm_env(environ)
for key, value in zip(ddp_keys, expected):
assert key in ddp_vars
assert ddp_vars[key] == value
def test__setup_ddp_vars_from_slurm_env_bad_configs():
with pytest.raises(
RuntimeError, match=r"Environment variable defined for PyTorch Distributed context is inconsistent"
):
environ = {
"SLURM_PROCID": "3",
"SLURM_LOCALID": "1",
"SLURM_NTASKS": "4",
"SLURM_JOB_NUM_NODES": "2",
"SLURM_JOB_NODELIST": "c1, c2",
"SLURM_JOB_ID": "12345",
"MASTER_ADDR": "another-addr",
"MASTER_PORT": "12233",
"RANK": "1",
"LOCAL_RANK": "1",
"WORLD_SIZE": "2",
}
_setup_ddp_vars_from_slurm_env(environ)
with pytest.raises(
RuntimeError, match=r"Environment variable defined for PyTorch Distributed context is inconsistent"
):
environ = {
"SLURM_PROCID": "1",
"SLURM_LOCALID": "1",
"SLURM_NTASKS": "4",
"SLURM_JOB_NUM_NODES": "1",
"SLURM_JOB_NODELIST": "c1",
"SLURM_JOB_ID": "12345",
"MASTER_ADDR": "another-addr",
"MASTER_PORT": "12233",
"RANK": "1",
"LOCAL_RANK": "1",
"WORLD_SIZE": "2",
}
_setup_ddp_vars_from_slurm_env(environ)
with pytest.warns(UserWarning, match=r"We detected the following env variables"):
environ = {
"SLURM_PROCID": "3",
"SLURM_LOCALID": "1",
"SLURM_NTASKS": "4",
"SLURM_JOB_NUM_NODES": "2",
"SLURM_JOB_NODELIST": "c1, c2",
"SLURM_JOB_ID": "12345",
"RANK": "1",
"LOCAL_RANK": "1",
"WORLD_SIZE": "2",
}
_setup_ddp_vars_from_slurm_env(environ)
with pytest.raises(RuntimeError, match=r"No hostname detected in SLURM_JOB_NODELIST by ignite"):
environ = {
"SLURM_PROCID": "1",
"SLURM_LOCALID": "1",
"SLURM_NTASKS": "4",
"SLURM_JOB_NUM_NODES": "1",
"SLURM_JOB_NODELIST": "[]",
"SLURM_JOB_ID": "12345",
}
_setup_ddp_vars_from_slurm_env(environ)
|
import random
from pathlib import Path
import pytest
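# Fixture: temporarily removes modules matching ``request.param`` from sys.modules and drops
# site-packages entries from sys.path so that import-error code paths can be exercised;
# both are restored on teardown.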
@pytest.fixture
def no_site_packages(request):
import sys
modules = {}
for k in sys.modules:
if request.param in k:
modules[k] = sys.modules[k]
for k in modules:
del sys.modules[k]
prev_path = list(sys.path)
sys.path = [p for p in sys.path if "site-packages" not in p]
yield "no_site_packages"
sys.path = prev_path
for k in modules:
sys.modules[k] = modules[k]
@pytest.fixture()
def visdom_offline_logfile(dirname):
log_file = dirname / "logs.visdom"
yield log_file
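# Module-level handles for a lazily started Visdom server, shared across tests and
# terminated by the visdom_server_stop fixture below.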
vd_hostname = None
vd_port = None
vd_server_process = None
@pytest.fixture()
def visdom_server():
# Start Visdom server once and stop it with visdom_server_stop
global vd_hostname, vd_port, vd_server_process
if vd_server_process is None:
import subprocess
import time
from visdom import Visdom
from visdom.server.build import download_scripts
(Path.home() / ".visdom").mkdir(exist_ok=True)
download_scripts()
vd_hostname = "localhost"
vd_port = random.randint(8089, 8887)
try:
vis = Visdom(server=vd_hostname, port=vd_port, raise_exceptions=True)
except ConnectionError:
pass
vd_server_process = subprocess.Popen(
["python", "-m", "visdom.server", "--hostname", vd_hostname, "-port", str(vd_port)]
)
time.sleep(5)
vis = Visdom(server=vd_hostname, port=vd_port)
assert vis.check_connection()
vis.close()
yield (vd_hostname, vd_port)
@pytest.fixture()
def visdom_server_stop():
yield None
import time
vd_server_process.kill()
time.sleep(2)
|
# coding: utf-8
|
from unittest.mock import Mock, patch
import pytest
import torch
from ignite.contrib.metrics import GpuInfo
from ignite.engine import Engine, State
def test_no_pynvml_package():
with patch.dict("sys.modules", {"pynvml.smi": None}):
with pytest.raises(ModuleNotFoundError, match="This contrib module requires pynvml to be installed."):
GpuInfo()
@pytest.mark.skipif(torch.cuda.is_available(), reason="Skip if GPU")
def test_no_gpu():
with pytest.raises(RuntimeError, match="This contrib module requires available GPU"):
GpuInfo()
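# Drives GpuInfo manually (reset/update/compute) and checks the values it publishes into
# engine.state.metrics: "gpu:0 mem(%)" and, when reported, "gpu:0 util(%)". In training code
# the metric is typically attached instead, e.g. GpuInfo().attach(trainer, name="gpu").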
def _test_gpu_info(device="cpu"):
gpu_info = GpuInfo()
    # call reset/update to increase code coverage
gpu_info.reset()
gpu_info.update(None)
t = torch.rand(4, 10, 100, 100).to(device)
data = gpu_info.compute()
assert len(data) > 0
assert "fb_memory_usage" in data[0]
mem_report = data[0]["fb_memory_usage"]
assert "used" in mem_report and "total" in mem_report
assert mem_report["total"] > 0.0
assert mem_report["used"] > t.shape[0] * t.shape[1] * t.shape[2] * t.shape[3] / 1024.0 / 1024.0
assert "utilization" in data[0]
util_report = data[0]["utilization"]
assert "gpu_util" in util_report
# with Engine
engine = Engine(lambda engine, batch: 0.0)
engine.state = State(metrics={})
gpu_info.completed(engine, name="gpu")
assert "gpu:0 mem(%)" in engine.state.metrics
assert isinstance(engine.state.metrics["gpu:0 mem(%)"], int)
assert int(mem_report["used"] * 100.0 / mem_report["total"]) == engine.state.metrics["gpu:0 mem(%)"]
if util_report["gpu_util"] != "N/A":
assert "gpu:0 util(%)" in engine.state.metrics
assert isinstance(engine.state.metrics["gpu:0 util(%)"], int)
assert int(util_report["gpu_util"]) == engine.state.metrics["gpu:0 util(%)"]
else:
assert "gpu:0 util(%)" not in engine.state.metrics
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_gpu_info_on_cuda():
_test_gpu_info(device="cuda")
query_resp = None
@pytest.fixture
def mock_pynvml_module():
with patch.dict(
"sys.modules",
{
"pynvml": Mock(name="pynvml"),
"pynvml.smi": Mock(name="pynvml.smi"),
"pynvml.smi.nvidia_smi": Mock(name="pynvml.smi.nvidia_smi"),
},
):
import pynvml
from pynvml.smi import nvidia_smi
def query(*args, **kwargs):
return query_resp
def getInstance():
nvsmi = Mock()
nvsmi.DeviceQuery = Mock(side_effect=query)
return nvsmi
nvidia_smi.getInstance = Mock(side_effect=getInstance)
yield pynvml
@pytest.fixture
def mock_gpu_is_available():
with patch("ignite.contrib.metrics.gpu_info.torch.cuda") as mock_cuda:
mock_cuda.is_available.return_value = True
yield mock_cuda
@pytest.mark.skipif(torch.cuda.is_available(), reason="No need to mock if has GPU")
def test_gpu_info_mock(mock_pynvml_module, mock_gpu_is_available):
global query_resp
query_resp = {"gpu": [{"fb_memory_usage": {"used": 100.0, "total": 11000.0}, "utilization": {"gpu_util": 50.0}}]}
assert torch.cuda.is_available()
_test_gpu_info()
# Tests https://github.com/pytorch/ignite/issues/1040
query_resp = {"gpu": [{"fb_memory_usage": {"used": 100.0, "total": 11000.0}, "utilization": {"gpu_util": "N/A"}}]}
_test_gpu_info()
def _test_with_custom_query(resp, warn_msg, check_compute=False):
from pynvml.smi import nvidia_smi
def query(*args, **kwargs):
return resp
def getInstance():
nvsmi = Mock()
nvsmi.DeviceQuery = Mock(side_effect=query)
return nvsmi
nvidia_smi.getInstance = Mock(side_effect=getInstance)
gpu_info = GpuInfo()
if check_compute:
with pytest.warns(UserWarning, match=warn_msg):
gpu_info.compute()
# with Engine
engine = Engine(lambda engine, batch: 0.0)
engine.state = State(metrics={})
with pytest.warns(UserWarning, match=warn_msg):
gpu_info.completed(engine, name="gpu info")
# No GPU info
_test_with_custom_query(resp={}, warn_msg=r"No GPU information available", check_compute=True)
# No GPU memory info
_test_with_custom_query(resp={"gpu": [{"utilization": {}}]}, warn_msg=r"No GPU memory usage information available")
# No GPU utilization info
_test_with_custom_query(
resp={"gpu": [{"fb_memory_usage": {}}]}, warn_msg=r"No GPU utilization information available"
)
|
import os
from unittest.mock import patch
import pytest
import sklearn
import torch
from sklearn.metrics import average_precision_score
import ignite.distributed as idist
from ignite.contrib.metrics import AveragePrecision
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
torch.manual_seed(12)
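# Fixture: patches sys.modules so that importing sklearn.metrics fails, exercising the
# ModuleNotFoundError path of the contrib metric (the same pattern is reused in the
# RocCurve, ROC_AUC and CohenKappa test modules below).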
@pytest.fixture()
def mock_no_sklearn():
with patch.dict("sys.modules", {"sklearn.metrics": None}):
yield sklearn
def test_no_sklearn(mock_no_sklearn):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires scikit-learn to be installed."):
AveragePrecision()
def test_no_update():
ap = AveragePrecision()
with pytest.raises(
NotComputableError, match=r"EpochMetric must have at least one example before it can be computed"
):
ap.compute()
def test_input_types():
ap = AveragePrecision()
ap.reset()
output1 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
ap.update(output1)
with pytest.raises(ValueError, match=r"Incoherent types between input y_pred and stored predictions"):
ap.update((torch.randint(0, 5, size=(4, 3)), torch.randint(0, 2, size=(4, 3))))
with pytest.raises(ValueError, match=r"Incoherent types between input y and stored targets"):
ap.update((torch.rand(4, 3), torch.randint(0, 2, size=(4, 3)).to(torch.int32)))
with pytest.raises(ValueError, match=r"Incoherent types between input y_pred and stored predictions"):
ap.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10, 5)).long()))
def test_check_shape():
ap = AveragePrecision()
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
ap._check_shape((torch.tensor(0), torch.tensor(0)))
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
ap._check_shape((torch.rand(4, 3, 1), torch.rand(4, 3)))
with pytest.raises(ValueError, match=r"Targets should be of shape"):
ap._check_shape((torch.rand(4, 3), torch.rand(4, 3, 1)))
def test_binary_and_multilabel_inputs():
ap = AveragePrecision()
def _test(y_pred, y, batch_size):
ap.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
ap.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
ap.update((y_pred, y))
np_y = y.numpy()
np_y_pred = y_pred.numpy()
res = ap.compute()
assert isinstance(res, float)
assert average_precision_score(np_y, np_y_pred) == pytest.approx(res)
def get_test_cases():
test_cases = [
# Binary input data of shape (N,) or (N, 1)
(torch.randint(0, 2, size=(50,)).long(), torch.randint(0, 2, size=(50,)).long(), 1),
(torch.randint(0, 2, size=(50, 1)).long(), torch.randint(0, 2, size=(50, 1)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50,)).long(), torch.randint(0, 2, size=(50,)).long(), 16),
(torch.randint(0, 2, size=(50, 1)).long(), torch.randint(0, 2, size=(50, 1)).long(), 16),
# Binary input data of shape (N, L)
(torch.randint(0, 2, size=(50, 4)).long(), torch.randint(0, 2, size=(50, 4)).long(), 1),
(torch.randint(0, 2, size=(50, 7)).long(), torch.randint(0, 2, size=(50, 7)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50, 4)).long(), torch.randint(0, 2, size=(50, 4)).long(), 16),
(torch.randint(0, 2, size=(50, 7)).long(), torch.randint(0, 2, size=(50, 7)).long(), 16),
]
return test_cases
for _ in range(5):
        # check multiple random inputs as random exact occurrences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def test_integration_binary_and_multilabel_inputs():
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
ap_metric = AveragePrecision()
ap_metric.attach(engine, "ap")
np_y = y.numpy()
np_y_pred = y_pred.numpy()
np_ap = average_precision_score(np_y, np_y_pred)
data = list(range(y_pred.shape[0] // batch_size))
ap = engine.run(data, max_epochs=1).metrics["ap"]
assert isinstance(ap, float)
assert np_ap == pytest.approx(ap)
def get_test_cases():
test_cases = [
# Binary input data of shape (N,) or (N, 1)
(torch.randint(0, 2, size=(100,)).long(), torch.randint(0, 2, size=(100,)).long(), 10),
(torch.randint(0, 2, size=(100, 1)).long(), torch.randint(0, 2, size=(100, 1)).long(), 10),
# Binary input data of shape (N, L)
(torch.randint(0, 2, size=(100, 3)).long(), torch.randint(0, 2, size=(100, 3)).long(), 10),
(torch.randint(0, 2, size=(100, 4)).long(), torch.randint(0, 2, size=(100, 4)).long(), 10),
]
return test_cases
for _ in range(5):
        # check multiple random inputs as random exact occurrences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
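# Distributed variant: each rank updates the metric with its local shard, then predictions
# and targets are all-gathered so the sklearn reference score is computed on the full data.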
def _test_distrib_binary_and_multilabel_inputs(device):
rank = idist.get_rank()
torch.manual_seed(12)
def _test(y_pred, y, batch_size, metric_device):
metric_device = torch.device(metric_device)
ap = AveragePrecision(device=metric_device)
torch.manual_seed(10 + rank)
ap.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
ap.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
ap.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y = y.cpu().numpy()
np_y_pred = y_pred.cpu().numpy()
res = ap.compute()
assert isinstance(res, float)
assert average_precision_score(np_y, np_y_pred) == pytest.approx(res)
def get_test_cases():
test_cases = [
# Binary input data of shape (N,) or (N, 1)
(torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10,)).long(), 1),
(torch.randint(0, 2, size=(10, 1)).long(), torch.randint(0, 2, size=(10, 1)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50,)).long(), torch.randint(0, 2, size=(50,)).long(), 16),
(torch.randint(0, 2, size=(50, 1)).long(), torch.randint(0, 2, size=(50, 1)).long(), 16),
# Binary input data of shape (N, L)
(torch.randint(0, 2, size=(10, 4)).long(), torch.randint(0, 2, size=(10, 4)).long(), 1),
(torch.randint(0, 2, size=(10, 7)).long(), torch.randint(0, 2, size=(10, 7)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50, 4)).long(), torch.randint(0, 2, size=(50, 4)).long(), 16),
(torch.randint(0, 2, size=(50, 7)).long(), torch.randint(0, 2, size=(50, 7)).long(), 16),
]
return test_cases
for _ in range(3):
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
y_pred = y_pred.to(device)
y = y.to(device)
_test(y_pred, y, batch_size, "cpu")
if device.type != "xla":
_test(y_pred, y, batch_size, idist.device())
def _test_distrib_integration_binary_input(device):
rank = idist.get_rank()
n_iters = 80
batch_size = 16
n_classes = 2
def _test(y_preds, y_true, n_epochs, metric_device, update_fn):
metric_device = torch.device(metric_device)
engine = Engine(update_fn)
ap = AveragePrecision(device=metric_device)
ap.attach(engine, "ap")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_true = idist.all_gather(y_true)
y_preds = idist.all_gather(y_preds)
assert "ap" in engine.state.metrics
res = engine.state.metrics["ap"]
true_res = average_precision_score(y_true.cpu().numpy(), y_preds.cpu().numpy())
assert pytest.approx(res) == true_res
def get_tests(is_N):
torch.manual_seed(12 + rank)
if is_N:
y_true = torch.randint(0, n_classes, size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(n_iters * batch_size).to(device)
def update_fn(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
else:
y_true = torch.randint(0, n_classes, size=(n_iters * batch_size, 10)).to(device)
y_preds = torch.randint(0, n_classes, size=(n_iters * batch_size, 10)).to(device)
def update_fn(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size, :],
y_true[i * batch_size : (i + 1) * batch_size, :],
)
return y_preds, y_true, update_fn
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for _ in range(2):
# Binary input data of shape (N,)
y_preds, y_true, update_fn = get_tests(is_N=True)
_test(y_preds, y_true, n_epochs=1, metric_device=metric_device, update_fn=update_fn)
_test(y_preds, y_true, n_epochs=2, metric_device=metric_device, update_fn=update_fn)
# Binary input data of shape (N, L)
y_preds, y_true, update_fn = get_tests(is_N=False)
_test(y_preds, y_true, n_epochs=1, metric_device=metric_device, update_fn=update_fn)
_test(y_preds, y_true, n_epochs=2, metric_device=metric_device, update_fn=update_fn)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_binary_and_multilabel_inputs(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_binary_and_multilabel_inputs(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_binary_and_multilabel_inputs, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration_binary_input, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_binary_and_multilabel_inputs(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_binary_and_multilabel_inputs(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_binary_and_multilabel_inputs(device)
_test_distrib_integration_binary_input(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_binary_and_multilabel_inputs(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
from unittest.mock import patch
import numpy as np
import pytest
import sklearn
import torch
from sklearn.metrics import roc_curve
from ignite import distributed as idist
from ignite.contrib.metrics.roc_auc import RocCurve
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
from ignite.metrics.epoch_metric import EpochMetricWarning
def test_wrong_setup():
def compute_fn(y_preds, y_targets):
return 0.0
with pytest.raises(NotComputableError, match="RocCurve must have at least one example before it can be computed"):
metric = RocCurve(compute_fn)
metric.compute()
@pytest.fixture()
def mock_no_sklearn():
with patch.dict("sys.modules", {"sklearn.metrics": None}):
yield sklearn
def test_no_sklearn(mock_no_sklearn):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires scikit-learn to be installed"):
RocCurve()
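# RocCurve.compute() returns (fpr, tpr, thresholds); the tests compare them against
# sklearn.metrics.roc_curve, with thresholds checked approximately because of the
# numpy -> torch -> numpy round trip.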
def test_roc_curve():
size = 100
np_y_pred = np.random.rand(size, 1)
np_y = np.zeros((size,))
np_y[size // 2 :] = 1
sk_fpr, sk_tpr, sk_thresholds = roc_curve(np_y, np_y_pred)
roc_curve_metric = RocCurve()
y_pred = torch.from_numpy(np_y_pred)
y = torch.from_numpy(np_y)
roc_curve_metric.update((y_pred, y))
fpr, tpr, thresholds = roc_curve_metric.compute()
assert np.array_equal(fpr, sk_fpr)
assert np.array_equal(tpr, sk_tpr)
# assert thresholds almost equal, due to numpy->torch->numpy conversion
np.testing.assert_array_almost_equal(thresholds, sk_thresholds)
def test_integration_roc_curve_with_output_transform():
np.random.seed(1)
size = 100
np_y_pred = np.random.rand(size, 1)
np_y = np.zeros((size,))
np_y[size // 2 :] = 1
np.random.shuffle(np_y)
sk_fpr, sk_tpr, sk_thresholds = roc_curve(np_y, np_y_pred)
batch_size = 10
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return idx, torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
roc_curve_metric = RocCurve(output_transform=lambda x: (x[1], x[2]))
roc_curve_metric.attach(engine, "roc_curve")
data = list(range(size // batch_size))
fpr, tpr, thresholds = engine.run(data, max_epochs=1).metrics["roc_curve"]
assert np.array_equal(fpr, sk_fpr)
assert np.array_equal(tpr, sk_tpr)
# assert thresholds almost equal, due to numpy->torch->numpy conversion
np.testing.assert_array_almost_equal(thresholds, sk_thresholds)
def test_integration_roc_curve_with_activated_output_transform():
np.random.seed(1)
size = 100
np_y_pred = np.random.rand(size, 1)
np_y_pred_sigmoid = torch.sigmoid(torch.from_numpy(np_y_pred)).numpy()
np_y = np.zeros((size,))
np_y[size // 2 :] = 1
np.random.shuffle(np_y)
sk_fpr, sk_tpr, sk_thresholds = roc_curve(np_y, np_y_pred_sigmoid)
batch_size = 10
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return idx, torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
roc_curve_metric = RocCurve(output_transform=lambda x: (torch.sigmoid(x[1]), x[2]))
roc_curve_metric.attach(engine, "roc_curve")
data = list(range(size // batch_size))
fpr, tpr, thresholds = engine.run(data, max_epochs=1).metrics["roc_curve"]
assert np.array_equal(fpr, sk_fpr)
assert np.array_equal(tpr, sk_tpr)
# assert thresholds almost equal, due to numpy->torch->numpy conversion
np.testing.assert_array_almost_equal(thresholds, sk_thresholds)
def test_check_compute_fn():
y_pred = torch.zeros((8, 13))
y_pred[:, 1] = 1
y_true = torch.zeros_like(y_pred)
output = (y_pred, y_true)
em = RocCurve(check_compute_fn=True)
em.reset()
with pytest.warns(EpochMetricWarning, match=r"Probably, there can be a problem with `compute_fn`"):
em.update(output)
em = RocCurve(check_compute_fn=False)
em.update(output)
def test_distrib_integration(distributed):
rank = idist.get_rank()
torch.manual_seed(41 + rank)
n_batches, batch_size = 5, 10
y = torch.randint(0, 2, size=(n_batches * batch_size,))
y_pred = torch.rand((n_batches * batch_size,))
def update(engine, i):
return (
y_pred[i * batch_size : (i + 1) * batch_size],
y[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
device = torch.device("cpu") if idist.device().type == "xla" else idist.device()
metric = RocCurve(device=device)
metric.attach(engine, "roc_curve")
data = list(range(n_batches))
engine.run(data=data, max_epochs=1)
fpr, tpr, thresholds = engine.state.metrics["roc_curve"]
assert isinstance(fpr, torch.Tensor) and fpr.device == device
assert isinstance(tpr, torch.Tensor) and tpr.device == device
assert isinstance(thresholds, torch.Tensor) and thresholds.device == device
y = idist.all_gather(y)
y_pred = idist.all_gather(y_pred)
sk_fpr, sk_tpr, sk_thresholds = roc_curve(y.cpu().numpy(), y_pred.cpu().numpy())
np.testing.assert_array_almost_equal(fpr.cpu().numpy(), sk_fpr)
np.testing.assert_array_almost_equal(tpr.cpu().numpy(), sk_tpr)
np.testing.assert_array_almost_equal(thresholds.cpu().numpy(), sk_thresholds)
|
import os
from unittest.mock import patch
import pytest
import sklearn
import torch
from sklearn.metrics import roc_auc_score
import ignite.distributed as idist
from ignite.contrib.metrics import ROC_AUC
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
from ignite.metrics.epoch_metric import EpochMetricWarning
torch.manual_seed(12)
@pytest.fixture()
def mock_no_sklearn():
with patch.dict("sys.modules", {"sklearn.metrics": None}):
yield sklearn
def test_no_sklearn(mock_no_sklearn):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires scikit-learn to be installed."):
ROC_AUC()
def test_no_update():
roc_auc = ROC_AUC()
with pytest.raises(
NotComputableError, match=r"EpochMetric must have at least one example before it can be computed"
):
roc_auc.compute()
def test_input_types():
roc_auc = ROC_AUC()
roc_auc.reset()
output1 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
roc_auc.update(output1)
with pytest.raises(ValueError, match=r"Incoherent types between input y_pred and stored predictions"):
roc_auc.update((torch.randint(0, 5, size=(4, 3)), torch.randint(0, 2, size=(4, 3))))
with pytest.raises(ValueError, match=r"Incoherent types between input y and stored targets"):
roc_auc.update((torch.rand(4, 3), torch.randint(0, 2, size=(4, 3)).to(torch.int32)))
with pytest.raises(ValueError, match=r"Incoherent types between input y_pred and stored predictions"):
roc_auc.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10, 5)).long()))
def test_check_shape():
roc_auc = ROC_AUC()
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
roc_auc._check_shape((torch.tensor(0), torch.tensor(0)))
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
roc_auc._check_shape((torch.rand(4, 3, 1), torch.rand(4, 3)))
with pytest.raises(ValueError, match=r"Targets should be of shape"):
roc_auc._check_shape((torch.rand(4, 3), torch.rand(4, 3, 1)))
def test_binary_and_multilabel_inputs():
roc_auc = ROC_AUC()
def _test(y_pred, y, batch_size):
roc_auc.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
roc_auc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
roc_auc.update((y_pred, y))
np_y = y.numpy()
np_y_pred = y_pred.numpy()
res = roc_auc.compute()
assert isinstance(res, float)
assert roc_auc_score(np_y, np_y_pred) == pytest.approx(res)
def get_test_cases():
test_cases = [
# Binary input data of shape (N,) or (N, 1)
(torch.randint(0, 2, size=(50,)).long(), torch.randint(0, 2, size=(50,)).long(), 1),
(torch.randint(0, 2, size=(50, 1)).long(), torch.randint(0, 2, size=(50, 1)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50,)).long(), torch.randint(0, 2, size=(50,)).long(), 16),
(torch.randint(0, 2, size=(50, 1)).long(), torch.randint(0, 2, size=(50, 1)).long(), 16),
# Binary input data of shape (N, L)
(torch.randint(0, 2, size=(50, 4)).long(), torch.randint(0, 2, size=(50, 4)).long(), 1),
(torch.randint(0, 2, size=(50, 7)).long(), torch.randint(0, 2, size=(50, 7)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50, 4)).long(), torch.randint(0, 2, size=(50, 4)).long(), 16),
(torch.randint(0, 2, size=(50, 7)).long(), torch.randint(0, 2, size=(50, 7)).long(), 16),
]
return test_cases
for _ in range(5):
test_cases = get_test_cases()
        # check multiple random inputs as random exact occurrences are rare
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def test_check_compute_fn():
y_pred = torch.zeros((8, 13))
y_pred[:, 1] = 1
y_true = torch.zeros_like(y_pred)
output = (y_pred, y_true)
em = ROC_AUC(check_compute_fn=True)
em.reset()
with pytest.warns(EpochMetricWarning, match=r"Probably, there can be a problem with `compute_fn`"):
em.update(output)
em = ROC_AUC(check_compute_fn=False)
em.update(output)
def test_integration_binary_and_multilabel_inputs():
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
roc_auc_metric = ROC_AUC()
roc_auc_metric.attach(engine, "roc_auc")
np_y = y.numpy()
np_y_pred = y_pred.numpy()
np_roc_auc = roc_auc_score(np_y, np_y_pred)
data = list(range(y_pred.shape[0] // batch_size))
roc_auc = engine.run(data, max_epochs=1).metrics["roc_auc"]
assert isinstance(roc_auc, float)
assert np_roc_auc == pytest.approx(roc_auc)
def get_test_cases():
test_cases = [
# Binary input data of shape (N,) or (N, 1)
(torch.randint(0, 2, size=(100,)).long(), torch.randint(0, 2, size=(100,)).long(), 10),
(torch.randint(0, 2, size=(100, 1)).long(), torch.randint(0, 2, size=(100, 1)).long(), 10),
# Binary input data of shape (N, L)
(torch.randint(0, 2, size=(100, 3)).long(), torch.randint(0, 2, size=(100, 3)).long(), 10),
(torch.randint(0, 2, size=(100, 4)).long(), torch.randint(0, 2, size=(100, 4)).long(), 10),
]
return test_cases
for _ in range(5):
        # check multiple random inputs as random exact occurrences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def _test_distrib_binary_and_multilabel_inputs(device):
rank = idist.get_rank()
def _test(y_pred, y, batch_size, metric_device):
metric_device = torch.device(metric_device)
roc_auc = ROC_AUC(device=metric_device)
roc_auc.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
roc_auc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
roc_auc.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y = y.cpu().numpy()
np_y_pred = y_pred.cpu().numpy()
res = roc_auc.compute()
assert isinstance(res, float)
assert roc_auc_score(np_y, np_y_pred) == pytest.approx(res)
def get_test_cases():
test_cases = [
# Binary input data of shape (N,) or (N, 1)
(torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10,)).long(), 1),
(torch.randint(0, 2, size=(10, 1)).long(), torch.randint(0, 2, size=(10, 1)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50,)).long(), torch.randint(0, 2, size=(50,)).long(), 16),
(torch.randint(0, 2, size=(50, 1)).long(), torch.randint(0, 2, size=(50, 1)).long(), 16),
# Binary input data of shape (N, L)
(torch.randint(0, 2, size=(10, 4)).long(), torch.randint(0, 2, size=(10, 4)).long(), 1),
(torch.randint(0, 2, size=(10, 7)).long(), torch.randint(0, 2, size=(10, 7)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50, 4)).long(), torch.randint(0, 2, size=(50, 4)).long(), 16),
(torch.randint(0, 2, size=(50, 7)).long(), torch.randint(0, 2, size=(50, 7)).long(), 16),
]
return test_cases
for i in range(5):
torch.manual_seed(12 + rank + i)
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size, "cpu")
if device.type != "xla":
_test(y_pred, y, batch_size, idist.device())
def _test_distrib_integration_binary_input(device):
rank = idist.get_rank()
n_iters = 80
batch_size = 16
n_classes = 2
def _test(y_preds, y_true, n_epochs, metric_device, update_fn):
metric_device = torch.device(metric_device)
engine = Engine(update_fn)
roc_auc = ROC_AUC(device=metric_device)
roc_auc.attach(engine, "roc_auc")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "roc_auc" in engine.state.metrics
res = engine.state.metrics["roc_auc"]
true_res = roc_auc_score(y_true.cpu().numpy(), y_preds.cpu().numpy())
assert pytest.approx(res) == true_res
def get_tests(is_N):
if is_N:
y_true = torch.randint(0, n_classes, size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(n_iters * batch_size).to(device)
def update_fn(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
else:
y_true = torch.randint(0, n_classes, size=(n_iters * batch_size, 10)).to(device)
y_preds = torch.rand(n_iters * batch_size, 10).to(device)
def update_fn(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
return y_preds, y_true, update_fn
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(12 + rank + i)
# Binary input data of shape (N,)
y_preds, y_true, update_fn = get_tests(is_N=True)
_test(y_preds, y_true, n_epochs=1, metric_device=metric_device, update_fn=update_fn)
_test(y_preds, y_true, n_epochs=2, metric_device=metric_device, update_fn=update_fn)
# Binary input data of shape (N, L)
y_preds, y_true, update_fn = get_tests(is_N=False)
_test(y_preds, y_true, n_epochs=1, metric_device=metric_device, update_fn=update_fn)
_test(y_preds, y_true, n_epochs=2, metric_device=metric_device, update_fn=update_fn)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_binary_and_multilabel_inputs(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_binary_and_multilabel_inputs(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_binary_and_multilabel_inputs, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration_binary_input, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_binary_and_multilabel_inputs(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_binary_and_multilabel_inputs(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_binary_and_multilabel_inputs(device)
_test_distrib_integration_binary_input(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_binary_and_multilabel_inputs(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
from unittest.mock import patch
import pytest
import sklearn
import torch
from sklearn.metrics import cohen_kappa_score
import ignite.distributed as idist
from ignite.contrib.metrics import CohenKappa
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
torch.manual_seed(12)
@pytest.fixture()
def mock_no_sklearn():
with patch.dict("sys.modules", {"sklearn.metrics": None}):
yield sklearn
def test_no_sklearn(mock_no_sklearn):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires scikit-learn to be installed."):
CohenKappa()
def test_no_update():
ck = CohenKappa()
with pytest.raises(
NotComputableError, match=r"EpochMetric must have at least one example before it can be computed"
):
ck.compute()
def test_input_types():
ck = CohenKappa()
ck.reset()
output1 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
ck.update(output1)
with pytest.raises(ValueError, match=r"Incoherent types between input y_pred and stored predictions"):
ck.update((torch.randint(0, 5, size=(4, 3)), torch.randint(0, 2, size=(4, 3))))
with pytest.raises(ValueError, match=r"Incoherent types between input y and stored targets"):
ck.update((torch.rand(4, 3), torch.randint(0, 2, size=(4, 3)).to(torch.int32)))
with pytest.raises(ValueError, match=r"Incoherent types between input y_pred and stored predictions"):
ck.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10, 5)).long()))
def test_check_shape():
ck = CohenKappa()
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
ck._check_shape((torch.tensor(0), torch.tensor(0)))
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
ck._check_shape((torch.rand(4, 3, 1), torch.rand(4, 3)))
with pytest.raises(ValueError, match=r"Targets should be of shape"):
ck._check_shape((torch.rand(4, 3), torch.rand(4, 3, 1)))
def test_cohen_kappa_wrong_weights_type():
with pytest.raises(ValueError, match=r"Kappa Weighting type must be"):
ck = CohenKappa(weights=7)
with pytest.raises(ValueError, match=r"Kappa Weighting type must be"):
ck = CohenKappa(weights="dd")
@pytest.mark.parametrize("weights", [None, "linear", "quadratic"])
def test_binary_input(weights):
ck = CohenKappa(weights)
def _test(y_pred, y, batch_size):
ck.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
ck.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
ck.update((y_pred, y))
np_y = y.numpy()
np_y_pred = y_pred.numpy()
res = ck.compute()
assert isinstance(res, float)
assert cohen_kappa_score(np_y, np_y_pred, weights=weights) == pytest.approx(res)
def get_test_cases():
test_cases = [
# Binary input data of shape (N,) or (N, 1)
(torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10,)).long(), 1),
(torch.randint(0, 2, size=(10, 1)).long(), torch.randint(0, 2, size=(10, 1)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50,)).long(), torch.randint(0, 2, size=(50,)).long(), 16),
(torch.randint(0, 2, size=(50, 1)).long(), torch.randint(0, 2, size=(50, 1)).long(), 16),
]
return test_cases
for _ in range(5):
        # check multiple random inputs as random exact occurrences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def test_multilabel_inputs():
ck = CohenKappa()
with pytest.raises(ValueError, match=r"multilabel-indicator is not supported"):
ck.reset()
ck.update((torch.randint(0, 2, size=(10, 4)).long(), torch.randint(0, 2, size=(10, 4)).long()))
ck.compute()
with pytest.raises(ValueError, match=r"multilabel-indicator is not supported"):
ck.reset()
ck.update((torch.randint(0, 2, size=(10, 6)).long(), torch.randint(0, 2, size=(10, 6)).long()))
ck.compute()
with pytest.raises(ValueError, match=r"multilabel-indicator is not supported"):
ck.reset()
ck.update((torch.randint(0, 2, size=(10, 8)).long(), torch.randint(0, 2, size=(10, 8)).long()))
ck.compute()
@pytest.mark.parametrize("weights", [None, "linear", "quadratic"])
def test_integration_binary_input(weights):
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
ck_metric = CohenKappa(weights=weights)
ck_metric.attach(engine, "ck")
np_y = y.numpy()
np_y_pred = y_pred.numpy()
np_ck = cohen_kappa_score(np_y, np_y_pred, weights=weights)
data = list(range(y_pred.shape[0] // batch_size))
ck = engine.run(data, max_epochs=1).metrics["ck"]
assert isinstance(ck, float)
assert np_ck == pytest.approx(ck)
def get_test_cases():
test_cases = [
# Binary input data of shape (N,) or (N, 1)
(torch.randint(0, 2, size=(50,)).long(), torch.randint(0, 2, size=(50,)).long(), 10),
(torch.randint(0, 2, size=(50, 1)).long(), torch.randint(0, 2, size=(50, 1)).long(), 10),
]
return test_cases
for _ in range(5):
        # check multiple random inputs as random exact occurrences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def _test_distrib_binary_input(device):
rank = idist.get_rank()
def _test(y_pred, y, batch_size, metric_device):
metric_device = torch.device(metric_device)
ck = CohenKappa(device=metric_device)
ck.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
ck.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
ck.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y = y.cpu().numpy()
np_y_pred = y_pred.cpu().numpy()
res = ck.compute()
assert isinstance(res, float)
assert cohen_kappa_score(np_y, np_y_pred) == pytest.approx(res)
def get_test_cases():
test_cases = [
# Binary input data of shape (N,) or (N, 1)
(torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10,)).long(), 1),
(torch.randint(0, 2, size=(10, 1)).long(), torch.randint(0, 2, size=(10, 1)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50,)).long(), torch.randint(0, 2, size=(50,)).long(), 16),
(torch.randint(0, 2, size=(50, 1)).long(), torch.randint(0, 2, size=(50, 1)).long(), 16),
]
return test_cases
for i in range(3):
torch.manual_seed(10 + rank + i)
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size, "cpu")
if device.type != "xla":
_test(y_pred, y, batch_size, idist.device())
def _test_distrib_integration_binary_input(device):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
torch.manual_seed(12 + rank)
# Binary input data of shape (N,) or (N, 1)
y_true = torch.randint(0, 2, size=(n_iters * batch_size,)).to(device)
y_preds = torch.randint(0, 2, size=(n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
ck = CohenKappa(device=metric_device)
ck.attach(engine, "ck")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_true = idist.all_gather(y_true)
y_preds = idist.all_gather(y_preds)
assert "ck" in engine.state.metrics
res = engine.state.metrics["ck"]
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
true_res = cohen_kappa_score(y_true.cpu().numpy(), y_preds.cpu().numpy())
assert pytest.approx(res) == true_res
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for _ in range(2):
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_binary_input(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_binary_input(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_binary_input, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration_binary_input, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_binary_input(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_binary_input(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_binary_input(device)
_test_distrib_integration_binary_input(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_binary_input(device)
_test_distrib_integration_binary_input(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
from typing import Tuple
from unittest.mock import patch
import numpy as np
import pytest
import sklearn
import torch
from sklearn.metrics import precision_recall_curve
import ignite.distributed as idist
from ignite.contrib.metrics.precision_recall_curve import PrecisionRecallCurve
from ignite.engine import Engine
from ignite.metrics.epoch_metric import EpochMetricWarning
@pytest.fixture()
def mock_no_sklearn():
with patch.dict("sys.modules", {"sklearn.metrics": None}):
yield sklearn
def test_no_sklearn(mock_no_sklearn):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires scikit-learn to be installed."):
y = torch.tensor([1, 1])
pr_curve = PrecisionRecallCurve()
pr_curve.update((y, y))
pr_curve.compute()
def test_precision_recall_curve():
size = 100
np_y_pred = np.random.rand(size, 1)
np_y = np.zeros((size,))
np_y[size // 2 :] = 1
sk_precision, sk_recall, sk_thresholds = precision_recall_curve(np_y, np_y_pred)
precision_recall_curve_metric = PrecisionRecallCurve()
y_pred = torch.from_numpy(np_y_pred)
y = torch.from_numpy(np_y)
precision_recall_curve_metric.update((y_pred, y))
precision, recall, thresholds = precision_recall_curve_metric.compute()
precision = precision.numpy()
recall = recall.numpy()
thresholds = thresholds.numpy()
assert pytest.approx(precision) == sk_precision
assert pytest.approx(recall) == sk_recall
# assert thresholds almost equal, due to numpy->torch->numpy conversion
np.testing.assert_array_almost_equal(thresholds, sk_thresholds)
def test_integration_precision_recall_curve_with_output_transform():
np.random.seed(1)
size = 100
np_y_pred = np.random.rand(size, 1)
np_y = np.zeros((size,))
np_y[size // 2 :] = 1
np.random.shuffle(np_y)
sk_precision, sk_recall, sk_thresholds = precision_recall_curve(np_y, np_y_pred)
batch_size = 10
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return idx, torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
precision_recall_curve_metric = PrecisionRecallCurve(output_transform=lambda x: (x[1], x[2]))
precision_recall_curve_metric.attach(engine, "precision_recall_curve")
data = list(range(size // batch_size))
precision, recall, thresholds = engine.run(data, max_epochs=1).metrics["precision_recall_curve"]
precision = precision.numpy()
recall = recall.numpy()
thresholds = thresholds.numpy()
assert pytest.approx(precision) == sk_precision
assert pytest.approx(recall) == sk_recall
# assert thresholds almost equal, due to numpy->torch->numpy conversion
np.testing.assert_array_almost_equal(thresholds, sk_thresholds)
def test_integration_precision_recall_curve_with_activated_output_transform():
np.random.seed(1)
size = 100
np_y_pred = np.random.rand(size, 1)
np_y_pred_sigmoid = torch.sigmoid(torch.from_numpy(np_y_pred)).numpy()
np_y = np.zeros((size,))
np_y[size // 2 :] = 1
np.random.shuffle(np_y)
sk_precision, sk_recall, sk_thresholds = precision_recall_curve(np_y, np_y_pred_sigmoid)
batch_size = 10
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return idx, torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
precision_recall_curve_metric = PrecisionRecallCurve(output_transform=lambda x: (torch.sigmoid(x[1]), x[2]))
precision_recall_curve_metric.attach(engine, "precision_recall_curve")
data = list(range(size // batch_size))
precision, recall, thresholds = engine.run(data, max_epochs=1).metrics["precision_recall_curve"]
precision = precision.cpu().numpy()
recall = recall.cpu().numpy()
thresholds = thresholds.cpu().numpy()
assert pytest.approx(precision) == sk_precision
assert pytest.approx(recall) == sk_recall
# assert thresholds almost equal, due to numpy->torch->numpy conversion
np.testing.assert_array_almost_equal(thresholds, sk_thresholds)
def test_check_compute_fn():
y_pred = torch.zeros((8, 13))
y_pred[:, 1] = 1
y_true = torch.zeros_like(y_pred)
output = (y_pred, y_true)
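    # with check_compute_fn=True, EpochMetric tries the compute function early and emits EpochMetricWarning if it fails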
em = PrecisionRecallCurve(check_compute_fn=True)
em.reset()
with pytest.warns(EpochMetricWarning, match=r"Probably, there can be a problem with `compute_fn`"):
em.update(output)
em = PrecisionRecallCurve(check_compute_fn=False)
em.update(output)
def _test_distrib_compute(device):
rank = idist.get_rank()
def _test(y_pred, y, batch_size, metric_device):
metric_device = torch.device(metric_device)
prc = PrecisionRecallCurve(device=metric_device)
prc.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
prc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
prc.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y = y.cpu().numpy()
np_y_pred = y_pred.cpu().numpy()
res = prc.compute()
assert isinstance(res, Tuple)
assert precision_recall_curve(np_y, np_y_pred)[0] == pytest.approx(res[0].cpu().numpy())
assert precision_recall_curve(np_y, np_y_pred)[1] == pytest.approx(res[1].cpu().numpy())
assert precision_recall_curve(np_y, np_y_pred)[2] == pytest.approx(res[2].cpu().numpy())
def get_test_cases():
test_cases = [
# Binary input data of shape (N,) or (N, 1)
(torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10,)), 1),
(torch.randint(0, 2, size=(10, 1)), torch.randint(0, 2, size=(10, 1)), 1),
# updated batches
(torch.randint(0, 2, size=(50,)), torch.randint(0, 2, size=(50,)), 16),
(torch.randint(0, 2, size=(50, 1)), torch.randint(0, 2, size=(50, 1)), 16),
]
return test_cases
for i in range(3):
torch.manual_seed(12 + rank + i)
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
y_pred = y_pred.to(device)
y = y.to(device)
_test(y_pred, y, batch_size, "cpu")
if device.type != "xla":
_test(y_pred, y, batch_size, idist.device())
def _test_distrib_integration(device):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 151
torch.manual_seed(12 + rank)
y_true = torch.randint(0, 2, (n_iters * batch_size,)).to(device)
y_preds = torch.randint(0, 2, (n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
prc = PrecisionRecallCurve(device=metric_device)
prc.attach(engine, "prc")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_true = idist.all_gather(y_true)
y_preds = idist.all_gather(y_preds)
assert "prc" in engine.state.metrics
precision, recall, thresholds = engine.state.metrics["prc"]
np_y_true = y_true.cpu().numpy().ravel()
np_y_preds = y_preds.cpu().numpy().ravel()
sk_precision, sk_recall, sk_thresholds = precision_recall_curve(np_y_true, np_y_preds)
assert precision.shape == sk_precision.shape
assert recall.shape == sk_recall.shape
assert thresholds.shape == sk_thresholds.shape
assert pytest.approx(precision.cpu().numpy()) == sk_precision
assert pytest.approx(recall.cpu().numpy()) == sk_recall
assert pytest.approx(thresholds.cpu().numpy()) == sk_thresholds
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for _ in range(2):
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
from sklearn.metrics import DistanceMetric
import ignite.distributed as idist
from ignite.contrib.metrics.regression import ManhattanDistance
from ignite.engine import Engine
def test_wrong_input_shapes():
m = ManhattanDistance()
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4)))
def test_manhattan_distance():
a = np.random.randn(4)
b = np.random.randn(4)
c = np.random.randn(4)
d = np.random.randn(4)
ground_truth = np.random.randn(4)
m = ManhattanDistance()
manhattan = DistanceMetric.get_metric("manhattan")
m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))
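    # Manhattan (L1) distance: running sum of |y - y_pred| accumulated over all updates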
np_sum = np.abs(ground_truth - a).sum()
assert m.compute() == pytest.approx(np_sum)
assert manhattan.pairwise([a, ground_truth])[0][1] == pytest.approx(np_sum)
m.update((torch.from_numpy(b), torch.from_numpy(ground_truth)))
np_sum += np.abs(ground_truth - b).sum()
assert m.compute() == pytest.approx(np_sum)
v1 = np.hstack([a, b])
v2 = np.hstack([ground_truth, ground_truth])
assert manhattan.pairwise([v1, v2])[0][1] == pytest.approx(np_sum)
m.update((torch.from_numpy(c), torch.from_numpy(ground_truth)))
np_sum += np.abs(ground_truth - c).sum()
assert m.compute() == pytest.approx(np_sum)
v1 = np.hstack([v1, c])
v2 = np.hstack([v2, ground_truth])
assert manhattan.pairwise([v1, v2])[0][1] == pytest.approx(np_sum)
m.update((torch.from_numpy(d), torch.from_numpy(ground_truth)))
np_sum += np.abs(ground_truth - d).sum()
assert m.compute() == pytest.approx(np_sum)
v1 = np.hstack([v1, d])
v2 = np.hstack([v2, ground_truth])
assert manhattan.pairwise([v1, v2])[0][1] == pytest.approx(np_sum)
def test_integration():
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = ManhattanDistance()
m.attach(engine, "md")
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
manhattan = DistanceMetric.get_metric("manhattan")
data = list(range(y_pred.shape[0] // batch_size))
md = engine.run(data, max_epochs=1).metrics["md"]
assert manhattan.pairwise([np_y_pred, np_y])[0][1] == pytest.approx(md)
def get_test_cases():
test_cases = [
(torch.rand(size=(100,)), torch.rand(size=(100,)), 10),
(torch.rand(size=(100, 1)), torch.rand(size=(100, 1)), 20),
]
return test_cases
for _ in range(5):
        # check multiple random inputs as random exact occurrences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def test_error_is_not_nan():
m = ManhattanDistance()
m.update((torch.zeros(4), torch.zeros(4)))
assert not (torch.isnan(m._sum_of_errors).any() or torch.isinf(m._sum_of_errors).any()), m._sum_of_errors
def _test_distrib_compute(device):
rank = idist.get_rank()
manhattan = DistanceMetric.get_metric("manhattan")
def _test(metric_device):
metric_device = torch.device(metric_device)
m = ManhattanDistance(device=metric_device)
y_pred = torch.randint(0, 10, size=(10,), device=device).float()
y = torch.randint(0, 10, size=(10,), device=device).float()
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = y_pred.cpu().numpy()
np_y = y.cpu().numpy()
res = m.compute()
assert manhattan.pairwise([np_y_pred, np_y])[0][1] == pytest.approx(res)
for i in range(3):
torch.manual_seed(10 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device):
rank = idist.get_rank()
manhattan = DistanceMetric.get_metric("manhattan")
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
y_true = torch.rand(size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(size=(n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
m = ManhattanDistance(device=metric_device)
m.attach(engine, "md")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "md" in engine.state.metrics
res = engine.state.metrics["md"]
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
np_y_true = y_true.cpu().numpy()
np_y_preds = y_preds.cpu().numpy()
assert pytest.approx(res) == manhattan.pairwise([np_y_preds, np_y_true])[0][1]
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MedianAbsolutePercentageError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = MedianAbsolutePercentageError()
with pytest.raises(
NotComputableError, match=r"EpochMetric must have at least one example before it can be computed"
):
m.compute()
def test_wrong_input_shapes():
m = MedianAbsolutePercentageError()
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
m.update((torch.rand(4, 1, 2), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Targets should be of shape"):
m.update((torch.rand(4, 1), torch.rand(4, 1, 2)))
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
m.update((torch.rand(4, 1, 2), torch.rand(4)))
with pytest.raises(ValueError, match=r"Targets should be of shape"):
m.update((torch.rand(4), torch.rand(4, 1, 2)))
def test_median_absolute_percentage_error():
# See https://github.com/torch/torch7/pull/182
# For even number of elements, PyTorch returns middle element
# NumPy returns average of middle elements
# Size of dataset will be odd for these tests
size = 51
np_y_pred = np.random.rand(size)
np_y = np.random.rand(size)
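    # MdAPE = 100 * median(|y - y_pred| / |y|)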
np_median_absolute_percentage_error = 100.0 * np.median(np.abs(np_y - np_y_pred) / np.abs(np_y))
m = MedianAbsolutePercentageError()
y_pred = torch.from_numpy(np_y_pred)
y = torch.from_numpy(np_y)
m.reset()
m.update((y_pred, y))
assert np_median_absolute_percentage_error == pytest.approx(m.compute())
def test_median_absolute_percentage_error_2():
np.random.seed(1)
size = 105
np_y_pred = np.random.rand(size, 1)
np_y = np.random.rand(size, 1)
np.random.shuffle(np_y)
np_median_absolute_percentage_error = 100.0 * np.median(np.abs(np_y - np_y_pred) / np.abs(np_y))
m = MedianAbsolutePercentageError()
y_pred = torch.from_numpy(np_y_pred)
y = torch.from_numpy(np_y)
m.reset()
batch_size = 16
n_iters = size // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
m.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
assert np_median_absolute_percentage_error == pytest.approx(m.compute())
def test_integration_median_absolute_percentage_error():
np.random.seed(1)
size = 105
np_y_pred = np.random.rand(size, 1)
np_y = np.random.rand(size, 1)
np.random.shuffle(np_y)
np_median_absolute_percentage_error = 100.0 * np.median(np.abs(np_y - np_y_pred) / np.abs(np_y))
batch_size = 15
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = MedianAbsolutePercentageError()
m.attach(engine, "median_absolute_percentage_error")
data = list(range(size // batch_size))
median_absolute_percentage_error = engine.run(data, max_epochs=1).metrics["median_absolute_percentage_error"]
assert np_median_absolute_percentage_error == pytest.approx(median_absolute_percentage_error)
def _test_distrib_compute(device):
def _test(metric_device):
metric_device = torch.device(metric_device)
m = MedianAbsolutePercentageError(device=metric_device)
size = 105
y_pred = torch.randint(1, 10, size=(size, 1), dtype=torch.double, device=device)
y = torch.randint(1, 10, size=(size, 1), dtype=torch.double, device=device)
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = y_pred.cpu().numpy().ravel()
np_y = y.cpu().numpy().ravel()
res = m.compute()
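        # reference value: 100 * median(|y - y_pred| / |y|) over the gathered data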
e = np.abs(np_y - np_y_pred) / np.abs(np_y)
np_res = 100.0 * np.median(e)
assert pytest.approx(res) == np_res
rank = idist.get_rank()
for i in range(3):
torch.manual_seed(10 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device):
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
size = 105
y_true = torch.rand(size=(n_iters * size,)).to(device)
y_preds = torch.rand(size=(n_iters * size,)).to(device)
def update(engine, i):
return (
y_preds[i * size : (i + 1) * size],
y_true[i * size : (i + 1) * size],
)
engine = Engine(update)
m = MedianAbsolutePercentageError(device=metric_device)
m.attach(engine, "mape")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "mape" in engine.state.metrics
res = engine.state.metrics["mape"]
np_y_true = y_true.cpu().numpy().ravel()
np_y_preds = y_preds.cpu().numpy().ravel()
e = np.abs(np_y_true - np_y_preds) / np.abs(np_y_true)
np_res = 100.0 * np.median(e)
assert pytest.approx(res) == np_res
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
rank = idist.get_rank()
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
from sklearn.metrics import DistanceMetric
import ignite.distributed as idist
from ignite.contrib.metrics.regression import CanberraMetric
from ignite.engine import Engine
def test_wrong_input_shapes():
m = CanberraMetric()
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4)))
def test_compute():
a = np.random.randn(4)
b = np.random.randn(4)
c = np.random.randn(4)
d = np.random.randn(4)
ground_truth = np.random.randn(4)
m = CanberraMetric()
canberra = DistanceMetric.get_metric("canberra")
m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))
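    # Canberra distance: running sum of |y - y_pred| / (|y_pred| + |y|)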
np_sum = (np.abs(ground_truth - a) / (np.abs(a) + np.abs(ground_truth))).sum()
assert m.compute() == pytest.approx(np_sum)
assert canberra.pairwise([a, ground_truth])[0][1] == pytest.approx(np_sum)
m.update((torch.from_numpy(b), torch.from_numpy(ground_truth)))
np_sum += ((np.abs(ground_truth - b)) / (np.abs(b) + np.abs(ground_truth))).sum()
assert m.compute() == pytest.approx(np_sum)
v1 = np.hstack([a, b])
v2 = np.hstack([ground_truth, ground_truth])
assert canberra.pairwise([v1, v2])[0][1] == pytest.approx(np_sum)
m.update((torch.from_numpy(c), torch.from_numpy(ground_truth)))
np_sum += ((np.abs(ground_truth - c)) / (np.abs(c) + np.abs(ground_truth))).sum()
assert m.compute() == pytest.approx(np_sum)
v1 = np.hstack([v1, c])
v2 = np.hstack([v2, ground_truth])
assert canberra.pairwise([v1, v2])[0][1] == pytest.approx(np_sum)
m.update((torch.from_numpy(d), torch.from_numpy(ground_truth)))
np_sum += (np.abs(ground_truth - d) / (np.abs(d) + np.abs(ground_truth))).sum()
assert m.compute() == pytest.approx(np_sum)
v1 = np.hstack([v1, d])
v2 = np.hstack([v2, ground_truth])
assert canberra.pairwise([v1, v2])[0][1] == pytest.approx(np_sum)
def test_integration():
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = CanberraMetric()
m.attach(engine, "cm")
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
canberra = DistanceMetric.get_metric("canberra")
data = list(range(y_pred.shape[0] // batch_size))
cm = engine.run(data, max_epochs=1).metrics["cm"]
assert canberra.pairwise([np_y_pred, np_y])[0][1] == pytest.approx(cm)
def get_test_cases():
test_cases = [
(torch.rand(size=(100,)), torch.rand(size=(100,)), 10),
(torch.rand(size=(100, 1)), torch.rand(size=(100, 1)), 20),
]
return test_cases
for _ in range(5):
        # check multiple random inputs as random exact occurrences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def test_error_is_not_nan():
m = CanberraMetric()
m.update((torch.zeros(4), torch.zeros(4)))
assert not (torch.isnan(m._sum_of_errors).any() or torch.isinf(m._sum_of_errors).any()), m._sum_of_errors
def _test_distrib_compute(device):
rank = idist.get_rank()
canberra = DistanceMetric.get_metric("canberra")
def _test(metric_device):
metric_device = torch.device(metric_device)
m = CanberraMetric(device=metric_device)
y_pred = torch.randint(0, 10, size=(10,), device=device).float()
y = torch.randint(0, 10, size=(10,), device=device).float()
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = y_pred.cpu().numpy()
np_y = y.cpu().numpy()
res = m.compute()
assert canberra.pairwise([np_y_pred, np_y])[0][1] == pytest.approx(res)
for i in range(3):
torch.manual_seed(10 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device):
rank = idist.get_rank()
canberra = DistanceMetric.get_metric("canberra")
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
y_true = torch.rand(size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(size=(n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
m = CanberraMetric(device=metric_device)
m.attach(engine, "cm")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "cm" in engine.state.metrics
res = engine.state.metrics["cm"]
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
np_y_true = y_true.cpu().numpy()
np_y_preds = y_preds.cpu().numpy()
assert pytest.approx(res) == canberra.pairwise([np_y_preds, np_y_true])[0][1]
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import WaveHedgesDistance
from ignite.engine import Engine
def test_wrong_input_shapes():
m = WaveHedgesDistance()
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4)))
def test_compute():
a = np.random.randn(4)
b = np.random.randn(4)
c = np.random.randn(4)
d = np.random.randn(4)
ground_truth = np.random.randn(4)
m = WaveHedgesDistance()
m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))
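    # Wave Hedges distance: running sum of |y - y_pred| / max(y_pred, y)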
np_sum = (np.abs(ground_truth - a) / np.maximum.reduce([a, ground_truth])).sum()
assert m.compute() == pytest.approx(np_sum)
m.update((torch.from_numpy(b), torch.from_numpy(ground_truth)))
np_sum += (np.abs(ground_truth - b) / np.maximum.reduce([b, ground_truth])).sum()
assert m.compute() == pytest.approx(np_sum)
m.update((torch.from_numpy(c), torch.from_numpy(ground_truth)))
np_sum += (np.abs(ground_truth - c) / np.maximum.reduce([c, ground_truth])).sum()
assert m.compute() == pytest.approx(np_sum)
m.update((torch.from_numpy(d), torch.from_numpy(ground_truth)))
np_sum += (np.abs(ground_truth - d) / np.maximum.reduce([d, ground_truth])).sum()
assert m.compute() == pytest.approx(np_sum)
def test_integration():
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = WaveHedgesDistance()
m.attach(engine, "whd")
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
data = list(range(y_pred.shape[0] // batch_size))
whd = engine.run(data, max_epochs=1).metrics["whd"]
np_sum = (np.abs(np_y - np_y_pred) / np.maximum.reduce([np_y_pred, np_y])).sum()
assert np_sum == pytest.approx(whd)
def get_test_cases():
test_cases = [
(torch.rand(size=(100,)), torch.rand(size=(100,)), 10),
(torch.rand(size=(100, 1)), torch.rand(size=(100, 1)), 20),
]
return test_cases
for _ in range(5):
        # check multiple random inputs as random exact occurrences are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def _test_distrib_compute(device):
rank = idist.get_rank()
def _test(metric_device):
metric_device = torch.device(metric_device)
m = WaveHedgesDistance(device=metric_device)
y_pred = torch.randint(0, 10, size=(10,), device=device).float()
y = torch.randint(0, 10, size=(10,), device=device).float()
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = y_pred.cpu().numpy()
np_y = y.cpu().numpy()
res = m.compute()
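        # tiny epsilon in the denominator guards against division by zero when both y_pred and y are zero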
np_sum = (np.abs(np_y - np_y_pred) / (np.maximum.reduce([np_y_pred, np_y]) + 1e-30)).sum()
assert np_sum == pytest.approx(res)
for i in range(3):
torch.manual_seed(10 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
y_true = torch.rand(size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(size=(n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
m = WaveHedgesDistance(device=metric_device)
m.attach(engine, "whm")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "whm" in engine.state.metrics
res = engine.state.metrics["whm"]
np_y_true = y_true.cpu().numpy()
np_y_preds = y_preds.cpu().numpy()
np_sum = (np.abs(np_y_true - np_y_preds) / (np.maximum.reduce([np_y_preds, np_y_true]) + 1e-30)).sum()
assert pytest.approx(res) == np_sum
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import GeometricMeanAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = GeometricMeanAbsoluteError()
with pytest.raises(
NotComputableError, match=r"GeometricMeanAbsoluteError must have at least one example before it can be computed"
):
m.compute()
def test_wrong_input_shapes():
m = GeometricMeanAbsoluteError()
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4)))
def test_compute():
a = np.random.randn(4)
b = np.random.randn(4)
c = np.random.randn(4)
d = np.random.randn(4)
ground_truth = np.random.randn(4)
np_prod = 1.0
m = GeometricMeanAbsoluteError()
m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))
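    # GMAE is the geometric mean of absolute errors: (prod |e_i|) ** (1 / n)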
errors = np.abs(ground_truth - a)
np_prod = np.multiply.reduce(errors) * np_prod
np_len = len(a)
np_ans = np.power(np_prod, 1.0 / np_len)
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(b), torch.from_numpy(ground_truth)))
errors = np.abs(ground_truth - b)
np_prod = np.multiply.reduce(errors) * np_prod
np_len += len(b)
np_ans = np.power(np_prod, 1.0 / np_len)
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(c), torch.from_numpy(ground_truth)))
errors = np.abs(ground_truth - c)
np_prod = np.multiply.reduce(errors) * np_prod
np_len += len(c)
np_ans = np.power(np_prod, 1.0 / np_len)
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(d), torch.from_numpy(ground_truth)))
errors = np.abs(ground_truth - d)
np_prod = np.multiply.reduce(errors) * np_prod
np_len += len(d)
np_ans = np.power(np_prod, 1.0 / np_len)
assert m.compute() == pytest.approx(np_ans)
def test_integration():
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = GeometricMeanAbsoluteError()
m.attach(engine, "gmae")
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
data = list(range(y_pred.shape[0] // batch_size))
gmae = engine.run(data, max_epochs=1).metrics["gmae"]
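        # equivalent log-space form, exp(mean(log |e_i|)), avoids overflow/underflow of the raw product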
sum_errors = (np.log(np.abs(np_y - np_y_pred))).sum()
np_len = len(y_pred)
np_ans = np.exp(sum_errors / np_len)
assert np_ans == pytest.approx(gmae)
def get_test_cases():
test_cases = [
(torch.rand(size=(100,)), torch.rand(size=(100,)), 10),
(torch.rand(size=(100, 1)), torch.rand(size=(100, 1)), 20),
]
return test_cases
for i in range(5):
        # check multiple random inputs as random exact occurrences are rare
torch.manual_seed(12 + i)
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def _test_distrib_compute(device):
rank = idist.get_rank()
def _test(metric_device):
metric_device = torch.device(metric_device)
m = GeometricMeanAbsoluteError(device=metric_device)
torch.manual_seed(10 + rank)
y_pred = torch.randint(0, 10, size=(10,), device=device).float()
y = torch.randint(0, 10, size=(10,), device=device).float()
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = y_pred.cpu().numpy()
np_y = y.cpu().numpy()
res = m.compute()
sum_errors = (np.log(np.abs(np_y - np_y_pred))).sum()
np_len = len(y_pred)
np_ans = np.exp(sum_errors / np_len)
assert np_ans == pytest.approx(res)
for _ in range(3):
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration(device):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
y_true = torch.rand(size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(size=(n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
m = GeometricMeanAbsoluteError(device=metric_device)
m.attach(engine, "gmae")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "gmae" in engine.state.metrics
res = engine.state.metrics["gmae"]
np_y_true = y_true.cpu().numpy()
np_y_preds = y_preds.cpu().numpy()
sum_errors = (np.log(np.abs(np_y_true - np_y_preds))).sum()
np_len = len(y_preds)
np_ans = np.exp(sum_errors / np_len)
assert pytest.approx(res) == np_ans
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(11 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
    device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression import MeanNormalizedBias
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = MeanNormalizedBias()
with pytest.raises(
NotComputableError, match=r"MeanNormalizedBias must have at least one example before it can be computed"
):
m.compute()
def test_zero_gt():
a = np.random.randn(4)
ground_truth = np.zeros(4)
m = MeanNormalizedBias()
with pytest.raises(NotComputableError, match=r"The ground truth has 0."):
m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))
def test_wrong_input_shapes():
m = MeanNormalizedBias()
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4)))
def test_mean_error():
a = np.random.randn(4)
b = np.random.randn(4)
c = np.random.randn(4)
d = np.random.randn(4)
ground_truth = np.random.randn(4)
m = MeanNormalizedBias()
m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))
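    # mean normalized bias: mean((y - y_pred) / y)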
np_sum = ((ground_truth - a) / ground_truth).sum()
np_len = len(a)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(b), torch.from_numpy(ground_truth)))
np_sum += ((ground_truth - b) / ground_truth).sum()
np_len += len(b)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(c), torch.from_numpy(ground_truth)))
np_sum += ((ground_truth - c) / ground_truth).sum()
np_len += len(c)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(d), torch.from_numpy(ground_truth)))
np_sum += ((ground_truth - d) / ground_truth).sum()
np_len += len(d)
np_ans = np_sum / np_len
assert m.compute() == pytest.approx(np_ans)
def test_integration():
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
m = MeanNormalizedBias()
m.attach(engine, "mnb")
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
data = list(range(y_pred.shape[0] // batch_size))
mnb = engine.run(data, max_epochs=1).metrics["mnb"]
np_sum = ((np_y - np_y_pred) / np_y).sum()
np_len = len(np_y_pred)
np_ans = np_sum / np_len
assert np_ans == pytest.approx(mnb)
def get_test_cases():
test_cases = [
(torch.rand(size=(100,)), torch.rand(size=(100,)), 10),
(torch.rand(size=(100, 1)), torch.rand(size=(100, 1)), 20),
]
return test_cases
for _ in range(5):
        # check multiple random inputs, as exact random matches are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
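# Distributed check: each rank updates with its own tensors; the computed value
# must match the numpy reference over the all-gathered predictions and targets.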
def _test_distrib_compute(device):
rank = idist.get_rank()
def _test(metric_device):
metric_device = torch.device(metric_device)
m = MeanNormalizedBias(device=metric_device)
y_pred = torch.randint(1, 11, size=(10,), device=device).float()
y = torch.randint(1, 11, size=(10,), device=device).float()
m.update((y_pred, y))
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = y_pred.cpu().numpy()
np_y = y.cpu().numpy()
res = m.compute()
np_sum = ((np_y - np_y_pred) / np_y).sum()
np_len = len(np_y_pred)
np_ans = np_sum / np_len
assert np_ans == pytest.approx(res)
for i in range(3):
torch.manual_seed(10 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
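# Distributed integration: the metric runs inside an Engine on every rank and is
# compared to the numpy reference over the gathered y_preds / y_true.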
def _test_distrib_integration(device):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
y_true = torch.rand(size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(size=(n_iters * batch_size,)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
m = MeanNormalizedBias(device=metric_device)
m.attach(engine, "mnb")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "mnb" in engine.state.metrics
res = engine.state.metrics["mnb"]
np_y_true = y_true.cpu().numpy()
np_y_preds = y_preds.cpu().numpy()
np_sum = ((np_y_true - np_y_preds) / np_y_true).sum()
np_len = len(np_y_preds)
np_ans = np_sum / np_len
assert pytest.approx(res) == np_ans
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for i in range(2):
torch.manual_seed(12 + rank + i)
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|