python_code
stringlengths 0
229k
|
---|
import math
from typing import Any, Callable, Sequence, Tuple, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
from ignite.metrics.nlp.utils import modified_precision
__all__ = ["Bleu"]
def _closest_ref_length(references: Sequence[Sequence[Any]], hyp_len: int) -> int:
ref_lens = (len(reference) for reference in references)
closest_ref_len = min(ref_lens, key=lambda ref_len: (abs(ref_len - hyp_len), ref_len))
return closest_ref_len
class _Smoother:
"""
Smoothing helper
http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf
"""
def __init__(self, method: str):
valid = ["no_smooth", "smooth1", "nltk_smooth2", "smooth2"]
if method not in valid:
raise ValueError(f"Smooth is not valid (expected: {valid}, got: {method})")
self.smooth = method
def __call__(self, numerators: torch.Tensor, denominators: torch.Tensor) -> Sequence[float]:
method = getattr(self, self.smooth)
return method(numerators, denominators)
@staticmethod
def smooth1(numerators: torch.Tensor, denominators: torch.Tensor) -> Sequence[float]:
epsilon = 0.1
denominators_ = [max(1, d.item()) for d in denominators]
return [n.item() / d if n != 0 else epsilon / d for n, d in zip(numerators, denominators_)]
@staticmethod
def nltk_smooth2(numerators: torch.Tensor, denominators: torch.Tensor) -> Sequence[float]:
denominators_ = torch.tensor([max(1, d.item()) for d in denominators])
return _Smoother._smooth2(numerators, denominators_)
@staticmethod
def smooth2(numerators: torch.Tensor, denominators: torch.Tensor) -> Sequence[float]:
return _Smoother._smooth2(numerators, denominators)
@staticmethod
def _smooth2(numerators: torch.Tensor, denominators: torch.Tensor) -> Sequence[float]:
return [
(n.item() + 1) / (d.item() + 1) if i != 0 else n.item() / d.item()
for i, (n, d) in enumerate(zip(numerators, denominators))
]
@staticmethod
def no_smooth(numerators: torch.Tensor, denominators: torch.Tensor) -> Sequence[float]:
denominators_ = [max(1, d) for d in denominators]
return [n.item() / d for n, d in zip(numerators, denominators_)]
class Bleu(Metric):
r"""Calculates the `BLEU score <https://en.wikipedia.org/wiki/BLEU>`_.
.. math::
\text{BLEU} = b_{p} \cdot \exp \left( \sum_{n=1}^{N} w_{n} \: \log p_{n} \right)
where :math:`N` is the order of n-grams, :math:`b_{p}` is a sentence brevety penalty, :math:`w_{n}` are
positive weights summing to one and :math:`p_{n}` are modified n-gram precisions.
More details can be found in `Papineni et al. 2002`__.
__ https://www.aclweb.org/anthology/P02-1040
In addition, a review of smoothing techniques can be found in `Chen et al. 2014`__
__ https://aclanthology.org/W14-3346.pdf
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y_pred` (list(list(str))) - a list of hypotheses sentences.
- `y` (list(list(list(str))) - a corpus of lists of reference sentences w.r.t hypotheses.
Remark :
This implementation is inspired by nltk
Args:
ngram: order of n-grams.
smooth: enable smoothing. Valid are ``no_smooth``, ``smooth1``, ``nltk_smooth2`` or ``smooth2``.
Default: ``no_smooth``.
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
average: specifies which type of averaging to use (macro or micro)
for more details refer https://www.nltk.org/_modules/nltk/translate/bleu_score.html
Default: "macro"
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. testcode::
from ignite.metrics.nlp import Bleu
m = Bleu(ngram=4, smooth="smooth1")
y_pred = "the the the the the the the"
y = ["the cat is on the mat", "there is a cat on the mat"]
m.update(([y_pred.split()], [[_y.split() for _y in y]]))
print(m.compute())
.. testoutput::
tensor(0.0393, dtype=torch.float64)
.. versionadded:: 0.4.5
.. versionchanged:: 0.4.7
- ``update`` method has changed and now works on batch of inputs.
- added ``average`` option to handle micro and macro averaging modes.
"""
def __init__(
self,
ngram: int = 4,
smooth: str = "no_smooth",
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
average: str = "macro",
):
if ngram <= 0:
raise ValueError(f"ngram order must be greater than zero (got: {ngram})")
self.ngrams_order = ngram
self.weights = [1 / self.ngrams_order] * self.ngrams_order
self.smoother = _Smoother(method=smooth)
if average not in ["macro", "micro"]:
raise ValueError(f'Average must be either "macro" or "micro" (got: {average})')
self.average = average
if average == "micro":
self._state_dict_all_req_keys = ("p_numerators", "p_denominators", "hyp_length_sum", "ref_length_sum")
else:
self._state_dict_all_req_keys = ("_sum_of_bleu", "_num_sentences")
super(Bleu, self).__init__(output_transform=output_transform, device=device)
def _n_gram_counter(
self,
references: Sequence[Sequence[Sequence[Any]]],
candidates: Sequence[Sequence[Any]],
p_numerators: torch.Tensor,
p_denominators: torch.Tensor,
) -> Tuple[int, int]:
if len(references) != len(candidates):
raise ValueError(
f"nb of candidates should be equal to nb of reference lists ({len(candidates)} != "
f"{len(references)})"
)
hyp_lengths = 0
ref_lengths = 0
# Iterate through each hypothesis and their corresponding references.
for refs, hyp in zip(references, candidates):
# For each order of ngram, calculate the numerator and
# denominator for the corpus-level modified precision.
for i in range(1, self.ngrams_order + 1):
numerator, denominator = modified_precision(refs, hyp, i)
p_numerators[i] += numerator
p_denominators[i] += denominator
# Calculate the hypothesis lengths
hyp_lengths += len(hyp)
# Calculate the closest reference lengths.
ref_lengths += _closest_ref_length(refs, len(hyp))
return hyp_lengths, ref_lengths
def _brevity_penalty_smoothing(
self, p_numerators: torch.Tensor, p_denominators: torch.Tensor, hyp_length_sum: int, ref_length_sum: int
) -> float:
# Returns 0 if there's no matching n-grams
# We only need to check for p_numerators[1] == 0, since if there's
# no unigrams, there won't be any higher order ngrams.
if p_numerators[1] == 0:
return 0
# If no smoother, returns 0 if there's at least one a not matching n-grams]
if self.smoother.smooth == "no_smooth" and min(p_numerators[1:]).item() == 0:
return 0
# Calculate corpus-level brevity penalty.
if hyp_length_sum < ref_length_sum:
bp = math.exp(1 - ref_length_sum / hyp_length_sum) if hyp_length_sum > 0 else 0.0
else:
bp = 1.0
# Smoothing
p_n = self.smoother(p_numerators[1:], p_denominators[1:])
# Compute the geometric mean
s = [w_i * math.log(p_i) for w_i, p_i in zip(self.weights, p_n)]
gm = bp * math.exp(math.fsum(s))
return gm
def _sentence_bleu(self, references: Sequence[Sequence[Any]], candidates: Sequence[Any]) -> float:
return self._corpus_bleu([references], [candidates])
def _corpus_bleu(self, references: Sequence[Sequence[Sequence[Any]]], candidates: Sequence[Sequence[Any]]) -> float:
p_numerators: torch.Tensor = torch.zeros(self.ngrams_order + 1)
p_denominators: torch.Tensor = torch.zeros(self.ngrams_order + 1)
hyp_length_sum, ref_length_sum = self._n_gram_counter(
references=references, candidates=candidates, p_numerators=p_numerators, p_denominators=p_denominators
)
bleu_score = self._brevity_penalty_smoothing(
p_numerators=p_numerators,
p_denominators=p_denominators,
hyp_length_sum=hyp_length_sum,
ref_length_sum=ref_length_sum,
)
return bleu_score
@reinit__is_reduced
def reset(self) -> None:
if self.average == "macro":
self._sum_of_bleu = torch.tensor(0.0, dtype=torch.double, device=self._device)
self._num_sentences = 0
if self.average == "micro":
self.p_numerators = torch.zeros(self.ngrams_order + 1)
self.p_denominators = torch.zeros(self.ngrams_order + 1)
self.hyp_length_sum = 0
self.ref_length_sum = 0
@reinit__is_reduced
def update(self, output: Tuple[Sequence[Sequence[Any]], Sequence[Sequence[Sequence[Any]]]]) -> None:
y_pred, y = output
if self.average == "macro":
for refs, hyp in zip(y, y_pred):
self._sum_of_bleu += self._sentence_bleu(references=refs, candidates=hyp)
self._num_sentences += 1
elif self.average == "micro":
hyp_lengths, ref_lengths = self._n_gram_counter(
references=y, candidates=y_pred, p_numerators=self.p_numerators, p_denominators=self.p_denominators
)
self.hyp_length_sum += hyp_lengths
self.ref_length_sum += ref_lengths
@sync_all_reduce("_sum_of_bleu", "_num_sentences")
def _compute_macro(self) -> torch.Tensor:
if self._num_sentences == 0:
raise NotComputableError("Bleu must have at least one example before it can be computed.")
return self._sum_of_bleu / self._num_sentences
@sync_all_reduce("p_numerators", "p_denominators", "hyp_length_sum", "ref_length_sum")
def _compute_micro(self) -> float:
bleu_score = self._brevity_penalty_smoothing(
p_numerators=self.p_numerators,
p_denominators=self.p_denominators,
hyp_length_sum=self.hyp_length_sum,
ref_length_sum=self.ref_length_sum,
)
return bleu_score
def compute(self) -> None:
if self.average == "macro":
return self._compute_macro()
elif self.average == "micro":
return self._compute_micro()
|
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from typing import Any, Callable, List, Mapping, Optional, Sequence, Tuple, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics import Metric
# These decorators helps with distributed settings
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
from ignite.metrics.nlp.utils import lcs, ngrams
__all__ = ["Rouge", "RougeN", "RougeL"]
class Score(namedtuple("Score", ["match", "candidate", "reference"])):
r"""
Computes precision and recall for given matches, candidate and reference lengths.
"""
def precision(self) -> float:
"""
Calculates precision.
"""
return self.match / self.candidate if self.candidate > 0 else 0
def recall(self) -> float:
"""
Calculates recall.
"""
return self.match / self.reference if self.reference > 0 else 0
def compute_ngram_scores(candidate: Sequence[Any], reference: Sequence[Any], n: int = 4) -> Score:
"""
Compute the score based on ngram co-occurence of sequences of items
Args:
candidate: candidate sequence of items
reference: reference sequence of items
n: ngram order
Returns:
The score containing the number of ngram co-occurences
.. versionadded:: 0.4.5
"""
# ngrams of the candidate
candidate_counter = ngrams(candidate, n)
# ngrams of the references
reference_counter = ngrams(reference, n)
# ngram co-occurences in the candidate and the references
match_counters = candidate_counter & reference_counter
# the score is defined using Fraction
return Score(
match=sum(match_counters.values()),
candidate=sum(candidate_counter.values()),
reference=sum(reference_counter.values()),
)
def compute_lcs_scores(candidate: Sequence[Any], reference: Sequence[Any]) -> Score:
"""
Compute the score based on longest common subsequence of sequences of items
Args:
candidate: candidate sequence of items
reference: reference sequence of items
Returns:
The score containing the length of longest common subsequence
.. versionadded:: 0.4.5
"""
# lcs of candidate and reference
match = lcs(candidate, reference)
# the score is defined using Fraction
return Score(match=match, candidate=len(candidate), reference=len(reference))
class MultiRefReducer(metaclass=ABCMeta):
r"""
Reducer interface for multi-reference
"""
@abstractmethod
def __call__(self, scores: Sequence[Score]) -> Score:
pass
class MultiRefAverageReducer(MultiRefReducer):
r"""
Reducer for averaging the scores
"""
def __call__(self, scores: Sequence[Score]) -> Score:
match = sum([score.match for score in scores])
candidate = sum([score.candidate for score in scores])
reference = sum([score.reference for score in scores])
return Score(match=match, candidate=candidate, reference=reference)
class MultiRefBestReducer(MultiRefReducer):
r"""
Reducer for selecting the best score
"""
def __call__(self, scores: Sequence[Score]) -> Score:
return max(scores, key=lambda x: x.recall())
class _BaseRouge(Metric):
r"""
Rouge interface for Rouge-L and Rouge-N
"""
_state_dict_all_req_keys = ("_recall", "_precision", "_fmeasure", "_num_examples")
def __init__(
self,
multiref: str = "average",
alpha: float = 0,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
) -> None:
super(_BaseRouge, self).__init__(output_transform=output_transform, device=device)
self._alpha = alpha
if not 0 <= self._alpha <= 1:
raise ValueError(f"alpha must be in interval [0, 1] (got : {self._alpha})")
self._multiref = multiref
valid_multiref = ["best", "average"]
if self._multiref not in valid_multiref:
raise ValueError(f"multiref : valid values are {valid_multiref} (got : {self._multiref})")
self._mutliref_reducer = self._get_multiref_reducer()
def _get_multiref_reducer(self) -> MultiRefReducer:
if self._multiref == "average":
return MultiRefAverageReducer()
return MultiRefBestReducer()
@reinit__is_reduced
def reset(self) -> None:
self._recall = 0.0
self._precision = 0.0
self._fmeasure = 0.0
self._num_examples = 0
@reinit__is_reduced
def update(self, output: Tuple[Sequence[Sequence[Any]], Sequence[Sequence[Sequence[Any]]]]) -> None:
candidates, references = output
for _candidate, _reference in zip(candidates, references):
multiref_scores = [self._compute_score(candidate=_candidate, reference=_ref) for _ref in _reference]
score = self._mutliref_reducer(multiref_scores)
precision = score.precision()
recall = score.recall()
self._precision += precision
self._recall += recall
precision_recall = precision * recall
if precision_recall > 0: # avoid zero division
self._fmeasure += precision_recall / ((1 - self._alpha) * precision + self._alpha * recall)
self._num_examples += 1
@sync_all_reduce("_precision", "_recall", "_fmeasure", "_num_examples")
def compute(self) -> Mapping:
if self._num_examples == 0:
raise NotComputableError("Rouge metric must have at least one example before be computed")
return {
f"{self._metric_name()}-P": float(self._precision / self._num_examples),
f"{self._metric_name()}-R": float(self._recall / self._num_examples),
f"{self._metric_name()}-F": float(self._fmeasure / self._num_examples),
}
@abstractmethod
def _compute_score(self, candidate: Sequence[Any], reference: Sequence[Any]) -> Score:
pass
@abstractmethod
def _metric_name(self) -> str:
pass
class RougeN(_BaseRouge):
r"""Calculates the Rouge-N score.
The Rouge-N is based on the ngram co-occurences of candidates and references.
More details can be found in `Lin 2004`__.
__ https://www.aclweb.org/anthology/W04-1013.pdf
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y_pred` (list(list(str))) must be a sequence of tokens.
- `y` (list(list(list(str))) must be a list of sequence of tokens.
Args:
ngram: ngram order (default: 4).
multiref: reduces scores for multi references. Valid values are "best" and "average"
(default: "average").
alpha: controls the importance between recall and precision (alpha -> 0: recall is more important, alpha -> 1:
precision is more important)
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. testcode::
from ignite.metrics import RougeN
m = RougeN(ngram=2, multiref="best")
candidate = "the cat is not there".split()
references = [
"the cat is on the mat".split(),
"there is a cat on the mat".split()
]
m.update(([candidate], [references]))
print(m.compute())
.. testoutput::
{'Rouge-2-P': 0.5, 'Rouge-2-R': 0.4, 'Rouge-2-F': 0.4}
.. versionadded:: 0.4.5
"""
def __init__(
self,
ngram: int = 4,
multiref: str = "average",
alpha: float = 0,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
):
super(RougeN, self).__init__(multiref=multiref, alpha=alpha, output_transform=output_transform, device=device)
self._ngram = ngram
if self._ngram < 1:
raise ValueError(f"ngram order must be greater than zero (got : {self._ngram})")
def _compute_score(self, candidate: Sequence[Any], reference: Sequence[Any]) -> Score:
return compute_ngram_scores(candidate=candidate, reference=reference, n=self._ngram)
def _metric_name(self) -> str:
return f"Rouge-{self._ngram}"
class RougeL(_BaseRouge):
r"""Calculates the Rouge-L score.
The Rouge-L is based on the length of the longest common subsequence of candidates and references.
More details can be found in `Lin 2004`__.
__ https://www.aclweb.org/anthology/W04-1013.pdf
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y_pred` (list(list(str))) must be a sequence of tokens.
- `y` (list(list(list(str))) must be a list of sequence of tokens.
Args:
multiref: reduces scores for multi references. Valid values are "best" and "average" (default: "average").
alpha: controls the importance between recall and precision (alpha -> 0: recall is more important, alpha -> 1:
precision is more important)
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. testcode::
from ignite.metrics import RougeL
m = RougeL(multiref="best")
candidate = "the cat is not there".split()
references = [
"the cat is on the mat".split(),
"there is a cat on the mat".split()
]
m.update(([candidate], [references]))
print(m.compute())
.. testoutput::
{'Rouge-L-P': 0.6, 'Rouge-L-R': 0.5, 'Rouge-L-F': 0.5}
.. versionadded:: 0.4.5
"""
def __init__(
self,
multiref: str = "average",
alpha: float = 0,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
):
super(RougeL, self).__init__(multiref=multiref, alpha=alpha, output_transform=output_transform, device=device)
def _compute_score(self, candidate: Sequence[Any], reference: Sequence[Any]) -> Score:
return compute_lcs_scores(candidate=candidate, reference=reference)
def _metric_name(self) -> str:
return "Rouge-L"
class Rouge(Metric):
r"""Calculates the Rouge score for multiples Rouge-N and Rouge-L metrics.
More details can be found in `Lin 2004`__.
__ https://www.aclweb.org/anthology/W04-1013.pdf
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y_pred` (list(list(str))) must be a sequence of tokens.
- `y` (list(list(list(str))) must be a list of sequence of tokens.
Args:
variants: set of metrics computed. Valid inputs are "L" and integer 1 <= n <= 9.
multiref: reduces scores for multi references. Valid values are "best" and "average" (default: "average").
alpha: controls the importance between recall and precision (alpha -> 0: recall is more important, alpha -> 1:
precision is more important)
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. testcode::
from ignite.metrics import Rouge
m = Rouge(variants=["L", 2], multiref="best")
candidate = "the cat is not there".split()
references = [
"the cat is on the mat".split(),
"there is a cat on the mat".split()
]
m.update(([candidate], [references]))
print(m.compute())
.. testoutput::
{'Rouge-L-P': 0.6, 'Rouge-L-R': 0.5, 'Rouge-L-F': 0.5, 'Rouge-2-P': 0.5, 'Rouge-2-R': 0.4, 'Rouge-2-F': 0.4}
.. versionadded:: 0.4.5
.. versionchanged:: 0.4.7
``update`` method has changed and now works on batch of inputs.
"""
def __init__(
self,
variants: Optional[Sequence[Union[str, int]]] = None,
multiref: str = "average",
alpha: float = 0,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
):
if variants is None or len(variants) == 0:
variants = [1, 2, 4, "L"]
self.internal_metrics: List[_BaseRouge] = []
for m in variants:
variant: Optional[_BaseRouge] = None
if isinstance(m, str) and m == "L":
variant = RougeL(multiref=multiref, alpha=alpha, output_transform=output_transform, device=device)
elif isinstance(m, int):
variant = RougeN(
ngram=m, multiref=multiref, alpha=alpha, output_transform=output_transform, device=device
)
else:
raise ValueError("variant must be 'L' or integer greater to zero")
self.internal_metrics.append(variant)
super(Rouge, self).__init__(output_transform=output_transform, device=device)
@reinit__is_reduced
def reset(self) -> None:
for m in self.internal_metrics:
m.reset()
@reinit__is_reduced
def update(self, output: Tuple[Sequence[Sequence[Any]], Sequence[Sequence[Sequence[Any]]]]) -> None:
for m in self.internal_metrics:
m.update(output)
def compute(self) -> Mapping:
results = {}
for m in self.internal_metrics:
results.update(m.compute())
return results
|
from ignite.metrics.nlp.bleu import Bleu
from ignite.metrics.nlp.rouge import Rouge, RougeL, RougeN
__all__ = [
"Bleu",
"Rouge",
"RougeN",
"RougeL",
]
|
from collections import Counter
from typing import Any, Sequence, Tuple
__all__ = ["ngrams", "lcs", "modified_precision"]
def ngrams(sequence: Sequence[Any], n: int) -> Counter:
"""
Generate the ngrams from a sequence of items
Args:
sequence: sequence of items
n: n-gram order
Returns:
A counter of ngram objects
.. versionadded:: 0.4.5
"""
return Counter([tuple(sequence[i : i + n]) for i in range(len(sequence) - n + 1)])
def lcs(seq_a: Sequence[Any], seq_b: Sequence[Any]) -> int:
"""
Compute the length of the longest common subsequence in two sequence of items
https://en.wikipedia.org/wiki/Longest_common_subsequence_problem
Args:
seq_a: first sequence of items
seq_b: second sequence of items
Returns:
The length of the longest common subsequence
.. versionadded:: 0.4.5
"""
m = len(seq_a)
n = len(seq_b)
dp = [[0] * (n + 1) for _ in range(m + 1)]
for i in range(m + 1):
for j in range(n + 1):
if i == 0 or j == 0:
dp[i][j] = 0
elif seq_a[i - 1] == seq_b[j - 1]:
dp[i][j] = dp[i - 1][j - 1] + 1
else:
dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
return dp[m][n]
def modified_precision(references: Sequence[Sequence[Any]], candidate: Any, n: int) -> Tuple[int, int]:
"""
Compute the modified precision
.. math::
p_{n} = \frac{m_{n}}{l_{n}}
where m_{n} is the number of matched n-grams between translation T and its reference R, and l_{n} is the
total number of n-grams in the translation T.
More details can be found in `Papineni et al. 2002`__.
__ https://www.aclweb.org/anthology/P02-1040.pdf
Args:
references: list of references R
candidate: translation T
n: n-gram order
Returns:
The length of the longest common subsequence
.. versionadded:: 0.4.5
"""
# ngrams of the candidate
counts = ngrams(candidate, n)
# union of ngrams of references
max_counts: Counter = Counter()
for reference in references:
max_counts |= ngrams(reference, n)
# clipped count of the candidate and references
clipped_counts = counts & max_counts
return sum(clipped_counts.values()), sum(counts.values())
|
from ignite.distributed.auto import *
from ignite.distributed.comp_models import native, xla
from ignite.distributed.launcher import Parallel
from ignite.distributed.utils import *
|
import socket
from contextlib import contextmanager
from functools import wraps
from typing import Any, Callable, List, Mapping, Optional, Tuple, Union
import torch
from ignite.distributed.comp_models import (
_SerialModel,
has_hvd_support,
has_native_dist_support,
has_xla_support,
registered_computation_models,
)
from ignite.utils import setup_logger
__all__ = [
"backend",
"broadcast",
"device",
"available_backends",
"model_name",
"get_world_size",
"get_rank",
"get_local_rank",
"get_nproc_per_node",
"get_node_rank",
"get_nnodes",
"spawn",
"initialize",
"finalize",
"show_config",
"set_local_rank",
"all_reduce",
"all_gather",
"barrier",
"hostname",
"has_xla_support",
"has_native_dist_support",
"has_hvd_support",
"sync",
"registered_computation_models",
"one_rank_only",
"new_group",
"one_rank_first",
]
_model = _SerialModel()
_need_to_sync = True
def sync(temporary: bool = False) -> None:
"""Helper method to force this module to synchronize with current distributed context.
This method should be used when distributed context is manually created or destroyed.
Args:
temporary: If True, distributed model synchronization is done every call of ``idist.get_*`` methods.
This may have a negative performance impact.
"""
global _model
for comp_model_cls in registered_computation_models:
if comp_model_cls == _SerialModel:
continue
model = comp_model_cls.create_from_context()
if model is not None:
_set_model(model, temporary=temporary)
return
_model = _SerialModel()
def device() -> torch.device:
"""Returns current device according to current distributed configuration.
- `torch.device("cpu")` if no distributed configuration or torch native gloo distributed configuration
- `torch.device("cuda:local_rank")` if torch native nccl or horovod distributed configuration
- `torch.device("xla:index")` if XLA distributed configuration
Returns:
torch.device
.. versionchanged:: 0.4.2
Added Horovod distributed framework.
"""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
return _model.device()
def backend() -> Optional[str]:
"""Returns computation model's backend.
- `None` for no distributed configuration
- "nccl" or "gloo" or "mpi" for native torch distributed configuration
- "xla-tpu" for XLA distributed configuration
- "horovod" for Horovod distributed framework
Returns:
str or None
.. versionchanged:: 0.4.2
Added Horovod distributed framework.
"""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
return _model.backend()
def available_backends() -> Tuple[str, ...]:
"""Returns available backends."""
out: Tuple[str, ...] = ()
for m in registered_computation_models:
out += m.available_backends
return out
def model_name() -> str:
"""Returns distributed configuration name (given by ignite)
- `serial` for no distributed configuration
- `native-dist` for native torch distributed configuration
- `xla-dist` for XLA distributed configuration
- `horovod-dist` for Horovod distributed framework
.. versionchanged:: 0.4.2
`horovod-dist` will be returned for Horovod distributed framework.
"""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
return _model.name
def get_world_size() -> int:
"""Returns world size of current distributed configuration. Returns 1 if no distributed configuration."""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
return _model.get_world_size()
def get_rank() -> int:
"""Returns process rank within current distributed configuration. Returns 0 if no distributed configuration."""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
return _model.get_rank()
def get_local_rank() -> int:
"""Returns local process rank within current distributed configuration.
Returns 0 if no distributed configuration."""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
return _model.get_local_rank()
def get_nproc_per_node() -> int:
"""Returns number of processes (or tasks) per node within current distributed configuration.
Returns 1 if no distributed configuration.
"""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
return _model.get_nproc_per_node()
def get_nnodes() -> int:
"""Returns number of nodes within current distributed configuration.
Returns 1 if no distributed configuration.
"""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
return _model.get_nnodes()
def get_node_rank() -> int:
"""Returns node rank within current distributed configuration.
Returns 0 if no distributed configuration.
"""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
return _model.get_node_rank()
def hostname() -> str:
"""Returns host name for current process within current distributed configuration."""
return socket.gethostname()
def spawn(
backend: str,
fn: Callable,
args: Tuple,
kwargs_dict: Optional[Mapping] = None,
nproc_per_node: int = 1,
**kwargs: Any,
) -> None:
"""Spawns ``nproc_per_node`` processes that run ``fn`` with ``args``/``kwargs_dict`` and initialize
distributed configuration defined by ``backend``.
Args:
backend: backend to use: `nccl`, `gloo`, `xla-tpu`, `horovod`
fn: function to called as the entrypoint of the spawned process.
This function must be defined at the top level of a module so it can be pickled and spawned.
This is a requirement imposed by multiprocessing. The function is called as ``fn(i, *args, **kwargs_dict)``,
where `i` is the process index and args is the passed through tuple of arguments.
args: arguments passed to `fn`.
kwargs_dict: kwargs passed to `fn`.
nproc_per_node: number of processes to spawn on a single node. Default, 1.
kwargs: acceptable kwargs according to provided backend:
- | "nccl" or "gloo" : ``nnodes`` (default, 1), ``node_rank`` (default, 0), ``master_addr``
| (default, "127.0.0.1"), ``master_port`` (default, 2222), ``init_method`` (default, "env://"),
| `timeout` to `dist.init_process_group`_ function
| and kwargs for `mp.start_processes`_ function.
- | "xla-tpu" : ``nnodes`` (default, 1), ``node_rank`` (default, 0) and kwargs to `xmp.spawn`_ function.
- | "horovod": ``hosts`` (default, None) and other kwargs to `hvd_run`_ function. Arguments ``nnodes=1``
| and ``node_rank=0`` are tolerated and ignored, otherwise an exception is raised.
Examples:
1) Launch single node multi-GPU training using torch native distributed framework
.. code-block:: python
# >>> python main.py
# main.py
import ignite.distributed as idist
def train_fn(local_rank, a, b, c, d=12):
import torch.distributed as dist
assert dist.is_available() and dist.is_initialized()
assert dist.get_world_size() == 4
device = idist.device()
assert device == torch.device(f"cuda:{local_rank}")
idist.spawn("nccl", train_fn, args=(a, b, c), kwargs_dict={"d": 23}, nproc_per_node=4)
2) Launch multi-node multi-GPU training using torch native distributed framework
.. code-block:: python
# >>> (node 0): python main.py --node_rank=0 --nnodes=8 --master_addr=master --master_port=2222
# >>> (node 1): python main.py --node_rank=1 --nnodes=8 --master_addr=master --master_port=2222
# >>> ...
# >>> (node 7): python main.py --node_rank=7 --nnodes=8 --master_addr=master --master_port=2222
# main.py
import torch
import ignite.distributed as idist
def train_fn(local_rank, nnodes, nproc_per_node):
import torch.distributed as dist
assert dist.is_available() and dist.is_initialized()
assert dist.get_world_size() == nnodes * nproc_per_node
device = idist.device()
assert device == torch.device(f"cuda:{local_rank}")
idist.spawn(
"nccl",
train_fn,
args=(nnodes, nproc_per_node),
nproc_per_node=nproc_per_node,
nnodes=nnodes,
node_rank=node_rank,
master_addr=master_addr,
master_port=master_port
)
3) Launch single node multi-TPU training (for example on Google Colab) using PyTorch/XLA
.. code-block:: python
# >>> python main.py
# main.py
import ignite.distributed as idist
def train_fn(local_rank, a, b, c, d=12):
import torch_xla.core.xla_model as xm
assert xm.get_world_size() == 8
device = idist.device()
assert "xla" in device.type
idist.spawn("xla-tpu", train_fn, args=(a, b, c), kwargs_dict={"d": 23}, nproc_per_node=8)
.. _dist.init_process_group: https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group
.. _mp.start_processes: https://pytorch.org/docs/stable/multiprocessing.html#torch.multiprocessing.spawn
.. _xmp.spawn: https://pytorch.org/xla/release/1.6/index.html#torch_xla.distributed.xla_multiprocessing.spawn
.. _hvd_run: https://horovod.readthedocs.io/en/latest/api.html#module-horovod.run
.. versionchanged:: 0.4.2
``backend`` now accepts `horovod` distributed framework.
"""
_assert_backend(backend)
if kwargs_dict is None:
kwargs_dict = {}
for comp_model_cls in registered_computation_models:
if backend not in comp_model_cls.available_backends:
continue
comp_model_cls.spawn(
fn, args=args, kwargs_dict=kwargs_dict, nproc_per_node=nproc_per_node, backend=backend, **kwargs
)
def all_reduce(
tensor: Union[torch.Tensor, float], op: str = "SUM", group: Optional[Union[Any, List[int]]] = None
) -> Union[torch.Tensor, float]:
"""Helper method to perform all reduce operation.
Args:
tensor: tensor or number to collect across participating processes.
op: reduction operation, "SUM" by default. Possible values: "SUM", "PRODUCT", "MIN", "MAX", "AND", "OR".
Horovod backend supports only "SUM", "AVERAGE", "ADASUM", "MIN", "MAX", "PRODUCT".
group: list of integer or the process group for each backend. If None, the default process group will be used.
Returns:
torch.Tensor or number
.. versionchanged:: 0.4.11
added ``group``
"""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
if isinstance(group, list) and all(isinstance(item, int) for item in group):
group = _model.new_group(group)
return _model.all_reduce(tensor, op, group=group)
def all_gather(
tensor: Union[torch.Tensor, float, str], group: Optional[Union[Any, List[int]]] = None
) -> Union[torch.Tensor, float, List[float], List[str]]:
"""Helper method to perform all gather operation.
Args:
tensor: tensor or number or str to collect across participating processes. If tensor, it should have the
same shape across processes.
group: list of integer or the process group for each backend. If None, the default process group will be used.
Returns:
If input is a tensor, returns a torch.Tensor of shape ``(world_size * tensor.shape[0], tensor.shape[1], ...)``.
If input is a number, a torch.Tensor of shape ``(world_size, )`` is returned and finally a list of strings
is returned if input is a string. If current process does not belong to `group`, the very ``tensor`` is
returned.
.. versionchanged:: 0.4.11
added ``group``
"""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
if isinstance(group, list) and all(isinstance(item, int) for item in group):
group = _model.new_group(group)
return _model.all_gather(tensor, group=group)
def broadcast(
tensor: Union[torch.Tensor, float, str, None], src: int = 0, safe_mode: bool = False
) -> Union[torch.Tensor, float, str]:
"""Helper method to perform broadcast operation.
Args:
tensor: tensor or number or str to broadcast to participating processes.
Make sure to respect data type of torch tensor input for all processes, otherwise execution will crash.
Can use None for non-source data with ``safe_mode=True``.
src: source rank. Default, 0.
safe_mode: if True, non source input data can be ``None`` or anything (will be discarded), otherwise data type
of the input ``tensor`` should be respected for all processes. Please, keep in mind, this mode is working
only for dense tensors as source input if a tensor is provided. It also leads to some additional
collectives before the broadcast, making it slower than without using this mode. Default, False.
Returns:
torch.Tensor or string or number
Examples:
.. code-block:: python
y = None
if idist.get_rank() == 0:
t1 = torch.rand(4, 5, 6, device=idist.device())
s1 = "abc"
x = 12.3456
y = torch.rand(1, 2, 3, device=idist.device())
else:
t1 = torch.empty(4, 5, 6, device=idist.device())
s1 = ""
x = 0.0
# Broadcast tensor t1 from rank 0 to all processes
t1 = idist.broadcast(t1, src=0)
assert isinstance(t1, torch.Tensor)
# Broadcast string s1 from rank 0 to all processes
s1 = idist.broadcast(s1, src=0)
# >>> s1 = "abc"
# Broadcast float number x from rank 0 to all processes
x = idist.broadcast(x, src=0)
# >>> x = 12.3456
# Broadcast any of those types from rank 0,
# but other ranks do not define the placeholder
y = idist.broadcast(y, src=0, safe_mode=True)
assert isinstance(y, torch.Tensor)
.. versionadded:: 0.4.2
.. versionchanged:: 0.4.5
added ``safe_mode``
"""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
return _model.broadcast(tensor, src=src, safe_mode=safe_mode)
def barrier() -> None:
"""Helper method to synchronize all processes."""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
_model.barrier()
def new_group(ranks: List[int], **kwargs: Any) -> Any:
"""Helper method to make group for each backend from ranks.
Args:
ranks: subset of ranks to be grouped.
kwargs: acceptable kwargs according to provided backend:
- | "nccl" or "gloo" : ``backend (=None)``, ``pg_options (=None)``.
Examples:
Launch single node multi-GPU training with ``torchrun`` utility.
.. code-block:: python
import ignite.distributed as idist
ranks = [0, 1]
group = idist.new_group(ranks)
.. versionadded:: 0.4.11
"""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
return _model.new_group(ranks, **kwargs)
def set_local_rank(index: int) -> None:
"""Method to hint the local rank in case if torch native distributed context is created by user
without using :meth:`~ignite.distributed.utils.initialize` or :meth:`~ignite.distributed.utils.spawn`.
Args:
index: local rank or current process index
Examples:
User set up torch native distributed process group
.. code-block:: python
import ignite.distributed as idist
def run(local_rank, *args, **kwargs):
idist.set_local_rank(local_rank)
# ...
dist.init_process_group(**dist_info)
# ...
"""
from ignite.distributed.comp_models.base import ComputationModel
ComputationModel._ext_local_rank = index
def _set_model(model: Any, temporary: bool = False) -> None:
global _model, _need_to_sync
_model = model
_need_to_sync = True
if not isinstance(_model, _SerialModel) and not temporary:
_need_to_sync = False
def _assert_backend(backend: str) -> None:
backends = available_backends()
if backend not in backends:
raise ValueError(f"Backend should be one of '{backends}'")
def initialize(backend: str, **kwargs: Any) -> None:
"""Initializes distributed configuration according to provided ``backend``
Args:
backend: backend: `nccl`, `gloo`, `xla-tpu`, `horovod`.
kwargs: acceptable kwargs according to provided backend:
- | "nccl" or "gloo" : ``timeout(=timedelta(minutes=30))``, ``init_method(=None)``,
| ``rank(=None)``, ``world_size(=None)``.
| By default, ``init_method`` will be "env://". See more info about parameters: `torch_init`_.
- | "horovod" : comm(=None), more info: `hvd_init`_.
Examples:
Launch single node multi-GPU training with ``torchrun`` utility.
.. code-block:: python
# >>> torchrun --nproc_per_node=4 main.py
# main.py
import ignite.distributed as idist
def train_fn(local_rank, a, b, c):
import torch.distributed as dist
assert dist.is_available() and dist.is_initialized()
assert dist.get_world_size() == 4
device = idist.device()
assert device == torch.device(f"cuda:{local_rank}")
backend = "nccl" # or "gloo" or "horovod" or "xla-tpu"
idist.initialize(backend)
# or for torch native distributed on Windows:
# idist.initialize("nccl", init_method="file://tmp/shared")
local_rank = idist.get_local_rank()
train_fn(local_rank, a, b, c)
idist.finalize()
.. _torch_init: https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group
.. _hvd_init: https://horovod.readthedocs.io/en/latest/api.html#module-horovod.torch
.. versionchanged:: 0.4.2
``backend`` now accepts `horovod` distributed framework.
.. versionchanged:: 0.4.5
``kwargs`` now accepts ``init_method``, ``rank``, ``world_size`` for PyTorch native distributed backend.
"""
if not (has_xla_support or has_native_dist_support or has_hvd_support):
# nothing to do => serial model
# maybe warn about this
return
_assert_backend(backend)
for comp_model_cls in registered_computation_models:
if backend not in comp_model_cls.available_backends:
continue
_set_model(comp_model_cls(backend, **kwargs))
def finalize() -> None:
"""Finalizes distributed configuration. For example, in case of native pytorch distributed configuration,
it calls ``dist.destroy_process_group()``.
"""
_model.finalize()
_set_model(_SerialModel())
def show_config() -> None:
"""Helper method to display distributed configuration via ``logging``."""
# setup parallel logger
logger = setup_logger(__name__)
logger.info(f"distributed configuration: {model_name()}")
logger.info(f"backend: {backend()}")
logger.info(f"device: {device().type}")
logger.info(f"hostname: {hostname()}")
logger.info(f"world size: {get_world_size()}")
logger.info(f"rank: {get_rank()}")
logger.info(f"local rank: {get_local_rank()}")
logger.info(f"num processes per_node: {get_nproc_per_node()}")
logger.info(f"num nodes: {get_nnodes()}")
logger.info(f"node rank: {get_node_rank()}")
def one_rank_only(rank: int = 0, with_barrier: bool = False) -> Callable:
"""Decorator to filter handlers wrt a rank number
Args:
rank: rank number of the handler (default: 0).
with_barrier: synchronisation with a barrier (default: False).
Examples:
.. code-block:: python
engine = ...
@engine.on(...)
@one_rank_only() # means @one_rank_only(rank=0)
def some_handler(_):
...
@engine.on(...)
@one_rank_only(rank=1)
def some_handler(_):
...
"""
def _one_rank_only(func: Callable) -> Callable:
@wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Optional[Any]:
ret = None
if get_rank() == rank:
ret = func(*args, **kwargs)
if with_barrier:
barrier()
return ret
return wrapper
return _one_rank_only
@contextmanager
def one_rank_first(rank: int = 0, local: bool = False) -> Any:
"""Context manager that ensures a specific rank runs first before others in a distributed
environment.
Args:
rank: rank of the process that should execute the code
block inside the context manager first. Default, 0.
local: flag to specify local rank or global rank.
If True ``rank`` argument will define a local rank to run first.
Default, False
Examples:
.. code-block:: python
def download_dataset():
...
with idist.one_rank_first():
ds = download_dataset()
dp = ds[0]
.. versionadded:: 0.5.0
"""
current_rank = get_local_rank() if local else get_rank()
size = get_nproc_per_node() if local else get_world_size()
if rank >= size or rank < 0:
raise ValueError(f"rank should be between 0 and {size - 1}, but given {rank}")
if current_rank != rank:
barrier()
yield
if current_rank == rank:
barrier()
|
from typing import Any, Callable, Dict, Optional
from ignite.distributed import utils as idist
from ignite.utils import setup_logger
__all__ = [
"Parallel",
]
class Parallel:
"""Distributed launcher context manager to simplify distributed configuration setup for multiple backends:
- backends from native torch distributed configuration: "nccl", "gloo" and "mpi" (if available)
- XLA on TPUs via `pytorch/xla <https://github.com/pytorch/xla>`_ (if installed)
- using `Horovod distributed framework <https://horovod.readthedocs.io>`_ (if installed)
Namely, it can:
1) Spawn ``nproc_per_node`` child processes and initialize a processing group according to
provided ``backend`` (useful for standalone scripts).
2) Only initialize a processing group given the ``backend``
(useful with tools like `torchrun`_, `horovodrun`_, etc).
Args:
backend: backend to use: `nccl`, `gloo`, `xla-tpu`, `horovod`. If None, no distributed
configuration.
nproc_per_node: optional argument, number of processes per
node to specify. If not None, :meth:`~ignite.distributed.launcher.Parallel.run`
will spawn ``nproc_per_node`` processes that run input function with its arguments.
nnodes: optional argument, number of nodes participating in distributed configuration.
If not None, :meth:`~ignite.distributed.launcher.Parallel.run` will spawn ``nproc_per_node``
processes that run input function with its arguments. Total world size is `nproc_per_node * nnodes`.
This option is only supported by native torch distributed module. For other modules, please setup
``spawn_kwargs`` with backend specific arguments.
node_rank: optional argument, current machine index. Mandatory argument if ``nnodes`` is
specified and larger than one.
This option is only supported by native torch distributed module. For other modules, please setup
``spawn_kwargs`` with backend specific arguments.
master_addr: optional argument, master node TCP/IP address for torch native backends
(`nccl`, `gloo`). Mandatory argument if ``nnodes`` is specified and larger than one.
master_port: optional argument, master node port for torch native backends
(`nccl`, `gloo`). Mandatory argument if ``master_addr`` is specified.
init_method: optional argument to specify processing group initialization method for torch native
backends (`nccl`, `gloo`). Default, "env://".
See more info: `dist.init_process_group`_.
spawn_kwargs: kwargs to ``idist.spawn`` function.
Examples:
1) Single node or Multi-node, Multi-GPU training launched with `torchrun` or `horovodrun`_
tools
Single node option with 4 GPUs
.. code-block:: bash
torchrun --nproc_per_node=4 main.py
# or if installed horovod
horovodrun -np=4 python main.py
Multi-node option : 2 nodes with 8 GPUs each
.. code-block:: bash
## node 0
torchrun --nnodes=2 --node_rank=0 --master_addr=master --master_port=3344 \
--nproc_per_node=8 main.py
# or if installed horovod
horovodrun -np 16 -H hostname1:8,hostname2:8 python main.py
## node 1
torchrun --nnodes=2 --node_rank=1 --master_addr=master --master_port=3344 \
--nproc_per_node=8 main.py
User code is the same for both options:
.. code-block:: python
# main.py
import ignite.distributed as idist
def training(local_rank, config, **kwargs):
# ...
print(idist.get_rank(), ": run with config:", config, "- backend=", idist.backend())
# ...
backend = "nccl" # or "horovod" if package is installed
config = {"key": "value"}
with idist.Parallel(backend=backend) as parallel:
parallel.run(training, config, a=1, b=2)
2) Single node, Multi-GPU training launched with `python`
.. code-block:: bash
python main.py
.. code-block:: python
# main.py
import ignite.distributed as idist
def training(local_rank, config, **kwargs):
# ...
print(idist.get_rank(), ": run with config:", config, "- backend=", idist.backend())
# ...
backend = "nccl" # or "horovod" if package is installed
# no "init_method" was specified , "env://" will be used
with idist.Parallel(backend=backend, nproc_per_node=4) as parallel:
parallel.run(training, config, a=1, b=2)
Initializing the process using ``file://``
.. code-block:: python
with idist.Parallel(backend=backend, init_method='file:///d:/tmp/some_file', nproc_per_node=4) as parallel:
parallel.run(training, config, a=1, b=2)
Initializing the process using ``tcp://``
.. code-block:: python
with idist.Parallel(backend=backend, init_method='tcp://10.1.1.20:23456', nproc_per_node=4) as parallel:
parallel.run(training, config, a=1, b=2)
3) Single node, Multi-TPU training launched with `python`
.. code-block:: bash
python main.py
.. code-block:: python
# main.py
import ignite.distributed as idist
def training(local_rank, config, **kwargs):
# ...
print(idist.get_rank(), ": run with config:", config, "- backend=", idist.backend())
# ...
config = {"key": "value"}
with idist.Parallel(backend="xla-tpu", nproc_per_node=8) as parallel:
parallel.run(training, config, a=1, b=2)
4) Multi-node, Multi-GPU training launched with `python`. For example, 2 nodes with 8 GPUs:
Using torch native distributed framework:
.. code-block:: bash
# node 0
python main.py --node_rank=0
# node 1
python main.py --node_rank=1
.. code-block:: python
# main.py
import ignite.distributed as idist
def training(local_rank, config, **kwargs):
# ...
print(idist.get_rank(), ": run with config:", config, "- backend=", idist.backend())
# ...
dist_config = {
"nproc_per_node": 8,
"nnodes": 2,
"node_rank": args.node_rank,
"master_addr": "master",
"master_port": 15000
}
config = {"key": "value"}
with idist.Parallel(backend="nccl", **dist_config) as parallel:
parallel.run(training, config, a=1, b=2)
.. _torchrun: https://pytorch.org/docs/stable/elastic/run.html#launcher-api
.. _horovodrun: https://horovod.readthedocs.io/en/latest/api.html#module-horovod.run
.. _dist.init_process_group: https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group
.. versionchanged:: 0.4.2
``backend`` now accepts `horovod` distributed framework.
.. versionchanged:: 0.4.5
``init_method`` added.
"""
def __init__(
self,
backend: Optional[str] = None,
nproc_per_node: Optional[int] = None,
nnodes: Optional[int] = None,
node_rank: Optional[int] = None,
master_addr: Optional[str] = None,
master_port: Optional[int] = None,
init_method: Optional[str] = None,
**spawn_kwargs: Any,
) -> None:
if backend is not None:
if backend not in idist.available_backends():
raise ValueError(f"Unknown backend '{backend}'. Available backends: {idist.available_backends()}")
else:
arg_names = ["nproc_per_node", "nnodes", "node_rank", "master_addr", "master_port"]
arg_values = [nproc_per_node, nnodes, node_rank, master_addr, master_port]
for name, value in zip(arg_names, arg_values):
if value is not None:
raise ValueError(f"If backend is None, argument '{name}' should be also None, but given {value}")
self.backend = backend
self._spawn_params = None
self.init_method = init_method
if self.backend is not None:
if nproc_per_node is not None:
self._spawn_params = self._setup_spawn_params(
nproc_per_node, nnodes, node_rank, master_addr, master_port, init_method, **spawn_kwargs
)
# The logger will be setup after the idist.initialize() call
self._logger = None
@staticmethod
def _setup_spawn_params(
nproc_per_node: int,
nnodes: Optional[int] = None,
node_rank: Optional[int] = None,
master_addr: Optional[str] = None,
master_port: Optional[int] = None,
init_method: Optional[str] = None,
**spawn_kwargs: Any,
) -> Dict:
if nproc_per_node < 1:
raise ValueError(f"Argument nproc_per_node should positive, but given {nproc_per_node}")
if nnodes is None:
nnodes = 1
if nnodes < 1:
raise ValueError(f"Argument nnodes should positive, but given {nnodes}")
if node_rank is None:
if nnodes > 1:
raise ValueError("If number of nodes larger than one, arguments node_rank should be given")
node_rank = 0
if node_rank >= nnodes or node_rank < 0:
raise ValueError(f"Argument node_rank should be between 0 and {nnodes - 1}, but given {node_rank}")
if nnodes > 1 and (master_addr is None or master_port is None) and init_method is None:
raise ValueError(
"If number of nodes larger than one, arguments master_addr and master_port or init_method "
f"should be specified, but given master_addr={master_addr}, master_port={master_port} and "
f"init_method={init_method}."
)
params = {
"nproc_per_node": nproc_per_node,
"nnodes": nnodes,
"node_rank": node_rank,
"master_addr": master_addr,
"master_port": master_port,
"init_method": init_method,
}
params.update(spawn_kwargs)
return {k: v for k, v in params.items() if v is not None}
def run(self, func: Callable, *args: Any, **kwargs: Any) -> None:
"""Execute ``func`` with provided arguments in distributed context.
Args:
func: function to execute. First argument of the function should be `local_rank` - local process
index.
args: positional arguments of ``func`` (without `local_rank`).
kwargs: keyword arguments of ``func``.
Examples:
.. code-block:: python
def training(local_rank, config, **kwargs):
# ...
print(idist.get_rank(), ": run with config:", config, "- backend=", idist.backend())
# ...
config = {"key": "value"}
with idist.Parallel(backend=backend) as parallel:
parallel.run(training, config, a=1, b=2)
"""
if self._spawn_params is not None and self.backend is not None:
self._logger.info( # type: ignore[attr-defined]
f"Spawn function '{func}' in {self._spawn_params['nproc_per_node']} processes"
)
idist.spawn(self.backend, func, args=args, kwargs_dict=kwargs, **self._spawn_params)
else:
self._logger.info(f"- Run '{func}' in {idist.get_world_size()} processes") # type: ignore[attr-defined]
local_rank = idist.get_local_rank()
func(local_rank, *args, **kwargs)
self._logger.info("End of run") # type: ignore[attr-defined]
def __enter__(self) -> "Parallel":
if self.backend is not None and self._spawn_params is None:
idist.initialize(self.backend, init_method=self.init_method)
# The logger can be setup from now since idist.initialize() has been called (if needed)
self._logger = setup_logger(__name__ + "." + self.__class__.__name__) # type: ignore[assignment]
if self.backend is not None:
if self._spawn_params is None:
self._logger.info( # type: ignore[attr-defined]
f"Initialized processing group with backend: '{self.backend}'"
)
else:
self._logger.info( # type: ignore[attr-defined]
f"Initialized distributed launcher with backend: '{self.backend}'"
)
msg = "\n\t".join([f"{k}: {v}" for k, v in self._spawn_params.items() if v is not None])
self._logger.info(f"- Parameters to spawn processes: \n\t{msg}") # type: ignore[attr-defined]
return self
def __exit__(self, *args: Any, **kwargs: Any) -> None:
if (self.backend is not None) and self._spawn_params is None:
self._logger.info( # type: ignore[attr-defined]
f"Finalized processing group with backend: '{self.backend}'"
)
idist.finalize()
|
import warnings
from typing import Any, Iterator, List, Optional, Union
import torch
import torch.nn as nn
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader, Dataset, IterableDataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import Sampler
from ignite.distributed import utils as idist
from ignite.distributed.comp_models import horovod as idist_hvd, native as idist_native, xla as idist_xla
from ignite.utils import setup_logger
__all__ = ["auto_dataloader", "auto_model", "auto_optim", "DistributedProxySampler"]
def auto_dataloader(dataset: Dataset, **kwargs: Any) -> Union[DataLoader, "_MpDeviceLoader"]:
"""Helper method to create a dataloader adapted for non-distributed and distributed configurations (supporting
all available backends from :meth:`~ignite.distributed.utils.available_backends()`).
Internally, we create a dataloader with provided kwargs while applying the following updates:
- batch size is scaled by world size: ``batch_size / world_size`` if larger or equal world size.
- number of workers is scaled by number of local processes: ``num_workers / nprocs`` if larger or equal world size.
- if no sampler provided by user, a `torch DistributedSampler`_ is setup.
- if a `torch DistributedSampler`_ is provided by user, it is used without wrapping it.
- if another sampler is provided, it is wrapped by :class:`~ignite.distributed.auto.DistributedProxySampler`.
- if the default device is 'cuda', `pin_memory` is automatically set to `True`.
.. warning::
Custom batch sampler is not adapted for distributed configuration. Please, make sure that provided batch
sampler is compatible with distributed configuration.
Args:
dataset: input torch dataset. If input dataset is `torch IterableDataset`_ then dataloader will be
created without any distributed sampling. Please, make sure that the dataset itself produces
different data on different ranks.
kwargs: keyword arguments for `torch DataLoader`_.
Returns:
`torch DataLoader`_ or `XLA MpDeviceLoader`_ for XLA devices
Examples:
.. code-block:: python
import ignite.distribted as idist
train_loader = idist.auto_dataloader(
train_dataset,
batch_size=32,
num_workers=4,
shuffle=True,
pin_memory="cuda" in idist.device().type,
drop_last=True,
)
.. _torch DataLoader: https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader
.. _XLA MpDeviceLoader:
https://pytorch.org/xla/release/2.0/index.html#running-on-multiple-xla-devices-with-multi-processing
.. _torch DistributedSampler:
https://pytorch.org/docs/stable/data.html#torch.utils.data.distributed.DistributedSampler
.. _torch IterableDataset: https://pytorch.org/docs/stable/data.html#torch.utils.data.IterableDataset
"""
rank = idist.get_rank()
world_size = idist.get_world_size()
logger = setup_logger(__name__ + ".auto_dataloader")
if world_size > 1:
if "batch_size" in kwargs and kwargs["batch_size"] >= world_size:
kwargs["batch_size"] //= world_size
nproc = idist.get_nproc_per_node()
if "num_workers" in kwargs and kwargs["num_workers"] >= nproc:
kwargs["num_workers"] = (kwargs["num_workers"] + nproc - 1) // nproc
if "batch_sampler" not in kwargs:
if isinstance(dataset, IterableDataset):
logger.info(
"Found iterable dataset, dataloader will be created without any distributed sampling. "
"Please, make sure that the dataset itself produces different data on different ranks."
)
else:
sampler: Optional[Union[DistributedProxySampler, DistributedSampler, Sampler]]
sampler = kwargs.get("sampler", None)
if isinstance(sampler, DistributedSampler):
if sampler.rank != rank:
warnings.warn(f"Found distributed sampler with rank={sampler.rank}, but process rank is {rank}")
if sampler.num_replicas != world_size:
warnings.warn(
f"Found distributed sampler with num_replicas={sampler.num_replicas}, "
f"but world size is {world_size}"
)
elif sampler is None:
# removes "shuffle" from kwargs if sampler is used
shuffle = kwargs.pop("shuffle", True)
sampler = DistributedSampler(dataset, num_replicas=world_size, rank=rank, shuffle=shuffle)
else:
sampler = DistributedProxySampler(sampler, num_replicas=world_size, rank=rank)
kwargs["sampler"] = sampler
else:
warnings.warn(
"Found batch_sampler in provided kwargs. Please, make sure that it is compatible "
"with distributed configuration"
)
if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and kwargs.get("pin_memory", False):
# TODO: How about XLA GPU ?
warnings.warn(
"Found incompatible options: xla support and pin_memory args equal True. "
"Argument `pin_memory=False` will be used to construct data loader."
)
kwargs["pin_memory"] = False
else:
kwargs["pin_memory"] = kwargs.get("pin_memory", "cuda" in idist.device().type)
logger.info(f"Use data loader kwargs for dataset '{repr(dataset)[:20].strip()}': \n\t{kwargs}")
dataloader = DataLoader(dataset, **kwargs)
if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and world_size > 1:
logger.info("DataLoader is wrapped by `MpDeviceLoader` on XLA")
mp_device_loader_cls = _MpDeviceLoader
try:
from torch_xla.distributed.parallel_loader import MpDeviceLoader
mp_device_loader_cls = MpDeviceLoader
except ImportError:
pass
mp_dataloader = mp_device_loader_cls(dataloader, idist.device())
mp_dataloader.sampler = dataloader.sampler # type: ignore[attr-defined]
return mp_dataloader
return dataloader
def auto_model(model: nn.Module, sync_bn: bool = False, **kwargs: Any) -> nn.Module:
"""Helper method to adapt provided model for non-distributed and distributed configurations (supporting
all available backends from :meth:`~ignite.distributed.utils.available_backends()`).
Internally, we perform to following:
- send model to current :meth:`~ignite.distributed.utils.device()` if model's parameters are not on the device.
- wrap the model to `torch DistributedDataParallel`_ for native torch distributed if world size is larger than 1.
- wrap the model to `torch DataParallel`_ if no distributed context found and more than one CUDA devices available.
- broadcast the initial variable states from rank 0 to all other processes if Horovod distributed framework is used.
Args:
model: model to adapt.
sync_bn: if True, applies `torch convert_sync_batchnorm`_ to the model for native torch
distributed only. Default, False. Note, if using Nvidia/Apex, batchnorm conversion should be
applied before calling ``amp.initialize``.
kwargs: kwargs to model's wrapping class: `torch DistributedDataParallel`_ or `torch DataParallel`_
if applicable. Please, make sure to use acceptable kwargs for given backend.
Returns:
torch.nn.Module
Examples:
.. code-block:: python
import ignite.distribted as idist
model = idist.auto_model(model)
In addition with NVidia/Apex, it can be used in the following way:
.. code-block:: python
import ignite.distribted as idist
model, optimizer = amp.initialize(model, optimizer, opt_level=opt_level)
model = idist.auto_model(model)
.. _torch DistributedDataParallel: https://pytorch.org/docs/stable/generated/torch.nn.parallel.
DistributedDataParallel.html
.. _torch DataParallel: https://pytorch.org/docs/stable/generated/torch.nn.DataParallel.html
.. _torch convert_sync_batchnorm: https://pytorch.org/docs/stable/generated/torch.nn.SyncBatchNorm.html#
torch.nn.SyncBatchNorm.convert_sync_batchnorm
.. versionchanged:: 0.4.2
- Added Horovod distributed framework.
- Added ``sync_bn`` argument.
.. versionchanged:: 0.4.3
Added kwargs to ``idist.auto_model``.
"""
logger = setup_logger(__name__ + ".auto_model")
# Put model's parameters to device if its parameters are not on the device
device = idist.device()
if not all([p.device == device for p in model.parameters()]):
model.to(device)
# distributed data parallel model
if idist.get_world_size() > 1:
bnd = idist.backend()
if idist.has_native_dist_support and bnd in (idist_native.NCCL, idist_native.GLOO, idist_native.MPI):
if sync_bn:
logger.info("Convert batch norm to sync batch norm")
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
if torch.cuda.is_available():
if "device_ids" in kwargs:
raise ValueError(f"Argument kwargs should not contain 'device_ids', but got {kwargs}")
lrank = idist.get_local_rank()
logger.info(f"Apply torch DistributedDataParallel on model, device id: {lrank}")
kwargs["device_ids"] = [
lrank,
]
else:
logger.info("Apply torch DistributedDataParallel on model")
model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)
elif idist.has_hvd_support and bnd == idist_hvd.HOROVOD:
import horovod.torch as hvd
logger.info("Broadcast the initial variable states from rank 0 to all other processes")
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
# not distributed but multiple GPUs reachable so data parallel model
elif torch.cuda.device_count() > 1 and "cuda" in idist.device().type:
logger.info("Apply torch DataParallel on model")
model = torch.nn.parallel.DataParallel(model, **kwargs)
return model
def auto_optim(optimizer: Optimizer, **kwargs: Any) -> Optimizer:
"""Helper method to adapt optimizer for non-distributed and distributed configurations (supporting
all available backends from :meth:`~ignite.distributed.utils.available_backends()`).
Internally, this method is no-op for non-distributed and torch native distributed configuration.
For XLA distributed configuration, we create a new class that inherits from provided optimizer.
The goal is to override the `step()` method with specific `xm.optimizer_step`_ implementation.
For Horovod distributed configuration, optimizer is wrapped with Horovod Distributed Optimizer and
its state is broadcasted from rank 0 to all other processes.
Args:
optimizer: input torch optimizer
kwargs: kwargs to Horovod backend's DistributedOptimizer.
Returns:
Optimizer
Examples:
.. code-block:: python
import ignite.distributed as idist
optimizer = idist.auto_optim(optimizer)
.. _xm.optimizer_step: https://pytorch.org/xla/release/1.5/index.html#torch_xla.core.xla_model.optimizer_step
.. versionchanged:: 0.4.2
Added Horovod distributed optimizer.
.. versionchanged:: 0.4.7
Added kwargs to ``idist.auto_optim``.
"""
bnd = idist.backend()
if idist.has_xla_support and bnd == idist_xla.XLA_TPU:
cls = type(optimizer.__class__.__name__, (optimizer.__class__,), dict(_XLADistributedOptimizer.__dict__))
return cls(optimizer)
if idist.has_hvd_support and bnd == idist_hvd.HOROVOD:
import horovod.torch as hvd
optimizer = hvd.DistributedOptimizer(optimizer, **kwargs)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
return optimizer
return optimizer
class DistributedProxySampler(DistributedSampler):
"""Distributed sampler proxy to adapt user's sampler for distributed data parallelism configuration.
Code is based on https://github.com/pytorch/pytorch/issues/23430#issuecomment-562350407
Args:
sampler: Input torch data sampler.
num_replicas: Number of processes participating in distributed training.
rank: Rank of the current process within ``num_replicas``.
.. note::
Input sampler is assumed to have a constant size.
"""
def __init__(self, sampler: Sampler, num_replicas: Optional[int] = None, rank: Optional[int] = None) -> None:
if not isinstance(sampler, Sampler):
raise TypeError(f"Argument sampler should be instance of torch Sampler, but given: {type(sampler)}")
if isinstance(sampler, DistributedSampler):
raise TypeError("Argument sampler must not be a distributed sampler already")
if not hasattr(sampler, "__len__"):
raise TypeError("Argument sampler should have length")
super(DistributedProxySampler, self).__init__(
sampler, num_replicas=num_replicas, rank=rank, shuffle=False # type: ignore[arg-type]
)
self.sampler = sampler
def __iter__(self) -> Iterator:
# deterministically shuffle based on epoch
torch.manual_seed(self.epoch)
indices: List = []
while len(indices) < self.total_size:
indices += list(self.sampler)
if len(indices) > self.total_size:
indices = indices[: self.total_size]
# subsample
indices = indices[self.rank : self.total_size : self.num_replicas]
if len(indices) != self.num_samples:
raise RuntimeError(f"{len(indices)} vs {self.num_samples}")
return iter(indices)
if idist.has_xla_support:
import torch_xla.core.xla_model as xm
from torch_xla.distributed.parallel_loader import ParallelLoader
class _MpDeviceLoader:
# https://github.com/pytorch/xla/pull/2117
# From pytorch/xla if `torch_xla.distributed.parallel_loader.MpDeviceLoader` is not available
def __init__(self, loader: Any, device: torch.device, **kwargs: Any) -> None:
self._loader = loader
self._device = device
self._parallel_loader_kwargs = kwargs
def __iter__(self) -> Iterator:
parallel_loader = ParallelLoader(self._loader, [self._device], **self._parallel_loader_kwargs)
return parallel_loader.per_device_loader(self._device)
def __len__(self) -> int:
return len(self._loader)
class _XLADistributedOptimizer(Optimizer):
def __init__(self, optimizer: Optimizer) -> None:
super(self.__class__, self).__init__(optimizer.param_groups) # type: ignore[call-arg]
self.wrapped_optimizer = optimizer
def step(self, closure: Any = None) -> Any:
xm.optimizer_step(self.wrapped_optimizer, barrier=True)
|
import warnings
from typing import Any, Callable, cast, List, Mapping, Optional, Tuple
import torch
from ignite.distributed.comp_models.base import ComputationModel
try:
import horovod.torch as hvd
try:
# old API
from horovod.run.runner import run as hvd_mp_spawn
except ImportError:
# new API: https://github.com/horovod/horovod/pull/2099
from horovod import run as hvd_mp_spawn
has_hvd_support = True
except ImportError:
has_hvd_support = False
if has_hvd_support:
HOROVOD = "horovod"
class _HorovodDistModel(ComputationModel):
"""Private class for `Horovod <https://horovod.readthedocs.io/en/stable/>`_ distributed computation model."""
name = "horovod-dist"
available_backends = (HOROVOD,)
@staticmethod
def _get_hvd_rank() -> int:
try:
rank = hvd.rank()
except ValueError as e:
rank = -1
return rank
@staticmethod
def create_from_context() -> Optional["_HorovodDistModel"]:
rank = _HorovodDistModel._get_hvd_rank()
# hvd must be initialized
if not rank > -1:
return None
return _HorovodDistModel()
@staticmethod
def create_from_backend(backend: str = HOROVOD, **kwargs: Any) -> "_HorovodDistModel":
if backend not in _HorovodDistModel.available_backends:
raise ValueError(f"Backend should be one of '{_HorovodDistModel.available_backends}'")
rank = _HorovodDistModel._get_hvd_rank()
# hvd must be not initialized
if rank > -1:
raise RuntimeError("Can not re-initialize Horovod if it is already initialized")
return _HorovodDistModel(backend, **kwargs)
def __init__(self, backend: Optional[str] = None, **kwargs: Any) -> None:
"""This is a private method. Please, use `create_from_backend` or `create_from_context`"""
super(_HorovodDistModel, self).__init__()
if backend is not None:
self._create_from_backend(backend, **kwargs)
else:
self._init_from_context()
def _create_from_backend(self, backend: str, **kwargs: Any) -> None:
self._backend: str = backend
comm = kwargs.get("comm", None)
hvd.init(comm=comm)
self._setup_attrs()
if torch.cuda.is_available():
torch.cuda.set_device(self.get_local_rank())
def _init_from_context(self) -> None:
self._backend = HOROVOD
self._setup_attrs()
def _compute_nproc_per_node(self) -> int:
return hvd.local_size()
def get_local_rank(self) -> int:
return hvd.local_rank()
def get_rank(self) -> int:
return hvd.rank()
def get_world_size(self) -> int:
return hvd.size()
def get_nproc_per_node(self) -> int:
return cast(int, self._nproc_per_node)
def get_nnodes(self) -> int:
return cast(int, self._nnodes)
def get_node_rank(self) -> int:
return cast(int, self._node)
def device(self) -> torch.device:
if torch.cuda.is_available():
index = torch.cuda.current_device()
if index < self.get_local_rank():
warnings.warn(
"Current device index is less than current local rank. "
"Please, make sure to call torch.cuda.set_device(local_rank)."
)
return torch.device(f"cuda:{index}")
return torch.device("cpu")
def backend(self) -> str:
return self._backend
def finalize(self) -> None:
hvd.shutdown()
@staticmethod
def _dist_worker_task_fn(backend: str, fn: Callable, args: Tuple, kwargs_dict: Mapping) -> None:
from ignite.distributed.utils import _set_model, finalize
model = _HorovodDistModel.create_from_backend(backend)
_set_model(model)
fn(model.get_local_rank(), *args, **kwargs_dict)
finalize()
@staticmethod
def spawn(
fn: Callable,
args: Tuple,
kwargs_dict: Optional[Mapping] = None,
nproc_per_node: int = 1,
hosts: Optional[str] = None,
backend: str = HOROVOD,
**kwargs: Any,
) -> None:
c1 = "nnodes" in kwargs and kwargs["nnodes"] > 1
c2 = "node_rank" in kwargs and kwargs["node_rank"] > 0
if c1 or c2:
raise RuntimeError(
"For multi-node configuration, please set 'hosts' argument instead according to horovod.run API."
)
if "nnodes" in kwargs:
# Remove 'nnodes=1' as it is an unexpected keyword argument for horovod.run
del kwargs["nnodes"]
if "node_rank" in kwargs:
# Remove 'node_rank=0' as it is an unexpected keyword argument for horovod.run
del kwargs["node_rank"]
hvd_mp_spawn(
_HorovodDistModel._dist_worker_task_fn,
args=(HOROVOD, fn, args, kwargs_dict),
num_proc=nproc_per_node,
hosts=hosts,
**kwargs,
)
_reduce_op_map = {
"SUM": hvd.mpi_ops.Sum,
"AVERAGE": hvd.mpi_ops.Average,
"ADASUM": hvd.mpi_ops.Adasum,
}
_manual_reduce_op_map = {"MIN": torch.min, "MAX": torch.max, "PRODUCT": torch.prod}
def _do_all_reduce(self, tensor: torch.Tensor, op: str = "SUM", group: Optional[Any] = None) -> torch.Tensor:
if group is not None:
raise NotImplementedError("all_reduce with group for horovod is not implemented")
if op in self._manual_reduce_op_map:
op_fn = self._manual_reduce_op_map[op]
return self._do_manual_all_reduce(tensor, op_fn)
if op not in self._reduce_op_map:
raise ValueError(f"Unsupported reduction operation: '{op}'")
op = self._reduce_op_map[op]
return hvd.allreduce(tensor, op=op)
def _do_manual_all_reduce(self, tensor: torch.Tensor, op: Any) -> torch.Tensor:
# We have to unsqueeze otherwise tensors will be gathered into a single tensor
# without splitting (e.g. [1, 1, 1, 3, 3, 3] instead of [[1, 1, 1], [3, 3, 3]])
# and reduction op wont work as expected
res = self._do_all_gather(tensor.unsqueeze(0))
reduced_res = op(res, dim=0)
if isinstance(reduced_res, torch.Tensor):
return reduced_res
# output can also torch min/max_return_type: (min/max_vals, indices)
return reduced_res[0]
def _do_all_gather(self, tensor: torch.Tensor, group: Optional[Any] = None) -> torch.Tensor:
if group is not None:
raise NotImplementedError("all_gather with group for horovod is not implemented")
if tensor.ndimension() == 0:
tensor = tensor.unsqueeze(0)
return hvd.allgather(tensor)
def _do_all_gather_object(self, tensor: Any, group: Optional[Any] = None) -> List[Any]:
if group is not None:
raise NotImplementedError("all_gather with group for horovod is not implemented")
return hvd.allgather_object(tensor)
def _do_new_group(self, ranks: List[int], **kwargs: Any) -> Any:
return hvd.ProcessSet(ranks)
def _do_broadcast(self, tensor: torch.Tensor, src: int) -> torch.Tensor:
return hvd.broadcast(tensor, root_rank=src)
def barrier(self) -> None:
# https://github.com/horovod/horovod/issues/159#issuecomment-424834603
# hvd.allreduce(torch.tensor(0, device=self.device()), name="barrier")
hvd.allreduce(torch.tensor(0, device="cpu"), name="barrier")
|
from typing import List, Tuple, Type, TYPE_CHECKING, Union
from ignite.distributed.comp_models.base import _SerialModel
from ignite.distributed.comp_models.horovod import has_hvd_support
from ignite.distributed.comp_models.native import has_native_dist_support
from ignite.distributed.comp_models.xla import has_xla_support
if TYPE_CHECKING:
from ignite.distributed.comp_models.horovod import _HorovodDistModel
from ignite.distributed.comp_models.native import _NativeDistModel
from ignite.distributed.comp_models.xla import _XlaDistModel
def setup_available_computation_models() -> (
Tuple[Type[Union[_SerialModel, "_NativeDistModel", "_XlaDistModel", "_HorovodDistModel"]], ...]
):
models: List[Type[Union[_SerialModel, "_NativeDistModel", "_XlaDistModel", "_HorovodDistModel"]]] = [
_SerialModel,
]
if has_native_dist_support:
from ignite.distributed.comp_models.native import _NativeDistModel
models.append(_NativeDistModel)
if has_xla_support:
from ignite.distributed.comp_models.xla import _XlaDistModel
models.append(_XlaDistModel)
if has_hvd_support:
from ignite.distributed.comp_models.horovod import _HorovodDistModel
models.append(_HorovodDistModel)
return tuple(models)
registered_computation_models = setup_available_computation_models()
|
import os
import re
import subprocess
import warnings
from typing import Any, Callable, cast, Dict, List, Mapping, Optional, Tuple, Union
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from packaging.version import Version
from ignite.distributed.comp_models.base import ComputationModel
has_native_dist_support = dist.is_available()
if has_native_dist_support:
NCCL = dist.Backend.NCCL
GLOO = dist.Backend.GLOO
MPI = dist.Backend.MPI
class _NativeDistModel(ComputationModel):
"""Private class for PyTorch native distributed computation model.
Supported `backends <https://pytorch.org/docs/stable/distributed.html#backends>`_:
- NCCL
- GLOO
- MPI
In this implementation we assume the following mapping between backend and devices:
- NCCL <-> GPU
- GLOO <-> CPU or GPU
- MPI <-> CPU
"""
name = "native-dist"
available_backends = tuple(name for name in [NCCL, GLOO, MPI] if getattr(dist, f"is_{name}_available")())
@staticmethod
def create_from_context() -> Optional["_NativeDistModel"]:
if not (dist.is_available() and dist.is_initialized()):
return None
return _NativeDistModel()
@staticmethod
def create_from_backend(
backend: str,
init_method: Optional[str] = None,
world_size: Optional[int] = None,
rank: Optional[int] = None,
**kwargs: Any,
) -> "_NativeDistModel":
if backend not in _NativeDistModel.available_backends:
raise ValueError(f"Backend should be one of '{_NativeDistModel.available_backends}'")
if dist.is_available() and dist.is_initialized():
raise RuntimeError("Can not create new distributed process group if default one is already initialized")
if init_method is None:
if world_size is not None or rank is not None:
raise ValueError("Arguments rank and world_size should be None if no init_method is provided")
else:
has_rank = rank is not None
has_ws = world_size is not None
if (has_rank or has_ws) and (not has_rank or not has_ws):
raise ValueError(f"Both rank and world_size should be provided, but given {rank} and {world_size}")
return _NativeDistModel(
backend=backend, init_method=init_method, world_size=world_size, rank=rank, **kwargs
)
def __init__(
self,
backend: Optional[str] = None,
timeout: Optional[int] = None,
init_method: Optional[str] = None,
world_size: Optional[int] = None,
rank: Optional[int] = None,
**kwargs: Any,
) -> None:
"""This is a private method. Please, use `create_from_backend` or `create_from_context`"""
super(_NativeDistModel, self).__init__()
self._env_backup: Optional[Dict[str, str]] = None
self._local_rank: Optional[int] = None
self._master_port: Optional[int] = None
self._master_addr: Optional[str] = None
self._init_method: Optional[str] = None
if backend is not None:
self._create_from_backend(
backend, timeout=timeout, init_method=init_method, world_size=world_size, rank=rank, **kwargs
)
else:
self._init_from_context()
def _create_from_backend(
self,
backend: str,
timeout: Optional[int] = None,
init_method: Optional[str] = None,
world_size: Optional[int] = None,
rank: Optional[int] = None,
**kwargs: Any,
) -> None:
if backend == dist.Backend.NCCL and not torch.cuda.is_available():
raise RuntimeError("Nccl backend is required but no cuda capable devices")
self._backend = backend
self.setup_env_vars(rank, world_size)
init_pg_kwargs: Dict[str, Any] = {}
if timeout is not None:
init_pg_kwargs["timeout"] = timeout
if init_method is None:
init_method = "env://"
if "env" not in init_method:
init_pg_kwargs["world_size"] = int(os.environ["WORLD_SIZE"])
init_pg_kwargs["rank"] = int(os.environ["RANK"])
self._init_method = init_method
dist.init_process_group(backend, init_method=init_method, **init_pg_kwargs)
if torch.cuda.is_available():
torch.cuda.set_device(self._local_rank)
# Call barrier after init_process_group as in
# https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
# Define device ids for NCCL to avoid warnings
# [W ProcessGroupNCCL.cpp:1569] Rank 0 using best-guess GPU 0 to perform barrier as devices used by
# this process are currently unknown. This can potentially cause a hang if this rank to GPU mapping
# is incorrect.Specify device_ids in barrier() to force use of a particular device.
if backend == dist.Backend.NCCL and Version(torch.__version__) >= Version("1.8.0"):
device_ids = [torch.cuda.current_device()]
dist.barrier(device_ids=device_ids)
else:
# For older versions there is no device_ids arg
dist.barrier()
self._setup_attrs()
def _init_from_context(self) -> None:
self._backend = dist.get_backend()
self._identify_local_rank()
self._setup_attrs()
def _compute_nproc_per_node(self) -> int:
local_rank = self.get_local_rank()
# Create new cpu group to get nproc_per_node such we avoid using
# badly configured NCCL
gloo_group = dist.new_group(backend="gloo")
tensor = torch.tensor([local_rank + 1]).to("cpu")
dist.all_reduce(tensor, op=dist.ReduceOp.MAX, group=gloo_group)
dist.destroy_process_group(gloo_group)
return int(tensor.item())
def _get_all_hostnames(self) -> List[Tuple[str, ...]]:
import socket
device = "cpu"
if torch.cuda.is_available():
index = torch.cuda.current_device()
device = f"cuda:{index}"
hostname = socket.gethostname()
name = torch.tensor(bytearray(hostname, "utf-8")).to(device)
padded_t_name = torch.zeros(256, device=device, dtype=torch.long)
padded_t_name[: len(name)] = name
out_t_names = [torch.zeros_like(padded_t_name) for _ in range(self.get_world_size())]
dist.all_gather(out_t_names, padded_t_name)
return [tuple(t.cpu().tolist()) for t in out_t_names]
@staticmethod
def _compute_node_and_local_ranks(rank: int, hostnames: List[Tuple[str, ...]]) -> Tuple[int, int]:
from collections import Counter
c: Counter = Counter(hostnames)
sizes = torch.tensor([0] + list(c.values()))
cumsum_sizes = torch.cumsum(sizes, dim=0)
node_rank = (rank // cumsum_sizes[1:]).clamp(0, 1).sum().item()
local_rank = rank - cumsum_sizes[node_rank].item()
return int(local_rank), node_rank
def _compute_local_rank_via_hostname(self) -> int:
# get all hostnames
hostnames = self._get_all_hostnames()
local_rank, self._node = self._compute_node_and_local_ranks(self.get_rank(), hostnames)
if local_rank < 0 or self._node < 0:
raise ValueError(
"Failed to correctly estimate local rank. "
f"Debugging info: local rank: {local_rank}, node rank: {self._node}, hostnames: {hostnames}"
)
return local_rank
def _identify_local_rank(self) -> None:
if "SLURM_JOB_ID" in os.environ:
os.environ["LOCAL_RANK"] = os.environ["SLURM_LOCALID"]
if "LOCAL_RANK" in os.environ:
self._local_rank = int(os.environ["LOCAL_RANK"])
elif self._ext_local_rank is not None:
self._local_rank = self._ext_local_rank
else:
warnings.warn(
"Local rank information for native distributed setting will be initialized using "
"a heuristic approach based on the hostnames. In some corner cases, determined "
"local rank can be different from the real setup. To avoid this warning, "
"please either set `os.environ['LOCAL_RANK']` "
"or use `idist.set_local_rank(local_rank)` with correct local rank index."
)
# use socket gethostname heuristic to determine number of nodes => local rank
self._local_rank = self._compute_local_rank_via_hostname()
def setup_env_vars(self, rank: Optional[int] = None, world_size: Optional[int] = None) -> None:
self._env_backup = os.environ.copy()
if "SLURM_JOB_ID" in os.environ:
if rank is not None or world_size is not None:
raise ValueError("Arguments rank and world_size should not be specified with SLURM")
self._setup_env_in_slurm()
else:
env_vars = ["RANK", "LOCAL_RANK", "WORLD_SIZE"]
all_env_vars_defined = [k in os.environ for k in env_vars]
# check if all necessary env vars are set
# if partially defined raise an error
if any(all_env_vars_defined) and not all(all_env_vars_defined):
raise RuntimeError(f"PyTorch distributed configuration should define env variables '{env_vars}'")
os.environ["RANK"] = os.environ.get("RANK", f"{rank if rank is not None else 0}")
os.environ["WORLD_SIZE"] = os.environ.get(
"WORLD_SIZE", f"{world_size if world_size is not None else 1}"
)
os.environ["LOCAL_RANK"] = os.environ.get("LOCAL_RANK", "0")
os.environ["MASTER_PORT"] = os.environ.get("MASTER_PORT", "15000")
os.environ["MASTER_ADDR"] = os.environ.get("MASTER_ADDR", "127.0.0.1")
self._local_rank = int(os.environ["LOCAL_RANK"])
self._master_addr = os.environ["MASTER_ADDR"]
self._master_port = int(os.environ["MASTER_PORT"])
def _setup_env_in_slurm(self) -> None:
slurm_env_req_vars = [
"SLURM_JOB_ID",
"SLURM_PROCID",
"SLURM_LOCALID",
"SLURM_NTASKS",
"SLURM_JOB_NODELIST",
"SLURM_JOB_NUM_NODES",
]
for k in slurm_env_req_vars:
if k not in os.environ:
raise RuntimeError(f"SLURM distributed configuration is missing '{k}' in env variables")
ddp_vars = _setup_ddp_vars_from_slurm_env(cast(Dict, os.environ))
# define DDP env vars required by PTH:
for key, value in ddp_vars.items():
os.environ[key] = str(value)
def get_local_rank(self) -> int:
return cast(int, self._local_rank)
def get_rank(self) -> int:
return dist.get_rank()
def get_world_size(self) -> int:
return dist.get_world_size()
def get_nproc_per_node(self) -> int:
return cast(int, self._nproc_per_node)
def get_nnodes(self) -> int:
return cast(int, self._nnodes)
def get_node_rank(self) -> int:
return cast(int, self._node)
def device(self) -> torch.device:
if torch.cuda.is_available():
index = torch.cuda.current_device()
if index < self.get_local_rank():
warnings.warn(
"Current device index is less than current local rank. "
"Please, make sure to call torch.cuda.set_device(local_rank)."
)
return torch.device(f"cuda:{index}")
return torch.device("cpu")
def backend(self) -> str:
return dist.get_backend()
def finalize(self) -> None:
dist.destroy_process_group()
# restore backed-up env
self._restore_env()
def _restore_env(self) -> None:
# restore backed-up env
if self._env_backup is not None:
os.environ.clear()
os.environ.update(self._env_backup)
@staticmethod
def _dist_worker_task_fn(
local_rank: int,
backend: str,
fn: Callable,
args: Tuple,
kw_dict: Mapping,
world_size: int,
nprocs_per_node: int,
node_rank: int,
master_addr: Optional[str],
master_port: Optional[str],
init_method: str,
kw: Any,
) -> None:
from ignite.distributed.utils import _set_model, finalize
copy_env_vars = os.environ.copy()
rank = node_rank * nprocs_per_node + local_rank
os.environ["LOCAL_RANK"] = str(local_rank)
os.environ["RANK"] = str(rank)
os.environ["WORLD_SIZE"] = str(world_size)
arg_world_size: Optional[int] = world_size
arg_rank: Optional[int] = rank
if init_method == "env://":
os.environ["MASTER_ADDR"] = str(master_addr)
os.environ["MASTER_PORT"] = str(master_port)
arg_world_size = None
arg_rank = None
model = _NativeDistModel.create_from_backend(
backend, init_method=init_method, world_size=arg_world_size, rank=arg_rank, **kw
)
_set_model(model)
fn(local_rank, *args, **kw_dict)
finalize()
os.environ.clear()
os.environ.update(copy_env_vars)
@staticmethod
def spawn(
fn: Callable,
args: Tuple,
kwargs_dict: Optional[Mapping] = None,
nproc_per_node: int = 1,
nnodes: int = 1,
node_rank: int = 0,
master_addr: Optional[str] = None,
master_port: Optional[int] = None,
backend: str = "nccl",
init_method: Optional[str] = None,
**kwargs: Any,
) -> None:
world_size = nnodes * nproc_per_node
spawn_kwargs = {
"join": kwargs.get("join", True),
"daemon": kwargs.get("daemon", False),
}
start_processes = mp.spawn
# start_method and start_processes in pytorch >= 1.5
if Version(torch.__version__) >= Version("1.5.0"):
import builtins
if "__IPYTHON__" in builtins.__dict__:
# use fork in jupyter
default_start_method = "fork"
else:
default_start_method = "spawn"
spawn_kwargs["start_method"] = kwargs.get("start_method", default_start_method)
start_processes = mp.start_processes
# TODO: `spawn` wrongfully does not adopt address and port from environment if `init_method` is "env://"
if init_method in [None, "env://"]:
init_method = "env://"
if master_port is None:
master_port = 2222
if master_addr is None:
master_addr = "127.0.0.1"
elif master_addr is not None:
raise ValueError("master_addr should be None if init_method is provided other then 'env://'")
elif master_port is not None:
raise ValueError("master_port should be None if init_method is provided other then 'env://'")
start_processes(
_NativeDistModel._dist_worker_task_fn,
nprocs=nproc_per_node,
args=(
backend,
fn,
args,
kwargs_dict,
world_size,
nproc_per_node,
node_rank,
master_addr,
master_port,
init_method,
kwargs,
),
**spawn_kwargs,
)
_reduce_op_map = {
"SUM": dist.ReduceOp.SUM,
"PRODUCT": dist.ReduceOp.PRODUCT,
"MIN": dist.ReduceOp.MIN,
"MAX": dist.ReduceOp.MAX,
"AND": dist.ReduceOp.BAND,
"OR": dist.ReduceOp.BOR,
}
def _do_all_reduce(self, tensor: torch.Tensor, op: str = "SUM", group: Optional[Any] = None) -> torch.Tensor:
if op not in self._reduce_op_map:
raise ValueError(f"Unsupported reduction operation: '{op}'")
if group is not None and not isinstance(group, dist.ProcessGroup):
raise ValueError("Argument group should be list of int or ProcessGroup")
reduce_op = self._reduce_op_map[op]
# We do if/else here for compatibility with older pytorch versions
if group is not None:
dist.all_reduce(tensor, reduce_op, group=group)
else:
dist.all_reduce(tensor, reduce_op)
return tensor
def _do_all_gather(self, tensor: torch.Tensor, group: Optional[Any] = None) -> torch.Tensor:
if group == dist.GroupMember.NON_GROUP_MEMBER:
return tensor
if group is None:
group_size = self.get_world_size()
elif isinstance(group, dist.ProcessGroup):
group_size = group.size()
else:
raise ValueError("Argument group should be list of int or ProcessGroup")
if tensor.ndimension() == 0:
tensor = tensor.unsqueeze(0)
output = [torch.zeros_like(tensor) for _ in range(group_size)]
# We do if/else here for compatibility with older pytorch versions
if group is not None:
dist.all_gather(output, tensor, group=group)
else:
dist.all_gather(output, tensor)
return torch.cat(output, dim=0)
def _do_all_gather_object(self, tensor: Any, group: Optional[Any] = None) -> List[Any]:
if Version(torch.__version__) < Version("1.7.0"):
raise RuntimeError(
"Current torch version does not implement dist.all_gather_object. "
"Required version should be >=1.7.0"
)
if group == dist.GroupMember.NON_GROUP_MEMBER:
return tensor
if group is None:
group_size = self.get_world_size()
elif isinstance(group, dist.ProcessGroup):
group_size = group.size()
else:
raise ValueError("Argument group should be list of int or ProcessGroup")
output = [None for _ in range(group_size)]
# We do if/else here for compatibility with older pytorch versions
if group is not None:
dist.all_gather_object(output, tensor, group=group)
else:
dist.all_gather_object(output, tensor)
return output
def _do_new_group(self, ranks: List[int], **kwargs: Any) -> Any:
return dist.new_group(ranks=ranks, **kwargs)
def _do_broadcast(self, tensor: torch.Tensor, src: int) -> torch.Tensor:
dist.broadcast(tensor, src=src)
return tensor
def barrier(self) -> None:
dist.barrier()
def _expand_hostlist(nodelist: str) -> List[str]:
"""Expand a compressed hostlist string and returns all hosts listed.
Source : https://github.com/LLNL/py-hostlist/blob/master/hostlist/hostlist.py
Args:
nodelist: Compressed hostlist string
.. note::
The host names can be composed by any character except the special ones `[`, `]`, `,`. Only one
sequence `[...]` is supported per hostname.
.. versionadded:: 0.4.6
"""
result_hostlist = []
nodelist_match = r"([^,\[\]]+\[[^\[\]]*\][^,\[\]]*|[^,\[\]]*),?"
nodelist = nodelist.replace(" ", "")
for node in re.findall(nodelist_match, nodelist):
node_match = r"(.+)\[((,?[0-9]+-?,?-?){0,})\](.*)?"
match = re.search(node_match, node)
if match is None:
if node:
result_hostlist.append(node)
else:
# holds the ranges of nodes as a string
# now we can manipulate the string and cast it to a list of numbers
num = str(match.group(2)).replace("[", "").replace("]", "")
if len(num) == 0:
raise ValueError(f"hostlist invalid : {nodelist}")
num_list = num.split(",")
# find range of node numbers
ranges = [elem.split("-") if "-" in elem else [elem, elem] for elem in num_list]
# if the node numbers contain leading zeros, store them to be
lead_zeros = max([len(s) - len(s.lstrip("0")) for s, _ in ranges])
# list of expanded ranges of node numbers
nodes_list = [list(range(int(s), int(e) + 1)) for s, e in ranges]
# flat the list
final_list = [item for sublist in nodes_list for item in sublist]
# put final list in ascending order and append cluster name to each node number
final_list = list(sorted(set(final_list)))
# prepend leading zeros to numbers required
hostlist_tmp = [str(elem).zfill(lead_zeros + 1) for elem in final_list]
# append hostname to the node numbers
hostlist_no_suffix = [match.group(1) + elem for elem in hostlist_tmp]
# append suffix to hostlist if there is one
final_hostlist = [elem + match.group(4) for elem in hostlist_no_suffix]
result_hostlist += final_hostlist
return result_hostlist
def _setup_ddp_vars_from_slurm_env(environ: Dict[str, str]) -> Dict[str, Union[str, int]]:
"""Method to setup DDP env vars required by PyTorch from SLURM env"""
# 1) Tools like enroot can have hooks to translate slurm env vars to RANK, LOCAL_RANK, WORLD_SIZE etc
# See https://github.com/NVIDIA/enroot/blob/v3.1.0/conf/hooks/extra/50-slurm-pytorch.sh
# 2) User can use torch.distributed.launch tool to schedule on N local GPUs using 1 node, 1 task by SLURM
# To cover case 1), let's ensure that defined RANK == SLURM_PROCID, LOCAL_RANK == SLURM_LOCALID,
# WORLD_SIZE == SLURM_NTASKS. We will use defined MASTER_ADDR and MASTER_PORT instead of defining
# them by our means
# To cover case 2), let's check that defined RANK >= SLURM_PROCID, LOCAL_RANK >= SLURM_LOCALID,
# WORLD_SIZE >= SLURM_NTASKS, SLURM_JOB_NUM_NODES == 1
ddp_vars: Dict[str, Union[str, int, None]] = {
"RANK": int(environ["SLURM_PROCID"]),
"LOCAL_RANK": int(environ["SLURM_LOCALID"]),
"WORLD_SIZE": int(environ["SLURM_NTASKS"]),
"MASTER_ADDR": None,
"MASTER_PORT": None,
}
pth_ddp_env_vars = {key: environ.get(key, None) for key in ddp_vars}
defined_pth_ddp_env_vars = [v is not None for v in pth_ddp_env_vars.values()]
if all(defined_pth_ddp_env_vars):
nnodes = int(environ["SLURM_JOB_NUM_NODES"])
if nnodes > 1:
# ensure that all pth_ddp_env_vars are consistent with slurm vars
for key in ["RANK", "LOCAL_RANK", "WORLD_SIZE"]:
slurm_var = cast(int, ddp_vars[key])
pth_var = int(cast(str, pth_ddp_env_vars[key]))
if slurm_var != pth_var:
raise RuntimeError(
"Environment variable defined for PyTorch Distributed context is inconsistent with "
f"equivalent SLURM env variable. {key}: {pth_var} vs {slurm_var}\n"
f"SLURM vars: {ddp_vars}\n"
f"PTH vars: {pth_ddp_env_vars}\n"
)
else:
# ensure that PTH RANK >= SLURM_PROCID, PTH LOCAL_RANK >= SLURM_LOCALID,
# PTH WORLD_SIZE >= SLURM_NTASKS
for key in ["RANK", "LOCAL_RANK", "WORLD_SIZE"]:
slurm_var = cast(int, ddp_vars[key])
pth_var = int(cast(str, pth_ddp_env_vars[key]))
if pth_var < slurm_var:
raise RuntimeError(
"Environment variable defined for PyTorch Distributed context is "
"inconsistent with equivalent SLURM env variable. "
f"We expect that {key}: {pth_var} >= {slurm_var}\n"
f"SLURM vars: {ddp_vars}\n"
f"PTH vars: {pth_ddp_env_vars}\n"
)
ddp_vars[key] = pth_var
# set up MASTER_ADDR and MASTER_PORT from PTH
ddp_vars["MASTER_ADDR"] = cast(str, pth_ddp_env_vars["MASTER_ADDR"])
ddp_vars["MASTER_PORT"] = int(cast(str, pth_ddp_env_vars["MASTER_PORT"]))
elif any(defined_pth_ddp_env_vars):
# Let's warn user about PTH env variables that we could not taken into account
warnings.warn(
"We detected the following env variables: "
f"{[(k, v) for k, v in pth_ddp_env_vars.items() if v is not None]},\n"
"but will not take them into account as the following env vars are missing:"
f"{[k for k, v in pth_ddp_env_vars.items() if v is None]},\n"
)
if ddp_vars["MASTER_ADDR"] is None:
nodelist = environ["SLURM_JOB_NODELIST"]
try:
# use scontrol to expand hostname list
hostnames = subprocess.check_output(["scontrol", "show", "hostnames", nodelist])
method = "scontrol"
except FileNotFoundError:
# expand hostname list as scontrol
hostnames = " ".join(_expand_hostlist(nodelist)).encode("utf-8")
method = "ignite"
# at least one hostname should be defined
hostname_list = hostnames.split()
if len(hostname_list) < 1:
raise RuntimeError(f"No hostname detected in SLURM_JOB_NODELIST by {method} (nodelist={nodelist})")
# master address is the first hostname of nodes list
ddp_vars["MASTER_ADDR"] = str(hostname_list[0].decode("utf-8"))
if ddp_vars["MASTER_PORT"] is None:
# port should be the same over all process
slurm_port = environ["SLURM_JOB_ID"]
slurm_port = slurm_port[-4:]
ddp_vars["MASTER_PORT"] = int(slurm_port) + 15000
return cast(Dict[str, Union[str, int]], ddp_vars)
|
from typing import Any, Callable, cast, List, Mapping, Optional, Tuple
import torch
from ignite.distributed.comp_models.base import ComputationModel
try:
import torch_xla
import torch_xla.core.xla_model as xm
import torch_xla.distributed.xla_multiprocessing as xmp
has_xla_support = True
except ImportError:
has_xla_support = False
if has_xla_support:
XLA_TPU = "xla-tpu"
class _XlaDistModel(ComputationModel):
"""Private class for PyTorch XLA basic distributed computation model.
It handles single/multi-device computation model.
Supported XLA devices:
- CPU
- TPU
"""
name = "xla-dist"
available_backends = (XLA_TPU,)
@staticmethod
def create_from_context() -> Optional["_XlaDistModel"]:
return _XlaDistModel()
@staticmethod
def create_from_backend(backend: str = XLA_TPU, **kwargs: Any) -> "_XlaDistModel":
if backend not in _XlaDistModel.available_backends:
raise ValueError(f"Backend should be one of '{_XlaDistModel.available_backends}'")
return _XlaDistModel(backend=backend, **kwargs)
def __init__(self, backend: Optional[str] = None, **kwargs: Any):
"""This is a private method. Please, use `create_from_backend` or `create_from_context`"""
super(_XlaDistModel, self).__init__()
if backend is not None:
self._create_from_backend(backend, **kwargs)
else:
self._init_from_context()
def _create_from_backend(self, backend: str, **kwargs: Any) -> None:
xm.rendezvous("init")
self._backend: str = backend
self._setup_attrs()
def _init_from_context(self) -> None:
self._backend = XLA_TPU
self._setup_attrs()
def _compute_nproc_per_node(self) -> int:
tensor = torch.tensor([self.get_local_rank() + 1.0], dtype=torch.float).to(self.device())
xm.all_reduce("max", [tensor])
return int(tensor.item())
def get_local_rank(self) -> int:
return xm.get_local_ordinal()
def get_rank(self) -> int:
return xm.get_ordinal()
def get_world_size(self) -> int:
return xm.xrt_world_size()
def get_nproc_per_node(self) -> int:
return cast(int, self._nproc_per_node)
def get_nnodes(self) -> int:
return cast(int, self._nnodes)
def get_node_rank(self) -> int:
return cast(int, self._node)
def device(self) -> torch.device:
dev = torch_xla._XLAC._xla_get_default_device()
return torch.device(dev)
def backend(self) -> str:
return self._backend
def finalize(self) -> None:
pass
@staticmethod
def _dist_worker_task_fn(
local_rank: int, backend: str, fn: Callable, args: Tuple, kwargs_dict: Mapping
) -> None:
from ignite.distributed.utils import _set_model, finalize
model = _XlaDistModel.create_from_backend(backend)
_set_model(model)
fn(local_rank, *args, **kwargs_dict)
finalize()
@staticmethod
def spawn(
fn: Callable,
args: Tuple,
kwargs_dict: Optional[Mapping] = None,
nproc_per_node: int = 1,
nnodes: int = 1,
node_rank: int = 0,
backend: str = XLA_TPU,
**kwargs: Any,
) -> None:
if "start_method" not in kwargs:
kwargs["start_method"] = "fork"
xmp.spawn(
_XlaDistModel._dist_worker_task_fn,
args=(backend, fn, args, kwargs_dict),
nprocs=nproc_per_node,
**kwargs,
)
_collective_op_dtype = torch.float32
_reduce_op_map = {
"SUM": "sum",
"PRODUCT": "mul",
"MIN": "min",
"MAX": "max",
"AND": "and",
"OR": "or",
}
def _do_all_reduce(self, tensor: torch.Tensor, op: str = "SUM", group: Optional[Any] = None) -> torch.Tensor:
if group is not None and not self._check_group_type(group):
raise ValueError("Argument group should be list of int")
op = self._reduce_op_map[op]
xm.all_reduce(op, [tensor], groups=group)
return tensor
def _do_all_gather(self, tensor: torch.Tensor, group: Optional[Any] = None) -> torch.Tensor:
# from https://github.com/jysohn23/xla/blob/model-parallel-colab/Gather_Scatter_Broadcast_PyTorch_XLA.ipynb
if group is not None and (not isinstance(group, list) or not all(isinstance(item, int) for item in group)):
raise ValueError("Argument group should be list of int")
group_size = self.get_world_size()
output = torch.zeros((group_size,) + tensor.shape, dtype=tensor.dtype, device=tensor.device)
output[self.get_rank() % group_size] = tensor
xm.all_reduce("sum", [output], groups=group)
return output.reshape(-1, *output.shape[2:])
def _do_all_gather_object(self, tensor: Any, group: Optional[Any] = None) -> List[Any]:
raise NotImplementedError("all_gather on object is not implemented for xla")
def _do_new_group(self, ranks: List[int], **kwargs: Any) -> Any:
return [ranks]
def _do_broadcast(self, tensor: torch.Tensor, src: int) -> torch.Tensor:
# from https://github.com/jysohn23/xla/blob/model-parallel-colab/Gather_Scatter_Broadcast_PyTorch_XLA.ipynb
if src != self.get_rank():
tensor.fill_(0.0)
xm.all_reduce("sum", [tensor])
return tensor
def barrier(self) -> None:
xm.rendezvous("barrier")
def _check_group_type(self, group: Optional[Any]) -> bool:
if isinstance(group, list) and all(isinstance(item, int) for item in group):
return True
return False
|
from abc import ABCMeta, abstractmethod
from numbers import Number
from typing import Any, Callable, cast, List, Optional, Union
import torch
class ComputationModel(metaclass=ABCMeta):
"""Base class for distributed computation models and defines interface methods.
This class is public and should be used for other custom derived distributed models.
"""
# this is an additional local rank storage used when idist is setup from existing native torch dist context
_ext_local_rank: Optional[int] = None
def __init__(self) -> None:
self._backend: Optional[str] = None
self._nproc_per_node: Optional[int] = None
self._nnodes: Optional[int] = None
self._node: Optional[int] = None
def _setup_attrs(self) -> None:
if self._nproc_per_node is None:
self._nproc_per_node = self._compute_nproc_per_node() if self.get_world_size() > 1 else 1
if self._nnodes is None:
self._nnodes = self.get_world_size() // self._nproc_per_node
if self._node is None:
self._node = self.get_rank() // self._nproc_per_node
@abstractmethod
def _compute_nproc_per_node(self) -> int:
pass
@abstractmethod
def get_local_rank(self) -> int:
pass
@abstractmethod
def get_rank(self) -> int:
pass
@abstractmethod
def get_world_size(self) -> int:
pass
@abstractmethod
def get_nproc_per_node(self) -> int:
pass
@abstractmethod
def get_nnodes(self) -> int:
pass
@abstractmethod
def get_node_rank(self) -> int:
pass
@abstractmethod
def device(self) -> torch.device:
pass
@abstractmethod
def backend(self) -> Optional[str]:
pass
@abstractmethod
def finalize(self) -> None:
pass
@staticmethod
@abstractmethod
def create_from_context() -> Optional["ComputationModel"]:
pass
@staticmethod
@abstractmethod
def create_from_backend(backend: str, **kwargs: Any) -> "ComputationModel":
pass
@staticmethod
@abstractmethod
def spawn(*args: Any, **kwargs: Any) -> None:
pass
_collective_op_dtype: Any = None
@staticmethod
def _encode_str(x: str, device: torch.device, size: int) -> torch.Tensor:
name = torch.tensor(bytearray(x, "utf-8")).to(device)
padded_x = torch.zeros(size + 1, device=device, dtype=torch.long)
padded_x[: len(name)] = name
padded_x[-1] = len(name)
# output is tensor of shape (1, size + 1)
return padded_x.unsqueeze(0)
def _get_max_length(self, x: str, device: torch.device) -> int:
size = torch.tensor([len(x)], device=device)
size = self._do_all_reduce(size, op="MAX")
return cast(int, size.item())
@staticmethod
def _encode_input_data(x: Union[torch.Tensor, float, str, None], is_src: bool) -> List[int]:
encoded_msg = [-1] * 512
if not is_src:
# Discard input type if not source
return encoded_msg
if isinstance(x, torch.Tensor):
shape = x.shape
dtype = str(x.dtype)
msg = [0, len(shape), *shape, len(dtype), *list(bytearray(dtype, "utf-8"))]
encoded_msg[: len(msg)] = msg
elif isinstance(x, Number):
encoded_msg[0] = 1
elif isinstance(x, str):
encoded_msg[0] = 2
return encoded_msg
@staticmethod
def _decode_as_placeholder(encoded_msg: List[int], device: torch.device) -> Union[torch.Tensor, float, str]:
if encoded_msg[0] == 0:
len_shape = encoded_msg[1]
le = 2 + len_shape
shape = encoded_msg[2:le] if len_shape > 0 else []
len_dtype = encoded_msg[le]
dtype_str = bytearray(encoded_msg[le + 1 : le + 1 + len_dtype]).decode("utf-8")
dtype = eval(dtype_str)
return torch.empty(shape, device=device, dtype=dtype)
elif encoded_msg[0] == 1:
return 0.0
elif encoded_msg[0] == 2:
return ""
else:
raise RuntimeError(f"Internal error: unhandled dtype {encoded_msg[0]}, encoded_msg={encoded_msg}")
def _setup_placeholder(
self, x: Union[torch.Tensor, float, str, None], device: torch.device, is_src: bool
) -> Union[torch.Tensor, float, str]:
encoded_msg_per_rank = self._encode_input_data(x, is_src)
encoded_msg_all_ranks = self._do_all_reduce(torch.tensor(encoded_msg_per_rank, device=device), op="MAX")
if is_src:
if x is None:
raise RuntimeError("Internal error, x is None. Please, file an issue if you encounter this error.")
return x
encoded_msg = encoded_msg_all_ranks.cpu().tolist()
return self._decode_as_placeholder(encoded_msg, device)
@staticmethod
def _decode_str(xs: torch.Tensor) -> List[str]:
# xs.shape = (n, size + 1), e.g. (world_size, size + 1)
out = [bytearray(x[: x[-1]].tolist()).decode("utf-8") for x in xs]
return out
def _apply_op(
self, tensor: torch.Tensor, device: torch.device, fn: Callable, *args: Any, **kwargs: Any
) -> torch.Tensor:
out_dtype = None
tensor_device = None
# check if the tensor is at specified device
if tensor.device != device:
tensor_device = tensor.device
tensor = tensor.to(device)
if self._collective_op_dtype is not None:
# cast to _collective_op_dtype if current type is not floatX
if tensor.dtype not in (torch.float32, torch.float64):
out_dtype = tensor.dtype
tensor = tensor.to(self._collective_op_dtype)
tensor = fn(tensor, *args, **kwargs)
if out_dtype is not None and tensor_device is not None:
return tensor.to(dtype=out_dtype, device=tensor_device)
if out_dtype is not None:
return tensor.to(dtype=out_dtype)
if tensor_device is not None:
return tensor.to(device=tensor_device)
return tensor
def _collective_op(
self, tensor: Union[torch.Tensor, Number, str], fn: Callable, *args: Any, **kwargs: Any
) -> Union[torch.Tensor, float, List[float], List[str]]:
tensor_to_number = tensor_to_str = False
device = self.device()
if isinstance(tensor, (Number, float)):
tensor_to_number = True
dtype = self._collective_op_dtype
if dtype is None and isinstance(tensor, float):
dtype = torch.double
tensor = torch.tensor(tensor, device=device, dtype=dtype)
elif isinstance(tensor, str):
tensor_to_str = True
max_length = self._get_max_length(tensor, device)
tensor = self._encode_str(tensor, device, size=max_length)
tensor = self._apply_op(tensor, device, fn, *args, **kwargs)
if tensor_to_number:
if tensor.numel() == 1:
return tensor.item()
else:
return tensor.tolist()
elif tensor_to_str:
return self._decode_str(tensor)
return tensor
def all_reduce(
self, tensor: Union[torch.Tensor, float], op: str = "sum", group: Optional[Any] = None
) -> Union[torch.Tensor, float]:
if not isinstance(tensor, (torch.Tensor, Number)):
raise TypeError(f"Unhandled input type {type(tensor)}")
return cast(Union[torch.Tensor, float], self._collective_op(tensor, self._do_all_reduce, op, group=group))
def all_gather(
self, tensor: Union[torch.Tensor, float, str, Any], group: Optional[Any] = None
) -> Union[torch.Tensor, float, List[float], List[str]]:
if not isinstance(tensor, (torch.Tensor, Number, str)):
return self._do_all_gather_object(tensor, group=group)
return self._collective_op(tensor, self._do_all_gather, group=group)
def new_group(self, ranks: List[int], **kwargs: Any) -> Any:
if isinstance(ranks, list) and all(isinstance(item, int) for item in ranks):
return self._do_new_group(ranks, **kwargs)
else:
raise ValueError("Argument ranks should be list of int")
def broadcast(
self, tensor: Union[torch.Tensor, float, str, None], src: int = 0, safe_mode: bool = False
) -> Union[torch.Tensor, float, str]:
if not (isinstance(tensor, (torch.Tensor, Number, str)) or tensor is None):
raise TypeError(f"Unhandled input type {type(tensor)}")
rank = self.get_rank()
if tensor is None:
if rank == src:
raise ValueError("Source data can not be None")
elif not safe_mode:
raise ValueError("Argument safe_mode should be True if None is passed for non src rank")
device = self.device()
tensor_to_number = tensor_to_str = False
if safe_mode:
tensor = self._setup_placeholder(tensor, device, rank == src)
if tensor is None:
raise RuntimeError("Internal error, tensor is None. Please, file an issue if you encounter this error.")
if isinstance(tensor, (Number, float)): # have to use Number and float to satisfy mypy
tensor_to_number = True
if rank != src:
tensor = torch.empty(1, device=device, dtype=torch.float)
else:
tensor = torch.tensor([tensor], device=device, dtype=torch.float)
elif isinstance(tensor, str):
tensor_to_str = True
max_length = self._get_max_length(tensor, device)
if rank != src:
tensor = torch.empty(1, max_length + 1, device=device, dtype=torch.long)
else:
tensor = self._encode_str(tensor, device, size=max_length)
tensor = self._apply_op(tensor, device, self._do_broadcast, src)
if tensor_to_number:
return tensor.item()
if tensor_to_str:
list_str = self._decode_str(tensor)
return list_str[0]
return tensor
@abstractmethod
def _do_all_reduce(self, tensor: torch.Tensor, op: str = "SUM", group: Optional[Any] = None) -> torch.Tensor:
pass
@abstractmethod
def _do_all_gather(self, tensor: torch.Tensor, group: Optional[Any] = None) -> torch.Tensor:
pass
@abstractmethod
def _do_all_gather_object(self, tensor: Any, group: Optional[Any] = None) -> List[Any]:
pass
@abstractmethod
def _do_broadcast(self, tensor: torch.Tensor, src: int) -> torch.Tensor:
pass
@abstractmethod
def barrier(self) -> None:
pass
@abstractmethod
def _do_new_group(self, ranks: List[int], **kwargs: Any) -> Any:
pass
class _SerialModel(ComputationModel):
"""Private class defines non-distributed computation model for code compatibility with other distributed models."""
name = "serial"
available_backends = ()
def __init__(self, _backend: Optional[str] = None, **kwargs: Any) -> None:
super(_SerialModel, self).__init__()
def get_local_rank(self) -> int:
return 0
def get_rank(self) -> int:
return 0
def get_world_size(self) -> int:
return 1
def get_nproc_per_node(self) -> int:
return 1
def get_nnodes(self) -> int:
return 1
def get_node_rank(self) -> int:
return 0
def device(self) -> torch.device:
if torch.cuda.is_available():
return torch.device("cuda")
return torch.device("cpu")
def backend(self) -> Optional[str]:
return None
def finalize(self) -> None:
pass
def _compute_nproc_per_node(self) -> int:
return 1
@staticmethod
def create_from_context() -> "_SerialModel":
return _SerialModel()
@staticmethod
def create_from_backend(backend: Optional[str] = None, **kwargs: Any) -> "_SerialModel":
return _SerialModel()
@staticmethod
def spawn(*args: Any, **kwargs: Any) -> None:
raise NotImplementedError("Serial computation model does not implement spawn method")
def all_reduce(
self, tensor: Union[torch.Tensor, float], op: str = "SUM", group: Optional[Any] = None
) -> Union[torch.Tensor, float]:
return tensor
def all_gather(
self, tensor: Union[torch.Tensor, float, str], group: Optional[Any] = None
) -> Union[torch.Tensor, float, List[float], List[str]]:
if isinstance(tensor, torch.Tensor):
return tensor
return cast(Union[List[float], List[str]], [tensor])
def broadcast(
self, tensor: Union[torch.Tensor, float, str, None], src: int = 0, safe_mode: bool = False
) -> Union[torch.Tensor, float, str]:
if tensor is None:
raise ValueError("Argument tensor should not be None")
return tensor
def _do_all_reduce(self, tensor: torch.Tensor, op: str = "SUM", group: Optional[Any] = None) -> torch.Tensor:
return tensor
def _do_all_gather(self, tensor: torch.Tensor, group: Optional[Any] = None) -> torch.Tensor:
return tensor
def _do_all_gather_object(self, tensor: Any, group: Optional[Any] = None) -> Any:
return tensor
def _do_new_group(self, ranks: List[int], **kwargs: Any) -> Any:
return ranks
def _do_broadcast(self, tensor: torch.Tensor, src: int) -> torch.Tensor:
return tensor
def barrier(self) -> None:
pass
def new_group(self, ranks: List[int], **kwargs: Any) -> Any:
if isinstance(ranks, list) and all(isinstance(item, int) for item in ranks):
return self._do_new_group(ranks, **kwargs)
else:
raise ValueError("Argument ranks should be list of int")
|
# -*- coding: utf-8 -*-
import warnings
from typing import Any, Dict, List, Tuple, Union
import torch
from ignite.engine import Engine, EventEnum, Events
from ignite.metrics import Metric
class GpuInfo(Metric):
"""Provides GPU information: a) used memory percentage, b) gpu utilization percentage values as Metric
on each iterations.
.. Note ::
In case if gpu utilization reports "N/A" on a given GPU, corresponding metric value is not set.
Examples:
.. code-block:: python
# Default GPU measurements
GpuInfo().attach(trainer, name='gpu') # metric names are 'gpu:X mem(%)', 'gpu:X util(%)'
# Logging with TQDM
ProgressBar(persist=True).attach(trainer, metric_names=['gpu:0 mem(%)', 'gpu:0 util(%)'])
# Progress bar will looks like
# Epoch [2/10]: [12/24] 50%|█████ , gpu:0 mem(%)=79, gpu:0 util(%)=59 [00:17<1:23]
# Logging with Tensorboard
tb_logger.attach(trainer,
log_handler=OutputHandler(tag="training", metric_names='all'),
event_name=Events.ITERATION_COMPLETED)
"""
def __init__(self) -> None:
try:
from pynvml.smi import nvidia_smi
except ImportError:
raise ModuleNotFoundError(
"This contrib module requires pynvml to be installed. "
"Please install it with command: \n pip install pynvml"
)
# Let's check available devices
if not torch.cuda.is_available():
raise RuntimeError("This contrib module requires available GPU")
# Let it fail if no libnvidia drivers or NMVL library found
self.nvsmi = nvidia_smi.getInstance()
super(GpuInfo, self).__init__()
def reset(self) -> None:
pass
def update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
pass
def compute(self) -> List[Dict[str, Any]]:
data: Dict[str, List[Dict[str, Any]]] = self.nvsmi.DeviceQuery("memory.used, memory.total, utilization.gpu")
if len(data) == 0 or ("gpu" not in data):
warnings.warn("No GPU information available")
return []
return data["gpu"]
def completed(self, engine: Engine, name: str) -> None:
data = self.compute()
if len(data) < 1:
warnings.warn("No GPU information available")
return
for i, data_by_rank in enumerate(data):
mem_name = f"{name}:{i} mem(%)"
if "fb_memory_usage" not in data_by_rank:
warnings.warn(f"No GPU memory usage information available in {data_by_rank}")
continue
mem_report = data_by_rank["fb_memory_usage"]
if not ("used" in mem_report and "total" in mem_report):
warnings.warn(
"GPU memory usage information does not provide used/total "
f"memory consumption information in {mem_report}"
)
continue
engine.state.metrics[mem_name] = int(mem_report["used"] * 100.0 / mem_report["total"])
for i, data_by_rank in enumerate(data):
util_name = f"{name}:{i} util(%)"
if "utilization" not in data_by_rank:
warnings.warn(f"No GPU utilization information available in {data_by_rank}")
continue
util_report = data_by_rank["utilization"]
if not ("gpu_util" in util_report):
warnings.warn(f"GPU utilization information does not provide 'gpu_util' information in {util_report}")
continue
try:
engine.state.metrics[util_name] = int(util_report["gpu_util"])
except ValueError:
# Do not set GPU utilization information
pass
# TODO: see issue https://github.com/pytorch/ignite/issues/1405
def attach( # type: ignore
self, engine: Engine, name: str = "gpu", event_name: Union[str, EventEnum] = Events.ITERATION_COMPLETED
) -> None:
engine.add_event_handler(event_name, self.completed, name)
|
from typing import Any, Callable, cast, Tuple, Union
import torch
from ignite import distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import EpochMetric
def roc_auc_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> float:
from sklearn.metrics import roc_auc_score
y_true = y_targets.cpu().numpy()
y_pred = y_preds.cpu().numpy()
return roc_auc_score(y_true, y_pred)
def roc_auc_curve_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> Tuple[Any, Any, Any]:
from sklearn.metrics import roc_curve
y_true = y_targets.cpu().numpy()
y_pred = y_preds.cpu().numpy()
return roc_curve(y_true, y_pred)
class ROC_AUC(EpochMetric):
"""Computes Area Under the Receiver Operating Characteristic Curve (ROC AUC)
accumulating predictions and the ground-truth during an epoch and applying
`sklearn.metrics.roc_auc_score <https://scikit-learn.org/stable/modules/generated/
sklearn.metrics.roc_auc_score.html#sklearn.metrics.roc_auc_score>`_ .
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
check_compute_fn: Default False. If True, `roc_curve
<https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html#
sklearn.metrics.roc_auc_score>`_ is run on the first batch of data to ensure there are
no issues. User will be warned in case there are any issues computing the function.
device: optional device specification for internal storage.
Note:
ROC_AUC expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or confidence
values. To apply an activation to y_pred, use output_transform as shown below:
.. code-block:: python
def sigmoid_output_transform(output):
y_pred, y = output
y_pred = torch.sigmoid(y_pred)
return y_pred, y
avg_precision = ROC_AUC(sigmoid_output_transform)
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
roc_auc = ROC_AUC()
#The ``output_transform`` arg of the metric can be used to perform a sigmoid on the ``y_pred``.
roc_auc.attach(default_evaluator, 'roc_auc')
y_pred = torch.tensor([[0.0474], [0.5987], [0.7109], [0.9997]])
y_true = torch.tensor([[0], [0], [1], [0]])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['roc_auc'])
.. testoutput::
0.6666...
"""
def __init__(
self,
output_transform: Callable = lambda x: x,
check_compute_fn: bool = False,
device: Union[str, torch.device] = torch.device("cpu"),
):
try:
from sklearn.metrics import roc_auc_score # noqa: F401
except ImportError:
raise ModuleNotFoundError("This contrib module requires scikit-learn to be installed.")
super(ROC_AUC, self).__init__(
roc_auc_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn, device=device
)
class RocCurve(EpochMetric):
"""Compute Receiver operating characteristic (ROC) for binary classification task
by accumulating predictions and the ground-truth during an epoch and applying
`sklearn.metrics.roc_curve <https://scikit-learn.org/stable/modules/generated/
sklearn.metrics.roc_curve.html#sklearn.metrics.roc_curve>`_ .
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
check_compute_fn: Default False. If True, `sklearn.metrics.roc_curve
<https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_curve.html#
sklearn.metrics.roc_curve>`_ is run on the first batch of data to ensure there are
no issues. User will be warned in case there are any issues computing the function.
device: optional device specification for internal storage.
Note:
RocCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or confidence
values. To apply an activation to y_pred, use output_transform as shown below:
.. code-block:: python
def sigmoid_output_transform(output):
y_pred, y = output
y_pred = torch.sigmoid(y_pred)
return y_pred, y
avg_precision = RocCurve(sigmoid_output_transform)
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
roc_auc = RocCurve()
#The ``output_transform`` arg of the metric can be used to perform a sigmoid on the ``y_pred``.
roc_auc.attach(default_evaluator, 'roc_auc')
y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])
y_true = torch.tensor([0, 0, 1, 0])
state = default_evaluator.run([[y_pred, y_true]])
print("FPR", [round(i, 3) for i in state.metrics['roc_auc'][0].tolist()])
print("TPR", [round(i, 3) for i in state.metrics['roc_auc'][1].tolist()])
print("Thresholds", [round(i, 3) for i in state.metrics['roc_auc'][2].tolist()])
.. testoutput::
FPR [0.0, 0.333, 0.333, 1.0]
TPR [0.0, 0.0, 1.0, 1.0]
Thresholds [inf, 1.0, 0.711, 0.047]
.. versionchanged:: 0.4.11
added `device` argument
"""
def __init__(
self,
output_transform: Callable = lambda x: x,
check_compute_fn: bool = False,
device: Union[str, torch.device] = torch.device("cpu"),
) -> None:
try:
from sklearn.metrics import roc_curve # noqa: F401
except ImportError:
raise ModuleNotFoundError("This contrib module requires scikit-learn to be installed.")
super(RocCurve, self).__init__(
roc_auc_curve_compute_fn, # type: ignore[arg-type]
output_transform=output_transform,
check_compute_fn=check_compute_fn,
device=device,
)
def compute(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: # type: ignore[override]
if len(self._predictions) < 1 or len(self._targets) < 1:
raise NotComputableError("RocCurve must have at least one example before it can be computed.")
_prediction_tensor = torch.cat(self._predictions, dim=0)
_target_tensor = torch.cat(self._targets, dim=0)
ws = idist.get_world_size()
if ws > 1:
# All gather across all processes
_prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))
_target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))
if idist.get_rank() == 0:
# Run compute_fn on zero rank only
fpr, tpr, thresholds = cast(Tuple, self.compute_fn(_prediction_tensor, _target_tensor))
fpr = torch.tensor(fpr, device=_prediction_tensor.device)
tpr = torch.tensor(tpr, device=_prediction_tensor.device)
thresholds = torch.tensor(thresholds, device=_prediction_tensor.device)
else:
fpr, tpr, thresholds = None, None, None
if ws > 1:
# broadcast result to all processes
fpr = idist.broadcast(fpr, src=0, safe_mode=True)
tpr = idist.broadcast(tpr, src=0, safe_mode=True)
thresholds = idist.broadcast(thresholds, src=0, safe_mode=True)
return fpr, tpr, thresholds
|
from typing import Any, Callable, cast, Tuple, Union
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import EpochMetric
def precision_recall_curve_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> Tuple[Any, Any, Any]:
try:
from sklearn.metrics import precision_recall_curve
except ImportError:
raise ModuleNotFoundError("This contrib module requires scikit-learn to be installed.")
y_true = y_targets.cpu().numpy()
y_pred = y_preds.cpu().numpy()
return precision_recall_curve(y_true, y_pred)
class PrecisionRecallCurve(EpochMetric):
"""Compute precision-recall pairs for different probability thresholds for binary classification task
by accumulating predictions and the ground-truth during an epoch and applying
`sklearn.metrics.precision_recall_curve <https://scikit-learn.org/stable/modules/generated/
sklearn.metrics.precision_recall_curve.html#sklearn.metrics.precision_recall_curve>`_ .
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
check_compute_fn: Default False. If True, `precision_recall_curve
<https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html
#sklearn.metrics.precision_recall_curve>`_ is run on the first batch of data to ensure there are
no issues. User will be warned in case there are any issues computing the function.
Note:
PrecisionRecallCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates
or confidence values. To apply an activation to y_pred, use output_transform as shown below:
.. code-block:: python
def sigmoid_output_transform(output):
y_pred, y = output
y_pred = torch.sigmoid(y_pred)
return y_pred, y
avg_precision = PrecisionRecallCurve(sigmoid_output_transform)
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])
y_true = torch.tensor([0, 0, 1, 1])
prec_recall_curve = PrecisionRecallCurve()
prec_recall_curve.attach(default_evaluator, 'prec_recall_curve')
state = default_evaluator.run([[y_pred, y_true]])
print("Precision", [round(i, 4) for i in state.metrics['prec_recall_curve'][0].tolist()])
print("Recall", [round(i, 4) for i in state.metrics['prec_recall_curve'][1].tolist()])
print("Thresholds", [round(i, 4) for i in state.metrics['prec_recall_curve'][2].tolist()])
.. testoutput::
Precision [0.5, 0.6667, 1.0, 1.0, 1.0]
Recall [1.0, 1.0, 1.0, 0.5, 0.0]
Thresholds [0.0474, 0.5987, 0.7109, 0.9997]
"""
def __init__(
self,
output_transform: Callable = lambda x: x,
check_compute_fn: bool = False,
device: Union[str, torch.device] = torch.device("cpu"),
) -> None:
super(PrecisionRecallCurve, self).__init__(
precision_recall_curve_compute_fn, # type: ignore[arg-type]
output_transform=output_transform,
check_compute_fn=check_compute_fn,
device=device,
)
def compute(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: # type: ignore[override]
if len(self._predictions) < 1 or len(self._targets) < 1:
raise NotComputableError("PrecisionRecallCurve must have at least one example before it can be computed.")
if self._result is None:
_prediction_tensor = torch.cat(self._predictions, dim=0)
_target_tensor = torch.cat(self._targets, dim=0)
ws = idist.get_world_size()
if ws > 1:
# All gather across all processes
_prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))
_target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))
if idist.get_rank() == 0:
# Run compute_fn on zero rank only
precision, recall, thresholds = cast(Tuple, self.compute_fn(_prediction_tensor, _target_tensor))
precision = torch.tensor(precision, device=_prediction_tensor.device)
recall = torch.tensor(recall, device=_prediction_tensor.device)
# thresholds can have negative strides, not compatible with torch tensors
# https://discuss.pytorch.org/t/negative-strides-in-tensor-error/134287/2
thresholds = torch.tensor(thresholds.copy(), device=_prediction_tensor.device)
else:
precision, recall, thresholds = None, None, None
if ws > 1:
# broadcast result to all processes
precision = idist.broadcast(precision, src=0, safe_mode=True)
recall = idist.broadcast(recall, src=0, safe_mode=True)
thresholds = idist.broadcast(thresholds, src=0, safe_mode=True)
self._result = (precision, recall, thresholds) # type: ignore[assignment]
return cast(Tuple[torch.Tensor, torch.Tensor, torch.Tensor], self._result)
|
import ignite.contrib.metrics.regression
from ignite.contrib.metrics.average_precision import AveragePrecision
from ignite.contrib.metrics.cohen_kappa import CohenKappa
from ignite.contrib.metrics.gpu_info import GpuInfo
from ignite.contrib.metrics.precision_recall_curve import PrecisionRecallCurve
from ignite.contrib.metrics.roc_auc import ROC_AUC, RocCurve
|
from typing import Callable, Union
import torch
from ignite.metrics import EpochMetric
def average_precision_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> float:
from sklearn.metrics import average_precision_score
y_true = y_targets.cpu().numpy()
y_pred = y_preds.cpu().numpy()
return average_precision_score(y_true, y_pred)
class AveragePrecision(EpochMetric):
"""Computes Average Precision accumulating predictions and the ground-truth during an epoch
and applying `sklearn.metrics.average_precision_score <https://scikit-learn.org/stable/modules/generated/
sklearn.metrics.average_precision_score.html#sklearn.metrics.average_precision_score>`_ .
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
check_compute_fn: Default False. If True, `average_precision_score
<https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html
#sklearn.metrics.average_precision_score>`_ is run on the first batch of data to ensure there are
no issues. User will be warned in case there are any issues computing the function.
device: optional device specification for internal storage.
Note:
AveragePrecision expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or
confidence values. To apply an activation to y_pred, use output_transform as shown below:
.. code-block:: python
def activated_output_transform(output):
y_pred, y = output
y_pred = torch.softmax(y_pred, dim=1)
return y_pred, y
avg_precision = AveragePrecision(activated_output_transform)
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
y_pred = torch.tensor([[0.79, 0.21], [0.30, 0.70], [0.46, 0.54], [0.16, 0.84]])
y_true = torch.tensor([[1, 1], [1, 1], [0, 1], [0, 1]])
avg_precision = AveragePrecision()
avg_precision.attach(default_evaluator, 'average_precision')
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['average_precision'])
.. testoutput::
0.9166...
"""
def __init__(
self,
output_transform: Callable = lambda x: x,
check_compute_fn: bool = False,
device: Union[str, torch.device] = torch.device("cpu"),
):
try:
from sklearn.metrics import average_precision_score # noqa: F401
except ImportError:
raise ModuleNotFoundError("This contrib module requires scikit-learn to be installed.")
super(AveragePrecision, self).__init__(
average_precision_compute_fn,
output_transform=output_transform,
check_compute_fn=check_compute_fn,
device=device,
)
|
from typing import Callable, Optional, Union
import torch
from ignite.metrics import EpochMetric
class CohenKappa(EpochMetric):
"""Compute different types of Cohen's Kappa: Non-Wieghted, Linear, Quadratic.
Accumulating predictions and the ground-truth during an epoch and applying
`sklearn.metrics.cohen_kappa_score <https://scikit-learn.org/stable/modules/
generated/sklearn.metrics.cohen_kappa_score.html>`_ .
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
weights: a string is used to define the type of Cohen's Kappa whether Non-Weighted or Linear
or Quadratic. Default, None.
check_compute_fn: Default False. If True, `cohen_kappa_score
<https://scikit-learn.org/stable/modules/generated/sklearn.metrics.cohen_kappa_score.html>`_
is run on the first batch of data to ensure there are
no issues. User will be warned in case there are any issues computing the function.
device: optional device specification for internal storage.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in the format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. If not, ``output_tranform`` can be added
to the metric to transform the output into the form expected by the metric.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = CohenKappa()
metric.attach(default_evaluator, 'ck')
y_true = torch.tensor([2, 0, 2, 2, 0, 1])
y_pred = torch.tensor([0, 0, 2, 2, 0, 2])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['ck'])
.. testoutput::
0.4285...
"""
def __init__(
self,
output_transform: Callable = lambda x: x,
weights: Optional[str] = None,
check_compute_fn: bool = False,
device: Union[str, torch.device] = torch.device("cpu"),
):
try:
from sklearn.metrics import cohen_kappa_score # noqa: F401
except ImportError:
raise ModuleNotFoundError("This contrib module requires scikit-learn to be installed.")
if weights not in (None, "linear", "quadratic"):
raise ValueError("Kappa Weighting type must be None or linear or quadratic.")
# initalize weights
self.weights = weights
self.cohen_kappa_compute = self.get_cohen_kappa_fn()
super(CohenKappa, self).__init__(
self.cohen_kappa_compute,
output_transform=output_transform,
check_compute_fn=check_compute_fn,
device=device,
)
def get_cohen_kappa_fn(self) -> Callable[[torch.Tensor, torch.Tensor], float]:
"""Return a function computing Cohen Kappa from scikit-learn."""
from sklearn.metrics import cohen_kappa_score
def wrapper(y_targets: torch.Tensor, y_preds: torch.Tensor) -> float:
y_true = y_targets.cpu().numpy()
y_pred = y_preds.cpu().numpy()
return cohen_kappa_score(y_true, y_pred, weights=self.weights)
return wrapper
|
from abc import abstractmethod
from typing import Tuple
import torch
from ignite.metrics import Metric
from ignite.metrics.metric import reinit__is_reduced
def _check_output_shapes(output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output
c1 = y_pred.ndimension() == 2 and y_pred.shape[1] == 1
if not (y_pred.ndimension() == 1 or c1):
raise ValueError(f"Input y_pred should have shape (N,) or (N, 1), but given {y_pred.shape}")
c2 = y.ndimension() == 2 and y.shape[1] == 1
if not (y.ndimension() == 1 or c2):
raise ValueError(f"Input y should have shape (N,) or (N, 1), but given {y.shape}")
if y_pred.shape != y.shape:
raise ValueError(f"Input data shapes should be the same, but given {y_pred.shape} and {y.shape}")
def _check_output_types(output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output
if y_pred.dtype not in (torch.float16, torch.float32, torch.float64):
raise TypeError(f"Input y_pred dtype should be float 16, 32 or 64, but given {y_pred.dtype}")
if y.dtype not in (torch.float16, torch.float32, torch.float64):
raise TypeError(f"Input y dtype should be float 16, 32 or 64, but given {y.dtype}")
def _torch_median(output: torch.Tensor) -> float:
output = output.view(-1)
len_ = len(output)
if len_ % 2 == 0:
return float((torch.kthvalue(output, len_ // 2)[0] + torch.kthvalue(output, len_ // 2 + 1)[0]) / 2)
else:
return float(torch.kthvalue(output, len_ // 2 + 1)[0])
class _BaseRegression(Metric):
# Base class for all regression metrics
# `update` method check the shapes and call internal overloaded
# method `_update`.
@reinit__is_reduced
def update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
_check_output_shapes(output)
_check_output_types(output)
y_pred, y = output[0].detach(), output[1].detach()
if y_pred.ndimension() == 2 and y_pred.shape[1] == 1:
y_pred = y_pred.squeeze(dim=-1)
if y.ndimension() == 2 and y.shape[1] == 1:
y = y.squeeze(dim=-1)
self._update((y_pred, y))
@abstractmethod
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
pass
|
from typing import Tuple
import torch
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
class MeanAbsoluteRelativeError(_BaseRegression):
r"""Calculate Mean Absolute Relative Error.
.. math::
\text{MARE} = \frac{1}{n}\sum_{j=1}^n\frac{\left|A_j-P_j\right|}{\left|A_j\right|}
where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in the reference `Botchkarev 2018`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
__ https://arxiv.org/ftp/arxiv/papers/1809/1809.03006.pdf
Parameters are inherited from ``Metric.__init__``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = MeanAbsoluteRelativeError()
metric.attach(default_evaluator, 'mare')
y_true = torch.tensor([1., 2., 3., 4., 5.])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['mare'])
.. testoutput::
0.25...
.. versionchanged:: 0.4.5
- Works with DDP.
"""
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_absolute_relative_errors = torch.tensor(0.0, device=self._device)
self._num_samples = 0
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
if (y == 0).any():
raise NotComputableError("The ground truth has 0.")
absolute_error = torch.abs(y_pred - y.view_as(y_pred)) / torch.abs(y.view_as(y_pred))
self._sum_of_absolute_relative_errors += torch.sum(absolute_error).to(self._device)
self._num_samples += y.size()[0]
@sync_all_reduce("_sum_of_absolute_relative_errors", "_num_samples")
def compute(self) -> float:
if self._num_samples == 0:
raise NotComputableError(
"MeanAbsoluteRelativeError must have at least one sample before it can be computed."
)
return self._sum_of_absolute_relative_errors.item() / self._num_samples
|
from typing import Tuple
import torch
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
class FractionalBias(_BaseRegression):
r"""Calculates the Fractional Bias.
.. math::
\text{FB} = \frac{1}{n}\sum_{j=1}^n\frac{2 (A_j - P_j)}{A_j + P_j}
where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in `Botchkarev 2018`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
__ https://arxiv.org/abs/1809.03006
Parameters are inherited from ``Metric.__init__``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = FractionalBias()
metric.attach(default_evaluator, 'fractional_bias')
y_pred = torch.tensor([[3.8], [9.9], [5.4], [2.1]])
y_true = y_pred * 1.5
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['fractional_bias'])
.. testoutput::
0.4000...
.. versionchanged:: 0.4.5
- Works with DDP.
"""
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_errors = torch.tensor(0.0, dtype=torch.double, device=self._device)
self._num_examples = 0
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
errors = 2 * (y.view_as(y_pred) - y_pred) / (y_pred + y.view_as(y_pred) + 1e-30)
self._sum_of_errors += torch.sum(errors).to(self._device)
self._num_examples += y.shape[0]
@sync_all_reduce("_sum_of_errors", "_num_examples")
def compute(self) -> float:
if self._num_examples == 0:
raise NotComputableError("FractionalBias must have at least one example before it can be computed.")
return self._sum_of_errors.item() / self._num_examples
|
from typing import Callable, Union
import torch
from ignite.contrib.metrics.regression._base import _torch_median
from ignite.metrics import EpochMetric
def median_absolute_percentage_error_compute_fn(y_pred: torch.Tensor, y: torch.Tensor) -> float:
e = torch.abs(y.view_as(y_pred) - y_pred) / torch.abs(y.view_as(y_pred))
return 100.0 * _torch_median(e)
class MedianAbsolutePercentageError(EpochMetric):
r"""Calculates the Median Absolute Percentage Error.
.. math::
\text{MdAPE} = 100 \cdot \text{MD}_{j=1,n} \left( \frac{|A_j - P_j|}{|A_j|} \right)
where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in `Botchkarev 2018`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)` and of type `float32`.
.. warning::
Current implementation stores all input data (output and target) in as tensors before computing a metric.
This can potentially lead to a memory error if the input data is larger than available RAM.
__ https://arxiv.org/abs/1809.03006
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: optional device specification for internal storage.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = MedianAbsolutePercentageError()
metric.attach(default_evaluator, 'mape')
y_true = torch.tensor([1, 2, 3, 4, 5])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['mape'])
.. testoutput::
25.0...
"""
def __init__(
self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
):
super(MedianAbsolutePercentageError, self).__init__(
median_absolute_percentage_error_compute_fn, output_transform=output_transform, device=device
)
|
from typing import cast, List, Tuple
import torch
import ignite.distributed as idist
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import reinit__is_reduced
class GeometricMeanRelativeAbsoluteError(_BaseRegression):
r"""Calculates the Geometric Mean Relative Absolute Error.
.. math::
\text{GMRAE} = \exp(\frac{1}{n}\sum_{j=1}^n \ln\frac{|A_j - P_j|}{|A_j - \bar{A}|})
where :math:`A_j` is the ground truth, :math:`P_j` is the predicted value
and :math: `bar{A}` is the mean of the ground truth.
More details can be found in `Botchkarev 2018`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
__ https://arxiv.org/abs/1809.03006
Parameters are inherited from ``Metric.__init__``.
.. warning::
Current implementation of GMRAE stores all input data (output and target)
as tensors before computing the metric.
This can potentially lead to a memory error if the input data is larger than available RAM.
In distributed configuration, all stored data (output and target) is mutually collected across all processes
using all gather collective operation. This can potentially lead to a memory error.
Compute method compute the metric on zero rank process only and final result is broadcasted to
all processes.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = GeometricMeanRelativeAbsoluteError()
metric.attach(default_evaluator, 'gmare')
y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['gmare'])
.. testoutput::
0.0...
"""
@reinit__is_reduced
def reset(self) -> None:
self._predictions: List[torch.Tensor] = []
self._targets: List[torch.Tensor] = []
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
y_pred = y_pred.clone().to(self._device)
y = y.clone().to(self._device)
self._predictions.append(y_pred)
self._targets.append(y)
def compute(self) -> float:
if len(self._predictions) < 1 or len(self._targets) < 1:
raise NotComputableError(
"GeometricMeanRelativeAbsoluteError must have at least one example before it can be computed."
)
_prediction_tensor = torch.cat(self._predictions, dim=0)
_target_tensor = torch.cat(self._targets, dim=0)
# All gather across all processes
_prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))
_target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))
result = torch.exp(
torch.log(
torch.abs(_target_tensor - _prediction_tensor) / torch.abs(_target_tensor - _target_tensor.mean())
).mean()
).item()
return result
|
from typing import Tuple
import torch
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
class MaximumAbsoluteError(_BaseRegression):
r"""Calculates the Maximum Absolute Error.
.. math::
\text{MaxAE} = \max_{j=1,n} \left( \lvert A_j-P_j \rvert \right)
where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in `Botchkarev 2018`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
__ https://arxiv.org/abs/1809.03006
Parameters are inherited from ``Metric.__init__``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = MaximumAbsoluteError()
metric.attach(default_evaluator, 'mae')
y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['mae'])
.. testoutput::
1.25...
.. versionchanged:: 0.4.5
- Works with DDP.
"""
@reinit__is_reduced
def reset(self) -> None:
self._max_of_absolute_errors: float = -1
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
mae = torch.abs(y_pred - y.view_as(y_pred)).max().item()
if self._max_of_absolute_errors < mae:
self._max_of_absolute_errors = mae
@sync_all_reduce("_max_of_absolute_errors:MAX")
def compute(self) -> float:
if self._max_of_absolute_errors < 0:
raise NotComputableError("MaximumAbsoluteError must have at least one example before it can be computed.")
return self._max_of_absolute_errors
|
from ignite.contrib.metrics.regression.canberra_metric import CanberraMetric
from ignite.contrib.metrics.regression.fractional_absolute_error import FractionalAbsoluteError
from ignite.contrib.metrics.regression.fractional_bias import FractionalBias
from ignite.contrib.metrics.regression.geometric_mean_absolute_error import GeometricMeanAbsoluteError
from ignite.contrib.metrics.regression.geometric_mean_relative_absolute_error import GeometricMeanRelativeAbsoluteError
from ignite.contrib.metrics.regression.manhattan_distance import ManhattanDistance
from ignite.contrib.metrics.regression.maximum_absolute_error import MaximumAbsoluteError
from ignite.contrib.metrics.regression.mean_absolute_relative_error import MeanAbsoluteRelativeError
from ignite.contrib.metrics.regression.mean_error import MeanError
from ignite.contrib.metrics.regression.mean_normalized_bias import MeanNormalizedBias
from ignite.contrib.metrics.regression.median_absolute_error import MedianAbsoluteError
from ignite.contrib.metrics.regression.median_absolute_percentage_error import MedianAbsolutePercentageError
from ignite.contrib.metrics.regression.median_relative_absolute_error import MedianRelativeAbsoluteError
from ignite.contrib.metrics.regression.r2_score import R2Score
from ignite.contrib.metrics.regression.wave_hedges_distance import WaveHedgesDistance
|
from typing import Tuple
import torch
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
class MeanError(_BaseRegression):
r"""Calculates the Mean Error.
.. math::
\text{ME} = \frac{1}{n}\sum_{j=1}^n (A_j - P_j)
where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in the reference `Botchkarev 2018`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
__ https://arxiv.org/abs/1809.03006
Parameters are inherited from ``Metric.__init__``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = MeanError()
metric.attach(default_evaluator, 'me')
y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['me'])
.. testoutput::
0.625...
"""
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_errors = torch.tensor(0.0, device=self._device)
self._num_examples = 0
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
errors = y.view_as(y_pred) - y_pred
self._sum_of_errors += torch.sum(errors).item()
self._num_examples += y.shape[0]
@sync_all_reduce("_sum_of_errors", "_num_examples")
def compute(self) -> float:
if self._num_examples == 0:
raise NotComputableError("MeanError must have at least one example before it can be computed.")
return self._sum_of_errors.item() / self._num_examples
|
from typing import Tuple
import torch
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
class GeometricMeanAbsoluteError(_BaseRegression):
r"""Calculates the Geometric Mean Absolute Error.
.. math::
\text{GMAE} = \exp(\frac{1}{n}\sum_{j=1}^n\ln(|A_j - P_j|))
where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in `Botchkarev 2018`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
__ https://arxiv.org/abs/1809.03006
Parameters are inherited from ``Metric.__init__``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = GeometricMeanAbsoluteError()
metric.attach(default_evaluator, 'gmae')
y_pred = torch.tensor([[3.8], [9.9], [-5.4], [2.1]])
y_true = y_pred * 1.5
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['gmae'])
.. testoutput::
2.2723...
.. versionchanged:: 0.4.5
- Works with DDP.
"""
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_errors = torch.tensor(0.0, device=self._device)
self._num_examples = 0
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
errors = torch.log(torch.abs(y.view_as(y_pred) - y_pred))
self._sum_of_errors += torch.sum(errors).to(self._device)
self._num_examples += y.shape[0]
@sync_all_reduce("_sum_of_errors", "_num_examples")
def compute(self) -> float:
if self._num_examples == 0:
raise NotComputableError(
"GeometricMeanAbsoluteError must have at least one example before it can be computed."
)
return torch.exp((self._sum_of_errors) / self._num_examples).item()
|
from typing import Tuple
import torch
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
class R2Score(_BaseRegression):
r"""Calculates the R-Squared, the
`coefficient of determination <https://en.wikipedia.org/wiki/Coefficient_of_determination>`_.
.. math::
R^2 = 1 - \frac{\sum_{j=1}^n(A_j - P_j)^2}{\sum_{j=1}^n(A_j - \bar{A})^2}
where :math:`A_j` is the ground truth, :math:`P_j` is the predicted value and
:math:`\bar{A}` is the mean of the ground truth.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)` and of type `float32`.
Parameters are inherited from ``Metric.__init__``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = R2Score()
metric.attach(default_evaluator, 'r2')
y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['r2'])
.. testoutput::
0.8035...
.. versionchanged:: 0.4.3
Works with DDP.
"""
@reinit__is_reduced
def reset(self) -> None:
self._num_examples = 0
self._sum_of_errors = torch.tensor(0.0, device=self._device)
self._y_sq_sum = torch.tensor(0.0, device=self._device)
self._y_sum = torch.tensor(0.0, device=self._device)
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output
self._num_examples += y.shape[0]
self._sum_of_errors += torch.sum(torch.pow(y_pred - y, 2)).to(self._device)
self._y_sum += torch.sum(y).to(self._device)
self._y_sq_sum += torch.sum(torch.pow(y, 2)).to(self._device)
@sync_all_reduce("_num_examples", "_sum_of_errors", "_y_sq_sum", "_y_sum")
def compute(self) -> float:
if self._num_examples == 0:
raise NotComputableError("R2Score must have at least one example before it can be computed.")
return 1 - self._sum_of_errors.item() / (self._y_sq_sum.item() - (self._y_sum.item() ** 2) / self._num_examples)
|
from typing import Callable, Union
import torch
from ignite.contrib.metrics.regression._base import _torch_median
from ignite.metrics import EpochMetric
def median_absolute_error_compute_fn(y_pred: torch.Tensor, y: torch.Tensor) -> float:
e = torch.abs(y.view_as(y_pred) - y_pred)
return _torch_median(e)
class MedianAbsoluteError(EpochMetric):
r"""Calculates the Median Absolute Error.
.. math::
\text{MdAE} = \text{MD}_{j=1,n} \left( |A_j - P_j| \right)
where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in `Botchkarev 2018`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)` and of type `float32`.
.. warning::
Current implementation stores all input data (output and target) in as tensors before computing a metric.
This can potentially lead to a memory error if the input data is larger than available RAM.
__ https://arxiv.org/abs/1809.03006
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: optional device specification for internal storage.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = MedianAbsoluteError()
metric.attach(default_evaluator, 'mae')
y_true = torch.tensor([0, 1, 2, 3, 4, 5])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['mae'])
.. testoutput::
0.625
"""
def __init__(
self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
):
super(MedianAbsoluteError, self).__init__(
median_absolute_error_compute_fn, output_transform=output_transform, device=device
)
|
from typing import Callable, Union
import torch
from ignite.contrib.metrics.regression._base import _torch_median
from ignite.metrics import EpochMetric
def median_relative_absolute_error_compute_fn(y_pred: torch.Tensor, y: torch.Tensor) -> float:
e = torch.abs(y.view_as(y_pred) - y_pred) / torch.abs(y.view_as(y_pred) - torch.mean(y))
return _torch_median(e)
class MedianRelativeAbsoluteError(EpochMetric):
r"""Calculates the Median Relative Absolute Error.
.. math::
\text{MdRAE} = \text{MD}_{j=1,n} \left( \frac{|A_j - P_j|}{|A_j - \bar{A}|} \right)
where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in `Botchkarev 2018`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)` and of type `float32`.
.. warning::
Current implementation stores all input data (output and target) in as tensors before computing a metric.
This can potentially lead to a memory error if the input data is larger than available RAM.
__ https://arxiv.org/abs/1809.03006
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: optional device specification for internal storage.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = MedianRelativeAbsoluteError()
metric.attach(default_evaluator, 'mrae')
y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['mrae'])
.. testoutput::
0.5...
"""
def __init__(
self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
):
super(MedianRelativeAbsoluteError, self).__init__(
median_relative_absolute_error_compute_fn, output_transform=output_transform, device=device
)
|
from typing import Tuple
import torch
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
class CanberraMetric(_BaseRegression):
r"""Calculates the Canberra Metric.
.. math::
\text{CM} = \sum_{j=1}^n\frac{|A_j - P_j|}{|A_j| + |P_j|}
where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in `Botchkarev 2018`_ or `scikit-learn distance metrics`_
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
.. _scikit-learn distance metrics:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.DistanceMetric.html
Parameters are inherited from ``Metric.__init__``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
.. _`Botchkarev 2018`:
https://arxiv.org/ftp/arxiv/papers/1809/1809.03006.pdf
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = CanberraMetric()
metric.attach(default_evaluator, 'canberra')
y_pred = torch.tensor([[3.8], [9.9], [-5.4], [2.1]])
y_true = y_pred * 1.5
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['canberra'])
.. testoutput::
0.8000...
.. versionchanged:: 0.4.3
- Fixed implementation: ``abs`` in denominator.
- Works with DDP.
"""
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_errors = torch.tensor(0.0, device=self._device)
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
errors = torch.abs(y - y_pred) / (torch.abs(y_pred) + torch.abs(y) + 1e-15)
self._sum_of_errors += torch.sum(errors).to(self._device)
@sync_all_reduce("_sum_of_errors")
def compute(self) -> float:
return self._sum_of_errors.item()
|
from typing import Tuple
import torch
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
class WaveHedgesDistance(_BaseRegression):
r"""Calculates the Wave Hedges Distance.
.. math::
\text{WHD} = \sum_{j=1}^n\frac{|A_j - P_j|}{max(A_j, P_j)}
where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in `Botchkarev 2018`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
__ https://arxiv.org/abs/1809.03006
Parameters are inherited from ``Metric.__init__``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = WaveHedgesDistance()
metric.attach(default_evaluator, 'whd')
y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['whd'])
.. testoutput::
1.25...
.. versionchanged:: 0.4.5
- Works with DDP.
"""
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_errors = torch.tensor(0.0, device=self._device)
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
errors = torch.abs(y.view_as(y_pred) - y_pred) / (torch.max(y_pred, y.view_as(y_pred)) + 1e-30)
self._sum_of_errors += torch.sum(errors).to(self._device)
@sync_all_reduce("_sum_of_errors")
def compute(self) -> float:
return self._sum_of_errors.item()
|
from typing import Tuple
import torch
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
class FractionalAbsoluteError(_BaseRegression):
r"""Calculates the Fractional Absolute Error.
.. math::
\text{FAE} = \frac{1}{n}\sum_{j=1}^n\frac{2 |A_j - P_j|}{|A_j| + |P_j|}
where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in `Botchkarev 2018`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
__ https://arxiv.org/abs/1809.03006
Parameters are inherited from ``Metric.__init__``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = FractionalAbsoluteError()
metric.attach(default_evaluator, 'fractional_abs_error')
y_pred = torch.tensor([[3.8], [9.9], [-5.4], [2.1]])
y_true = y_pred * 0.8
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['fractional_abs_error'])
.. testoutput::
0.2222...
.. versionchanged:: 0.4.5
- Works with DDP.
"""
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_errors = torch.tensor(0.0, device=self._device)
self._num_examples = 0
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
errors = 2 * torch.abs(y.view_as(y_pred) - y_pred) / (torch.abs(y_pred) + torch.abs(y.view_as(y_pred)))
self._sum_of_errors += torch.sum(errors).to(self._device)
self._num_examples += y.shape[0]
@sync_all_reduce("_num_examples", "_sum_of_errors")
def compute(self) -> float:
if self._num_examples == 0:
raise NotComputableError(
"FractionalAbsoluteError must have at least one example before it can be computed."
)
return self._sum_of_errors.item() / self._num_examples
|
from typing import Tuple
import torch
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
class ManhattanDistance(_BaseRegression):
r"""Calculates the Manhattan Distance.
.. math::
\text{MD} = \sum_{j=1}^n |A_j - P_j|
where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in `scikit-learn distance metrics`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
__ https://scikit-learn.org/stable/modules/generated/sklearn.metrics.DistanceMetric.html
Parameters are inherited from ``Metric.__init__``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = ManhattanDistance()
metric.attach(default_evaluator, 'manhattan')
y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['manhattan'])
.. testoutput::
3.75...
.. versionchanged:: 0.4.3
- Fixed sklearn compatibility.
- Workes with DDP.
"""
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_errors = torch.tensor(0.0, device=self._device)
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output
errors = torch.abs(y - y_pred)
self._sum_of_errors += torch.sum(errors).to(self._device)
@sync_all_reduce("_sum_of_errors")
def compute(self) -> float:
return self._sum_of_errors.item()
|
from typing import Tuple
import torch
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
class MeanNormalizedBias(_BaseRegression):
r"""Calculates the Mean Normalized Bias.
.. math::
\text{MNB} = \frac{1}{n}\sum_{j=1}^n\frac{A_j - P_j}{A_j}
where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in the reference `Botchkarev 2018`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
__ https://arxiv.org/abs/1809.03006
Parameters are inherited from ``Metric.__init__``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = MeanNormalizedBias()
metric.attach(default_evaluator, 'mnb')
y_true = torch.tensor([1., 2., 3., 4., 5.])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['mnb'])
.. testoutput::
0.25...
.. versionchanged:: 0.4.5
- Works with DDP.
"""
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_errors = torch.tensor(0.0, device=self._device)
self._num_examples = 0
def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
if (y == 0).any():
raise NotComputableError("The ground truth has 0.")
errors = (y.view_as(y_pred) - y_pred) / y
self._sum_of_errors += torch.sum(errors).to(self._device)
self._num_examples += y.shape[0]
@sync_all_reduce("_sum_of_errors", "_num_examples")
def compute(self) -> float:
if self._num_examples == 0:
raise NotComputableError("MeanNormalizedBias must have at least one example before it can be computed.")
return self._sum_of_errors.item() / self._num_examples
|
from ignite.contrib.engines.tbptt import create_supervised_tbptt_trainer, Tbptt_Events
|
import numbers
import warnings
from functools import partial
from typing import Any, Callable, cast, Dict, Iterable, Mapping, Optional, Sequence, Union
import torch
import torch.nn as nn
from torch.optim.optimizer import Optimizer
from torch.utils.data.distributed import DistributedSampler
# https://github.com/pytorch/ignite/issues/2773
try:
from torch.optim.lr_scheduler import LRScheduler as PyTorchLRScheduler
except ImportError:
from torch.optim.lr_scheduler import _LRScheduler as PyTorchLRScheduler
import ignite.distributed as idist
from ignite.contrib.handlers import (
ClearMLLogger,
global_step_from_engine,
MLflowLogger,
NeptuneLogger,
PolyaxonLogger,
ProgressBar,
TensorboardLogger,
VisdomLogger,
WandBLogger,
)
from ignite.contrib.handlers.base_logger import BaseLogger
from ignite.contrib.metrics import GpuInfo
from ignite.engine import Engine, Events
from ignite.handlers import Checkpoint, DiskSaver, EarlyStopping, TerminateOnNan
from ignite.handlers.checkpoint import BaseSaveHandler
from ignite.handlers.param_scheduler import ParamScheduler
from ignite.metrics import RunningAverage
from ignite.metrics.metric import RunningBatchWise
from ignite.utils import deprecated
def setup_common_training_handlers(
trainer: Engine,
train_sampler: Optional[DistributedSampler] = None,
to_save: Optional[Mapping] = None,
save_every_iters: int = 1000,
output_path: Optional[str] = None,
lr_scheduler: Optional[Union[ParamScheduler, PyTorchLRScheduler]] = None,
with_gpu_stats: bool = False,
output_names: Optional[Iterable[str]] = None,
with_pbars: bool = True,
with_pbar_on_iters: bool = True,
log_every_iters: int = 100,
stop_on_nan: bool = True,
clear_cuda_cache: bool = True,
save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
**kwargs: Any,
) -> None:
"""Helper method to setup trainer with common handlers (it also supports distributed configuration):
- :class:`~ignite.handlers.terminate_on_nan.TerminateOnNan`
- handler to setup learning rate scheduling
- :class:`~ignite.handlers.checkpoint.ModelCheckpoint`
- :class:`~ignite.metrics.RunningAverage` on `update_function` output
- Two progress bars on epochs and optionally on iterations
Args:
trainer: trainer engine. Output of trainer's `update_function` should be a dictionary
or sequence or a single tensor.
train_sampler: Optional distributed sampler used to call
`set_epoch` method on epoch started event.
to_save: dictionary with objects to save in the checkpoint. This argument is passed to
:class:`~ignite.handlers.checkpoint.Checkpoint` instance.
save_every_iters: saving interval. By default, `to_save` objects are stored
each 1000 iterations.
output_path: output path to indicate where `to_save` objects are stored. Argument is mutually
exclusive with ``save_handler``.
lr_scheduler: learning rate scheduler
as native torch LRScheduler or ignite's parameter scheduler.
with_gpu_stats: if True, :class:`~ignite.contrib.metrics.GpuInfo` is attached to the
trainer. This requires `pynvml` package to be installed.
output_names: list of names associated with `update_function` output dictionary.
with_pbars: if True, two progress bars on epochs and optionally on iterations are attached.
Default, True.
with_pbar_on_iters: if True, a progress bar on iterations is attached to the trainer.
Default, True.
log_every_iters: logging interval for :class:`~ignite.contrib.metrics.GpuInfo` and for
epoch-wise progress bar. Default, 100.
stop_on_nan: if True, :class:`~ignite.handlers.terminate_on_nan.TerminateOnNan` handler is added to the trainer.
Default, True.
clear_cuda_cache: if True, `torch.cuda.empty_cache()` is called every end of epoch.
Default, True.
save_handler: Method or callable
class to use to store ``to_save``. See :class:`~ignite.handlers.checkpoint.Checkpoint` for more details.
Argument is mutually exclusive with ``output_path``.
kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.checkpoint.Checkpoint`.
"""
if idist.get_world_size() > 1:
_setup_common_distrib_training_handlers(
trainer,
train_sampler=train_sampler,
to_save=to_save,
save_every_iters=save_every_iters,
output_path=output_path,
lr_scheduler=lr_scheduler,
with_gpu_stats=with_gpu_stats,
output_names=output_names,
with_pbars=with_pbars,
with_pbar_on_iters=with_pbar_on_iters,
log_every_iters=log_every_iters,
stop_on_nan=stop_on_nan,
clear_cuda_cache=clear_cuda_cache,
save_handler=save_handler,
**kwargs,
)
else:
if train_sampler is not None and isinstance(train_sampler, DistributedSampler):
warnings.warn(
"Argument train_sampler is a distributed sampler,"
" but either there is no distributed setting or world size is < 2. "
"Train sampler argument will be ignored",
UserWarning,
)
_setup_common_training_handlers(
trainer,
to_save=to_save,
save_every_iters=save_every_iters,
output_path=output_path,
lr_scheduler=lr_scheduler,
with_gpu_stats=with_gpu_stats,
output_names=output_names,
with_pbars=with_pbars,
with_pbar_on_iters=with_pbar_on_iters,
log_every_iters=log_every_iters,
stop_on_nan=stop_on_nan,
clear_cuda_cache=clear_cuda_cache,
save_handler=save_handler,
**kwargs,
)
setup_common_distrib_training_handlers = setup_common_training_handlers
def _setup_common_training_handlers(
trainer: Engine,
to_save: Optional[Mapping] = None,
save_every_iters: int = 1000,
output_path: Optional[str] = None,
lr_scheduler: Optional[Union[ParamScheduler, PyTorchLRScheduler]] = None,
with_gpu_stats: bool = False,
output_names: Optional[Iterable[str]] = None,
with_pbars: bool = True,
with_pbar_on_iters: bool = True,
log_every_iters: int = 100,
stop_on_nan: bool = True,
clear_cuda_cache: bool = True,
save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
**kwargs: Any,
) -> None:
if output_path is not None and save_handler is not None:
raise ValueError(
"Arguments output_path and save_handler are mutually exclusive. Please, define only one of them"
)
if stop_on_nan:
trainer.add_event_handler(Events.ITERATION_COMPLETED, TerminateOnNan())
if lr_scheduler is not None:
if isinstance(lr_scheduler, PyTorchLRScheduler):
trainer.add_event_handler(
Events.ITERATION_COMPLETED, lambda engine: cast(PyTorchLRScheduler, lr_scheduler).step()
)
else:
trainer.add_event_handler(Events.ITERATION_STARTED, lr_scheduler)
if torch.cuda.is_available() and clear_cuda_cache:
trainer.add_event_handler(Events.EPOCH_COMPLETED, empty_cuda_cache)
if to_save is not None:
if output_path is None and save_handler is None:
raise ValueError(
"If to_save argument is provided then output_path or save_handler arguments should be also defined"
)
if output_path is not None:
save_handler = DiskSaver(dirname=output_path, require_empty=False)
checkpoint_handler = Checkpoint(
to_save, cast(Union[Callable, BaseSaveHandler], save_handler), filename_prefix="training", **kwargs
)
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=save_every_iters), checkpoint_handler)
if with_gpu_stats:
GpuInfo().attach(
trainer, name="gpu", event_name=Events.ITERATION_COMPLETED(every=log_every_iters) # type: ignore[arg-type]
)
if output_names is not None:
def output_transform(x: Any, index: int, name: str) -> Any:
if isinstance(x, Mapping):
return x[name]
elif isinstance(x, Sequence):
return x[index]
elif isinstance(x, (torch.Tensor, numbers.Number)):
return x
else:
raise TypeError(
"Unhandled type of update_function's output. "
f"It should either mapping or sequence, but given {type(x)}"
)
for i, n in enumerate(output_names):
RunningAverage(output_transform=partial(output_transform, index=i, name=n)).attach(
trainer, n, usage=RunningBatchWise()
)
if with_pbars:
if with_pbar_on_iters:
ProgressBar(persist=False).attach(
trainer, metric_names="all", event_name=Events.ITERATION_COMPLETED(every=log_every_iters)
)
ProgressBar(persist=True, bar_format="").attach(
trainer, event_name=Events.EPOCH_STARTED, closing_event_name=Events.COMPLETED
)
def _setup_common_distrib_training_handlers(
trainer: Engine,
train_sampler: Optional[DistributedSampler] = None,
to_save: Optional[Mapping] = None,
save_every_iters: int = 1000,
output_path: Optional[str] = None,
lr_scheduler: Optional[Union[ParamScheduler, PyTorchLRScheduler]] = None,
with_gpu_stats: bool = False,
output_names: Optional[Iterable[str]] = None,
with_pbars: bool = True,
with_pbar_on_iters: bool = True,
log_every_iters: int = 100,
stop_on_nan: bool = True,
clear_cuda_cache: bool = True,
save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
**kwargs: Any,
) -> None:
_setup_common_training_handlers(
trainer,
to_save=to_save,
output_path=output_path,
save_every_iters=save_every_iters,
lr_scheduler=lr_scheduler,
with_gpu_stats=with_gpu_stats,
output_names=output_names,
with_pbars=(idist.get_rank() == 0) and with_pbars,
with_pbar_on_iters=with_pbar_on_iters,
log_every_iters=log_every_iters,
stop_on_nan=stop_on_nan,
clear_cuda_cache=clear_cuda_cache,
save_handler=save_handler,
**kwargs,
)
if train_sampler is not None:
if not isinstance(train_sampler, DistributedSampler):
raise TypeError("Train sampler should be torch DistributedSampler and have `set_epoch` method")
@trainer.on(Events.EPOCH_STARTED)
def distrib_set_epoch(engine: Engine) -> None:
train_sampler.set_epoch(engine.state.epoch - 1)
def empty_cuda_cache(_: Engine) -> None:
torch.cuda.empty_cache()
import gc
gc.collect()
@deprecated(
"0.4.0",
"0.6.0",
("Please use instead: setup_tb_logging, setup_visdom_logging or setup_mlflow_logging etc.",),
raise_exception=True,
)
def setup_any_logging(
logger: BaseLogger,
logger_module: Any,
trainer: Engine,
optimizers: Optional[Union[Optimizer, Dict[str, Optimizer], Dict[None, Optimizer]]],
evaluators: Optional[Union[Engine, Dict[str, Engine]]],
log_every_iters: int,
) -> None:
pass
def _setup_logging(
logger: BaseLogger,
trainer: Engine,
optimizers: Optional[Union[Optimizer, Dict[str, Optimizer], Dict[None, Optimizer]]],
evaluators: Optional[Union[Engine, Dict[str, Engine]]],
log_every_iters: int,
) -> None:
if optimizers is not None:
if not isinstance(optimizers, (Optimizer, Mapping)):
raise TypeError("Argument optimizers should be either a single optimizer or a dictionary or optimizers")
if evaluators is not None:
if not isinstance(evaluators, (Engine, Mapping)):
raise TypeError("Argument evaluators should be either a single engine or a dictionary or engines")
if log_every_iters is None:
log_every_iters = 1
logger.attach_output_handler(
trainer, event_name=Events.ITERATION_COMPLETED(every=log_every_iters), tag="training", metric_names="all"
)
if optimizers is not None:
# Log optimizer parameters
if isinstance(optimizers, Optimizer):
optimizers = {None: optimizers}
for k, optimizer in optimizers.items():
logger.attach_opt_params_handler(
trainer, Events.ITERATION_STARTED(every=log_every_iters), optimizer, param_name="lr", tag=k
)
if evaluators is not None:
# Log evaluation metrics
if isinstance(evaluators, Engine):
evaluators = {"validation": evaluators}
event_name = Events.ITERATION_COMPLETED if isinstance(logger, WandBLogger) else None
gst = global_step_from_engine(trainer, custom_event_name=event_name)
for k, evaluator in evaluators.items():
logger.attach_output_handler(
evaluator, event_name=Events.COMPLETED, tag=k, metric_names="all", global_step_transform=gst
)
def setup_tb_logging(
output_path: str,
trainer: Engine,
optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
log_every_iters: int = 100,
**kwargs: Any,
) -> TensorboardLogger:
"""Method to setup TensorBoard logging on trainer and a list of evaluators. Logged metrics are:
- Training metrics, e.g. running average loss values
- Learning rate(s)
- Evaluation metrics
Args:
output_path: logging directory path
trainer: trainer engine
optimizers: single or dictionary of
torch optimizers. If a dictionary, keys are used as tags arguments for logging.
evaluators: single or dictionary of evaluators. If a dictionary,
keys are used as tags arguments for logging.
log_every_iters: interval for loggers attached to iteration events. To log every iteration,
value can be set to 1 or None.
kwargs: optional keyword args to be passed to construct the logger.
Returns:
:class:`~ignite.contrib.handlers.tensorboard_logger.TensorboardLogger`
"""
logger = TensorboardLogger(log_dir=output_path, **kwargs)
_setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
return logger
def setup_visdom_logging(
trainer: Engine,
optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
log_every_iters: int = 100,
**kwargs: Any,
) -> VisdomLogger:
"""Method to setup Visdom logging on trainer and a list of evaluators. Logged metrics are:
- Training metrics, e.g. running average loss values
- Learning rate(s)
- Evaluation metrics
Args:
trainer: trainer engine
optimizers: single or dictionary of
torch optimizers. If a dictionary, keys are used as tags arguments for logging.
evaluators: single or dictionary of evaluators. If a dictionary,
keys are used as tags arguments for logging.
log_every_iters: interval for loggers attached to iteration events. To log every iteration,
value can be set to 1 or None.
kwargs: optional keyword args to be passed to construct the logger.
Returns:
:class:`~ignite.contrib.handlers.visdom_logger.VisdomLogger`
"""
logger = VisdomLogger(**kwargs)
_setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
return logger
def setup_mlflow_logging(
trainer: Engine,
optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
log_every_iters: int = 100,
**kwargs: Any,
) -> MLflowLogger:
"""Method to setup MLflow logging on trainer and a list of evaluators. Logged metrics are:
- Training metrics, e.g. running average loss values
- Learning rate(s)
- Evaluation metrics
Args:
trainer: trainer engine
optimizers: single or dictionary of
torch optimizers. If a dictionary, keys are used as tags arguments for logging.
evaluators: single or dictionary of evaluators. If a dictionary,
keys are used as tags arguments for logging.
log_every_iters: interval for loggers attached to iteration events. To log every iteration,
value can be set to 1 or None.
kwargs: optional keyword args to be passed to construct the logger.
Returns:
:class:`~ignite.contrib.handlers.mlflow_logger.MLflowLogger`
"""
logger = MLflowLogger(**kwargs)
_setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
return logger
def setup_neptune_logging(
trainer: Engine,
optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
log_every_iters: int = 100,
**kwargs: Any,
) -> NeptuneLogger:
"""Method to setup Neptune logging on trainer and a list of evaluators. Logged metrics are:
- Training metrics, e.g. running average loss values
- Learning rate(s)
- Evaluation metrics
Args:
trainer: trainer engine
optimizers: single or dictionary of
torch optimizers. If a dictionary, keys are used as tags arguments for logging.
evaluators: single or dictionary of evaluators. If a dictionary,
keys are used as tags arguments for logging.
log_every_iters: interval for loggers attached to iteration events. To log every iteration,
value can be set to 1 or None.
kwargs: optional keyword args to be passed to construct the logger.
Returns:
:class:`~ignite.contrib.handlers.neptune_logger.NeptuneLogger`
"""
logger = NeptuneLogger(**kwargs)
_setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
return logger
def setup_wandb_logging(
trainer: Engine,
optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
log_every_iters: int = 100,
**kwargs: Any,
) -> WandBLogger:
"""Method to setup WandB logging on trainer and a list of evaluators. Logged metrics are:
- Training metrics, e.g. running average loss values
- Learning rate(s)
- Evaluation metrics
Args:
trainer: trainer engine
optimizers: single or dictionary of
torch optimizers. If a dictionary, keys are used as tags arguments for logging.
evaluators: single or dictionary of evaluators. If a dictionary,
keys are used as tags arguments for logging.
log_every_iters: interval for loggers attached to iteration events. To log every iteration,
value can be set to 1 or None.
kwargs: optional keyword args to be passed to construct the logger.
Returns:
:class:`~ignite.contrib.handlers.wandb_logger.WandBLogger`
"""
logger = WandBLogger(**kwargs)
_setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
return logger
def setup_plx_logging(
trainer: Engine,
optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
log_every_iters: int = 100,
**kwargs: Any,
) -> PolyaxonLogger:
"""Method to setup Polyaxon logging on trainer and a list of evaluators. Logged metrics are:
- Training metrics, e.g. running average loss values
- Learning rate(s)
- Evaluation metrics
Args:
trainer: trainer engine
optimizers: single or dictionary of
torch optimizers. If a dictionary, keys are used as tags arguments for logging.
evaluators: single or dictionary of evaluators. If a dictionary,
keys are used as tags arguments for logging.
log_every_iters: interval for loggers attached to iteration events. To log every iteration,
value can be set to 1 or None.
kwargs: optional keyword args to be passed to construct the logger.
Returns:
:class:`~ignite.contrib.handlers.polyaxon_logger.PolyaxonLogger`
"""
logger = PolyaxonLogger(**kwargs)
_setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
return logger
def setup_clearml_logging(
trainer: Engine,
optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
log_every_iters: int = 100,
**kwargs: Any,
) -> ClearMLLogger:
"""Method to setup ClearML logging on trainer and a list of evaluators. Logged metrics are:
- Training metrics, e.g. running average loss values
- Learning rate(s)
- Evaluation metrics
Args:
trainer: trainer engine
optimizers: single or dictionary of
torch optimizers. If a dictionary, keys are used as tags arguments for logging.
evaluators: single or dictionary of evaluators. If a dictionary,
keys are used as tags arguments for logging.
log_every_iters: interval for loggers attached to iteration events. To log every iteration,
value can be set to 1 or None.
kwargs: optional keyword args to be passed to construct the logger.
Returns:
:class:`~ignite.contrib.handlers.clearml_logger.ClearMLLogger`
"""
logger = ClearMLLogger(**kwargs)
_setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
return logger
def setup_trains_logging(
trainer: Engine,
optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
log_every_iters: int = 100,
**kwargs: Any,
) -> ClearMLLogger:
"""``setup_trains_logging`` was renamed to :func:`~ignite.contrib.engines.common.setup_clearml_logging`."""
warnings.warn("setup_trains_logging was renamed to setup_clearml_logging.")
return setup_clearml_logging(trainer, optimizers, evaluators, log_every_iters, **kwargs)
get_default_score_fn = Checkpoint.get_default_score_fn
def gen_save_best_models_by_val_score(
save_handler: Union[Callable, BaseSaveHandler],
evaluator: Engine,
models: Union[torch.nn.Module, Dict[str, torch.nn.Module]],
metric_name: str,
n_saved: int = 3,
trainer: Optional[Engine] = None,
tag: str = "val",
score_sign: float = 1.0,
**kwargs: Any,
) -> Checkpoint:
"""Method adds a handler to ``evaluator`` to save ``n_saved`` of best models based on the metric
(named by ``metric_name``) provided by ``evaluator`` (i.e. ``evaluator.state.metrics[metric_name]``).
Models with highest metric value will be retained. The logic of how to store objects is delegated to
``save_handler``.
Args:
save_handler: Method or callable class to
use to save engine and other provided objects. Function receives two objects: checkpoint as a dictionary
and filename. If ``save_handler`` is callable class, it can
inherit of :class:`~ignite.handlers.checkpoint.BaseSaveHandler` and optionally implement ``remove`` method
to keep a fixed number of saved checkpoints. In case if user needs to save engine's checkpoint on a disk,
``save_handler`` can be defined with :class:`~ignite.handlers.DiskSaver`.
evaluator: evaluation engine used to provide the score
models: model or dictionary with the object to save. Objects should have
implemented ``state_dict`` and ``load_state_dict`` methods.
metric_name: metric name to use for score evaluation. This metric should be present in
`evaluator.state.metrics`.
n_saved: number of best models to store
trainer: trainer engine to fetch the epoch when saving the best model.
tag: score name prefix: `{tag}_{metric_name}`. By default, tag is "val".
score_sign: sign of the score: 1.0 or -1.0. For error-like metrics, e.g. smaller is better,
a negative score sign should be used (objects with larger score are retained). Default, 1.0.
kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.checkpoint.Checkpoint`.
Returns:
A :class:`~ignite.handlers.checkpoint.Checkpoint` handler.
"""
global_step_transform = None
if trainer is not None:
global_step_transform = global_step_from_engine(trainer)
if isinstance(models, nn.Module):
to_save: Dict[str, nn.Module] = {"model": models}
else:
to_save = models
best_model_handler = Checkpoint(
to_save,
save_handler,
filename_prefix="best",
n_saved=n_saved,
global_step_transform=global_step_transform,
score_name=f"{tag}_{metric_name.lower()}",
score_function=get_default_score_fn(metric_name, score_sign=score_sign),
**kwargs,
)
evaluator.add_event_handler(Events.COMPLETED, best_model_handler)
return best_model_handler
def save_best_model_by_val_score(
output_path: str,
evaluator: Engine,
model: torch.nn.Module,
metric_name: str,
n_saved: int = 3,
trainer: Optional[Engine] = None,
tag: str = "val",
score_sign: float = 1.0,
**kwargs: Any,
) -> Checkpoint:
"""Method adds a handler to ``evaluator`` to save on a disk ``n_saved`` of best models based on the metric
(named by ``metric_name``) provided by ``evaluator`` (i.e. ``evaluator.state.metrics[metric_name]``).
Models with highest metric value will be retained.
Args:
output_path: output path to indicate where to save best models
evaluator: evaluation engine used to provide the score
model: model to store
metric_name: metric name to use for score evaluation. This metric should be present in
`evaluator.state.metrics`.
n_saved: number of best models to store
trainer: trainer engine to fetch the epoch when saving the best model.
tag: score name prefix: `{tag}_{metric_name}`. By default, tag is "val".
score_sign: sign of the score: 1.0 or -1.0. For error-like metrics, e.g. smaller is better,
a negative score sign should be used (objects with larger score are retained). Default, 1.0.
kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.checkpoint.Checkpoint`.
Returns:
A :class:`~ignite.handlers.checkpoint.Checkpoint` handler.
"""
return gen_save_best_models_by_val_score(
save_handler=DiskSaver(dirname=output_path, require_empty=False),
evaluator=evaluator,
models=model,
metric_name=metric_name,
n_saved=n_saved,
trainer=trainer,
tag=tag,
score_sign=score_sign,
**kwargs,
)
def add_early_stopping_by_val_score(
patience: int,
evaluator: Engine,
trainer: Engine,
metric_name: str,
score_sign: float = 1.0,
) -> EarlyStopping:
"""Method setups early stopping handler based on the score (named by `metric_name`) provided by `evaluator`.
Metric value should increase in order to keep training and not early stop.
Args:
patience: number of events to wait if no improvement and then stop the training.
evaluator: evaluation engine used to provide the score
trainer: trainer engine to stop the run if no improvement.
metric_name: metric name to use for score evaluation. This metric should be present in
`evaluator.state.metrics`.
score_sign: sign of the score: 1.0 or -1.0. For error-like metrics, e.g. smaller is better,
a negative score sign should be used (objects with larger score are retained). Default, 1.0.
Returns:
A :class:`~ignite.handlers.early_stopping.EarlyStopping` handler.
"""
es_handler = EarlyStopping(
patience=patience, score_function=get_default_score_fn(metric_name, score_sign=score_sign), trainer=trainer
)
evaluator.add_event_handler(Events.COMPLETED, es_handler)
return es_handler
|
# coding: utf-8
import collections.abc as collections
from typing import Callable, Mapping, Optional, Sequence, Union
import torch
import torch.nn as nn
from torch.optim.optimizer import Optimizer
from ignite.engine import _prepare_batch, Engine, EventEnum
from ignite.utils import apply_to_tensor
class Tbptt_Events(EventEnum):
"""Aditional tbptt events.
Additional events for truncated backpropagation throught time dedicated
trainer.
"""
TIME_ITERATION_STARTED = "time_iteration_started"
TIME_ITERATION_COMPLETED = "time_iteration_completed"
def _detach_hidden(
hidden: Union[torch.Tensor, Sequence, Mapping, str, bytes]
) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:
"""Cut backpropagation graph.
Auxillary function to cut the backpropagation graph by detaching the hidden
vector.
"""
return apply_to_tensor(hidden, torch.Tensor.detach)
def create_supervised_tbptt_trainer(
model: nn.Module,
optimizer: Optimizer,
loss_fn: nn.Module,
tbtt_step: int,
dim: int = 0,
device: Optional[str] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
) -> Engine:
"""Create a trainer for truncated backprop through time supervised models.
Training recurrent model on long sequences is computationally intensive as
it requires to process the whole sequence before getting a gradient.
However, when the training loss is computed over many outputs
(`X to many <https://karpathy.github.io/2015/05/21/rnn-effectiveness/>`_),
there is an opportunity to compute a gradient over a subsequence. This is
known as
`truncated backpropagation through time <https://machinelearningmastery.com/
gentle-introduction-backpropagation-time/>`_.
This supervised trainer apply gradient optimization step every `tbtt_step`
time steps of the sequence, while backpropagating through the same
`tbtt_step` time steps.
Args:
model: the model to train.
optimizer: the optimizer to use.
loss_fn: the loss function to use.
tbtt_step: the length of time chunks (last one may be smaller).
dim: axis representing the time dimension.
device: device type specification (default: None).
Applies to batches.
non_blocking: if True and this copy is between CPU and GPU,
the copy may occur asynchronously with respect to the host. For other cases,
this argument has no effect.
prepare_batch: function that receives `batch`, `device`,
`non_blocking` and outputs tuple of tensors `(batch_x, batch_y)`.
Returns:
a trainer engine with supervised update function.
.. warning::
The internal use of `device` has changed.
`device` will now *only* be used to move the input data to the correct device.
The `model` should be moved by the user before creating an optimizer.
For more information see:
* `PyTorch Documentation <https://pytorch.org/docs/stable/optim.html#constructing-it>`_
* `PyTorch's Explanation <https://github.com/pytorch/pytorch/issues/7844#issuecomment-503713840>`_
"""
def _update(engine: Engine, batch: Sequence[torch.Tensor]) -> float:
loss_list = []
hidden = None
x, y = batch
for batch_t in zip(x.split(tbtt_step, dim=dim), y.split(tbtt_step, dim=dim)):
x_t, y_t = prepare_batch(batch_t, device=device, non_blocking=non_blocking)
# Fire event for start of iteration
engine.fire_event(Tbptt_Events.TIME_ITERATION_STARTED)
# Forward, backward and
model.train()
optimizer.zero_grad()
if hidden is None:
y_pred_t, hidden = model(x_t)
else:
hidden = _detach_hidden(hidden)
y_pred_t, hidden = model(x_t, hidden)
loss_t = loss_fn(y_pred_t, y_t)
loss_t.backward()
optimizer.step()
# Setting state of engine for consistent behaviour
engine.state.output = loss_t.item()
loss_list.append(loss_t.item())
# Fire event for end of iteration
engine.fire_event(Tbptt_Events.TIME_ITERATION_COMPLETED)
# return average loss over the time splits
return sum(loss_list) / len(loss_list)
engine = Engine(_update)
engine.register_events(*Tbptt_Events)
return engine
|
""" ``ignite.contrib.handlers.param_scheduler`` was moved to ``ignite.handlers.param_scheduler``.
Note:
``ignite.contrib.handlers.param_scheduler`` was moved to ``ignite.handlers.param_scheduler``.
Please refer to :mod:`~ignite.handlers.param_scheduler`.
"""
import warnings
removed_in = "0.6.0"
deprecation_warning = (
f"{__file__} has been moved to /ignite/handlers/param_scheduler.py"
+ (f" and will be removed in version {removed_in}" if removed_in else "")
+ ".\n Please refer to the documentation for more details."
)
warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
from ignite.handlers.param_scheduler import (
ConcatScheduler,
CosineAnnealingScheduler,
create_lr_scheduler_with_warmup,
CyclicalScheduler,
LinearCyclicalScheduler,
LRScheduler,
ParamGroupScheduler,
ParamScheduler,
PiecewiseLinear,
)
__all__ = [
"ConcatScheduler",
"CosineAnnealingScheduler",
"LinearCyclicalScheduler",
"LRScheduler",
"ParamGroupScheduler",
"ParamScheduler",
"PiecewiseLinear",
"CyclicalScheduler",
"create_lr_scheduler_with_warmup",
]
ConcatScheduler = ConcatScheduler
CosineAnnealingScheduler = CosineAnnealingScheduler
LinearCyclicalScheduler = LinearCyclicalScheduler
LRScheduler = LRScheduler
ParamGroupScheduler = ParamGroupScheduler
ParamScheduler = ParamScheduler
PiecewiseLinear = PiecewiseLinear
CyclicalScheduler = CyclicalScheduler
create_lr_scheduler_with_warmup = create_lr_scheduler_with_warmup
|
"""MLflow logger and its helper handlers."""
import warnings
from typing import Any, Callable, List, Optional, Union
from torch.optim import Optimizer
from ignite.contrib.handlers.base_logger import BaseLogger, BaseOptimizerParamsHandler, BaseOutputHandler
from ignite.engine import Engine, Events
from ignite.handlers import global_step_from_engine
__all__ = ["MLflowLogger", "OutputHandler", "OptimizerParamsHandler", "global_step_from_engine"]
class MLflowLogger(BaseLogger):
"""
`MLflow <https://mlflow.org>`_ tracking client handler to log parameters and metrics during the training
and validation.
This class requires `mlflow package <https://github.com/mlflow/mlflow/>`_ to be installed:
.. code-block:: bash
pip install mlflow
Args:
tracking_uri: MLflow tracking uri. See MLflow docs for more details
Examples:
.. code-block:: python
from ignite.contrib.handlers.mlflow_logger import *
# Create a logger
mlflow_logger = MLflowLogger()
# Log experiment parameters:
mlflow_logger.log_params({
"seed": seed,
"batch_size": batch_size,
"model": model.__class__.__name__,
"pytorch version": torch.__version__,
"ignite version": ignite.__version__,
"cuda version": torch.version.cuda,
"device name": torch.cuda.get_device_name(0)
})
# Attach the logger to the trainer to log training loss at each iteration
mlflow_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
output_transform=lambda loss: {'loss': loss}
)
# Attach the logger to the evaluator on the training dataset and log NLL, Accuracy metrics after each epoch
# We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer` instead of `train_evaluator`.
mlflow_logger.attach_output_handler(
train_evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="training",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer),
)
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch of the
# `trainer` instead of `evaluator`.
mlflow_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)),
)
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
mlflow_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer,
param_name='lr' # optional
)
"""
def __init__(self, tracking_uri: Optional[str] = None):
try:
import mlflow
except ImportError:
raise ModuleNotFoundError(
"This contrib module requires mlflow to be installed. "
"Please install it with command: \n pip install mlflow"
)
if tracking_uri is not None:
mlflow.set_tracking_uri(tracking_uri)
self.active_run = mlflow.active_run()
if self.active_run is None:
self.active_run = mlflow.start_run()
def __getattr__(self, attr: Any) -> Any:
import mlflow
return getattr(mlflow, attr)
def close(self) -> None:
import mlflow
mlflow.end_run()
def _create_output_handler(self, *args: Any, **kwargs: Any) -> "OutputHandler":
return OutputHandler(*args, **kwargs)
def _create_opt_params_handler(self, *args: Any, **kwargs: Any) -> "OptimizerParamsHandler":
return OptimizerParamsHandler(*args, **kwargs)
class OutputHandler(BaseOutputHandler):
"""Helper handler to log engine's output and/or metrics.
Args:
tag: common title for all produced plots. For example, 'training'
metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
output_transform: output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{'loss': loss1, 'another_loss': loss2}` to label the plot
with corresponding keys.
global_step_transform: global step transform function to output a desired global step.
Input of the function is `(engine, event_name)`. Output of function should be an integer.
Default is None, global_step based on attached engine. If provided,
uses function output as global_step. To setup global step from another engine, please use
:meth:`~ignite.contrib.handlers.mlflow_logger.global_step_from_engine`.
state_attributes: list of attributes of the ``trainer.state`` to plot.
Examples:
.. code-block:: python
from ignite.contrib.handlers.mlflow_logger import *
# Create a logger
mlflow_logger = MLflowLogger()
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer`:
mlflow_logger.attach(
evaluator,
log_handler=OutputHandler(
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
),
event_name=Events.EPOCH_COMPLETED
)
# or equivalently
mlflow_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
)
Another example, where model is evaluated every 500 iterations:
.. code-block:: python
from ignite.contrib.handlers.mlflow_logger import *
@trainer.on(Events.ITERATION_COMPLETED(every=500))
def evaluate(engine):
evaluator.run(validation_set, max_epochs=1)
mlflow_logger = MLflowLogger()
def global_step_transform(*args, **kwargs):
return trainer.state.iteration
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# every 500 iterations. Since evaluator engine does not have access to the training iteration, we
# provide a global_step_transform to return the trainer.state.iteration for the global_step, each time
# evaluator metrics are plotted on MLflow.
mlflow_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metrics=["nll", "accuracy"],
global_step_transform=global_step_transform
)
Another example where the State Attributes ``trainer.state.alpha`` and ``trainer.state.beta``
are also logged along with the NLL and Accuracy after each iteration:
.. code-block:: python
mlflow_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
metrics=["nll", "accuracy"],
state_attributes=["alpha", "beta"],
)
Example of `global_step_transform`:
.. code-block:: python
def global_step_transform(engine, event_name):
return engine.state.get_event_attrib_value(event_name)
.. versionchanged:: 0.4.7
accepts an optional list of `state_attributes`
"""
def __init__(
self,
tag: str,
metric_names: Optional[Union[str, List[str]]] = None,
output_transform: Optional[Callable] = None,
global_step_transform: Optional[Callable[[Engine, Union[str, Events]], int]] = None,
state_attributes: Optional[List[str]] = None,
) -> None:
super(OutputHandler, self).__init__(
tag, metric_names, output_transform, global_step_transform, state_attributes
)
def __call__(self, engine: Engine, logger: MLflowLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, MLflowLogger):
raise TypeError("Handler 'OutputHandler' works only with MLflowLogger")
rendered_metrics = self._setup_output_metrics_state_attrs(engine)
global_step = self.global_step_transform(engine, event_name)
if not isinstance(global_step, int):
raise TypeError(
f"global_step must be int, got {type(global_step)}."
" Please check the output of global_step_transform."
)
# Additionally recheck metric names as MLflow rejects non-valid names with MLflowException
from mlflow.utils.validation import _VALID_PARAM_AND_METRIC_NAMES
metrics = {}
for keys, value in rendered_metrics.items():
key = " ".join(keys)
metrics[key] = value
for key in list(metrics.keys()):
if not _VALID_PARAM_AND_METRIC_NAMES.match(key):
warnings.warn(
f"MLflowLogger output_handler encountered an invalid metric name '{key}' that "
"will be ignored and not logged to MLflow"
)
del metrics[key]
logger.log_metrics(metrics, step=global_step)
class OptimizerParamsHandler(BaseOptimizerParamsHandler):
"""Helper handler to log optimizer parameters
Args:
optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
param_name: parameter name
tag: common title for all produced plots. For example, 'generator'
Examples:
.. code-block:: python
from ignite.contrib.handlers.mlflow_logger import *
# Create a logger
mlflow_logger = MLflowLogger()
# Optionally, user can specify tracking_uri with corresponds to MLFLOW_TRACKING_URI
# mlflow_logger = MLflowLogger(tracking_uri="uri")
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
mlflow_logger.attach(
trainer,
log_handler=OptimizerParamsHandler(optimizer),
event_name=Events.ITERATION_STARTED
)
# or equivalently
mlflow_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer
)
"""
def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None):
super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
def __call__(self, engine: Engine, logger: MLflowLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, MLflowLogger):
raise TypeError("Handler OptimizerParamsHandler works only with MLflowLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag} " if self.tag else ""
params = {
f"{tag_prefix}{self.param_name} group_{i}": float(param_group[self.param_name])
for i, param_group in enumerate(self.optimizer.param_groups)
}
logger.log_metrics(params, step=global_step)
|
"""Polyaxon logger and its helper handlers."""
from typing import Any, Callable, List, Optional, Union
from torch.optim import Optimizer
from ignite.contrib.handlers.base_logger import BaseLogger, BaseOptimizerParamsHandler, BaseOutputHandler
from ignite.engine import Engine, Events
from ignite.handlers import global_step_from_engine
__all__ = ["PolyaxonLogger", "OutputHandler", "OptimizerParamsHandler", "global_step_from_engine"]
class PolyaxonLogger(BaseLogger):
"""
`Polyaxon tracking client <https://polyaxon.com/>`_ handler to log parameters and metrics during the training
and validation.
This class requires `polyaxon <https://github.com/polyaxon/polyaxon/>`_ package to be installed:
.. code-block:: bash
pip install polyaxon
// If you are using polyaxon v0.x
pip install polyaxon-client
Args:
args: Positional arguments accepted from
`Experiment <https://polyaxon.com/docs/experimentation/tracking/client/>`_.
kwargs: Keyword arguments accepted from
`Experiment <https://polyaxon.com/docs/experimentation/tracking/client/>`_.
Examples:
.. code-block:: python
from ignite.contrib.handlers.polyaxon_logger import *
# Create a logger
plx_logger = PolyaxonLogger()
# Log experiment parameters:
plx_logger.log_inputs(**{
"seed": seed,
"batch_size": batch_size,
"model": model.__class__.__name__,
"pytorch version": torch.__version__,
"ignite version": ignite.__version__,
"cuda version": torch.version.cuda,
"device name": torch.cuda.get_device_name(0)
})
# Attach the logger to the trainer to log training loss at each iteration
plx_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
output_transform=lambda loss: {"loss": loss}
)
# Attach the logger to the evaluator on the training dataset and log NLL, Accuracy metrics after each epoch
# We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer` instead of `train_evaluator`.
plx_logger.attach_output_handler(
train_evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="training",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer),
)
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch of the
# `trainer` instead of `evaluator`.
plx_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)),
)
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
plx_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer,
param_name='lr' # optional
)
# to manually end a run
plx_logger.close()
"""
def __init__(self, *args: Any, **kwargs: Any):
try:
from polyaxon.tracking import Run
self.experiment = Run(*args, **kwargs)
except ImportError:
try:
from polyaxon_client.tracking import Experiment
self.experiment = Experiment(*args, **kwargs)
except ImportError:
raise ModuleNotFoundError(
"This contrib module requires polyaxon to be installed.\n"
"For Polyaxon v1.x please install it with command: \n pip install polyaxon\n"
"For Polyaxon v0.x please install it with command: \n pip install polyaxon-client"
)
def close(self) -> None:
try:
self.experiment.end()
except:
pass
def __getattr__(self, attr: Any) -> Any:
return getattr(self.experiment, attr)
def _create_output_handler(self, *args: Any, **kwargs: Any) -> "OutputHandler":
return OutputHandler(*args, **kwargs)
def _create_opt_params_handler(self, *args: Any, **kwargs: Any) -> "OptimizerParamsHandler":
return OptimizerParamsHandler(*args, **kwargs)
class OutputHandler(BaseOutputHandler):
"""Helper handler to log engine's output and/or metrics.
Args:
tag: common title for all produced plots. For example, "training"
metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
output_transform: output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{"loss": loss1, "another_loss": loss2}` to label the plot
with corresponding keys.
global_step_transform: global step transform function to output a desired global step.
Input of the function is `(engine, event_name)`. Output of function should be an integer.
Default is None, global_step based on attached engine. If provided,
uses function output as global_step. To setup global step from another engine, please use
:meth:`~ignite.contrib.handlers.polyaxon_logger.global_step_from_engine`.
state_attributes: list of attributes of the ``trainer.state`` to plot.
Examples:
.. code-block:: python
from ignite.contrib.handlers.polyaxon_logger import *
# Create a logger
plx_logger = PolyaxonLogger()
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer`:
plx_logger.attach(
evaluator,
log_handler=OutputHandler(
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
),
event_name=Events.EPOCH_COMPLETED
)
# or equivalently
plx_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
)
Another example, where model is evaluated every 500 iterations:
.. code-block:: python
from ignite.contrib.handlers.polyaxon_logger import *
@trainer.on(Events.ITERATION_COMPLETED(every=500))
def evaluate(engine):
evaluator.run(validation_set, max_epochs=1)
plx_logger = PolyaxonLogger()
def global_step_transform(*args, **kwargs):
return trainer.state.iteration
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# every 500 iterations. Since evaluator engine does not have access to the training iteration, we
# provide a global_step_transform to return the trainer.state.iteration for the global_step, each time
# evaluator metrics are plotted on Polyaxon.
plx_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metrics=["nll", "accuracy"],
global_step_transform=global_step_transform
)
Another example where the State Attributes ``trainer.state.alpha`` and ``trainer.state.beta``
are also logged along with the NLL and Accuracy after each iteration:
.. code-block:: python
plx_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
metrics=["nll", "accuracy"],
state_attributes=["alpha", "beta"],
)
Example of `global_step_transform`:
.. code-block:: python
def global_step_transform(engine, event_name):
return engine.state.get_event_attrib_value(event_name)
.. versionchanged:: 0.4.7
accepts an optional list of `state_attributes`
"""
def __init__(
self,
tag: str,
metric_names: Optional[List[str]] = None,
output_transform: Optional[Callable] = None,
global_step_transform: Optional[Callable[[Engine, Union[str, Events]], int]] = None,
state_attributes: Optional[List[str]] = None,
):
super(OutputHandler, self).__init__(
tag, metric_names, output_transform, global_step_transform, state_attributes
)
def __call__(self, engine: Engine, logger: PolyaxonLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, PolyaxonLogger):
raise RuntimeError("Handler 'OutputHandler' works only with PolyaxonLogger")
metrics = self._setup_output_metrics_state_attrs(engine, key_tuple=False)
global_step = self.global_step_transform(engine, event_name)
if not isinstance(global_step, int):
raise TypeError(
f"global_step must be int, got {type(global_step)}."
" Please check the output of global_step_transform."
)
metrics.update({"step": global_step})
logger.log_metrics(**metrics)
class OptimizerParamsHandler(BaseOptimizerParamsHandler):
"""Helper handler to log optimizer parameters
Args:
optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
param_name: parameter name
tag: common title for all produced plots. For example, "generator"
Examples:
.. code-block:: python
from ignite.contrib.handlers.polyaxon_logger import *
# Create a logger
plx_logger = PolyaxonLogger()
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
plx_logger.attach(
trainer,
log_handler=OptimizerParamsHandler(optimizer),
event_name=Events.ITERATION_STARTED
)
# or equivalently
plx_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer
)
"""
def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None):
super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
def __call__(self, engine: Engine, logger: PolyaxonLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, PolyaxonLogger):
raise RuntimeError("Handler OptimizerParamsHandler works only with PolyaxonLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
params = {
f"{tag_prefix}{self.param_name}/group_{i}": float(param_group[self.param_name])
for i, param_group in enumerate(self.optimizer.param_groups)
}
params["step"] = global_step
logger.log_metrics(**params)
|
"""TensorBoard logger and its helper handlers."""
from typing import Any, Callable, List, Optional, Union
from torch.optim import Optimizer
from ignite.contrib.handlers.base_logger import (
BaseLogger,
BaseOptimizerParamsHandler,
BaseOutputHandler,
BaseWeightsHandler,
BaseWeightsScalarHandler,
)
from ignite.engine import Engine, Events
from ignite.handlers import global_step_from_engine
__all__ = [
"TensorboardLogger",
"OptimizerParamsHandler",
"OutputHandler",
"WeightsScalarHandler",
"WeightsHistHandler",
"GradsScalarHandler",
"GradsHistHandler",
"global_step_from_engine",
]
class TensorboardLogger(BaseLogger):
"""
TensorBoard handler to log metrics, model/optimizer parameters, gradients during the training and validation.
By default, this class favors `tensorboardX <https://github.com/lanpa/tensorboardX>`_ package if installed:
.. code-block:: bash
pip install tensorboardX
otherwise, it falls back to using
`PyTorch's SummaryWriter
<https://pytorch.org/docs/stable/tensorboard.html>`_
(>=v1.2.0).
Args:
args: Positional arguments accepted from
`SummaryWriter
<https://pytorch.org/docs/stable/tensorboard.html>`_.
kwargs: Keyword arguments accepted from
`SummaryWriter
<https://pytorch.org/docs/stable/tensorboard.html>`_.
For example, `log_dir` to setup path to the directory where to log.
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Attach the logger to the trainer to log training loss at each iteration
tb_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
output_transform=lambda loss: {"loss": loss}
)
# Attach the logger to the evaluator on the training dataset and log NLL, Accuracy metrics after each epoch
# We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer` instead of `train_evaluator`.
tb_logger.attach_output_handler(
train_evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="training",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer),
)
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch of the
# `trainer` instead of `evaluator`.
tb_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)),
)
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
tb_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer,
param_name='lr' # optional
)
# Attach the logger to the trainer to log model's weights norm after each iteration
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(model)
)
# Attach the logger to the trainer to log model's weights as a histogram after each epoch
tb_logger.attach(
trainer,
event_name=Events.EPOCH_COMPLETED,
log_handler=WeightsHistHandler(model)
)
# Attach the logger to the trainer to log model's gradients norm after each iteration
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(model)
)
# Attach the logger to the trainer to log model's gradients as a histogram after each epoch
tb_logger.attach(
trainer,
event_name=Events.EPOCH_COMPLETED,
log_handler=GradsHistHandler(model)
)
# We need to close the logger when we are done
tb_logger.close()
It is also possible to use the logger as context manager:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
with TensorboardLogger(log_dir="experiments/tb_logs") as tb_logger:
trainer = Engine(update_fn)
# Attach the logger to the trainer to log training loss at each iteration
tb_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
output_transform=lambda loss: {"loss": loss}
)
"""
def __init__(self, *args: Any, **kwargs: Any):
try:
from tensorboardX import SummaryWriter
except ImportError:
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
raise ModuleNotFoundError(
"This contrib module requires either tensorboardX or torch >= 1.2.0. "
"You may install tensorboardX with command: \n pip install tensorboardX \n"
"or upgrade PyTorch using your package manager of choice (pip or conda)."
)
self.writer = SummaryWriter(*args, **kwargs)
def __getattr__(self, attr: Any) -> Any:
return getattr(self.writer, attr)
def close(self) -> None:
self.writer.close()
def _create_output_handler(self, *args: Any, **kwargs: Any) -> "OutputHandler":
return OutputHandler(*args, **kwargs)
def _create_opt_params_handler(self, *args: Any, **kwargs: Any) -> "OptimizerParamsHandler":
return OptimizerParamsHandler(*args, **kwargs)
class OutputHandler(BaseOutputHandler):
"""Helper handler to log engine's output, engine's state attributes and/or metrics
Args:
tag: common title for all produced plots. For example, "training"
metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
output_transform: output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{"loss": loss1, "another_loss": loss2}` to label the plot
with corresponding keys.
global_step_transform: global step transform function to output a desired global step.
Input of the function is `(engine, event_name)`. Output of function should be an integer.
Default is None, global_step based on attached engine. If provided,
uses function output as global_step. To setup global step from another engine, please use
:meth:`~ignite.contrib.handlers.tensorboard_logger.global_step_from_engine`.
state_attributes: list of attributes of the ``trainer.state`` to plot.
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer`:
tb_logger.attach(
evaluator,
log_handler=OutputHandler(
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
),
event_name=Events.EPOCH_COMPLETED
)
# or equivalently
tb_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
)
Another example, where model is evaluated every 500 iterations:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
@trainer.on(Events.ITERATION_COMPLETED(every=500))
def evaluate(engine):
evaluator.run(validation_set, max_epochs=1)
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
def global_step_transform(*args, **kwargs):
return trainer.state.iteration
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# every 500 iterations. Since evaluator engine does not have access to the training iteration, we
# provide a global_step_transform to return the trainer.state.iteration for the global_step, each time
# evaluator metrics are plotted on Tensorboard.
tb_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metrics=["nll", "accuracy"],
global_step_transform=global_step_transform
)
Another example where the State Attributes ``trainer.state.alpha`` and ``trainer.state.beta``
are also logged along with the NLL and Accuracy after each iteration:
.. code-block:: python
tb_logger.attach(
trainer,
log_handler=OutputHandler(
tag="training",
metric_names=["nll", "accuracy"],
state_attributes=["alpha", "beta"],
),
event_name=Events.ITERATION_COMPLETED
)
Example of `global_step_transform`:
.. code-block:: python
def global_step_transform(engine, event_name):
return engine.state.get_event_attrib_value(event_name)
.. versionchanged:: 0.4.7
accepts an optional list of `state_attributes`
"""
def __init__(
self,
tag: str,
metric_names: Optional[List[str]] = None,
output_transform: Optional[Callable] = None,
global_step_transform: Optional[Callable[[Engine, Union[str, Events]], int]] = None,
state_attributes: Optional[List[str]] = None,
):
super(OutputHandler, self).__init__(
tag, metric_names, output_transform, global_step_transform, state_attributes
)
def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, TensorboardLogger):
raise RuntimeError("Handler 'OutputHandler' works only with TensorboardLogger")
metrics = self._setup_output_metrics_state_attrs(engine, key_tuple=False)
global_step = self.global_step_transform(engine, event_name)
if not isinstance(global_step, int):
raise TypeError(
f"global_step must be int, got {type(global_step)}."
" Please check the output of global_step_transform."
)
for key, value in metrics.items():
logger.writer.add_scalar(key, value, global_step)
class OptimizerParamsHandler(BaseOptimizerParamsHandler):
"""Helper handler to log optimizer parameters
Args:
optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
param_name: parameter name
tag: common title for all produced plots. For example, "generator"
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
tb_logger.attach(
trainer,
log_handler=OptimizerParamsHandler(optimizer),
event_name=Events.ITERATION_STARTED
)
# or equivalently
tb_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer
)
"""
def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None):
super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, TensorboardLogger):
raise RuntimeError("Handler OptimizerParamsHandler works only with TensorboardLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
params = {
f"{tag_prefix}{self.param_name}/group_{i}": float(param_group[self.param_name])
for i, param_group in enumerate(self.optimizer.param_groups)
}
for k, v in params.items():
logger.writer.add_scalar(k, v, global_step)
class WeightsScalarHandler(BaseWeightsScalarHandler):
"""Helper handler to log model's weights as scalars.
Handler, upon construction, iterates over named parameters of the model and keep
reference to ones permitted by `whitelist`. Then at every call, applies
reduction function to each parameter, produces a scalar and logs it.
Args:
model: model to log weights
reduction: function to reduce parameters into scalar
tag: common title for all produced plots. For example, "generator"
whitelist: specific weights to log. Should be list of model's submodules
or parameters names, or a callable which gets weight along with its name
and determines if it should be logged. Names should be fully-qualified.
For more information please refer to `PyTorch docs
<https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.
If not given, all of model's weights are logged.
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Attach the logger to the trainer to log model's weights norm after each iteration
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(model, reduction=torch.norm)
)
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Log only `fc` weights
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(
model,
whitelist=['fc']
)
)
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Log weights which have `bias` in their names
def has_bias_in_name(n, p):
return 'bias' in n
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(model, whitelist=has_bias_in_name)
)
.. versionchanged:: 0.4.9
optional argument `whitelist` added.
"""
def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, TensorboardLogger):
raise RuntimeError("Handler 'WeightsScalarHandler' works only with TensorboardLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
for name, p in self.weights:
name = name.replace(".", "/")
logger.writer.add_scalar(
f"{tag_prefix}weights_{self.reduction.__name__}/{name}",
self.reduction(p.data),
global_step,
)
class WeightsHistHandler(BaseWeightsHandler):
"""Helper handler to log model's weights as histograms.
Args:
model: model to log weights
tag: common title for all produced plots. For example, "generator"
whitelist: specific weights to log. Should be list of model's submodules
or parameters names, or a callable which gets weight along with its name
and determines if it should be logged. Names should be fully-qualified.
For more information please refer to `PyTorch docs
<https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.
If not given, all of model's weights are logged.
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Attach the logger to the trainer to log model's weights norm after each iteration
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsHistHandler(model)
)
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Log weights of `fc` layer
weights = ['fc']
# Attach the logger to the trainer to log weights norm after each iteration
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsHistHandler(model, whitelist=weights)
)
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Log weights which name include 'conv'.
weight_selector = lambda name, p: 'conv' in name
# Attach the logger to the trainer to log weights norm after each iteration
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsHistHandler(model, whitelist=weight_selector)
)
.. versionchanged:: 0.4.9
optional argument `whitelist` added.
"""
def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, TensorboardLogger):
raise RuntimeError("Handler 'WeightsHistHandler' works only with TensorboardLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
for name, p in self.weights:
name = name.replace(".", "/")
logger.writer.add_histogram(
tag=f"{tag_prefix}weights/{name}", values=p.data.cpu().numpy(), global_step=global_step
)
class GradsScalarHandler(BaseWeightsScalarHandler):
"""Helper handler to log model's gradients as scalars.
Handler, upon construction, iterates over named parameters of the model and keep
reference to ones permitted by the `whitelist`. Then at every call, applies
reduction function to each parameter's gradient, produces a scalar and logs it.
Args:
model: model to log weights
reduction: function to reduce parameters into scalar
tag: common title for all produced plots. For example, "generator"
whitelist: specific gradients to log. Should be list of model's submodules
or parameters names, or a callable which gets weight along with its name
and determines if its gradient should be logged. Names should be
fully-qualified. For more information please refer to `PyTorch docs
<https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.
If not given, all of model's gradients are logged.
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Attach the logger to the trainer to log model's gradients norm after each iteration
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(model, reduction=torch.norm)
)
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Log gradient of `base`
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(
model,
reduction=torch.norm,
whitelist=['base']
)
)
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Log gradient of weights which belong to a `fc` layer
def is_in_fc_layer(n, p):
return 'fc' in n
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(model, whitelist=is_in_fc_layer)
)
.. versionchanged:: 0.4.9
optional argument `whitelist` added.
"""
def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, TensorboardLogger):
raise RuntimeError("Handler 'GradsScalarHandler' works only with TensorboardLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
for name, p in self.weights:
if p.grad is None:
continue
name = name.replace(".", "/")
logger.writer.add_scalar(
f"{tag_prefix}grads_{self.reduction.__name__}/{name}", self.reduction(p.grad), global_step
)
class GradsHistHandler(BaseWeightsHandler):
"""Helper handler to log model's gradients as histograms.
Args:
model: model to log weights
tag: common title for all produced plots. For example, "generator"
whitelist: specific gradients to log. Should be list of model's submodules
or parameters names, or a callable which gets weight along with its name
and determines if its gradient should be logged. Names should be
fully-qualified. For more information please refer to `PyTorch docs
<https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.
If not given, all of model's gradients are logged.
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Attach the logger to the trainer to log model's weights norm after each iteration
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsHistHandler(model)
)
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Log gradient of `fc.bias`
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsHistHandler(model, whitelist=['fc.bias'])
)
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Log gradient of weights which have shape (2, 1)
def has_shape_2_1(n, p):
return p.shape == (2,1)
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsHistHandler(model, whitelist=has_shape_2_1)
)
.. versionchanged:: 0.4.9
optional argument `whitelist` added.
"""
def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, TensorboardLogger):
raise RuntimeError("Handler 'GradsHistHandler' works only with TensorboardLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
for name, p in self.weights:
if p.grad is None:
continue
name = name.replace(".", "/")
logger.writer.add_histogram(
tag=f"{tag_prefix}grads/{name}", values=p.grad.cpu().numpy(), global_step=global_step
)
|
"""WandB logger and its helper handlers."""
from typing import Any, Callable, List, Optional, Union
from torch.optim import Optimizer
from ignite.contrib.handlers.base_logger import BaseLogger, BaseOptimizerParamsHandler, BaseOutputHandler
from ignite.engine import Engine, Events
from ignite.handlers import global_step_from_engine
__all__ = ["WandBLogger", "OutputHandler", "OptimizerParamsHandler", "global_step_from_engine"]
class WandBLogger(BaseLogger):
"""`Weights & Biases <https://wandb.ai/site>`_ handler to log metrics, model/optimizer parameters, gradients
during training and validation. It can also be used to log model checkpoints to the Weights & Biases cloud.
.. code-block:: bash
pip install wandb
This class is also a wrapper for the wandb module. This means that you can call any wandb function using
this wrapper. See examples on how to save model parameters and gradients.
Args:
args: Positional arguments accepted by `wandb.init`.
kwargs: Keyword arguments accepted by `wandb.init`.
Please see `wandb.init <https://docs.wandb.ai/library/init>`_ for documentation of possible parameters.
Examples:
.. code-block:: python
from ignite.contrib.handlers.wandb_logger import *
# Create a logger. All parameters are optional. See documentation
# on wandb.init for details.
wandb_logger = WandBLogger(
entity="shared",
project="pytorch-ignite-integration",
name="cnn-mnist",
config={"max_epochs": 10},
tags=["pytorch-ignite", "minst"]
)
# Attach the logger to the trainer to log training loss at each iteration
wandb_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
output_transform=lambda loss: {"loss": loss}
)
# Attach the logger to the evaluator on the training dataset and log NLL, Accuracy metrics after each epoch
# We setup `global_step_transform=lambda *_: trainer.state.iteration` to take iteration value
# of the `trainer`:
wandb_logger.attach_output_handler(
train_evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="training",
metric_names=["nll", "accuracy"],
global_step_transform=lambda *_: trainer.state.iteration,
)
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=lambda *_: trainer.state.iteration` to take iteration value
# of the `trainer` instead of `evaluator`.
wandb_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=lambda *_: trainer.state.iteration,
)
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
wandb_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer,
param_name='lr' # optional
)
# We need to close the logger when we are done
wandb_logger.close()
If you want to log model gradients, the model call graph, etc., use the logger as wrapper of wandb. Refer
to the documentation of wandb.watch for details:
.. code-block:: python
wandb_logger = WandBLogger(
entity="shared",
project="pytorch-ignite-integration",
name="cnn-mnist",
config={"max_epochs": 10},
tags=["pytorch-ignite", "minst"]
)
model = torch.nn.Sequential(...)
wandb_logger.watch(model)
For model checkpointing, Weights & Biases creates a local run dir, and automatically synchronizes all
files saved there at the end of the run. You can just use the `wandb_logger.run.dir` as path for the
`ModelCheckpoint`:
.. code-block:: python
from ignite.handlers import ModelCheckpoint
def score_function(engine):
return engine.state.metrics['accuracy']
model_checkpoint = ModelCheckpoint(
wandb_logger.run.dir, n_saved=2, filename_prefix='best',
require_empty=False, score_function=score_function,
score_name="validation_accuracy",
global_step_transform=global_step_from_engine(trainer)
)
evaluator.add_event_handler(Events.COMPLETED, model_checkpoint, {'model': model})
"""
def __init__(self, *args: Any, **kwargs: Any):
try:
import wandb
self._wandb = wandb
except ImportError:
raise ModuleNotFoundError(
"This contrib module requires wandb to be installed. "
"You man install wandb with the command:\n pip install wandb\n"
)
if kwargs.get("init", True):
wandb.init(*args, **kwargs)
def __getattr__(self, attr: Any) -> Any:
return getattr(self._wandb, attr)
def close(self) -> None:
self._wandb.finish()
def _create_output_handler(self, *args: Any, **kwargs: Any) -> "OutputHandler":
return OutputHandler(*args, **kwargs)
def _create_opt_params_handler(self, *args: Any, **kwargs: Any) -> "OptimizerParamsHandler":
return OptimizerParamsHandler(*args, **kwargs)
class OutputHandler(BaseOutputHandler):
"""Helper handler to log engine's output and/or metrics
Args:
tag: common title for all produced plots. For example, "training"
metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
output_transform: output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{"loss": loss1, "another_loss": loss2}` to label the plot
with corresponding keys.
global_step_transform: global step transform function to output a desired global step.
Input of the function is `(engine, event_name)`. Output of function should be an integer.
Default is None, global_step based on attached engine. If provided,
uses function output as global_step. To setup global step from another engine, please use
:meth:`~ignite.contrib.handlers.wandb_logger.global_step_from_engine`.
sync: If set to False, process calls to log in a seperate thread. Default (None) uses whatever
the default value of wandb.log.
Examples:
.. code-block:: python
from ignite.contrib.handlers.wandb_logger import *
# Create a logger. All parameters are optional. See documentation
# on wandb.init for details.
wandb_logger = WandBLogger(
entity="shared",
project="pytorch-ignite-integration",
name="cnn-mnist",
config={"max_epochs": 10},
tags=["pytorch-ignite", "minst"]
)
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=lambda *_: trainer.state.iteration,` to take iteration value
# of the `trainer`:
wandb_logger.attach(
evaluator,
log_handler=OutputHandler(
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=lambda *_: trainer.state.iteration,
),
event_name=Events.EPOCH_COMPLETED
)
# or equivalently
wandb_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=lambda *_: trainer.state.iteration,
)
Another example, where model is evaluated every 500 iterations:
.. code-block:: python
from ignite.contrib.handlers.wandb_logger import *
@trainer.on(Events.ITERATION_COMPLETED(every=500))
def evaluate(engine):
evaluator.run(validation_set, max_epochs=1)
# Create a logger. All parameters are optional. See documentation
# on wandb.init for details.
wandb_logger = WandBLogger(
entity="shared",
project="pytorch-ignite-integration",
name="cnn-mnist",
config={"max_epochs": 10},
tags=["pytorch-ignite", "minst"]
)
def global_step_transform(*args, **kwargs):
return trainer.state.iteration
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# every 500 iterations. Since evaluator engine does not have access to the training iteration, we
# provide a global_step_transform to return the trainer.state.iteration for the global_step, each time
# evaluator metrics are plotted on Weights & Biases.
wandb_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metrics=["nll", "accuracy"],
global_step_transform=global_step_transform
)
Another example where the State Attributes ``trainer.state.alpha`` and ``trainer.state.beta``
are also logged along with the NLL and Accuracy after each iteration:
.. code-block:: python
wandb_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
metrics=["nll", "accuracy"],
state_attributes=["alpha", "beta"],
)
Example of `global_step_transform`:
.. code-block:: python
def global_step_transform(engine, event_name):
return engine.state.get_event_attrib_value(event_name)
.. versionchanged:: 0.4.7
accepts an optional list of `state_attributes`
"""
def __init__(
self,
tag: str,
metric_names: Optional[List[str]] = None,
output_transform: Optional[Callable] = None,
global_step_transform: Optional[Callable[[Engine, Union[str, Events]], int]] = None,
sync: Optional[bool] = None,
state_attributes: Optional[List[str]] = None,
):
super().__init__(tag, metric_names, output_transform, global_step_transform, state_attributes)
self.sync = sync
def __call__(self, engine: Engine, logger: WandBLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, WandBLogger):
raise RuntimeError(f"Handler '{self.__class__.__name__}' works only with WandBLogger.")
global_step = self.global_step_transform(engine, event_name)
if not isinstance(global_step, int):
raise TypeError(
f"global_step must be int, got {type(global_step)}."
" Please check the output of global_step_transform."
)
metrics = self._setup_output_metrics_state_attrs(engine, log_text=True, key_tuple=False)
logger.log(metrics, step=global_step, sync=self.sync)
class OptimizerParamsHandler(BaseOptimizerParamsHandler):
"""Helper handler to log optimizer parameters
Args:
optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
param_name: parameter name
tag: common title for all produced plots. For example, "generator"
sync: If set to False, process calls to log in a seperate thread. Default (None) uses whatever
the default value of wandb.log.
Examples:
.. code-block:: python
from ignite.contrib.handlers.wandb_logger import *
# Create a logger. All parameters are optional. See documentation
# on wandb.init for details.
wandb_logger = WandBLogger(
entity="shared",
project="pytorch-ignite-integration",
name="cnn-mnist",
config={"max_epochs": 10},
tags=["pytorch-ignite", "minst"]
)
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
wandb_logger.attach(
trainer,
log_handler=OptimizerParamsHandler(optimizer),
event_name=Events.ITERATION_STARTED
)
# or equivalently
wandb_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer
)
"""
def __init__(
self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None, sync: Optional[bool] = None
):
super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
self.sync = sync
def __call__(self, engine: Engine, logger: WandBLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, WandBLogger):
raise RuntimeError("Handler OptimizerParamsHandler works only with WandBLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
params = {
f"{tag_prefix}{self.param_name}/group_{i}": float(param_group[self.param_name])
for i, param_group in enumerate(self.optimizer.param_groups)
}
logger.log(params, step=global_step, sync=self.sync)
|
"""Visdom logger and its helper handlers."""
import os
from typing import Any, Callable, cast, Dict, List, Optional, Union
import torch
import torch.nn as nn
from torch.optim import Optimizer
from ignite.contrib.handlers.base_logger import (
BaseLogger,
BaseOptimizerParamsHandler,
BaseOutputHandler,
BaseWeightsScalarHandler,
)
from ignite.engine import Engine, Events
from ignite.handlers import global_step_from_engine
__all__ = [
"VisdomLogger",
"OptimizerParamsHandler",
"OutputHandler",
"WeightsScalarHandler",
"GradsScalarHandler",
"global_step_from_engine",
]
class VisdomLogger(BaseLogger):
"""
VisdomLogger handler to log metrics, model/optimizer parameters, gradients during the training and validation.
This class requires `visdom <https://github.com/fossasia/visdom/>`_ package to be installed:
.. code-block:: bash
pip install git+https://github.com/fossasia/visdom.git
Args:
server: visdom server URL. It can be also specified by environment variable `VISDOM_SERVER_URL`
port: visdom server's port. It can be also specified by environment variable `VISDOM_PORT`
num_workers: number of workers to use in `concurrent.futures.ThreadPoolExecutor` to post data to
visdom server. Default, `num_workers=1`. If `num_workers=0` and logger uses the main thread. If using
Python 2.7 and `num_workers>0` the package `futures` should be installed: `pip install futures`
kwargs: kwargs to pass into
`visdom.Visdom <https://github.com/fossasia/visdom#visdom-arguments-python-only>`_.
Note:
We can also specify username/password using environment variables: VISDOM_USERNAME, VISDOM_PASSWORD
.. warning::
Frequent logging, e.g. when logger is attached to `Events.ITERATION_COMPLETED`, can slow down the run if the
main thread is used to send the data to visdom server (`num_workers=0`). To avoid this situation we can either
log less frequently or set `num_workers=1`.
Examples:
.. code-block:: python
from ignite.contrib.handlers.visdom_logger import *
# Create a logger
vd_logger = VisdomLogger()
# Attach the logger to the trainer to log training loss at each iteration
vd_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
output_transform=lambda loss: {"loss": loss}
)
# Attach the logger to the evaluator on the training dataset and log NLL, Accuracy metrics after each epoch
# We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer` instead of `train_evaluator`.
vd_logger.attach_output_handler(
train_evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="training",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer),
)
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch of the
# `trainer` instead of `evaluator`.
vd_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)),
)
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
vd_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer,
param_name='lr' # optional
)
# Attach the logger to the trainer to log model's weights norm after each iteration
vd_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(model)
)
# Attach the logger to the trainer to log model's gradients norm after each iteration
vd_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(model)
)
# We need to close the logger with we are done
vd_logger.close()
It is also possible to use the logger as context manager:
.. code-block:: python
from ignite.contrib.handlers.visdom_logger import *
with VisdomLogger() as vd_logger:
trainer = Engine(update_fn)
# Attach the logger to the trainer to log training loss at each iteration
vd_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
output_transform=lambda loss: {"loss": loss}
)
.. versionchanged:: 0.4.7
accepts an optional list of `state_attributes`
"""
def __init__(
self,
server: Optional[str] = None,
port: Optional[int] = None,
num_workers: int = 1,
raise_exceptions: bool = True,
**kwargs: Any,
):
try:
import visdom
except ImportError:
raise ModuleNotFoundError(
"This contrib module requires visdom package. "
"Please install it with command:\n"
"pip install git+https://github.com/fossasia/visdom.git"
)
if num_workers > 0:
# If visdom is installed, one of its dependencies `tornado`
# requires also `futures` to be installed.
# Let's check anyway if we can import it.
try:
from concurrent.futures import ThreadPoolExecutor
except ImportError:
raise ModuleNotFoundError(
"This contrib module requires concurrent.futures module"
"Please install it with command:\n"
"pip install futures"
)
if server is None:
server = cast(str, os.environ.get("VISDOM_SERVER_URL", "localhost"))
if port is None:
port = int(os.environ.get("VISDOM_PORT", 8097))
if "username" not in kwargs:
username = os.environ.get("VISDOM_USERNAME", None)
kwargs["username"] = username
if "password" not in kwargs:
password = os.environ.get("VISDOM_PASSWORD", None)
kwargs["password"] = password
self.vis = visdom.Visdom(server=server, port=port, raise_exceptions=raise_exceptions, **kwargs)
if not self.vis.offline and not self.vis.check_connection(): # type: ignore[attr-defined]
raise RuntimeError(f"Failed to connect to Visdom server at {server}. Did you run python -m visdom.server ?")
self.executor: Union[_DummyExecutor, "ThreadPoolExecutor"] = _DummyExecutor()
if num_workers > 0:
self.executor = ThreadPoolExecutor(max_workers=num_workers)
def _save(self) -> None:
self.vis.save([self.vis.env]) # type: ignore[attr-defined]
def close(self) -> None:
self.executor.shutdown()
self.vis.close()
def _create_output_handler(self, *args: Any, **kwargs: Any) -> "OutputHandler":
return OutputHandler(*args, **kwargs)
def _create_opt_params_handler(self, *args: Any, **kwargs: Any) -> "OptimizerParamsHandler":
return OptimizerParamsHandler(*args, **kwargs)
class _BaseVisDrawer:
def __init__(self, show_legend: bool = False):
self.windows: Dict[str, Any] = {}
self.show_legend = show_legend
def add_scalar(
self, logger: VisdomLogger, k: str, v: Union[str, float, torch.Tensor], event_name: Any, global_step: int
) -> None:
"""
Helper method to log a scalar with VisdomLogger.
Args:
logger: visdom logger
k: scalar name which is used to set window title and y-axis label
v: scalar value, y-axis value
event_name: Event name which is used to setup x-axis label. Valid events are from
:class:`~ignite.engine.events.Events` or any `event_name` added by
:meth:`~ignite.engine.engine.Engine.register_events`.
global_step: global step, x-axis value
"""
if k not in self.windows:
self.windows[k] = {
"win": None,
"opts": {"title": k, "xlabel": str(event_name), "ylabel": k, "showlegend": self.show_legend},
}
update = None if self.windows[k]["win"] is None else "append"
kwargs = {
"X": [global_step],
"Y": [v],
"env": logger.vis.env, # type: ignore[attr-defined]
"win": self.windows[k]["win"],
"update": update,
"opts": self.windows[k]["opts"],
"name": k,
}
future = logger.executor.submit(logger.vis.line, **kwargs)
if self.windows[k]["win"] is None:
self.windows[k]["win"] = future.result()
class OutputHandler(BaseOutputHandler, _BaseVisDrawer):
"""Helper handler to log engine's output and/or metrics
Args:
tag: common title for all produced plots. For example, "training"
metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
output_transform: output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{"loss": loss1, "another_loss": loss2}` to label the plot
with corresponding keys.
global_step_transform: global step transform function to output a desired global step.
Input of the function is `(engine, event_name)`. Output of function should be an integer.
Default is None, global_step based on attached engine. If provided,
uses function output as global_step. To setup global step from another engine, please use
:meth:`~ignite.contrib.handlers.visdom_logger.global_step_from_engine`.
show_legend: flag to show legend in the window
state_attributes: list of attributes of the ``trainer.state`` to plot.
Examples:
.. code-block:: python
from ignite.contrib.handlers.visdom_logger import *
# Create a logger
vd_logger = VisdomLogger()
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer`:
vd_logger.attach(
evaluator,
log_handler=OutputHandler(
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
),
event_name=Events.EPOCH_COMPLETED
)
# or equivalently
vd_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
)
Another example, where model is evaluated every 500 iterations:
.. code-block:: python
from ignite.contrib.handlers.visdom_logger import *
@trainer.on(Events.ITERATION_COMPLETED(every=500))
def evaluate(engine):
evaluator.run(validation_set, max_epochs=1)
vd_logger = VisdomLogger()
def global_step_transform(*args, **kwargs):
return trainer.state.iteration
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# every 500 iterations. Since evaluator engine does not have access to the training iteration, we
# provide a global_step_transform to return the trainer.state.iteration for the global_step, each time
# evaluator metrics are plotted on Visdom.
vd_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metrics=["nll", "accuracy"],
global_step_transform=global_step_transform
)
Another example where the State Attributes ``trainer.state.alpha`` and ``trainer.state.beta``
are also logged along with the NLL and Accuracy after each iteration:
.. code-block:: python
vd_logger.attach(
trainer,
log_handler=OutputHandler(
tag="training",
metric_names=["nll", "accuracy"],
state_attributes=["alpha", "beta"],
),
event_name=Events.ITERATION_COMPLETED
)
Example of `global_step_transform`:
.. code-block:: python
def global_step_transform(engine, event_name):
return engine.state.get_event_attrib_value(event_name)
"""
def __init__(
self,
tag: str,
metric_names: Optional[str] = None,
output_transform: Optional[Callable] = None,
global_step_transform: Optional[Callable[[Engine, Union[str, Events]], int]] = None,
show_legend: bool = False,
state_attributes: Optional[List[str]] = None,
):
super(OutputHandler, self).__init__(
tag, metric_names, output_transform, global_step_transform, state_attributes
)
_BaseVisDrawer.__init__(self, show_legend=show_legend)
def __call__(self, engine: Engine, logger: VisdomLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, VisdomLogger):
raise RuntimeError("Handler 'OutputHandler' works only with VisdomLogger")
metrics = self._setup_output_metrics_state_attrs(engine, key_tuple=False)
global_step = self.global_step_transform(engine, event_name)
if not isinstance(global_step, int):
raise TypeError(
f"global_step must be int, got {type(global_step)}."
" Please check the output of global_step_transform."
)
for key, value in metrics.items():
self.add_scalar(logger, key, value, event_name, global_step)
logger._save()
class OptimizerParamsHandler(BaseOptimizerParamsHandler, _BaseVisDrawer):
"""Helper handler to log optimizer parameters
Args:
optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
param_name: parameter name
tag: common title for all produced plots. For example, "generator"
show_legend: flag to show legend in the window
Examples:
.. code-block:: python
from ignite.contrib.handlers.visdom_logger import *
# Create a logger
vb_logger = VisdomLogger()
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
vd_logger.attach(
trainer,
log_handler=OptimizerParamsHandler(optimizer),
event_name=Events.ITERATION_STARTED
)
# or equivalently
vd_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer
)
"""
def __init__(
self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None, show_legend: bool = False
):
super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
_BaseVisDrawer.__init__(self, show_legend=show_legend)
def __call__(self, engine: Engine, logger: VisdomLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, VisdomLogger):
raise RuntimeError("Handler OptimizerParamsHandler works only with VisdomLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
params = {
f"{tag_prefix}{self.param_name}/group_{i}": float(param_group[self.param_name])
for i, param_group in enumerate(self.optimizer.param_groups)
}
for k, v in params.items():
self.add_scalar(logger, k, v, event_name, global_step)
logger._save()
class WeightsScalarHandler(BaseWeightsScalarHandler, _BaseVisDrawer):
"""Helper handler to log model's weights as scalars.
Handler iterates over named parameters of the model, applies reduction function to each parameter
produce a scalar and then logs the scalar.
Args:
model: model to log weights
reduction: function to reduce parameters into scalar
tag: common title for all produced plots. For example, "generator"
show_legend: flag to show legend in the window
Examples:
.. code-block:: python
from ignite.contrib.handlers.visdom_logger import *
# Create a logger
vd_logger = VisdomLogger()
# Attach the logger to the trainer to log model's weights norm after each iteration
vd_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(model, reduction=torch.norm)
)
"""
def __init__(
self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None, show_legend: bool = False
):
super(WeightsScalarHandler, self).__init__(model, reduction, tag=tag)
_BaseVisDrawer.__init__(self, show_legend=show_legend)
def __call__(self, engine: Engine, logger: VisdomLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, VisdomLogger):
raise RuntimeError("Handler 'WeightsScalarHandler' works only with VisdomLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
for name, p in self.model.named_parameters():
name = name.replace(".", "/")
k = f"{tag_prefix}weights_{self.reduction.__name__}/{name}"
v = self.reduction(p.data)
self.add_scalar(logger, k, v, event_name, global_step)
logger._save()
class GradsScalarHandler(BaseWeightsScalarHandler, _BaseVisDrawer):
"""Helper handler to log model's gradients as scalars.
Handler iterates over the gradients of named parameters of the model, applies reduction function to each parameter
produce a scalar and then logs the scalar.
Args:
model: model to log weights
reduction: function to reduce parameters into scalar
tag: common title for all produced plots. For example, "generator"
show_legend: flag to show legend in the window
Examples:
.. code-block:: python
from ignite.contrib.handlers.visdom_logger import *
# Create a logger
vd_logger = VisdomLogger()
# Attach the logger to the trainer to log model's weights norm after each iteration
vd_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(model, reduction=torch.norm)
)
"""
def __init__(
self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None, show_legend: bool = False
):
super(GradsScalarHandler, self).__init__(model, reduction, tag)
_BaseVisDrawer.__init__(self, show_legend=show_legend)
def __call__(self, engine: Engine, logger: VisdomLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, VisdomLogger):
raise RuntimeError("Handler 'GradsScalarHandler' works only with VisdomLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
for name, p in self.model.named_parameters():
if p.grad is None:
continue
name = name.replace(".", "/")
k = f"{tag_prefix}grads_{self.reduction.__name__}/{name}"
v = self.reduction(p.grad)
self.add_scalar(logger, k, v, event_name, global_step)
logger._save()
class _DummyExecutor:
class _DummyFuture:
def __init__(self, result: Any) -> None:
self._output = result
def result(self) -> Any:
return self._output
def __init__(self, *args: Any, **kwargs: Any) -> None:
pass
def submit(self, fn: Callable, **kwargs: Any) -> "_DummyFuture":
return _DummyExecutor._DummyFuture(fn(**kwargs))
def shutdown(self, *args: Any, **kwargs: Any) -> None:
pass
|
""" ``ignite.contrib.handlers.lr_finder`` was moved to ``ignite.handlers.lr_finder``.
Note:
``ignite.contrib.handlers.lr_finder`` was moved to ``ignite.handlers.lr_finder``.
Please refer to :mod:`~ignite.handlers.lr_finder`.
"""
import warnings
removed_in = "0.6.0"
deprecation_warning = (
f"{__file__} has been moved to /ignite/handlers/lr_finder.py"
+ (f" and will be removed in version {removed_in}" if removed_in else "")
+ ".\n Please refer to the documentation for more details."
)
warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
from ignite.handlers.lr_finder import FastaiLRFinder
__all__ = [
"FastaiLRFinder",
]
FastaiLRFinder = FastaiLRFinder
|
from ignite.contrib.handlers.clearml_logger import ClearMLLogger
from ignite.contrib.handlers.mlflow_logger import MLflowLogger
from ignite.contrib.handlers.neptune_logger import NeptuneLogger
from ignite.contrib.handlers.polyaxon_logger import PolyaxonLogger
from ignite.contrib.handlers.tensorboard_logger import TensorboardLogger
from ignite.contrib.handlers.tqdm_logger import ProgressBar
from ignite.contrib.handlers.visdom_logger import VisdomLogger
from ignite.contrib.handlers.wandb_logger import WandBLogger
from ignite.handlers import EpochOutputStore, global_step_from_engine # ref # ref
from ignite.handlers.lr_finder import FastaiLRFinder
from ignite.handlers.param_scheduler import (
ConcatScheduler,
CosineAnnealingScheduler,
create_lr_scheduler_with_warmup,
LinearCyclicalScheduler,
LRScheduler,
ParamGroupScheduler,
PiecewiseLinear,
)
from ignite.handlers.time_profilers import BasicTimeProfiler, HandlersTimeProfiler
|
"""Base logger and its helper handlers."""
import numbers
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import torch
import torch.nn as nn
from torch.optim import Optimizer
from ignite.engine import Engine, Events, EventsList, State
from ignite.engine.events import CallableEventWithFilter, RemovableEventHandle
class BaseHandler(metaclass=ABCMeta):
"""Base handler for defining various useful handlers."""
@abstractmethod
def __call__(self, engine: Engine, logger: Any, event_name: Union[str, Events]) -> None:
pass
class BaseWeightsHandler(BaseHandler):
"""
Base handler for logging weights or their gradients.
"""
def __init__(
self,
model: nn.Module,
tag: Optional[str] = None,
whitelist: Optional[Union[List[str], Callable[[str, nn.Parameter], bool]]] = None,
):
if not isinstance(model, torch.nn.Module):
raise TypeError(f"Argument model should be of type torch.nn.Module, but given {type(model)}")
self.model = model
self.tag = tag
weights = {}
if whitelist is None:
weights = dict(model.named_parameters())
elif callable(whitelist):
for n, p in model.named_parameters():
if whitelist(n, p):
weights[n] = p
else:
for n, p in model.named_parameters():
for item in whitelist:
if n.startswith(item):
weights[n] = p
self.weights = weights.items()
class BaseOptimizerParamsHandler(BaseHandler):
"""
Base handler for logging optimizer parameters
"""
def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None):
if not (
isinstance(optimizer, Optimizer)
or (hasattr(optimizer, "param_groups") and isinstance(optimizer.param_groups, Sequence))
):
raise TypeError(
"Argument optimizer should be torch.optim.Optimizer or has attribute 'param_groups' as list/tuple, "
f"but given {type(optimizer)}"
)
self.optimizer = optimizer
self.param_name = param_name
self.tag = tag
class BaseOutputHandler(BaseHandler):
"""
Helper handler to log engine's output and/or metrics
"""
def __init__(
self,
tag: str,
metric_names: Optional[Union[str, List[str]]] = None,
output_transform: Optional[Callable] = None,
global_step_transform: Optional[Callable[[Engine, Union[str, Events]], int]] = None,
state_attributes: Optional[List[str]] = None,
):
if metric_names is not None:
if not (isinstance(metric_names, list) or (isinstance(metric_names, str) and metric_names == "all")):
raise TypeError(
f"metric_names should be either a list or equal 'all', got {type(metric_names)} instead."
)
if output_transform is not None and not callable(output_transform):
raise TypeError(f"output_transform should be a function, got {type(output_transform)} instead.")
if output_transform is None and metric_names is None and state_attributes is None:
raise ValueError("Either metric_names, output_transform or state_attributes should be defined")
if global_step_transform is not None and not callable(global_step_transform):
raise TypeError(f"global_step_transform should be a function, got {type(global_step_transform)} instead.")
if global_step_transform is None:
def global_step_transform(engine: Engine, event_name: Union[str, Events]) -> int:
return engine.state.get_event_attrib_value(event_name)
self.tag = tag
self.metric_names = metric_names
self.output_transform = output_transform
self.global_step_transform = global_step_transform
self.state_attributes = state_attributes
def _setup_output_metrics_state_attrs(
self, engine: Engine, log_text: Optional[bool] = False, key_tuple: Optional[bool] = True
) -> Dict[Any, Any]:
"""Helper method to setup metrics and state attributes to log"""
metrics_state_attrs = OrderedDict()
if self.metric_names is not None:
if isinstance(self.metric_names, str) and self.metric_names == "all":
metrics_state_attrs = OrderedDict(engine.state.metrics)
else:
for name in self.metric_names:
if name not in engine.state.metrics:
warnings.warn(
f"Provided metric name '{name}' is missing "
f"in engine's state metrics: {list(engine.state.metrics.keys())}"
)
continue
metrics_state_attrs[name] = engine.state.metrics[name]
if self.output_transform is not None:
output_dict = self.output_transform(engine.state.output)
if not isinstance(output_dict, dict):
output_dict = {"output": output_dict}
metrics_state_attrs.update(output_dict)
if self.state_attributes is not None:
metrics_state_attrs.update({name: getattr(engine.state, name, None) for name in self.state_attributes})
metrics_state_attrs_dict: Dict[Any, Union[str, float, numbers.Number]] = OrderedDict()
def key_tuple_tf(tag: str, name: str, *args: str) -> Tuple[str, ...]:
return (tag, name) + args
def key_str_tf(tag: str, name: str, *args: str) -> str:
return "/".join((tag, name) + args)
key_tf = key_tuple_tf if key_tuple else key_str_tf
for name, value in metrics_state_attrs.items():
if isinstance(value, numbers.Number):
metrics_state_attrs_dict[key_tf(self.tag, name)] = value
elif isinstance(value, torch.Tensor) and value.ndimension() == 0:
metrics_state_attrs_dict[key_tf(self.tag, name)] = value.item()
elif isinstance(value, torch.Tensor) and value.ndimension() == 1:
for i, v in enumerate(value):
metrics_state_attrs_dict[key_tf(self.tag, name, str(i))] = v.item()
else:
if isinstance(value, str) and log_text:
metrics_state_attrs_dict[key_tf(self.tag, name)] = value
else:
warnings.warn(f"Logger output_handler can not log metrics value type {type(value)}")
return metrics_state_attrs_dict
class BaseWeightsScalarHandler(BaseWeightsHandler):
"""
Helper handler to log model's weights or gradients as scalars.
"""
def __init__(
self,
model: nn.Module,
reduction: Callable[[torch.Tensor], Union[float, torch.Tensor]] = torch.norm,
tag: Optional[str] = None,
whitelist: Optional[Union[List[str], Callable[[str, nn.Parameter], bool]]] = None,
):
super(BaseWeightsScalarHandler, self).__init__(model, tag=tag, whitelist=whitelist)
if not callable(reduction):
raise TypeError(f"Argument reduction should be callable, but given {type(reduction)}")
def _is_0D_tensor(t: Any) -> bool:
return isinstance(t, torch.Tensor) and t.ndimension() == 0
# Test reduction function on a tensor
o = reduction(torch.ones(4, 2))
if not (isinstance(o, numbers.Number) or _is_0D_tensor(o)):
raise TypeError(f"Output of the reduction function should be a scalar, but got {type(o)}")
self.reduction = reduction
class BaseLogger(metaclass=ABCMeta):
"""
Base logger handler. See implementations: TensorboardLogger, VisdomLogger, PolyaxonLogger, MLflowLogger, ...
"""
def attach(
self,
engine: Engine,
log_handler: Callable,
event_name: Union[str, Events, CallableEventWithFilter, EventsList],
*args: Any,
**kwargs: Any,
) -> RemovableEventHandle:
"""Attach the logger to the engine and execute `log_handler` function at `event_name` events.
Args:
engine: engine object.
log_handler: a logging handler to execute
event_name: event to attach the logging handler to. Valid events are from
:class:`~ignite.engine.events.Events` or :class:`~ignite.engine.events.EventsList` or any `event_name`
added by :meth:`~ignite.engine.engine.Engine.register_events`.
args: args forwarded to the `log_handler` method
kwargs: kwargs forwarded to the `log_handler` method
Returns:
:class:`~ignite.engine.events.RemovableEventHandle`, which can be used to remove the handler.
"""
if isinstance(event_name, EventsList):
for name in event_name:
if name not in State.event_to_attr:
raise RuntimeError(f"Unknown event name '{name}'")
engine.add_event_handler(name, log_handler, self, name)
return RemovableEventHandle(event_name, log_handler, engine)
else:
if event_name not in State.event_to_attr:
raise RuntimeError(f"Unknown event name '{event_name}'")
return engine.add_event_handler(event_name, log_handler, self, event_name, *args, **kwargs)
def attach_output_handler(self, engine: Engine, event_name: Any, *args: Any, **kwargs: Any) -> RemovableEventHandle:
"""Shortcut method to attach `OutputHandler` to the logger.
Args:
engine: engine object.
event_name: event to attach the logging handler to. Valid events are from
:class:`~ignite.engine.events.Events` or any `event_name` added by
:meth:`~ignite.engine.engine.Engine.register_events`.
args: args to initialize `OutputHandler`
kwargs: kwargs to initialize `OutputHandler`
Returns:
:class:`~ignite.engine.events.RemovableEventHandle`, which can be used to remove the handler.
"""
return self.attach(engine, self._create_output_handler(*args, **kwargs), event_name=event_name)
def attach_opt_params_handler(
self, engine: Engine, event_name: Any, *args: Any, **kwargs: Any
) -> RemovableEventHandle:
"""Shortcut method to attach `OptimizerParamsHandler` to the logger.
Args:
engine: engine object.
event_name: event to attach the logging handler to. Valid events are from
:class:`~ignite.engine.events.Events` or any `event_name` added by
:meth:`~ignite.engine.engine.Engine.register_events`.
args: args to initialize `OptimizerParamsHandler`
kwargs: kwargs to initialize `OptimizerParamsHandler`
Returns:
:class:`~ignite.engine.events.RemovableEventHandle`, which can be used to remove the handler.
.. versionchanged:: 0.4.3
Added missing return statement.
"""
return self.attach(engine, self._create_opt_params_handler(*args, **kwargs), event_name=event_name)
@abstractmethod
def _create_output_handler(self, engine: Engine, *args: Any, **kwargs: Any) -> Callable:
pass
@abstractmethod
def _create_opt_params_handler(self, *args: Any, **kwargs: Any) -> Callable:
pass
def __enter__(self) -> "BaseLogger":
return self
def __exit__(self, type: Any, value: Any, traceback: Any) -> None:
self.close()
def close(self) -> None:
pass
|
""" ``ignite.contrib.handlers.time_profilers.py`` was moved to ``ignite.handlers.time_profilers``.
Note:
``ignite.contrib.handlers.time_profilers`` was moved to ``ignite.handlers.time_profilers``.
Please refer to :mod:`~ignite.handlers.time_profilers`.
"""
import warnings
removed_in = "0.6.0"
deprecation_warning = (
f"{__file__} has been moved to /ignite/handlers/time_profilers.py"
+ (f" and will be removed in version {removed_in}" if removed_in else "")
+ ".\n Please refer to the documentation for more details."
)
warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
from ignite.handlers.time_profilers import BasicTimeProfiler, HandlersTimeProfiler
__all__ = [
"BasicTimeProfiler",
"HandlersTimeProfiler",
]
BasicTimeProfiler = BasicTimeProfiler
HandlersTimeProfiler = HandlersTimeProfiler
|
"""ClearML logger and its helper handlers."""
import os
import tempfile
import warnings
from collections import defaultdict
from datetime import datetime
from enum import Enum
from typing import Any, Callable, DefaultDict, List, Mapping, Optional, Tuple, Type, Union
from torch.optim import Optimizer
import ignite.distributed as idist
from ignite.contrib.handlers.base_logger import (
BaseLogger,
BaseOptimizerParamsHandler,
BaseOutputHandler,
BaseWeightsHandler,
BaseWeightsScalarHandler,
)
from ignite.engine import Engine, Events
from ignite.handlers import global_step_from_engine
from ignite.handlers.checkpoint import DiskSaver
__all__ = [
"ClearMLLogger",
"ClearMLSaver",
"OptimizerParamsHandler",
"OutputHandler",
"WeightsScalarHandler",
"WeightsHistHandler",
"GradsScalarHandler",
"GradsHistHandler",
"global_step_from_engine",
]
class ClearMLLogger(BaseLogger):
"""
`ClearML <https://github.com/allegroai/clearml>`_ handler to log metrics, text, model/optimizer parameters,
plots during training and validation.
Also supports model checkpoints logging and upload to the storage solution of your choice (i.e. ClearML File server,
S3 bucket etc.)
.. code-block:: bash
pip install clearml
clearml-init
Args:
kwargs: Keyword arguments accepted from ``Task.init`` method.
All arguments are optional. If a ClearML Task has already been created,
kwargs will be ignored and the current ClearML Task will be used.
Examples:
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
# Create a logger
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Attach the logger to the trainer to log training loss at each iteration
clearml_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
output_transform=lambda loss: {"loss": loss}
)
# Attach the logger to the evaluator on the training dataset and log NLL, Accuracy metrics after each epoch
# We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer` instead of `train_evaluator`.
clearml_logger.attach_output_handler(
train_evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="training",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer),
)
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch of the
# `trainer` instead of `evaluator`.
clearml_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)),
)
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
clearml_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer,
param_name='lr' # optional
)
# Attach the logger to the trainer to log model's weights norm after each iteration
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(model)
)
"""
def __init__(self, **kwargs: Any):
try:
from clearml import Task
from clearml.binding.frameworks.tensorflow_bind import WeightsGradientHistHelper
except ImportError:
raise ModuleNotFoundError(
"This contrib module requires clearml to be installed. "
"You may install clearml using: \n pip install clearml \n"
)
experiment_kwargs = {k: v for k, v in kwargs.items() if k not in ("project_name", "task_name", "task_type")}
if self.bypass_mode():
warnings.warn("ClearMLSaver: running in bypass mode")
# Try to retrieve current the ClearML Task before trying to create a new one
self._task = Task.current_task()
if self._task is None:
self._task = Task.init(
project_name=kwargs.get("project_name"),
task_name=kwargs.get("task_name"),
task_type=kwargs.get("task_type", Task.TaskTypes.training),
**experiment_kwargs,
)
self.clearml_logger = self._task.get_logger()
self.grad_helper = WeightsGradientHistHelper(logger=self.clearml_logger, report_freq=1)
@classmethod
def set_bypass_mode(cls, bypass: bool) -> None:
"""
Set ``clearml.Task`` to offline mode.
Will bypass all outside communication, and will save all data and logs to a local session folder.
Should only be used in "standalone mode", when there is no access to the *clearml-server*.
Args:
bypass: If ``True``, all outside communication is skipped.
Data and logs will be stored in a local session folder.
For more information, please refer to `ClearML docs
<https://clear.ml/docs/latest/docs/clearml_sdk/task_sdk/#offline-mode>`_.
"""
from clearml import Task
setattr(cls, "_bypass", bypass)
Task.set_offline(offline_mode=bypass)
@classmethod
def bypass_mode(cls) -> bool:
"""
Returns the bypass mode state.
Note:
`GITHUB_ACTIONS` env will automatically set bypass_mode to ``True``
unless overridden specifically with ``ClearMLLogger.set_bypass_mode(False)``.
For more information, please refer to `ClearML docs
<https://clear.ml/docs/latest/docs/clearml_sdk/task_sdk/#offline-mode>`_.
Return:
If True, ``clearml.Task`` is on offline mode, and all outside communication is skipped.
"""
return getattr(cls, "_bypass", bool(os.environ.get("CI")))
def __getattr__(self, attr: Any) -> Any:
"""
Calls the corresponding method of ``clearml.Logger``.
Args:
attr: methods of the ``clearml.Logger`` class.
"""
return getattr(self.clearml_logger, attr)
def get_task(self) -> Any:
"""
Returns the task context that the logger is reporting.
Return:
Returns the current task, equivalent to ``clearml.Task.current_task()``.
"""
return self._task
def close(self) -> None:
self.clearml_logger.flush()
def _create_output_handler(self, *args: Any, **kwargs: Any) -> "OutputHandler":
return OutputHandler(*args, **kwargs)
def _create_opt_params_handler(self, *args: Any, **kwargs: Any) -> "OptimizerParamsHandler":
return OptimizerParamsHandler(*args, **kwargs)
class OutputHandler(BaseOutputHandler):
"""Helper handler to log engine's output and/or metrics
Args:
tag: common title for all produced plots. For example, "training"
metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
output_transform: output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{"loss": loss1, "another_loss": loss2}` to label the plot
with corresponding keys.
global_step_transform: global step transform function to output a desired global step.
Input of the function is `(engine, event_name)`. Output of function should be an integer.
Default is None, global_step based on attached engine. If provided,
uses function output as global_step. To setup global step from another engine, please use
:meth:`~ignite.contrib.handlers.clearml_logger.global_step_from_engine`.
state_attributes: list of attributes of the ``trainer.state`` to plot.
Examples:
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
# Create a logger
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer`:
clearml_logger.attach(
evaluator,
log_handler=OutputHandler(
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
),
event_name=Events.EPOCH_COMPLETED
)
# or equivalently
clearml_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
)
Another example, where model is evaluated every 500 iterations:
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
@trainer.on(Events.ITERATION_COMPLETED(every=500))
def evaluate(engine):
evaluator.run(validation_set, max_epochs=1)
# Create a logger
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
def global_step_transform(*args, **kwargs):
return trainer.state.iteration
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# every 500 iterations. Since evaluator engine does not have access to the training iteration, we
# provide a global_step_transform to return the trainer.state.iteration for the global_step, each time
# evaluator metrics are plotted on ClearML.
clearml_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metrics=["nll", "accuracy"],
global_step_transform=global_step_transform
)
Another example where the State Attributes ``trainer.state.alpha`` and ``trainer.state.beta``
are also logged along with the NLL and Accuracy after each iteration:
.. code-block:: python
clearml_logger.attach(
trainer,
log_handler=OutputHandler(
tag="training",
metric_names=["nll", "accuracy"],
state_attributes=["alpha", "beta"],
),
event_name=Events.ITERATION_COMPLETED
)
Example of `global_step_transform`
.. code-block:: python
def global_step_transform(engine, event_name):
return engine.state.get_event_attrib_value(event_name)
.. versionchanged:: 0.4.7
accepts an optional list of `state_attributes`
"""
def __init__(
self,
tag: str,
metric_names: Optional[List[str]] = None,
output_transform: Optional[Callable] = None,
global_step_transform: Optional[Callable[[Engine, Union[str, Events]], int]] = None,
state_attributes: Optional[List[str]] = None,
):
super(OutputHandler, self).__init__(
tag, metric_names, output_transform, global_step_transform, state_attributes
)
def __call__(self, engine: Engine, logger: ClearMLLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, ClearMLLogger):
raise RuntimeError("Handler OutputHandler works only with ClearMLLogger")
metrics = self._setup_output_metrics_state_attrs(engine)
global_step = self.global_step_transform(engine, event_name)
if not isinstance(global_step, int):
raise TypeError(
f"global_step must be int, got {type(global_step)}."
" Please check the output of global_step_transform."
)
for key, value in metrics.items():
if len(key) == 2:
logger.clearml_logger.report_scalar(title=key[0], series=key[1], iteration=global_step, value=value)
elif len(key) == 3:
logger.clearml_logger.report_scalar(
title=f"{key[0]}/{key[1]}", series=key[2], iteration=global_step, value=value
)
class OptimizerParamsHandler(BaseOptimizerParamsHandler):
"""Helper handler to log optimizer parameters
Args:
optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
param_name: parameter name
tag: common title for all produced plots. For example, "generator"
Examples:
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
# Create a logger
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
clearml_logger.attach(
trainer,
log_handler=OptimizerParamsHandler(optimizer),
event_name=Events.ITERATION_STARTED
)
# or equivalently
clearml_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer
)
"""
def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None):
super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
def __call__(self, engine: Engine, logger: ClearMLLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, ClearMLLogger):
raise RuntimeError("Handler OptimizerParamsHandler works only with ClearMLLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
params = {
str(i): float(param_group[self.param_name]) for i, param_group in enumerate(self.optimizer.param_groups)
}
for k, v in params.items():
logger.clearml_logger.report_scalar(
title=f"{tag_prefix}{self.param_name}", series=k, value=v, iteration=global_step
)
class WeightsScalarHandler(BaseWeightsScalarHandler):
"""Helper handler to log model's weights as scalars.
Handler, upon construction, iterates over named parameters of the model and keep
reference to ones permitted by `whitelist`. Then at every call, applies
reduction function to each parameter, produces a scalar and logs it.
Args:
model: model to log weights
reduction: function to reduce parameters into scalar
tag: common title for all produced plots. For example, "generator"
whitelist: specific weights to log. Should be list of model's submodules
or parameters names, or a callable which gets weight along with its name
and determines if it should be logged. Names should be fully-qualified.
For more information please refer to `PyTorch docs
<https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.
If not given, all of model's weights are logged.
Examples:
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
# Create a logger
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Attach the logger to the trainer to log model's weights norm after each iteration
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(model, reduction=torch.norm)
)
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Log only `fc` weights
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(
model,
whitelist=['fc']
)
)
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Log weights which have `bias` in their names
def has_bias_in_name(n, p):
return 'bias' in n
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(model, whitelist=has_bias_in_name)
)
.. versionchanged:: 0.4.9
optional argument `whitelist` added.
"""
def __call__(self, engine: Engine, logger: ClearMLLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, ClearMLLogger):
raise RuntimeError("Handler WeightsScalarHandler works only with ClearMLLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
for name, p in self.weights:
title_name, _, series_name = name.partition(".")
logger.clearml_logger.report_scalar(
title=f"{tag_prefix}weights_{self.reduction.__name__}/{title_name}",
series=series_name,
value=self.reduction(p.data),
iteration=global_step,
)
class WeightsHistHandler(BaseWeightsHandler):
"""Helper handler to log model's weights as histograms.
Args:
model: model to log weights
tag: common title for all produced plots. For example, 'generator'
whitelist: specific weights to log. Should be list of model's submodules
or parameters names, or a callable which gets weight along with its name
and determines if it should be logged. Names should be fully-qualified.
For more information please refer to `PyTorch docs
<https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.
If not given, all of model's weights are logged.
Examples:
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
# Create a logger
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Attach the logger to the trainer to log model's weights norm after each iteration
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsHistHandler(model)
)
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Log weights of `fc` layer
weights = ['fc']
# Attach the logger to the trainer to log weights norm after each iteration
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsHistHandler(model, whitelist=weights)
)
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Log weights which name include 'conv'.
weight_selector = lambda name, p: 'conv' in name
# Attach the logger to the trainer to log weights norm after each iteration
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsHistHandler(model, whitelist=weight_selector)
)
.. versionchanged:: 0.4.9
optional argument `whitelist` added.
"""
def __call__(self, engine: Engine, logger: ClearMLLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, ClearMLLogger):
raise RuntimeError("Handler 'WeightsHistHandler' works only with ClearMLLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
for name, p in self.weights:
title_name, _, series_name = name.partition(".")
logger.grad_helper.add_histogram(
title=f"{tag_prefix}weights_{title_name}",
series=series_name,
step=global_step,
hist_data=p.data.cpu().numpy(),
)
class GradsScalarHandler(BaseWeightsScalarHandler):
"""Helper handler to log model's gradients as scalars.
Handler, upon construction, iterates over named parameters of the model and keep
reference to ones permitted by the `whitelist`. Then at every call, applies
reduction function to each parameter's gradient, produces a scalar and logs it.
Args:
model: model to log weights
reduction: function to reduce parameters into scalar
tag: common title for all produced plots. For example, "generator"
whitelist: specific gradients to log. Should be list of model's submodules
or parameters names, or a callable which gets weight along with its name
and determines if its gradient should be logged. Names should be
fully-qualified. For more information please refer to `PyTorch docs
<https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.
If not given, all of model's gradients are logged.
Examples:
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
# Create a logger
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Attach the logger to the trainer to log model's weights norm after each iteration
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(model, reduction=torch.norm)
)
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Log gradient of `base`
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(
model,
reduction=torch.norm,
whitelist=['base']
)
)
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Log gradient of weights which belong to a `fc` layer
def is_in_fc_layer(n, p):
return 'fc' in n
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(model, whitelist=is_in_fc_layer)
)
.. versionchanged:: 0.4.9
optional argument `whitelist` added.
"""
def __call__(self, engine: Engine, logger: ClearMLLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, ClearMLLogger):
raise RuntimeError("Handler GradsScalarHandler works only with ClearMLLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
for name, p in self.weights:
if p.grad is None:
continue
title_name, _, series_name = name.partition(".")
logger.clearml_logger.report_scalar(
title=f"{tag_prefix}grads_{self.reduction.__name__}/{title_name}",
series=series_name,
value=self.reduction(p.grad),
iteration=global_step,
)
class GradsHistHandler(BaseWeightsHandler):
"""Helper handler to log model's gradients as histograms.
Args:
model: model to log weights
tag: common title for all produced plots. For example, 'generator'
whitelist: specific gradients to log. Should be list of model's submodules
or parameters names, or a callable which gets weight along with its name
and determines if its gradient should be logged. Names should be
fully-qualified. For more information please refer to `PyTorch docs
<https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.
If not given, all of model's gradients are logged.
Examples:
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
# Create a logger
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Attach the logger to the trainer to log model's weights norm after each iteration
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsHistHandler(model)
)
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Log gradient of `fc.bias`
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsHistHandler(model, whitelist=['fc.bias'])
)
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
# Log gradient of weights which have shape (2, 1)
def has_shape_2_1(n, p):
return p.shape == (2,1)
clearml_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsHistHandler(model, whitelist=has_shape_2_1)
)
.. versionchanged:: 0.4.9
optional argument `whitelist` added.
"""
def __call__(self, engine: Engine, logger: ClearMLLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, ClearMLLogger):
raise RuntimeError("Handler 'GradsHistHandler' works only with ClearMLLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
for name, p in self.weights:
if p.grad is None:
continue
title_name, _, series_name = name.partition(".")
logger.grad_helper.add_histogram(
title=f"{tag_prefix}grads_{title_name}",
series=series_name,
step=global_step,
hist_data=p.grad.cpu().numpy(),
)
class ClearMLSaver(DiskSaver):
"""
Handler that saves input checkpoint as ClearML artifacts
Args:
logger: An instance of :class:`~ignite.contrib.handlers.clearml_logger.ClearMLLogger`,
ensuring a valid ClearML ``Task`` has been initialized. If not provided, and a ClearML Task
has not been manually initialized, a runtime error will be raised.
output_uri: The default location for output models and other artifacts uploaded by ClearML. For
more information, see ``clearml.Task.init``.
dirname: Directory path where the checkpoint will be saved. If not provided, a temporary
directory will be created.
Examples:
.. code-block:: python
from ignite.contrib.handlers.clearml_logger import *
from ignite.handlers import Checkpoint
clearml_logger = ClearMLLogger(
project_name="pytorch-ignite-integration",
task_name="cnn-mnist"
)
to_save = {"model": model}
handler = Checkpoint(
to_save,
ClearMLSaver(),
n_saved=1,
score_function=lambda e: 123,
score_name="acc",
filename_prefix="best",
global_step_transform=global_step_from_engine(trainer)
)
validation_evaluator.add_event_handler(Events.EVENT_COMPLETED, handler)
"""
def __init__(
self,
logger: Optional[ClearMLLogger] = None,
output_uri: Optional[str] = None,
dirname: Optional[str] = None,
*args: Any,
**kwargs: Any,
):
self._setup_check_clearml(logger, output_uri)
if not dirname:
dirname = ""
if idist.get_rank() == 0:
dirname = tempfile.mkdtemp(prefix=f"ignite_checkpoints_{datetime.now().strftime('%Y_%m_%d_%H_%M_%S_')}")
if idist.get_world_size() > 1:
dirname = idist.all_gather(dirname)[0] # type: ignore[index, assignment]
warnings.warn(f"ClearMLSaver created a temporary checkpoints directory: {dirname}")
idist.barrier()
# Let's set non-atomic tmp dir saving behaviour
if "atomic" not in kwargs:
kwargs["atomic"] = False
self._checkpoint_slots: DefaultDict[Union[str, Tuple[str, str]], List[Any]] = defaultdict(list)
super(ClearMLSaver, self).__init__(dirname=dirname, *args, **kwargs) # type: ignore[misc]
@idist.one_rank_only()
def _setup_check_clearml(self, logger: ClearMLLogger, output_uri: str) -> None:
try:
from clearml import Task
except ImportError:
try:
# Backwards-compatibility for legacy Trains SDK
from trains import Task
except ImportError:
raise ModuleNotFoundError(
"This contrib module requires clearml to be installed. "
"You may install clearml using: \n pip install clearml \n"
)
if logger and not isinstance(logger, ClearMLLogger):
raise TypeError("logger must be an instance of ClearMLLogger")
self._task = Task.current_task()
if not self._task:
raise RuntimeError(
"ClearMLSaver requires a ClearML Task to be initialized. "
"Please use the `logger` argument or call `clearml.Task.init()`."
)
if output_uri:
self._task.output_uri = output_uri
class _CallbacksContext:
def __init__(
self,
callback_type: Type[Enum],
slots: List,
checkpoint_key: str,
filename: str,
basename: str,
metadata: Optional[Mapping] = None,
) -> None:
self._callback_type = callback_type
self._slots = slots
self._checkpoint_key = str(checkpoint_key)
self._filename = filename
self._basename = basename
self._metadata = metadata
def pre_callback(self, action: str, model_info: Any) -> Any:
if action != self._callback_type.save: # type: ignore[attr-defined]
return model_info
try:
slot = self._slots.index(None)
self._slots[slot] = model_info.upload_filename
except ValueError:
self._slots.append(model_info.upload_filename)
slot = len(self._slots) - 1
model_info.upload_filename = f"{self._basename}_{slot}{os.path.splitext(self._filename)[1]}"
model_info.local_model_id = f"{self._checkpoint_key}:{model_info.upload_filename}"
return model_info
def post_callback(self, action: str, model_info: Any) -> Any:
if action != self._callback_type.save: # type: ignore[attr-defined]
return model_info
model_info.model.name = f"{model_info.task.name}: {self._filename}"
prefix = "Checkpoint Metadata: "
metadata_items = ", ".join(f"{k}={v}" for k, v in self._metadata.items()) if self._metadata else "none"
metadata = f"{prefix}{metadata_items}"
comment = "\n".join(
metadata if line.startswith(prefix) else line for line in (model_info.model.comment or "").split("\n")
)
if prefix not in comment:
comment += "\n" + metadata
model_info.model.comment = comment
return model_info
def __call__(self, checkpoint: Mapping, filename: str, metadata: Optional[Mapping] = None) -> None:
try:
from clearml.binding.frameworks import WeightsFileHandler
except ImportError:
try:
# Backwards-compatibility for legacy Trains SDK
from trains.binding.frameworks import WeightsFileHandler
except ImportError:
raise ModuleNotFoundError(
"This contrib module requires clearml to be installed. "
"You may install clearml using: \n pip install clearml \n"
)
try:
basename = metadata["basename"] # type: ignore[index]
except (TypeError, KeyError):
warnings.warn("Checkpoint metadata missing or basename cannot be found")
basename = "checkpoint"
checkpoint_key = (str(self.dirname), basename)
cb_context = self._CallbacksContext(
callback_type=WeightsFileHandler.CallbackType,
slots=self._checkpoint_slots[checkpoint_key],
checkpoint_key=str(checkpoint_key),
filename=filename,
basename=basename,
metadata=metadata,
)
pre_cb_id = WeightsFileHandler.add_pre_callback(cb_context.pre_callback)
post_cb_id = WeightsFileHandler.add_post_callback(cb_context.post_callback)
try:
super(ClearMLSaver, self).__call__(checkpoint, filename, metadata)
finally:
WeightsFileHandler.remove_pre_callback(pre_cb_id)
WeightsFileHandler.remove_post_callback(post_cb_id)
@idist.one_rank_only()
def get_local_copy(self, filename: str) -> Optional[str]:
"""Get artifact local copy.
.. warning::
In distributed configuration this method should be called on rank 0 process.
Args:
filename: artifact name.
Returns:
a local path to a downloaded copy of the artifact
"""
artifact = self._task.artifacts.get(filename)
if artifact:
return artifact.get_local_copy()
self._task.get_logger().report_text(f"Can not find artifact {filename}")
return None
@idist.one_rank_only()
def remove(self, filename: str) -> None:
super(ClearMLSaver, self).remove(filename)
for slots in self._checkpoint_slots.values():
try:
slots[slots.index(filename)] = None
except ValueError:
pass
else:
break
|
"""Neptune logger and its helper handlers."""
import tempfile
import warnings
from typing import Any, Callable, List, Mapping, Optional, Union
import torch
from torch.optim import Optimizer
import ignite.distributed as idist
from ignite import __version__
from ignite.contrib.handlers.base_logger import (
BaseLogger,
BaseOptimizerParamsHandler,
BaseOutputHandler,
BaseWeightsScalarHandler,
)
from ignite.engine import Engine, Events
from ignite.handlers import global_step_from_engine
from ignite.handlers.checkpoint import BaseSaveHandler
__all__ = [
"NeptuneLogger",
"NeptuneSaver",
"OptimizerParamsHandler",
"OutputHandler",
"WeightsScalarHandler",
"GradsScalarHandler",
"global_step_from_engine",
]
_INTEGRATION_VERSION_KEY = "source_code/integrations/neptune-pytorch-ignite"
class NeptuneLogger(BaseLogger):
"""
`Neptune <https://neptune.ai/>`_ handler to log metrics, model/optimizer parameters and gradients during training
and validation. It can also log model checkpoints to Neptune.
.. code-block:: bash
pip install neptune
Args:
api_token: Neptune API token, found on https://neptune.ai -> User menu -> "Get your API token".
If None, the value of the NEPTUNE_API_TOKEN environment variable is used. To keep your token
secure, you should set it to the environment variable rather than including it in your code.
project: Name of a Neptune project, in the form "workspace-name/project-name".
For example "tom/mnist-classification".
If None, the value of the NEPTUNE_PROJECT environment variable is used.
**kwargs: Other arguments to be passed to the `init_run()` function.
Examples:
.. code-block:: python
from ignite.contrib.handlers.neptune_logger import *
# Create a logger
# Note: We are using the API token for anonymous logging. You can pass your own token, or save it as an
# environment variable and leave out the api_token argument.
npt_logger = NeptuneLogger(
api_token="ANONYMOUS",
project="common/pytorch-ignite-integration",
name="cnn-mnist", # Optional,
tags=["pytorch-ignite", "minst"], # Optional
)
# Attach the logger to the trainer to log training loss at each iteration.
npt_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
output_transform=lambda loss: {"loss": loss},
)
# Attach the logger to the evaluator on the training dataset and log NLL
# and accuracy metrics after each epoch.
# We set up `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer` instead of `train_evaluator`.
npt_logger.attach_output_handler(
train_evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="training",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer),
)
# Attach the logger to the evaluator on the validation dataset and log NLL and accuracy metrics after
# each epoch. We set up `global_step_transform=global_step_from_engine(trainer)` to take the epoch of the
# `trainer` instead of `evaluator`.
npt_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer),
)
# Attach the logger to the trainer to log optimizer parameters, such as learning rate at each iteration.
npt_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer,
param_name="lr", # optional
)
# Attach the logger to the trainer to log model's weights norm after each iteration.
npt_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(model),
)
Explore runs with Neptune tracking here:
https://app.neptune.ai/o/common/org/pytorch-ignite-integration/
You can also save model checkpoints to a Neptune:
.. code-block:: python
from ignite.handlers import Checkpoint
def score_function(engine):
return engine.state.metrics["accuracy"]
to_save = {"model": model}
handler = Checkpoint(
to_save,
NeptuneSaver(npt_logger), n_saved=2,
filename_prefix="best",
score_function=score_function,
score_name="validation_accuracy",
global_step_transform=global_step_from_engine(trainer),
)
validation_evaluator.add_event_handler(Events.COMPLETED, handler)
It is also possible to use the logger as a context manager:
.. code-block:: python
from ignite.contrib.handlers.neptune_logger import *
with NeptuneLogger() as npt_logger:
trainer = Engine(update_fn)
# Attach the logger to the trainer to log training loss at each iteration
npt_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
output_transform=lambda loss: {"loss": loss},
)
"""
def __getattr__(self, attr: Any) -> Any:
return getattr(self.experiment, attr)
def __getitem__(self, key: str) -> Any:
return self.experiment[key]
def __setitem__(self, key: str, val: Any) -> Any:
self.experiment[key] = val
def __init__(self, api_token: Optional[str] = None, project: Optional[str] = None, **kwargs: Any) -> None:
try:
try:
# neptune-client<1.0.0 package structure
with warnings.catch_warnings():
# ignore the deprecation warnings
warnings.simplefilter("ignore")
import neptune.new as neptune
except ImportError:
# neptune>=1.0.0 package structure
import neptune
except ImportError:
raise ModuleNotFoundError(
"This contrib module requires the Neptune client library to be installed. "
"Install neptune with the command: \n pip install neptune \n"
)
run = neptune.init_run(
api_token=api_token,
project=project,
**kwargs,
)
run[_INTEGRATION_VERSION_KEY] = __version__
self.experiment = run
def close(self) -> None:
self.experiment.stop()
def _create_output_handler(self, *args: Any, **kwargs: Any) -> "OutputHandler":
return OutputHandler(*args, **kwargs)
def _create_opt_params_handler(self, *args: Any, **kwargs: Any) -> "OptimizerParamsHandler":
return OptimizerParamsHandler(*args, **kwargs)
class OutputHandler(BaseOutputHandler):
"""Helper handler to log engine's output and/or metrics.
Args:
tag: common title for all produced plots. For example, "training"
metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
output_transform: output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{"loss": loss1, "another_loss": loss2}` to label the plot
with corresponding keys.
global_step_transform: global step transform function to output a desired global step.
Input of the function is `(engine, event_name)`. Output of function should be an integer.
Default is None, global_step based on attached engine. If provided,
uses function output as global_step. To setup global step from another engine, please use
:meth:`~ignite.contrib.handlers.neptune_logger.global_step_from_engine`.
state_attributes: list of attributes of the ``trainer.state`` to plot.
Examples:
.. code-block:: python
from ignite.contrib.handlers.neptune_logger import *
# Create a logger
# We are using the api_token for the anonymous user neptuner but you can use your own.
npt_logger = NeptuneLogger(
api_token="ANONYMOUS",
project_name="shared/pytorch-ignite-integration",
experiment_name="cnn-mnist", # Optional,
params={"max_epochs": 10}, # Optional,
tags=["pytorch-ignite","minst"] # Optional
)
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer`:
npt_logger.attach(
evaluator,
log_handler=OutputHandler(
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
),
event_name=Events.EPOCH_COMPLETED
)
# or equivalently
npt_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
)
Another example, where model is evaluated every 500 iterations:
.. code-block:: python
from ignite.contrib.handlers.neptune_logger import *
@trainer.on(Events.ITERATION_COMPLETED(every=500))
def evaluate(engine):
evaluator.run(validation_set, max_epochs=1)
# We are using the api_token for the anonymous user neptuner but you can use your own.
npt_logger = NeptuneLogger(
api_token="ANONYMOUS",
project_name="shared/pytorch-ignite-integration",
experiment_name="cnn-mnist", # Optional,
params={"max_epochs": 10}, # Optional,
tags=["pytorch-ignite", "minst"] # Optional
)
def global_step_transform(*args, **kwargs):
return trainer.state.iteration
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# every 500 iterations. Since evaluator engine does not have access to the training iteration, we
# provide a global_step_transform to return the trainer.state.iteration for the global_step, each time
# evaluator metrics are plotted on NeptuneML.
npt_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metrics=["nll", "accuracy"],
global_step_transform=global_step_transform
)
Another example where the State Attributes ``trainer.state.alpha`` and ``trainer.state.beta``
are also logged along with the NLL and Accuracy after each iteration:
.. code-block:: python
npt_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
metrics=["nll", "accuracy"],
state_attributes=["alpha", "beta"],
)
Example of `global_step_transform`:
.. code-block:: python
def global_step_transform(engine, event_name):
return engine.state.get_event_attrib_value(event_name)
.. versionchanged:: 0.4.7
accepts an optional list of `state_attributes`
"""
def __init__(
self,
tag: str,
metric_names: Optional[Union[str, List[str]]] = None,
output_transform: Optional[Callable] = None,
global_step_transform: Optional[Callable[[Engine, Union[str, Events]], int]] = None,
state_attributes: Optional[List[str]] = None,
):
super(OutputHandler, self).__init__(
tag, metric_names, output_transform, global_step_transform, state_attributes
)
def __call__(self, engine: Engine, logger: NeptuneLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, NeptuneLogger):
raise TypeError("Handler OutputHandler works only with NeptuneLogger")
metrics = self._setup_output_metrics_state_attrs(engine, key_tuple=False)
global_step = self.global_step_transform(engine, event_name)
if not isinstance(global_step, int):
raise TypeError(
f"global_step must be int, got {type(global_step)}."
" Please check the output of global_step_transform."
)
for key, value in metrics.items():
logger[key].append(value, step=global_step)
class OptimizerParamsHandler(BaseOptimizerParamsHandler):
"""Helper handler to log optimizer parameters
Args:
optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
param_name: parameter name
tag: common title for all produced plots. For example, "generator"
Examples:
.. code-block:: python
from ignite.contrib.handlers.neptune_logger import *
# Create a logger
# We are using the api_token for the anonymous user neptuner but you can use your own.
npt_logger = NeptuneLogger(
api_token="ANONYMOUS",
project_name="shared/pytorch-ignite-integration",
experiment_name="cnn-mnist", # Optional,
params={"max_epochs": 10}, # Optional,
tags=["pytorch-ignite","minst"] # Optional
)
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
npt_logger.attach(
trainer,
log_handler=OptimizerParamsHandler(optimizer),
event_name=Events.ITERATION_STARTED
)
# or equivalently
npt_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer
)
"""
def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None):
super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
def __call__(self, engine: Engine, logger: NeptuneLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, NeptuneLogger):
raise TypeError("Handler OptimizerParamsHandler works only with NeptuneLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
params = {
f"{tag_prefix}{self.param_name}/group_{i}": float(param_group[self.param_name])
for i, param_group in enumerate(self.optimizer.param_groups)
}
for k, v in params.items():
logger[k].append(v, step=global_step)
class WeightsScalarHandler(BaseWeightsScalarHandler):
"""Helper handler to log model's weights as scalars.
Handler, upon construction, iterates over named parameters of the model and keep
reference to ones permitted by `whitelist`. Then at every call, applies
reduction function to each parameter, produces a scalar and logs it.
Args:
model: model to log weights
reduction: function to reduce parameters into scalar
tag: common title for all produced plots. For example, "generator"
whitelist: specific weights to log. Should be list of model's submodules
or parameters names, or a callable which gets weight along with its name
and determines if it should be logged. Names should be fully-qualified.
For more information please refer to `PyTorch docs
<https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.
If not given, all of model's weights are logged.
Examples:
.. code-block:: python
from ignite.contrib.handlers.neptune_logger import *
# Create a logger
# We are using the api_token for the anonymous user neptuner but you can use your own.
npt_logger = NeptuneLogger(
api_token="ANONYMOUS",
project_name="shared/pytorch-ignite-integration",
experiment_name="cnn-mnist", # Optional,
params={"max_epochs": 10}, # Optional,
tags=["pytorch-ignite","minst"] # Optional
)
# Attach the logger to the trainer to log model's weights norm after each iteration
npt_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(model, reduction=torch.norm)
)
.. code-block:: python
from ignite.contrib.handlers.neptune_logger import *
npt_logger = NeptuneLogger(
api_token="ANONYMOUS",
project_name="shared/pytorch-ignite-integration",
experiment_name="cnn-mnist", # Optional,
params={"max_epochs": 10}, # Optional,
tags=["pytorch-ignite","minst"] # Optional
)
# Log only `fc` weights
npt_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(
model,
whitelist=['fc']
)
)
.. code-block:: python
from ignite.contrib.handlers.neptune_logger import *
npt_logger = NeptuneLogger(
api_token="ANONYMOUS",
project_name="shared/pytorch-ignite-integration",
experiment_name="cnn-mnist", # Optional,
params={"max_epochs": 10}, # Optional,
tags=["pytorch-ignite","minst"] # Optional
)
# Log weights which have `bias` in their names
def has_bias_in_name(n, p):
return 'bias' in n
npt_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(model, whitelist=has_bias_in_name)
)
.. versionchanged:: 0.4.9
optional argument `whitelist` added.
"""
def __call__(self, engine: Engine, logger: NeptuneLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, NeptuneLogger):
raise TypeError("Handler WeightsScalarHandler works only with NeptuneLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
for name, p in self.weights:
if p.grad is None:
continue
name = name.replace(".", "/")
key = f"{tag_prefix}weights_{self.reduction.__name__}/{name}"
logger[key].append(self.reduction(p.data), step=global_step)
class GradsScalarHandler(BaseWeightsScalarHandler):
"""Helper handler to log model's gradients as scalars.
Handler, upon construction, iterates over named parameters of the model and keep
reference to ones permitted by the `whitelist`. Then at every call, applies
reduction function to each parameter's gradient, produces a scalar and logs it.
Args:
model: model to log weights
reduction: function to reduce parameters into scalar
tag: common title for all produced plots. For example, "generator"
whitelist: specific gradients to log. Should be list of model's submodules
or parameters names, or a callable which gets weight along with its name
and determines if its gradient should be logged. Names should be
fully-qualified. For more information please refer to `PyTorch docs
<https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.
If not given, all of model's gradients are logged.
Examples:
.. code-block:: python
from ignite.contrib.handlers.neptune_logger import *
# Create a logger
# We are using the api_token for the anonymous user neptuner but you can use your own.
npt_logger = NeptuneLogger(
api_token="ANONYMOUS",
project_name="shared/pytorch-ignite-integration",
experiment_name="cnn-mnist", # Optional,
params={"max_epochs": 10}, # Optional,
tags=["pytorch-ignite","minst"] # Optional
)
# Attach the logger to the trainer to log model's weights norm after each iteration
npt_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(model, reduction=torch.norm)
)
.. code-block:: python
from ignite.contrib.handlers.neptune_logger import *
npt_logger = NeptuneLogger(
api_token="ANONYMOUS",
project_name="shared/pytorch-ignite-integration",
experiment_name="cnn-mnist", # Optional,
params={"max_epochs": 10}, # Optional,
tags=["pytorch-ignite","minst"] # Optional
)
# Log gradient of `base`
npt_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(
model,
reduction=torch.norm,
whitelist=['base']
)
)
.. code-block:: python
from ignite.contrib.handlers.neptune_logger import *
npt_logger = NeptuneLogger(
api_token="ANONYMOUS",
project_name="shared/pytorch-ignite-integration",
experiment_name="cnn-mnist", # Optional,
params={"max_epochs": 10}, # Optional,
tags=["pytorch-ignite","minst"] # Optional
)
# Log gradient of weights which belong to a `fc` layer
def is_in_fc_layer(n, p):
return 'fc' in n
npt_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(model, whitelist=is_in_fc_layer)
)
.. versionchanged:: 0.4.9
optional argument `whitelist` added.
"""
def __call__(self, engine: Engine, logger: NeptuneLogger, event_name: Union[str, Events]) -> None:
if not isinstance(logger, NeptuneLogger):
raise TypeError("Handler GradsScalarHandler works only with NeptuneLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = f"{self.tag}/" if self.tag else ""
for name, p in self.weights:
if p.grad is None:
continue
name = name.replace(".", "/")
key = f"{tag_prefix}grads_{self.reduction.__name__}/{name}"
logger[key].append(self.reduction(p.grad), step=global_step)
class NeptuneSaver(BaseSaveHandler):
"""Handler that saves input checkpoint to the Neptune server.
Args:
neptune_logger: an instance of
NeptuneLogger class.
.. Note ::
NeptuneSaver is currently not supported on Windows.
Examples:
.. code-block:: python
from ignite.contrib.handlers.neptune_logger import *
# Create a logger
# We are using the api_token for the anonymous user neptuner but you can use your own.
npt_logger = NeptuneLogger(
api_token="ANONYMOUS",
project_name="shared/pytorch-ignite-integration",
experiment_name="cnn-mnist", # Optional,
params={"max_epochs": 10}, # Optional,
tags=["pytorch-ignite","minst"] # Optional
)
...
evaluator = create_supervised_evaluator(model, metrics=metrics, ...)
...
from ignite.handlers import Checkpoint
def score_function(engine):
return engine.state.metrics["accuracy"]
to_save = {"model": model}
# pass neptune logger to NeptuneServer
handler = Checkpoint(
to_save,
NeptuneSaver(npt_logger), n_saved=2,
filename_prefix="best", score_function=score_function,
score_name="validation_accuracy",
global_step_transform=global_step_from_engine(trainer)
)
evaluator.add_event_handler(Events.COMPLETED, handler)
# We need to close the logger when we are done
npt_logger.close()
For example, you can access model checkpoints and download them from here:
https://ui.neptune.ai/o/shared/org/pytorch-ignite-integration/e/PYTOR1-18/charts
"""
@idist.one_rank_only()
def __init__(self, neptune_logger: NeptuneLogger):
self._logger = neptune_logger
@idist.one_rank_only()
def __call__(self, checkpoint: Mapping, filename: str, metadata: Optional[Mapping] = None) -> None:
# wont work on XLA
# Imports for BC compatibility
try:
# neptune-client<1.0.0 package structure
with warnings.catch_warnings():
# ignore the deprecation warnings
warnings.simplefilter("ignore")
from neptune.new.types import File
except ImportError:
# neptune>=1.0.0 package structure
from neptune.types import File
with tempfile.NamedTemporaryFile() as tmp:
# we can not use tmp.name to open tmp.file twice on Win32
# https://docs.python.org/3/library/tempfile.html#tempfile.NamedTemporaryFile
torch.save(checkpoint, tmp.file)
# rewind the buffer
tmp.file.seek(0)
# hold onto the file stream for uploading.
# NOTE: This won't load the whole file in memory and upload
# the stream in smaller chunks.
self._logger[filename].upload(File.from_stream(tmp.file))
@idist.one_rank_only(with_barrier=True)
def remove(self, filename: str) -> None:
del self._logger.experiment[filename]
|
# -*- coding: utf-8 -*-
"""TQDM logger."""
from collections import OrderedDict
from typing import Any, Callable, List, Optional, Union
from ignite.contrib.handlers.base_logger import BaseLogger, BaseOutputHandler
from ignite.engine import Engine, Events
from ignite.engine.events import CallableEventWithFilter, RemovableEventHandle
class ProgressBar(BaseLogger):
"""
TQDM progress bar handler to log training progress and computed metrics.
Args:
persist: set to ``True`` to persist the progress bar after completion (default = ``False``)
bar_format : Specify a custom bar string formatting. May impact performance.
[default: '{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]'].
Set to ``None`` to use ``tqdm`` default bar formatting: '{l_bar}{bar}{r_bar}', where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'. For more details on the
formatting, see `tqdm docs <https://tqdm.github.io/docs/tqdm/>`_.
tqdm_kwargs: kwargs passed to tqdm progress bar.
By default, progress bar description displays "Epoch [5/10]" where 5 is the current epoch and 10 is the
number of epochs; however, if ``max_epochs`` are set to 1, the progress bar instead displays
"Iteration: [5/10]". If tqdm_kwargs defines `desc`, e.g. "Predictions", than the description is
"Predictions [5/10]" if number of epochs is more than one otherwise it is simply "Predictions".
Examples:
Simple progress bar
.. code-block:: python
trainer = create_supervised_trainer(model, optimizer, loss)
pbar = ProgressBar()
pbar.attach(trainer)
# Progress bar will looks like
# Epoch [2/50]: [64/128] 50%|█████ [06:17<12:34]
Log output to a file instead of stderr (tqdm's default output)
.. code-block:: python
trainer = create_supervised_trainer(model, optimizer, loss)
log_file = open("output.log", "w")
pbar = ProgressBar(file=log_file)
pbar.attach(trainer)
Attach metrics that already have been computed at :attr:`~ignite.engine.events.Events.ITERATION_COMPLETED`
(such as :class:`~ignite.metrics.RunningAverage`)
.. code-block:: python
trainer = create_supervised_trainer(model, optimizer, loss)
RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')
pbar = ProgressBar()
pbar.attach(trainer, ['loss'])
# Progress bar will looks like
# Epoch [2/50]: [64/128] 50%|█████ , loss=0.123 [06:17<12:34]
Directly attach the engine's output
.. code-block:: python
trainer = create_supervised_trainer(model, optimizer, loss)
pbar = ProgressBar()
pbar.attach(trainer, output_transform=lambda x: {'loss': x})
# Progress bar will looks like
# Epoch [2/50]: [64/128] 50%|█████ , loss=0.123 [06:17<12:34]
Example where the State Attributes ``trainer.state.alpha`` and ``trainer.state.beta``
are also logged along with the NLL and Accuracy after each iteration:
.. code-block:: python
pbar.attach(
trainer,
metric_names=["nll", "accuracy"],
state_attributes=["alpha", "beta"],
)
Note:
When attaching the progress bar to an engine, it is recommended that you replace
every print operation in the engine's handlers triggered every iteration with
``pbar.log_message`` to guarantee the correct format of the stdout.
Note:
When using inside jupyter notebook, `ProgressBar` automatically uses `tqdm_notebook`. For correct rendering,
please install `ipywidgets <https://ipywidgets.readthedocs.io/en/stable/user_install.html#installation>`_.
Due to `tqdm notebook bugs <https://github.com/tqdm/tqdm/issues/594>`_, bar format may be needed to be set
to an empty string value.
.. versionchanged:: 0.4.7
`attach` now accepts an optional list of `state_attributes`
"""
_events_order: List[Union[Events, CallableEventWithFilter]] = [
Events.STARTED,
Events.EPOCH_STARTED,
Events.ITERATION_STARTED,
Events.ITERATION_COMPLETED,
Events.EPOCH_COMPLETED,
Events.COMPLETED,
]
def __init__(
self,
persist: bool = False,
bar_format: Union[
str, None
] = "{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]",
**tqdm_kwargs: Any,
):
try:
from tqdm.autonotebook import tqdm
except ImportError:
raise ModuleNotFoundError(
"This contrib module requires tqdm to be installed. "
"Please install it with command: \n pip install tqdm"
)
self.pbar_cls = tqdm
self.pbar = None
self.persist = persist
self.bar_format = bar_format
self.tqdm_kwargs = tqdm_kwargs
def _reset(self, pbar_total: Optional[int]) -> None:
self.pbar = self.pbar_cls(
total=pbar_total, leave=self.persist, bar_format=self.bar_format, initial=1, **self.tqdm_kwargs
)
def _close(self, engine: Engine) -> None:
if self.pbar is not None:
# https://github.com/tqdm/notebook.py#L240-L250
# issue #1115 : notebook backend of tqdm checks if n < total (error or KeyboardInterrupt)
# and the bar persists in 'danger' mode
if self.pbar.total is not None:
self.pbar.n = self.pbar.total
self.pbar.close()
self.pbar = None
@staticmethod
def _compare_lt(
event1: Union[Events, CallableEventWithFilter], event2: Union[Events, CallableEventWithFilter]
) -> bool:
i1 = ProgressBar._events_order.index(event1)
i2 = ProgressBar._events_order.index(event2)
return i1 < i2
def log_message(self, message: str) -> None:
"""
Logs a message, preserving the progress bar correct output format.
Args:
message: string you wish to log.
"""
from tqdm import tqdm
tqdm.write(message, file=self.tqdm_kwargs.get("file", None))
def attach( # type: ignore[override]
self,
engine: Engine,
metric_names: Optional[Union[str, List[str]]] = None,
output_transform: Optional[Callable] = None,
event_name: Union[Events, CallableEventWithFilter] = Events.ITERATION_COMPLETED,
closing_event_name: Union[Events, CallableEventWithFilter] = Events.EPOCH_COMPLETED,
state_attributes: Optional[List[str]] = None,
) -> None:
"""
Attaches the progress bar to an engine object.
Args:
engine: engine object.
metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
output_transform: a function to select what you want to print from the engine's
output. This function may return either a dictionary with entries in the format of ``{name: value}``,
or a single scalar, which will be displayed with the default name `output`.
event_name: event's name on which the progress bar advances. Valid events are from
:class:`~ignite.engine.events.Events`.
closing_event_name: event's name on which the progress bar is closed. Valid events are from
:class:`~ignite.engine.events.Events`.
state_attributes: list of attributes of the ``trainer.state`` to plot.
Note:
Accepted output value types are numbers, 0d and 1d torch tensors and strings.
"""
desc = self.tqdm_kwargs.get("desc", None)
if event_name not in engine._allowed_events:
raise ValueError(f"Logging event {event_name.name} is not in allowed events for this engine")
if isinstance(closing_event_name, CallableEventWithFilter):
if closing_event_name.filter is not None:
raise ValueError("Closing Event should not be a filtered event")
if not self._compare_lt(event_name, closing_event_name):
raise ValueError(f"Logging event {event_name} should be called before closing event {closing_event_name}")
log_handler = _OutputHandler(
desc,
metric_names,
output_transform,
closing_event_name=closing_event_name,
state_attributes=state_attributes,
)
super(ProgressBar, self).attach(engine, log_handler, event_name)
engine.add_event_handler(closing_event_name, self._close)
def attach_opt_params_handler( # type: ignore[empty-body]
self, engine: Engine, event_name: Union[str, Events], *args: Any, **kwargs: Any
) -> RemovableEventHandle:
"""Intentionally empty"""
pass
def _create_output_handler(self, *args: Any, **kwargs: Any) -> "_OutputHandler":
return _OutputHandler(*args, **kwargs)
def _create_opt_params_handler(self, *args: Any, **kwargs: Any) -> Callable: # type: ignore[empty-body]
"""Intentionally empty"""
pass
class _OutputHandler(BaseOutputHandler):
"""Helper handler to log engine's output and/or metrics
pbar = ProgressBar()
Args:
description: progress bar description.
metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
output_transform: output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{'loss': loss1, 'another_loss': loss2}` to label the plot
with corresponding keys.
closing_event_name: event's name on which the progress bar is closed. Valid events are from
:class:`~ignite.engine.events.Events` or any `event_name` added by
:meth:`~ignite.engine.engine.Engine.register_events`.
state_attributes: list of attributes of the ``trainer.state`` to plot.
"""
def __init__(
self,
description: str,
metric_names: Optional[Union[str, List[str]]] = None,
output_transform: Optional[Callable] = None,
closing_event_name: Union[Events, CallableEventWithFilter] = Events.EPOCH_COMPLETED,
state_attributes: Optional[List[str]] = None,
):
if metric_names is None and output_transform is None:
# This helps to avoid 'Either metric_names or output_transform should be defined' of BaseOutputHandler
metric_names = []
super(_OutputHandler, self).__init__(
description, metric_names, output_transform, global_step_transform=None, state_attributes=state_attributes
)
self.closing_event_name = closing_event_name
@staticmethod
def get_max_number_events(event_name: Union[str, Events, CallableEventWithFilter], engine: Engine) -> Optional[int]:
if event_name in (Events.ITERATION_STARTED, Events.ITERATION_COMPLETED):
return engine.state.epoch_length
if event_name in (Events.EPOCH_STARTED, Events.EPOCH_COMPLETED):
return engine.state.max_epochs
return 1
def __call__(self, engine: Engine, logger: ProgressBar, event_name: Union[str, Events]) -> None:
pbar_total = self.get_max_number_events(event_name, engine)
if logger.pbar is None:
logger._reset(pbar_total=pbar_total)
max_epochs = engine.state.max_epochs
default_desc = "Iteration" if max_epochs == 1 else "Epoch"
desc = self.tag or default_desc
max_num_of_closing_events = self.get_max_number_events(self.closing_event_name, engine)
if max_num_of_closing_events and max_num_of_closing_events > 1:
global_step = engine.state.get_event_attrib_value(self.closing_event_name)
desc += f" [{global_step}/{max_num_of_closing_events}]"
logger.pbar.set_description(desc) # type: ignore[attr-defined]
rendered_metrics = self._setup_output_metrics_state_attrs(engine, log_text=True)
metrics = OrderedDict()
for key, value in rendered_metrics.items():
key = "_".join(key[1:]) # tqdm has tag as description
metrics[key] = value
if metrics:
logger.pbar.set_postfix(metrics) # type: ignore[attr-defined]
global_step = engine.state.get_event_attrib_value(event_name)
if pbar_total is not None:
global_step = (global_step - 1) % pbar_total + 1
logger.pbar.update(global_step - logger.pbar.n) # type: ignore[attr-defined]
|
import random
import warnings
from collections import OrderedDict
from functools import wraps
from typing import Any, Callable, Generator, Iterator, List, Optional
import torch
from torch.utils.data import DataLoader
from torch.utils.data.sampler import BatchSampler
from ignite.engine.engine import Engine
from ignite.engine.events import Events
from ignite.utils import manual_seed
__all__ = ["update_dataloader", "keep_random_state", "ReproducibleBatchSampler", "DeterministicEngine"]
def update_dataloader(dataloader: DataLoader, new_batch_sampler: BatchSampler) -> DataLoader:
"""Helper function to replace current batch sampler of the dataloader by a new batch sampler. Function returns new
dataloader with new batch sampler.
Args:
dataloader: input dataloader
new_batch_sampler: new batch sampler to use
Returns:
DataLoader
"""
params_keys = [k for k in dataloader.__dict__.keys() if not k.startswith("_")]
for k in ["batch_size", "sampler", "drop_last", "batch_sampler", "dataset_kind"]:
if k in params_keys:
params_keys.remove(k)
params = {k: getattr(dataloader, k) for k in params_keys}
params["batch_sampler"] = new_batch_sampler
return type(dataloader)(**params)
class ReproducibleBatchSampler(BatchSampler):
"""Reproducible batch sampler. This class internally iterates and stores indices of the input batch sampler.
This helps to start providing data batches from an iteration in a deterministic way.
Args:
batch_sampler: batch sampler same as used with `torch.utils.data.DataLoader`.
start_iteration: optional start iteration.
Examples:
Setup dataloader with `ReproducibleBatchSampler` and start providing data batches from an iteration
.. code-block:: python
from ignite.engine.deterministic import update_dataloader
dataloader = update_dataloader(dataloader, ReproducibleBatchSampler(dataloader.batch_sampler))
# rewind dataloader to a specific iteration:
dataloader.batch_sampler.start_iteration = start_iteration
"""
def __init__(self, batch_sampler: BatchSampler, start_iteration: Optional[int] = None):
if not isinstance(batch_sampler, BatchSampler):
raise TypeError("Argument batch_sampler should be torch.utils.data.sampler.BatchSampler")
self.batch_indices: List = []
self.batch_sampler = batch_sampler
self.start_iteration = start_iteration
self.sampler = self.batch_sampler.sampler
def setup_batch_indices(self) -> None:
"""Setup batch indices."""
self.batch_indices = []
for batch in self.batch_sampler:
self.batch_indices.append(batch)
if self.start_iteration is not None:
self.batch_indices = self.batch_indices[self.start_iteration :]
self.start_iteration = None
def __iter__(self) -> Generator:
self.setup_batch_indices()
for batch in self.batch_indices:
yield batch
def __len__(self) -> int:
return len(self.batch_sampler)
def _get_rng_states() -> List[Any]:
output = [random.getstate(), torch.get_rng_state()]
try:
import numpy as np
output.append(np.random.get_state())
except ImportError:
pass
return output
def _set_rng_states(rng_states: List[Any]) -> None:
random.setstate(rng_states[0])
if "cpu" not in rng_states[1].device.type:
rng_states[1] = rng_states[1].cpu()
torch.set_rng_state(rng_states[1])
try:
import numpy as np
np.random.set_state(rng_states[2])
except ImportError:
pass
def _repr_rng_state(rng_states: List[Any]) -> str:
from hashlib import md5
out = " ".join([md5(str(list(s)).encode("utf-8")).hexdigest() for s in rng_states])
return out
def keep_random_state(func: Callable) -> Callable:
"""Helper decorator to keep random state of torch, numpy and random intact
while executing a function. For more details on usage, please see :ref:`Dataflow synchronization`.
Args:
func: function to decorate
"""
@wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> None:
rng_states = _get_rng_states()
func(*args, **kwargs)
_set_rng_states(rng_states)
return wrapper
class DeterministicEngine(Engine):
"""Deterministic engine derived from :class:`~ignite.engine.engine.Engine`.
"Deterministic" run is done by adding additional handlers to synchronize the dataflow and overriding some methods of
:class:`~ignite.engine.engine.Engine`:
.. code-block:: python
for e in range(num_epochs):
set_seed(seed_offset + e)
if resume:
setup_saved_rng_states()
do_single_epoch_iterations(dataloader)
If input data provider is `DataLoader`, its batch sampler is replaced by
:class:`~ignite.engine.deterministic.ReproducibleBatchSampler`.
.. code-block:: python
for e in range(num_epochs):
set_seed(seed_offset + e)
setup_sampling(dataloader)
if resume:
setup_saved_rng_states()
do_single_epoch_iterations(dataloader)
Internally, `torch.backends.cudnn.deterministic = True` and `torch.backends.cudnn.benchmark = False` are also
applied.
For more details about dataflow synchronization, please see :ref:`Dataflow synchronization`.
.. Note ::
This class can produce exactly the same dataflow when resuming the run from an epoch (or more precisely from
dataflow restart) and using torch `DataLoader` with `num_workers > 1` as data provider.
Args:
process_function: A function receiving a handle to the engine and the current batch
in each iteration, and returns data to be stored in the engine's state.
"""
def __init__(self, process_function: Callable[[Engine, Any], Any]):
super(DeterministicEngine, self).__init__(process_function)
self.state_dict_user_keys.append("rng_states")
if not hasattr(self.state, "rng_states"):
setattr(self.state, "rng_states", None)
self.add_event_handler(Events.STARTED, self._init_run)
self.add_event_handler(Events.DATALOADER_STOP_ITERATION | Events.TERMINATE_SINGLE_EPOCH, self._setup_seed)
def state_dict(self) -> OrderedDict:
state_dict = super(DeterministicEngine, self).state_dict()
state_dict["rng_states"] = _get_rng_states()
return state_dict
def _init_run(self) -> None:
self.state.seed = int(torch.randint(0, int(1e9), (1,)).item())
if torch.cuda.is_available():
if hasattr(torch, "use_deterministic_algorithms"):
torch.use_deterministic_algorithms(True, warn_only=True)
else:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def _setup_engine(self) -> None:
if self.state.dataloader is None:
raise ValueError(
"Deterministic engine does not support the option of data=None. Please, provide data as iterable"
)
self._dataloader_len = self._get_data_length(self.state.dataloader)
# if input data is torch dataloader we replace batch sampler by a batch sampler
# such that its random sampling indices are reproducible by prefetching them before data iteration
if isinstance(self.state.dataloader, DataLoader):
# attribute _dataset_kind is introduced since 1.3.0 => before 1.3.0 all datasets are map-like
can_patch_dataloader = True
if hasattr(self.state.dataloader, "_dataset_kind"):
from torch.utils.data.dataloader import _DatasetKind
_dataloader_kind = self.state.dataloader._dataset_kind
can_patch_dataloader = _dataloader_kind == _DatasetKind.Map
if can_patch_dataloader:
if self._dataloader_len is not None and hasattr(self.state.dataloader.sampler, "epoch"):
if self._dataloader_len != self.state.epoch_length:
warnings.warn(
"When defined engine's epoch length is different of input dataloader length, "
"distributed sampler indices can not be setup in a reproducible manner"
)
batch_sampler = self.state.dataloader.batch_sampler
if not (batch_sampler is None or isinstance(batch_sampler, ReproducibleBatchSampler)):
self.state.dataloader = update_dataloader(
self.state.dataloader, ReproducibleBatchSampler(batch_sampler) # type: ignore[arg-type]
)
iteration = self.state.iteration
self._dataloader_iter = self._from_iteration(iteration)
# Below we define initial counter value for _run_once_on_dataset to measure a single epoch
if self.state.epoch_length is not None:
iteration %= self.state.epoch_length
self._init_iter = iteration
# restore rng state if in the middle
in_the_middle = self.state.iteration % self._dataloader_len > 0 if self._dataloader_len is not None else False
rng_states = getattr(self.state, "rng_states", None)
if rng_states is not None and in_the_middle:
_set_rng_states(rng_states)
setattr(self.state, "rng_states", None)
def _from_iteration(self, iteration: int) -> Iterator:
if self.state.dataloader is None:
raise RuntimeError(
"Internal error, self.state.dataloader is None. Please, file an issue if you encounter this error."
)
data = self.state.dataloader
if isinstance(data, DataLoader):
try:
# following is unsafe for IterableDatasets
iteration %= len(data.batch_sampler) # type: ignore[arg-type]
# Synchronize dataflow according to state.iteration
self._setup_seed()
if iteration > 0:
# batch sampler is ReproducibleBatchSampler
data.batch_sampler.start_iteration = iteration # type: ignore[union-attr]
return iter(data)
except TypeError as e:
# Probably we can do nothing with DataLoader built upon IterableDatasets
pass
self.logger.info("Resuming from iteration for provided data will fetch data until required iteration ...")
if hasattr(data, "__len__"):
iteration %= len(data) # type: ignore[arg-type]
# Synchronize dataflow from the begining
self._setup_seed(iteration=0)
data_iter = iter(data)
counter = 0
while counter < iteration:
try:
next(data_iter)
counter += 1
except StopIteration:
data_iter = iter(data)
return data_iter
def _setup_seed(self, _: Any = None, iter_counter: Optional[int] = None, iteration: Optional[int] = None) -> None:
if iter_counter is None:
le = self._dataloader_len if self._dataloader_len is not None else 1
elif not iter_counter > 0:
raise ValueError("iter_counter should be positive value")
else:
le = iter_counter
if iteration is None:
iteration = self.state.iteration
manual_seed(self.state.seed + iteration // le) # type: ignore[operator]
|
import numbers
import warnings
import weakref
from collections.abc import Sequence
from enum import Enum
from types import DynamicClassAttribute
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, TYPE_CHECKING, Union
from torch.utils.data import DataLoader
from ignite.engine.utils import _check_signature
if TYPE_CHECKING:
from ignite.engine.engine import Engine
__all__ = ["CallableEventWithFilter", "EventEnum", "Events", "State", "EventsList", "RemovableEventHandle"]
class CallableEventWithFilter:
"""Single Event containing a filter, specifying whether the event should
be run at the current event (if the event type is correct)
Args:
value: The actual enum value. Only needed for internal use. Do not touch!
event_filter: A function taking the engine and the current event value as input and returning a
boolean to indicate whether this event should be executed. Defaults to None, which will result to a
function that always returns `True`
name: The enum-name of the current object. Only needed for internal use. Do not touch!
"""
def __init__(self, value: str, event_filter: Optional[Callable] = None, name: Optional[str] = None) -> None:
self.filter = event_filter
if not hasattr(self, "_value_"):
self._value_ = value
if not hasattr(self, "_name_") and name is not None:
self._name_ = name
# copied to be compatible to enum
@DynamicClassAttribute
def name(self) -> str:
"""The name of the Enum member."""
return self._name_
@DynamicClassAttribute
def value(self) -> str:
"""The value of the Enum member."""
return self._value_
def __call__(
self,
event_filter: Optional[Callable] = None,
every: Optional[int] = None,
once: Optional[Union[int, List]] = None,
before: Optional[int] = None,
after: Optional[int] = None,
) -> "CallableEventWithFilter":
"""
Makes the event class callable and accepts either an arbitrary callable as filter
(which must take in the engine and current event value and return a boolean) or an every or once value
Args:
event_filter: a filter function to check if the event should be executed when
the event type was fired
every: a value specifying how often the event should be fired
once: a value or list of values specifying when the event should be fired (if only once)
before: a value specifying the number of occurrence that event should be fired before
after: a value specifying the number of occurrence that event should be fired after
Returns:
CallableEventWithFilter: A new event having the same value but a different filter function
"""
if (
sum(
(
event_filter is not None,
once is not None,
(every is not None or before is not None or after is not None),
)
)
!= 1
):
raise ValueError("Only one of the input arguments should be specified, except before, after and every")
if (event_filter is not None) and not callable(event_filter):
raise TypeError("Argument event_filter should be a callable")
if (every is not None) and not (isinstance(every, numbers.Integral) and every > 0):
raise ValueError("Argument every should be integer and greater than zero")
if once is not None:
c1 = isinstance(once, numbers.Integral) and once > 0
c2 = isinstance(once, Sequence) and len(once) > 0 and all(isinstance(e, int) and e > 0 for e in once)
if not (c1 or c2):
raise ValueError(
f"Argument once should either be a positive integer or a list of positive integers, got {once}"
)
if (before is not None) and not (isinstance(before, numbers.Integral) and before >= 0):
raise ValueError("Argument before should be integer and greater or equal to zero")
if (after is not None) and not (isinstance(after, numbers.Integral) and after >= 0):
raise ValueError("Argument after should be integer and greater or equal to zero")
if every is not None:
if every == 1:
# Just return the event itself
event_filter = None
else:
event_filter = self.every_event_filter(every)
if once is not None:
event_filter = self.once_event_filter([once] if isinstance(once, int) else once)
if before is not None or after is not None:
if every is not None:
event_filter = self.every_before_and_after_event_filter(every, before, after)
else:
event_filter = self.before_and_after_event_filter(before, after)
# check signature:
if event_filter is not None:
_check_signature(event_filter, "event_filter", "engine", "event")
return CallableEventWithFilter(self.value, event_filter, self.name)
@staticmethod
def every_event_filter(every: int) -> Callable:
"""A wrapper for every event filter."""
def wrapper(engine: "Engine", event: int) -> bool:
if event % every == 0:
return True
return False
return wrapper
@staticmethod
def once_event_filter(once: List) -> Callable:
"""A wrapper for once event filter."""
def wrapper(engine: "Engine", event: int) -> bool:
if event in once:
return True
return False
return wrapper
@staticmethod
def before_and_after_event_filter(before: Optional[int] = None, after: Optional[int] = None) -> Callable:
"""A wrapper for before and after event filter."""
before_: Union[int, float] = float("inf") if before is None else before
after_: int = 0 if after is None else after
def wrapper(engine: "Engine", event: int) -> bool:
if event > after_ and event < before_:
return True
return False
return wrapper
@staticmethod
def every_before_and_after_event_filter(
every: int, before: Optional[int] = None, after: Optional[int] = None
) -> Callable:
"""A wrapper which triggers for every `every` iterations after `after` and before `before`."""
before_: Union[int, float] = float("inf") if before is None else before
after_: int = 0 if after is None else after
def wrapper(engine: "Engine", event: int) -> bool:
if after_ < event < before_ and (event - after_ - 1) % every == 0:
return True
return False
return wrapper
@staticmethod
def default_event_filter(engine: "Engine", event: int) -> bool:
"""Default event filter. This method is is deprecated and will be removed. Please, use None instead"""
warnings.warn("Events.default_event_filter is deprecated and will be removed. Please, use None instead")
return True
def __repr__(self) -> str:
out = f"Events.{self.name}"
if self.filter is not None:
out += f"(filter={self.filter})"
return out
def __eq__(self, other: Any) -> bool:
if isinstance(other, CallableEventWithFilter):
return self.name == other.name
elif isinstance(other, str):
return self.name == other
else:
return NotImplemented
def __hash__(self) -> int:
return hash(self._name_)
def __or__(self, other: Any) -> "EventsList":
return EventsList() | self | other
class EventEnum(CallableEventWithFilter, Enum):
"""Base class for all :class:`~ignite.engine.events.Events`. User defined custom events should also inherit
this class.
Examples:
Custom events based on the loss calculation and backward pass can be created as follows:
.. code-block:: python
from ignite.engine import EventEnum
class BackpropEvents(EventEnum):
BACKWARD_STARTED = 'backward_started'
BACKWARD_COMPLETED = 'backward_completed'
OPTIM_STEP_COMPLETED = 'optim_step_completed'
def update(engine, batch):
# ...
loss = criterion(y_pred, y)
engine.fire_event(BackpropEvents.BACKWARD_STARTED)
loss.backward()
engine.fire_event(BackpropEvents.BACKWARD_COMPLETED)
optimizer.step()
engine.fire_event(BackpropEvents.OPTIM_STEP_COMPLETED)
# ...
trainer = Engine(update)
trainer.register_events(*BackpropEvents)
@trainer.on(BackpropEvents.BACKWARD_STARTED)
def function_before_backprop(engine):
# ...
"""
def __new__(cls, value: str) -> "EventEnum":
obj = CallableEventWithFilter.__new__(cls)
obj._value_ = value
return obj
class Events(EventEnum):
"""Events that are fired by the :class:`~ignite.engine.engine.Engine` during execution. Built-in events:
- STARTED : triggered when engine's run is started
- EPOCH_STARTED : triggered when the epoch is started
- GET_BATCH_STARTED : triggered before next batch is fetched
- GET_BATCH_COMPLETED : triggered after the batch is fetched
- ITERATION_STARTED : triggered when an iteration is started
- ITERATION_COMPLETED : triggered when the iteration is ended
- DATALOADER_STOP_ITERATION : engine's specific event triggered when dataloader has no more data to provide
- EXCEPTION_RAISED : triggered when an exception is encountered
- TERMINATE_SINGLE_EPOCH : triggered when the run is about to end the current epoch,
after receiving a :meth:`~ignite.engine.engine.Engine.terminate_epoch()` or
:meth:`~ignite.engine.engine.Engine.terminate()` call.
- TERMINATE : triggered when the run is about to end completely,
after receiving :meth:`~ignite.engine.engine.Engine.terminate()` call.
- EPOCH_COMPLETED : triggered when the epoch is ended. Note that this is triggered even
when :meth:`~ignite.engine.engine.Engine.terminate_epoch()` is called.
- COMPLETED : triggered when engine's run is completed
The table below illustrates which events are triggered when various termination methods are called.
.. list-table::
:widths: 24 25 33 18
:header-rows: 1
* - Method
- EVENT_COMPLETED
- TERMINATE_SINGLE_EPOCH
- TERMINATE
* - no termination
- ✔
- ✗
- ✗
* - :meth:`~ignite.engine.engine.Engine.terminate_epoch()`
- ✔
- ✔
- ✗
* - :meth:`~ignite.engine.engine.Engine.terminate()`
- ✗
- ✔
- ✔
Since v0.3.0, Events become more flexible and allow to pass an event filter to the Engine:
.. code-block:: python
engine = Engine()
# a) custom event filter
def custom_event_filter(engine, event):
if event in [1, 2, 5, 10, 50, 100]:
return True
return False
@engine.on(Events.ITERATION_STARTED(event_filter=custom_event_filter))
def call_on_special_event(engine):
# do something on 1, 2, 5, 10, 50, 100 iterations
# b) "every" event filter
@engine.on(Events.ITERATION_STARTED(every=10))
def call_every(engine):
# do something every 10th iteration
# c) "once" event filter
@engine.on(Events.ITERATION_STARTED(once=50))
def call_once(engine):
# do something on 50th iteration
# d) "before" and "after" event filter
@engine.on(Events.EPOCH_STARTED(before=30, after=10))
def call_before(engine):
# do something in 11 to 29 epoch
# e) Mixing "every" and "before" / "after" event filters
@engine.on(Events.EPOCH_STARTED(every=5, before=25, after=8))
def call_every_itr_before_after(engine):
# do something on 9, 14, 19, 24 epochs
Event filter function `event_filter` accepts as input `engine` and `event` and should return True/False.
Argument `event` is the value of iteration or epoch, depending on which type of Events the function is passed.
Since v0.4.0, user can also combine events with `|`-operator:
.. code-block:: python
events = Events.STARTED | Events.COMPLETED | Events.ITERATION_STARTED(every=3)
engine = ...
@engine.on(events)
def call_on_events(engine):
# do something
Since v0.4.0, custom events defined by user should inherit from :class:`~ignite.engine.events.EventEnum` :
.. code-block:: python
class CustomEvents(EventEnum):
FOO_EVENT = "foo_event"
BAR_EVENT = "bar_event"
"""
EPOCH_STARTED = "epoch_started"
"""triggered when the epoch is started."""
EPOCH_COMPLETED = "epoch_completed"
"""Event attribute indicating epoch is ended."""
STARTED = "started"
"""triggered when engine's run is started."""
COMPLETED = "completed"
"""triggered when engine's run is completed"""
ITERATION_STARTED = "iteration_started"
"""triggered when an iteration is started."""
ITERATION_COMPLETED = "iteration_completed"
"""triggered when the iteration is ended."""
EXCEPTION_RAISED = "exception_raised"
"""triggered when an exception is encountered."""
GET_BATCH_STARTED = "get_batch_started"
"""triggered before next batch is fetched."""
GET_BATCH_COMPLETED = "get_batch_completed"
"""triggered after the batch is fetched."""
DATALOADER_STOP_ITERATION = "dataloader_stop_iteration"
"""engine's specific event triggered when dataloader has no more data to provide"""
TERMINATE = "terminate"
"""triggered when the run is about to end completely, after receiving terminate() call."""
TERMINATE_SINGLE_EPOCH = "terminate_single_epoch"
"""triggered when the run is about to end the current epoch,
after receiving a terminate_epoch() call."""
INTERRUPT = "interrupt"
"""triggered when the run is interrupted, after receiving interrupt() call."""
def __or__(self, other: Any) -> "EventsList":
return EventsList() | self | other
class EventsList:
"""Collection of events stacked by operator `__or__`.
.. code-block:: python
events = Events.STARTED | Events.COMPLETED
events |= Events.ITERATION_STARTED(every=3)
engine = ...
@engine.on(events)
def call_on_events(engine):
# do something
or
.. code-block:: python
@engine.on(Events.STARTED | Events.COMPLETED | Events.ITERATION_STARTED(every=3))
def call_on_events(engine):
# do something
"""
def __init__(self) -> None:
self._events: List[Union[Events, CallableEventWithFilter]] = []
def _append(self, event: Union[Events, CallableEventWithFilter]) -> None:
if not isinstance(event, (Events, CallableEventWithFilter)):
raise TypeError(f"Argument event should be Events or CallableEventWithFilter, got: {type(event)}")
self._events.append(event)
def __getitem__(self, item: int) -> Union[Events, CallableEventWithFilter]:
return self._events[item]
def __iter__(self) -> Iterator[Union[Events, CallableEventWithFilter]]:
return iter(self._events)
def __len__(self) -> int:
return len(self._events)
def __or__(self, other: Union[Events, CallableEventWithFilter]) -> "EventsList":
self._append(event=other)
return self
class State:
"""An object that is used to pass internal and user-defined state between event handlers. By default, state
contains the following attributes:
.. code-block:: python
state.iteration # 1-based, the first iteration is 1
state.epoch # 1-based, the first epoch is 1
state.seed # seed to set at each epoch
state.dataloader # data passed to engine
state.epoch_length # optional length of an epoch
state.max_epochs # number of epochs to run
state.max_iters # number of iterations to run
state.batch # batch passed to `process_function`
state.output # output of `process_function` after a single iteration
state.metrics # dictionary with defined metrics if any
state.times # dictionary with total and per-epoch times fetched on
# keys: Events.EPOCH_COMPLETED.name and Events.COMPLETED.name
Args:
kwargs: keyword arguments to be defined as State attributes.
"""
event_to_attr: Dict[Union[str, "Events", "CallableEventWithFilter"], str] = {
Events.GET_BATCH_STARTED: "iteration",
Events.GET_BATCH_COMPLETED: "iteration",
Events.ITERATION_STARTED: "iteration",
Events.ITERATION_COMPLETED: "iteration",
Events.EPOCH_STARTED: "epoch",
Events.EPOCH_COMPLETED: "epoch",
Events.STARTED: "epoch",
Events.COMPLETED: "epoch",
}
def __init__(self, **kwargs: Any) -> None:
self.iteration = 0
self.epoch = 0
self.epoch_length: Optional[int] = None
self.max_epochs: Optional[int] = None
self.max_iters: Optional[int] = None
self.output: Optional[int] = None
self.batch: Optional[int] = None
self.metrics: Dict[str, Any] = {}
self.dataloader: Optional[Union[DataLoader, Iterable[Any]]] = None
self.seed: Optional[int] = None
self.times: Dict[str, Optional[float]] = {
Events.EPOCH_COMPLETED.name: None,
Events.COMPLETED.name: None,
}
for k, v in kwargs.items():
setattr(self, k, v)
self._update_attrs()
def _update_attrs(self) -> None:
for value in self.event_to_attr.values():
if not hasattr(self, value):
setattr(self, value, 0)
def get_event_attrib_value(self, event_name: Union[str, Events, CallableEventWithFilter]) -> int:
"""Get the value of Event attribute with given `event_name`."""
if event_name not in State.event_to_attr:
raise RuntimeError(f"Unknown event name '{event_name}'")
return getattr(self, State.event_to_attr[event_name])
def __repr__(self) -> str:
s = "State:\n"
for attr, value in self.__dict__.items():
if not isinstance(value, (numbers.Number, str)):
value = type(value)
s += f"\t{attr}: {value}\n"
return s
class RemovableEventHandle:
"""A weakref handle to remove a registered event.
A handle that may be used to remove a registered event handler via the
remove method, with-statement, or context manager protocol. Returned from
:meth:`~ignite.engine.engine.Engine.add_event_handler`.
Args:
event_name: Registered event name.
handler: Registered event handler, stored as weakref.
engine: Target engine, stored as weakref.
Examples:
.. code-block:: python
engine = Engine()
def print_epoch(engine):
print(f"Epoch: {engine.state.epoch}")
with engine.add_event_handler(Events.EPOCH_COMPLETED, print_epoch):
# print_epoch handler registered for a single run
engine.run(data)
# print_epoch handler is now unregistered
"""
def __init__(
self, event_name: Union[CallableEventWithFilter, Enum, EventsList, Events], handler: Callable, engine: "Engine"
) -> None:
self.event_name = event_name
self.handler = weakref.ref(handler)
self.engine = weakref.ref(engine)
def remove(self) -> None:
"""Remove handler from engine."""
handler = self.handler()
engine = self.engine()
if handler is None or engine is None:
return
if hasattr(handler, "_parent"):
handler = handler._parent()
if handler is None:
raise RuntimeError(
"Internal error! Please fill an issue on https://github.com/pytorch/ignite/issues "
"if encounter this error. Thank you!"
)
if isinstance(self.event_name, EventsList):
for e in self.event_name:
if engine.has_event_handler(handler, e):
engine.remove_event_handler(handler, e)
else:
if engine.has_event_handler(handler, self.event_name):
engine.remove_event_handler(handler, self.event_name)
def __enter__(self) -> "RemovableEventHandle":
return self
def __exit__(self, *args: Any, **kwargs: Any) -> None:
self.remove()
|
from collections.abc import Mapping
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union
import torch
import ignite.distributed as idist
from ignite.engine.deterministic import DeterministicEngine
from ignite.engine.engine import Engine
from ignite.engine.events import CallableEventWithFilter, EventEnum, Events, EventsList, RemovableEventHandle, State
from ignite.metrics import Metric
from ignite.utils import convert_tensor
__all__ = [
"State",
"create_supervised_trainer",
"create_supervised_evaluator",
"Engine",
"DeterministicEngine",
"Events",
"EventsList",
"EventEnum",
"CallableEventWithFilter",
"RemovableEventHandle",
"supervised_training_step",
"supervised_training_step_amp",
"supervised_training_step_apex",
"supervised_training_step_tpu",
"supervised_evaluation_step",
"supervised_evaluation_step_amp",
]
def _prepare_batch(
batch: Sequence[torch.Tensor], device: Optional[Union[str, torch.device]] = None, non_blocking: bool = False
) -> Tuple[Union[torch.Tensor, Sequence, Mapping, str, bytes], ...]:
"""Prepare batch for training or evaluation: pass to a device with options."""
x, y = batch
return (
convert_tensor(x, device=device, non_blocking=non_blocking),
convert_tensor(y, device=device, non_blocking=non_blocking),
)
def supervised_training_step(
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
loss_fn: Union[Callable, torch.nn.Module],
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
model_transform: Callable[[Any], Any] = lambda output: output,
output_transform: Callable[[Any, Any, Any, torch.Tensor], Any] = lambda x, y, y_pred, loss: loss.item(),
gradient_accumulation_steps: int = 1,
) -> Callable:
"""Factory function for supervised training.
Args:
model: the model to train.
optimizer: the optimizer to use.
loss_fn: the loss function to use.
device: device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
Device can be CPU, GPU.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
model_transform: function that receives the output from the model and convert it into the form as required
by the loss function
output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
gradient_accumulation_steps: Number of steps the gradients should be accumulated across.
(default: 1 (means no gradient accumulation))
Returns:
Callable: update function.
Examples:
.. code-block:: python
from ignite.engine import Engine, supervised_training_step
model = ...
optimizer = ...
loss_fn = ...
update_fn = supervised_training_step(model, optimizer, loss_fn, 'cuda')
trainer = Engine(update_fn)
.. versionadded:: 0.4.5
.. versionchanged:: 0.4.7
Added Gradient Accumulation.
.. versionchanged:: 0.4.11
Added `model_transform` to transform model's output
"""
if gradient_accumulation_steps <= 0:
raise ValueError(
"Gradient_accumulation_steps must be strictly positive. "
"No gradient accumulation if the value set to one (default)."
)
def update(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
if (engine.state.iteration - 1) % gradient_accumulation_steps == 0:
optimizer.zero_grad()
model.train()
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
output = model(x)
y_pred = model_transform(output)
loss = loss_fn(y_pred, y)
if gradient_accumulation_steps > 1:
loss = loss / gradient_accumulation_steps
loss.backward()
if engine.state.iteration % gradient_accumulation_steps == 0:
optimizer.step()
return output_transform(x, y, y_pred, loss * gradient_accumulation_steps)
return update
def supervised_training_step_amp(
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
loss_fn: Union[Callable, torch.nn.Module],
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
model_transform: Callable[[Any], Any] = lambda output: output,
output_transform: Callable[[Any, Any, Any, torch.Tensor], Any] = lambda x, y, y_pred, loss: loss.item(),
scaler: Optional["torch.cuda.amp.GradScaler"] = None,
gradient_accumulation_steps: int = 1,
) -> Callable:
"""Factory function for supervised training using ``torch.cuda.amp``.
Args:
model: the model to train.
optimizer: the optimizer to use.
loss_fn: the loss function to use.
device: device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
Device can be CPU, GPU.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
model_transform: function that receives the output from the model and convert it into the form as required
by the loss function
output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
scaler: GradScaler instance for gradient scaling. (default: None)
gradient_accumulation_steps: Number of steps the gradients should be accumulated across.
(default: 1 (means no gradient accumulation))
Returns:
Callable: update function
Examples:
.. code-block:: python
from ignite.engine import Engine, supervised_training_step_amp
model = ...
optimizer = ...
loss_fn = ...
scaler = torch.cuda.amp.GradScaler(2**10)
update_fn = supervised_training_step_amp(model, optimizer, loss_fn, 'cuda', scaler=scaler)
trainer = Engine(update_fn)
.. versionadded:: 0.4.5
.. versionchanged:: 0.4.7
Added Gradient Accumulation.
.. versionchanged:: 0.4.11
Added `model_transform` to transform model's output
"""
try:
from torch.cuda.amp import autocast
except ImportError:
raise ImportError("Please install torch>=1.6.0 to use amp_mode='amp'.")
if gradient_accumulation_steps <= 0:
raise ValueError(
"Gradient_accumulation_steps must be strictly positive. "
"No gradient accumulation if the value set to one (default)."
)
def update(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
if (engine.state.iteration - 1) % gradient_accumulation_steps == 0:
optimizer.zero_grad()
model.train()
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
with autocast(enabled=True):
output = model(x)
y_pred = model_transform(output)
loss = loss_fn(y_pred, y)
if gradient_accumulation_steps > 1:
loss = loss / gradient_accumulation_steps
if scaler:
scaler.scale(loss).backward()
if engine.state.iteration % gradient_accumulation_steps == 0:
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
if engine.state.iteration % gradient_accumulation_steps == 0:
optimizer.step()
return output_transform(x, y, y_pred, loss * gradient_accumulation_steps)
return update
def supervised_training_step_apex(
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
loss_fn: Union[Callable, torch.nn.Module],
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
model_transform: Callable[[Any], Any] = lambda output: output,
output_transform: Callable[[Any, Any, Any, torch.Tensor], Any] = lambda x, y, y_pred, loss: loss.item(),
gradient_accumulation_steps: int = 1,
) -> Callable:
"""Factory function for supervised training using apex.
Args:
model: the model to train.
optimizer: the optimizer to use.
loss_fn: the loss function to use.
device: device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
Device can be CPU, GPU.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
model_transform: function that receives the output from the model and convert it into the form as required
by the loss function
output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
gradient_accumulation_steps: Number of steps the gradients should be accumulated across.
(default: 1 (means no gradient accumulation))
Returns:
Callable: update function.
Examples:
.. code-block:: python
from ignite.engine import Engine, supervised_training_step_apex
model = ...
optimizer = ...
loss_fn = ...
update_fn = supervised_training_step_apex(model, optimizer, loss_fn, 'cuda')
trainer = Engine(update_fn)
.. versionadded:: 0.4.5
.. versionchanged:: 0.4.7
Added Gradient Accumulation.
.. versionchanged:: 0.4.11
Added `model_transform` to transform model's output
"""
try:
from apex import amp as apex_amp
except ModuleNotFoundError:
raise ModuleNotFoundError("Please install apex from https://github.com/nvidia/apex to use amp_mode='apex'.")
if gradient_accumulation_steps <= 0:
raise ValueError(
"Gradient_accumulation_steps must be strictly positive. "
"No gradient accumulation if the value set to one (default)."
)
def update(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
if (engine.state.iteration - 1) % gradient_accumulation_steps == 0:
optimizer.zero_grad()
model.train()
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
output = model(x)
y_pred = model_transform(output)
loss = loss_fn(y_pred, y)
if gradient_accumulation_steps > 1:
loss = loss / gradient_accumulation_steps
with apex_amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
if engine.state.iteration % gradient_accumulation_steps == 0:
optimizer.step()
return output_transform(x, y, y_pred, loss * gradient_accumulation_steps)
return update
def supervised_training_step_tpu(
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
loss_fn: Union[Callable, torch.nn.Module],
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
model_transform: Callable[[Any], Any] = lambda output: output,
output_transform: Callable[[Any, Any, Any, torch.Tensor], Any] = lambda x, y, y_pred, loss: loss.item(),
gradient_accumulation_steps: int = 1,
) -> Callable:
"""Factory function for supervised training using ``torch_xla``.
Args:
model: the model to train.
optimizer: the optimizer to use.
loss_fn: the loss function to use.
device: device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
Device can be CPU, TPU.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
model_transform: function that receives the output from the model and convert it into the form as required
by the loss function
output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
gradient_accumulation_steps: Number of steps the gradients should be accumulated across.
(default: 1 (means no gradient accumulation))
Returns:
Callable: update function.
Examples:
.. code-block:: python
from ignite.engine import Engine, supervised_training_step_tpu
model = ...
optimizer = ...
loss_fn = ...
update_fn = supervised_training_step_tpu(model, optimizer, loss_fn, 'xla')
trainer = Engine(update_fn)
.. versionadded:: 0.4.5
.. versionchanged:: 0.4.7
Added Gradient Accumulation argument for all supervised training methods.
.. versionchanged:: 0.4.11
Added `model_transform` to transform model's output
"""
try:
import torch_xla.core.xla_model as xm
except ModuleNotFoundError:
raise ModuleNotFoundError("torch_xla cannot be imported, please install PyTorch XLA.")
if gradient_accumulation_steps <= 0:
raise ValueError(
"Gradient_accumulation_steps must be strictly positive. "
"No gradient accumulation if the value set to one (default)."
)
def update(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
if (engine.state.iteration - 1) % gradient_accumulation_steps == 0:
optimizer.zero_grad()
model.train()
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
output = model(x)
y_pred = model_transform(output)
loss = loss_fn(y_pred, y)
if gradient_accumulation_steps > 1:
loss = loss / gradient_accumulation_steps
loss.backward()
if engine.state.iteration % gradient_accumulation_steps == 0:
xm.optimizer_step(optimizer, barrier=True)
return output_transform(x, y, y_pred, loss * gradient_accumulation_steps)
return update
def _check_arg(
on_tpu: bool, amp_mode: Optional[str], scaler: Optional[Union[bool, "torch.cuda.amp.GradScaler"]]
) -> Tuple[Optional[str], Optional["torch.cuda.amp.GradScaler"]]:
"""Checking tpu, amp and GradScaler instance combinations."""
if on_tpu and not idist.has_xla_support:
raise RuntimeError("In order to run on TPU, please install PyTorch XLA")
if amp_mode and on_tpu:
raise ValueError("amp_mode cannot be used with xla device. Consider using amp_mode=None or device='cuda'.")
if scaler:
if amp_mode != "amp":
raise ValueError(f"scaler argument is {scaler}, but amp_mode is {amp_mode}. Consider using amp_mode='amp'.")
elif amp_mode == "amp" and isinstance(scaler, bool):
try:
from torch.cuda.amp import GradScaler
except ImportError:
raise ImportError("Please install torch>=1.6.0 to use scaler argument.")
scaler = GradScaler(enabled=True)
if on_tpu:
return "tpu", None
elif scaler and amp_mode == "amp":
return amp_mode, scaler # type: ignore[return-value]
else:
return amp_mode, None
def create_supervised_trainer(
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
loss_fn: Union[Callable, torch.nn.Module],
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
model_transform: Callable[[Any], Any] = lambda output: output,
output_transform: Callable[[Any, Any, Any, torch.Tensor], Any] = lambda x, y, y_pred, loss: loss.item(),
deterministic: bool = False,
amp_mode: Optional[str] = None,
scaler: Union[bool, "torch.cuda.amp.GradScaler"] = False,
gradient_accumulation_steps: int = 1,
) -> Engine:
"""Factory function for creating a trainer for supervised models.
Args:
model: the model to train.
optimizer: the optimizer to use.
loss_fn: the loss function to use.
device: device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
Device can be CPU, GPU or TPU.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
model_transform: function that receives the output from the model and convert it into the form as required
by the loss function
output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
deterministic: if True, returns deterministic engine of type
:class:`~ignite.engine.deterministic.DeterministicEngine`, otherwise :class:`~ignite.engine.engine.Engine`
(default: False).
amp_mode: can be ``amp`` or ``apex``, model and optimizer will be casted to float16 using
`torch.cuda.amp <https://pytorch.org/docs/stable/amp.html>`_ for ``amp`` and
using `apex <https://nvidia.github.io/apex>`_ for ``apex``. (default: None)
scaler: GradScaler instance for gradient scaling if `torch>=1.6.0`
and ``amp_mode`` is ``amp``. If ``amp_mode`` is ``apex``, this argument will be ignored.
If True, will create default GradScaler. If GradScaler instance is passed, it will be used instead.
(default: False)
gradient_accumulation_steps: Number of steps the gradients should be accumulated across.
(default: 1 (means no gradient accumulation))
Returns:
a trainer engine with supervised update function.
Examples:
Create a trainer
.. code-block:: python
from ignite.engine import create_supervised_trainer
from ignite.utils import convert_tensor
from ignite.contrib.handlers.tqdm_logger import ProgressBar
model = ...
loss = ...
optimizer = ...
dataloader = ...
def prepare_batch_fn(batch, device, non_blocking):
x = ... # get x from batch
y = ... # get y from batch
# return a tuple of (x, y) that can be directly runned as
# `loss_fn(model(x), y)`
return (
convert_tensor(x, device, non_blocking),
convert_tensor(y, device, non_blocking)
)
def output_transform_fn(x, y, y_pred, loss):
# return only the loss is actually the default behavior for
# trainer engine, but you can return anything you want
return loss.item()
trainer = create_supervised_trainer(
model,
optimizer,
loss,
prepare_batch=prepare_batch_fn,
output_transform=output_transform_fn
)
pbar = ProgressBar()
pbar.attach(trainer, output_transform=lambda x: {"loss": x})
trainer.run(dataloader, max_epochs=5)
Note:
If ``scaler`` is True, GradScaler instance will be created internally and trainer state has attribute named
``scaler`` for that instance and can be used for saving and loading.
Note:
`engine.state.output` for this engine is defined by `output_transform` parameter and is the loss
of the processed batch by default.
.. warning::
The internal use of `device` has changed.
`device` will now *only* be used to move the input data to the correct device.
The `model` should be moved by the user before creating an optimizer.
For more information see:
- `PyTorch Documentation <https://pytorch.org/docs/stable/optim.html#constructing-it>`_
- `PyTorch's Explanation <https://github.com/pytorch/pytorch/issues/7844#issuecomment-503713840>`_
.. warning::
If ``amp_mode='apex'`` , the model(s) and optimizer(s) must be initialized beforehand
since ``amp.initialize`` should be called after you have finished constructing your model(s)
and optimizer(s), but before you send your model through any DistributedDataParallel wrapper.
See more: https://nvidia.github.io/apex/amp.html#module-apex.amp
.. versionchanged:: 0.4.5
- Added ``amp_mode`` argument for automatic mixed precision.
- Added ``scaler`` argument for gradient scaling.
.. versionchanged:: 0.4.7
Added Gradient Accumulation argument for all supervised training methods.
.. versionchanged:: 0.4.11
Added ``model_transform`` to transform model's output
"""
device_type = device.type if isinstance(device, torch.device) else device
on_tpu = "xla" in device_type if device_type is not None else False
mode, _scaler = _check_arg(on_tpu, amp_mode, scaler)
if mode == "amp":
_update = supervised_training_step_amp(
model,
optimizer,
loss_fn,
device,
non_blocking,
prepare_batch,
model_transform,
output_transform,
_scaler,
gradient_accumulation_steps,
)
elif mode == "apex":
_update = supervised_training_step_apex(
model,
optimizer,
loss_fn,
device,
non_blocking,
prepare_batch,
model_transform,
output_transform,
gradient_accumulation_steps,
)
elif mode == "tpu":
_update = supervised_training_step_tpu(
model,
optimizer,
loss_fn,
device,
non_blocking,
prepare_batch,
model_transform,
output_transform,
gradient_accumulation_steps,
)
else:
_update = supervised_training_step(
model,
optimizer,
loss_fn,
device,
non_blocking,
prepare_batch,
model_transform,
output_transform,
gradient_accumulation_steps,
)
trainer = Engine(_update) if not deterministic else DeterministicEngine(_update)
if _scaler and scaler and isinstance(scaler, bool):
trainer.state.scaler = _scaler # type: ignore[attr-defined]
return trainer
def supervised_evaluation_step(
model: torch.nn.Module,
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
model_transform: Callable[[Any], Any] = lambda output: output,
output_transform: Callable[[Any, Any, Any], Any] = lambda x, y, y_pred: (y_pred, y),
) -> Callable:
"""
Factory function for supervised evaluation.
Args:
model: the model to train.
device: device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
model_transform: function that receives the output from the model and convert it into the predictions:
``y_pred = model_transform(model(x))``.
output_transform: function that receives 'x', 'y', 'y_pred' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)` which fits
output expected by metrics. If you change it you should use `output_transform` in metrics.
Returns:
Inference function.
Note:
`engine.state.output` for this engine is defined by `output_transform` parameter and is
a tuple of `(batch_pred, batch_y)` by default.
.. warning::
The internal use of `device` has changed.
`device` will now *only* be used to move the input data to the correct device.
The `model` should be moved by the user before creating an optimizer.
.. versionadded:: 0.4.5
.. versionchanged:: 0.4.12
Added ``model_transform`` to transform model's output
"""
def evaluate_step(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
model.eval()
with torch.no_grad():
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
output = model(x)
y_pred = model_transform(output)
return output_transform(x, y, y_pred)
return evaluate_step
def supervised_evaluation_step_amp(
model: torch.nn.Module,
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
model_transform: Callable[[Any], Any] = lambda output: output,
output_transform: Callable[[Any, Any, Any], Any] = lambda x, y, y_pred: (y_pred, y),
) -> Callable:
"""
Factory function for supervised evaluation using ``torch.cuda.amp``.
Args:
model: the model to train.
device: device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
model_transform: function that receives the output from the model and convert it into the predictions:
``y_pred = model_transform(model(x))``.
output_transform: function that receives 'x', 'y', 'y_pred' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)` which fits
output expected by metrics. If you change it you should use `output_transform` in metrics.
Returns:
Inference function.
Note:
`engine.state.output` for this engine is defined by `output_transform` parameter and is
a tuple of `(batch_pred, batch_y)` by default.
.. warning::
The internal use of `device` has changed.
`device` will now *only* be used to move the input data to the correct device.
The `model` should be moved by the user before creating an optimizer.
.. versionadded:: 0.4.5
.. versionchanged:: 0.4.12
Added ``model_transform`` to transform model's output
"""
try:
from torch.cuda.amp import autocast
except ImportError:
raise ImportError("Please install torch>=1.6.0 to use amp_mode='amp'.")
def evaluate_step(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
model.eval()
with torch.no_grad():
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
with autocast(enabled=True):
output = model(x)
y_pred = model_transform(output)
return output_transform(x, y, y_pred)
return evaluate_step
def create_supervised_evaluator(
model: torch.nn.Module,
metrics: Optional[Dict[str, Metric]] = None,
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
model_transform: Callable[[Any], Any] = lambda output: output,
output_transform: Callable[[Any, Any, Any], Any] = lambda x, y, y_pred: (y_pred, y),
amp_mode: Optional[str] = None,
) -> Engine:
"""
Factory function for creating an evaluator for supervised models.
Args:
model: the model to train.
metrics: a map of metric names to Metrics.
device: device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
model_transform: function that receives the output from the model and convert it into the predictions:
``y_pred = model_transform(model(x))``.
output_transform: function that receives 'x', 'y', 'y_pred' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)` which fits
output expected by metrics. If you change it you should use `output_transform` in metrics.
amp_mode: can be ``amp``, model will be casted to float16 using
`torch.cuda.amp <https://pytorch.org/docs/stable/amp.html>`_
Returns:
an evaluator engine with supervised inference function.
Note:
`engine.state.output` for this engine is defined by `output_transform` parameter and is
a tuple of `(batch_pred, batch_y)` by default.
.. warning::
The internal use of `device` has changed.
`device` will now *only* be used to move the input data to the correct device.
The `model` should be moved by the user before creating an optimizer.
For more information see:
- `PyTorch Documentation <https://pytorch.org/docs/stable/optim.html#constructing-it>`_
- `PyTorch's Explanation <https://github.com/pytorch/pytorch/issues/7844#issuecomment-503713840>`_
.. versionchanged:: 0.4.5
Added ``amp_mode`` argument for automatic mixed precision.
.. versionchanged:: 0.4.12
Added ``model_transform`` to transform model's output
"""
device_type = device.type if isinstance(device, torch.device) else device
on_tpu = "xla" in device_type if device_type is not None else False
mode, _ = _check_arg(on_tpu, amp_mode, None)
metrics = metrics or {}
if mode == "amp":
evaluate_step = supervised_evaluation_step_amp(
model,
device,
non_blocking=non_blocking,
prepare_batch=prepare_batch,
model_transform=model_transform,
output_transform=output_transform,
)
else:
evaluate_step = supervised_evaluation_step(
model,
device,
non_blocking=non_blocking,
prepare_batch=prepare_batch,
model_transform=model_transform,
output_transform=output_transform,
)
evaluator = Engine(evaluate_step)
for name, metric in metrics.items():
metric.attach(evaluator, name)
return evaluator
|
import functools
import logging
import math
import time
import warnings
import weakref
from collections import defaultdict, OrderedDict
from collections.abc import Mapping
from typing import Any, Callable, Dict, Generator, Iterable, Iterator, List, Optional, Tuple, Union
from torch.utils.data import DataLoader
from ignite.base import Serializable
from ignite.engine.events import CallableEventWithFilter, EventEnum, Events, EventsList, RemovableEventHandle, State
from ignite.engine.utils import _check_signature, _to_hours_mins_secs
__all__ = ["Engine"]
class Engine(Serializable):
"""Runs a given ``process_function`` over each batch of a dataset, emitting events as it goes.
Args:
process_function: A function receiving a handle to the engine and the current batch
in each iteration, and returns data to be stored in the engine's state.
Attributes:
state: object that is used to pass internal and user-defined state between event handlers.
It is created with the engine and its attributes (e.g. ``state.iteration``, ``state.epoch`` etc) are reset
on every :meth:`~ignite.engine.engine.Engine.run`.
last_event_name: last event name triggered by the engine.
Note:
:class:`~ignite.engine.engine.Engine` implementation has changed in v0.4.10 with "interrupt/resume" feature.
Engine may behave differently on certain corner cases compared to the one from v0.4.9 and before.
In such case, you can set ``Engine.interrupt_resume_enabled = False`` to restore previous behaviour.
Examples:
Create a basic trainer
.. code-block:: python
model = ...
model = model.cuda()
optimized = ...
criterion = ...
def train_step(engine, batch):
model.train()
inputs, targets = batch[0].cuda(), batch[1].cuda()
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
return loss.item()
trainer = Engine(update_model)
@trainer.on(Events.ITERATION_COMPLETED(every=100))
def log_training(engine):
batch_loss = engine.state.output
lr = optimizer.param_groups[0]['lr']
e = engine.state.epoch
n = engine.state.max_epochs
i = engine.state.iteration
print(f"Epoch {e}/{n} : {i} - batch loss: {batch_loss}, lr: {lr}")
trainer.run(data_loader, max_epochs=5)
> Epoch 1/5 : 100 - batch loss: 0.10874069479016124, lr: 0.01
> ...
> Epoch 2/5 : 1700 - batch loss: 0.4217900575859437, lr: 0.01
Create a basic evaluator to compute metrics
.. code-block:: python
from ignite.metrics import Accuracy
def predict_on_batch(engine, batch)
model.eval()
with torch.no_grad():
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
y_pred = model(x)
return y_pred, y
evaluator = Engine(predict_on_batch)
Accuracy().attach(evaluator, "val_acc")
evaluator.run(val_dataloader)
Compute image mean/std on training dataset
.. code-block:: python
from ignite.metrics import Average
def compute_mean_std(engine, batch):
b, c, *_ = batch['image'].shape
data = batch['image'].reshape(b, c, -1).to(dtype=torch.float64)
mean = torch.mean(data, dim=-1).sum(dim=0)
mean2 = torch.mean(data ** 2, dim=-1).sum(dim=0)
return {"mean": mean, "mean^2": mean2}
compute_engine = Engine(compute_mean_std)
img_mean = Average(output_transform=lambda output: output['mean'])
img_mean.attach(compute_engine, 'mean')
img_mean2 = Average(output_transform=lambda output: output['mean^2'])
img_mean2.attach(compute_engine, 'mean2')
state = compute_engine.run(train_loader)
state.metrics['std'] = torch.sqrt(state.metrics['mean2'] - state.metrics['mean'] ** 2)
mean = state.metrics['mean'].tolist()
std = state.metrics['std'].tolist()
Resume engine's run from a state. User can load a `state_dict` and run engine starting from loaded state :
.. code-block:: python
# Restore from an epoch
state_dict = {"epoch": 3, "max_epochs": 100, "epoch_length": len(data_loader)}
# or an iteration
# state_dict = {"iteration": 500, "max_epochs": 100, "epoch_length": len(data_loader)}
trainer = Engine(...)
trainer.load_state_dict(state_dict)
trainer.run(data)
"""
_state_dict_all_req_keys = ("epoch_length", "max_epochs")
_state_dict_one_of_opt_keys = ("iteration", "epoch")
# Flag to disable engine._internal_run as generator feature for BC
interrupt_resume_enabled = True
def __init__(self, process_function: Callable[["Engine", Any], Any]):
self._event_handlers: Dict[Any, List] = defaultdict(list)
self.logger = logging.getLogger(__name__ + "." + self.__class__.__name__)
self._process_function = process_function
self.last_event_name: Optional[Events] = None
self.should_terminate = False
self.should_terminate_single_epoch = False
self.should_interrupt = False
self.state = State()
self._state_dict_user_keys: List[str] = []
self._allowed_events: List[EventEnum] = []
self._dataloader_iter: Optional[Iterator[Any]] = None
self._init_iter: Optional[int] = None
self.register_events(*Events)
if self._process_function is None:
raise ValueError("Engine must be given a processing function in order to run.")
_check_signature(process_function, "process_function", self, None)
# generator provided by self._internal_run_as_gen
self._internal_run_generator: Optional[Generator] = None
def register_events(
self, *event_names: Union[List[str], List[EventEnum]], event_to_attr: Optional[dict] = None
) -> None:
"""Add events that can be fired.
Registering an event will let the user trigger these events at any point.
This opens the door to make the :meth:`~ignite.engine.engine.Engine.run` loop even more
configurable.
By default, the events from :class:`~ignite.engine.events.Events` are registered.
Args:
event_names: Defines the name of the event being supported. New events can be a str
or an object derived from :class:`~ignite.engine.events.EventEnum`. See example below.
event_to_attr: A dictionary to map an event to a state attribute.
Examples:
.. code-block:: python
from ignite.engine import Engine, Events, EventEnum
class CustomEvents(EventEnum):
FOO_EVENT = "foo_event"
BAR_EVENT = "bar_event"
def process_function(e, batch):
# ...
trainer.fire_event("bwd_event")
loss.backward()
# ...
trainer.fire_event("opt_event")
optimizer.step()
trainer = Engine(process_function)
trainer.register_events(*CustomEvents)
trainer.register_events("bwd_event", "opt_event")
@trainer.on(Events.EPOCH_COMPLETED)
def trigger_custom_event():
if required(...):
trainer.fire_event(CustomEvents.FOO_EVENT)
else:
trainer.fire_event(CustomEvents.BAR_EVENT)
@trainer.on(CustomEvents.FOO_EVENT)
def do_foo_op():
# ...
@trainer.on(CustomEvents.BAR_EVENT)
def do_bar_op():
# ...
Example with State Attribute:
.. code-block:: python
from enum import Enum
from ignite.engine import Engine, EventEnum
class TBPTT_Events(EventEnum):
TIME_ITERATION_STARTED = "time_iteration_started"
TIME_ITERATION_COMPLETED = "time_iteration_completed"
TBPTT_event_to_attr = {
TBPTT_Events.TIME_ITERATION_STARTED: 'time_iteration',
TBPTT_Events.TIME_ITERATION_COMPLETED: 'time_iteration'
}
engine = Engine(process_function)
engine.register_events(*TBPTT_Events, event_to_attr=TBPTT_event_to_attr)
engine.run(data)
# engine.state contains an attribute time_iteration, which can be accessed
# using engine.state.time_iteration
"""
if not (event_to_attr is None or isinstance(event_to_attr, dict)):
raise ValueError(f"Expected event_to_attr to be dictionary. Got {type(event_to_attr)}.")
for index, e in enumerate(event_names):
if not isinstance(e, (str, EventEnum)):
raise TypeError(f"Value at {index} of event_names should be a str or EventEnum, but given {e}")
self._allowed_events.append(e)
if event_to_attr and e in event_to_attr:
State.event_to_attr[e] = event_to_attr[e]
# we need to update state attributes associated with new custom events
self.state._update_attrs()
def _handler_wrapper(self, handler: Callable, event_name: Any, event_filter: Callable) -> Callable:
# signature of the following wrapper will be inspected during registering to check if engine is necessary
# we have to build a wrapper with relevant signature : solution is functools.wraps
@functools.wraps(handler)
def wrapper(*args: Any, **kwargs: Any) -> Any:
event = self.state.get_event_attrib_value(event_name)
if event_filter(self, event):
return handler(*args, **kwargs)
# setup input handler as parent to make has_event_handler work
setattr(wrapper, "_parent", weakref.ref(handler))
return wrapper
def _assert_allowed_event(self, event_name: Any) -> None:
if event_name not in self._allowed_events:
self.logger.error(f"attempt to add event handler to an invalid event {event_name}")
raise ValueError(f"Event {event_name} is not a valid event for this {self.__class__.__name__}.")
def add_event_handler(self, event_name: Any, handler: Callable, *args: Any, **kwargs: Any) -> RemovableEventHandle:
"""Add an event handler to be executed when the specified event is fired.
Args:
event_name: An event or a list of events to attach the handler. Valid events are
from :class:`~ignite.engine.events.Events` or any ``event_name`` added by
:meth:`~ignite.engine.engine.Engine.register_events`.
handler: the callable event handler that should be invoked. No restrictions on its signature.
The first argument can be optionally `engine`, the :class:`~ignite.engine.engine.Engine` object,
handler is bound to.
args: optional args to be passed to ``handler``.
kwargs: optional keyword args to be passed to ``handler``.
Returns:
:class:`~ignite.engine.events.RemovableEventHandle`, which can be used to remove the handler.
Note:
Note that other arguments can be passed to the handler in addition to the `*args` and `**kwargs`
passed here, for example during :attr:`~ignite.engine.events.Events.EXCEPTION_RAISED`.
Examples:
.. code-block:: python
engine = Engine(process_function)
def print_epoch(engine):
print(f"Epoch: {engine.state.epoch}")
engine.add_event_handler(Events.EPOCH_COMPLETED, print_epoch)
events_list = Events.EPOCH_COMPLETED | Events.COMPLETED
def execute_something():
# do some thing not related to engine
pass
engine.add_event_handler(events_list, execute_something)
Note:
Since v0.3.0, Events become more flexible and allow to pass an event filter to the Engine.
See :class:`~ignite.engine.events.Events` for more details.
"""
if isinstance(event_name, EventsList):
for e in event_name:
self.add_event_handler(e, handler, *args, **kwargs)
return RemovableEventHandle(event_name, handler, self)
if isinstance(event_name, CallableEventWithFilter) and event_name.filter is not None:
event_filter = event_name.filter
handler = self._handler_wrapper(handler, event_name, event_filter)
self._assert_allowed_event(event_name)
event_args: Tuple[Any, ...] = ()
if event_name == Events.EXCEPTION_RAISED:
event_args += (Exception(),)
elif event_name == Events.TERMINATE_SINGLE_EPOCH:
event_args += (0,)
try:
_check_signature(handler, "handler", self, *(event_args + args), **kwargs)
self._event_handlers[event_name].append((handler, (self,) + args, kwargs))
except ValueError:
_check_signature(handler, "handler", *(event_args + args), **kwargs)
self._event_handlers[event_name].append((handler, args, kwargs))
self.logger.debug(f"Added handler for event {event_name}")
return RemovableEventHandle(event_name, handler, self)
def has_event_handler(self, handler: Callable, event_name: Optional[Any] = None) -> bool:
"""Check if the specified event has the specified handler.
Args:
handler: the callable event handler.
event_name: The event the handler attached to. Set this
to ``None`` to search all events.
"""
if event_name is not None:
if event_name not in self._event_handlers:
return False
events: Union[List[Any], Dict[Any, List]] = [event_name]
else:
events = self._event_handlers
for e in events:
for h, _, _ in self._event_handlers[e]:
if self._compare_handlers(handler, h):
return True
return False
@staticmethod
def _compare_handlers(user_handler: Callable, registered_handler: Callable) -> bool:
if hasattr(registered_handler, "_parent"):
registered_handler = registered_handler._parent()
return registered_handler == user_handler
def remove_event_handler(self, handler: Callable, event_name: Any) -> None:
"""Remove event handler `handler` from registered handlers of the engine
Args:
handler: the callable event handler that should be removed
event_name: The event the handler attached to.
"""
if event_name not in self._event_handlers:
raise ValueError(f"Input event name '{event_name}' does not exist")
new_event_handlers = [
(h, args, kwargs)
for h, args, kwargs in self._event_handlers[event_name]
if not self._compare_handlers(handler, h)
]
if len(new_event_handlers) == len(self._event_handlers[event_name]):
raise ValueError(f"Input handler '{handler}' is not found among registered event handlers")
self._event_handlers[event_name] = new_event_handlers
def on(self, event_name: Any, *args: Any, **kwargs: Any) -> Callable:
"""Decorator shortcut for :meth:`~ignite.engine.engine.Engine.add_event_handler`.
Args:
event_name: An event to attach the handler to. Valid events are from :class:`~ignite.engine.events.Events`
or any ``event_name`` added by :meth:`~ignite.engine.engine.Engine.register_events`.
args: optional args to be passed to `handler`.
kwargs: optional keyword args to be passed to `handler`.
Examples:
.. code-block:: python
engine = Engine(process_function)
@engine.on(Events.EPOCH_COMPLETED)
def print_epoch():
print(f"Epoch: {engine.state.epoch}")
@engine.on(Events.EPOCH_COMPLETED | Events.COMPLETED)
def execute_something():
# do some thing not related to engine
pass
"""
def decorator(f: Callable) -> Callable:
self.add_event_handler(event_name, f, *args, **kwargs)
return f
return decorator
def _fire_event(self, event_name: Any, *event_args: Any, **event_kwargs: Any) -> None:
"""Execute all the handlers associated with given event.
This method executes all handlers associated with the event
`event_name`. Optional positional and keyword arguments can be used to
pass arguments to **all** handlers added with this event. These
arguments updates arguments passed using :meth:`~ignite.engine.engine.Engine.add_event_handler`.
Args:
event_name: event for which the handlers should be executed. Valid
events are from :class:`~ignite.engine.events.Events` or any `event_name` added by
:meth:`~ignite.engine.engine.Engine.register_events`.
*event_args: optional args to be passed to all handlers.
**event_kwargs: optional keyword args to be passed to all handlers.
"""
self.logger.debug(f"{self.state.epoch} | {self.state.iteration}, Firing handlers for event {event_name}")
self.last_event_name = event_name
for func, args, kwargs in self._event_handlers[event_name]:
kwargs.update(event_kwargs)
first, others = ((args[0],), args[1:]) if (args and args[0] == self) else ((), args)
func(*first, *(event_args + others), **kwargs)
def fire_event(self, event_name: Any) -> None:
"""Execute all the handlers associated with given event.
This method executes all handlers associated with the event
`event_name`. This is the method used in :meth:`~ignite.engine.engine.Engine.run` to call the
core events found in :class:`~ignite.engine.events.Events`.
Custom events can be fired if they have been registered before with
:meth:`~ignite.engine.engine.Engine.register_events`. The engine `state` attribute should be used
to exchange "dynamic" data among `process_function` and handlers.
This method is called automatically for core events. If no custom
events are used in the engine, there is no need for the user to call
the method.
Args:
event_name: event for which the handlers should be executed. Valid
events are from :class:`~ignite.engine.events.Events` or any `event_name` added by
:meth:`~ignite.engine.engine.Engine.register_events`.
"""
self._assert_allowed_event(event_name)
return self._fire_event(event_name)
def interrupt(self) -> None:
"""Sends interrupt signal to the engine, so that it interrupts the run after
the current iteration. The run can be resumed by calling
:meth:`~ignite.engine.engine.Engine.run`. Data iteration will continue from the interrupted state.
Examples:
.. testcode::
from ignite.engine import Engine, Events
data = range(10)
max_epochs = 3
def check_input_data(e, b):
print(f"Epoch {engine.state.epoch}, Iter {engine.state.iteration} | data={b}")
i = (e.state.iteration - 1) % len(data)
assert b == data[i]
engine = Engine(check_input_data)
@engine.on(Events.ITERATION_COMPLETED(every=11))
def call_interrupt():
engine.interrupt()
print("Start engine run with interruptions:")
state = engine.run(data, max_epochs=max_epochs)
print("1 Engine run is interrupted at ", state.epoch, state.iteration)
state = engine.run(data, max_epochs=max_epochs)
print("2 Engine run is interrupted at ", state.epoch, state.iteration)
state = engine.run(data, max_epochs=max_epochs)
print("3 Engine ended the run at ", state.epoch, state.iteration)
.. dropdown:: Output
.. testoutput::
Start engine run with interruptions:
Epoch 1, Iter 1 | data=0
Epoch 1, Iter 2 | data=1
Epoch 1, Iter 3 | data=2
Epoch 1, Iter 4 | data=3
Epoch 1, Iter 5 | data=4
Epoch 1, Iter 6 | data=5
Epoch 1, Iter 7 | data=6
Epoch 1, Iter 8 | data=7
Epoch 1, Iter 9 | data=8
Epoch 1, Iter 10 | data=9
Epoch 2, Iter 11 | data=0
1 Engine run is interrupted at 2 11
Epoch 2, Iter 12 | data=1
Epoch 2, Iter 13 | data=2
Epoch 2, Iter 14 | data=3
Epoch 2, Iter 15 | data=4
Epoch 2, Iter 16 | data=5
Epoch 2, Iter 17 | data=6
Epoch 2, Iter 18 | data=7
Epoch 2, Iter 19 | data=8
Epoch 2, Iter 20 | data=9
Epoch 3, Iter 21 | data=0
Epoch 3, Iter 22 | data=1
2 Engine run is interrupted at 3 22
Epoch 3, Iter 23 | data=2
Epoch 3, Iter 24 | data=3
Epoch 3, Iter 25 | data=4
Epoch 3, Iter 26 | data=5
Epoch 3, Iter 27 | data=6
Epoch 3, Iter 28 | data=7
Epoch 3, Iter 29 | data=8
Epoch 3, Iter 30 | data=9
3 Engine ended the run at 3 30
.. versionadded:: 0.4.10
"""
if not self.interrupt_resume_enabled:
raise RuntimeError(
"Engine 'interrupt/resume' feature is disabled. "
"Please, set Engine.interrupt_resume_enabled=True to enable it"
)
self.logger.info("interrupt signaled. Engine will interrupt the run after current iteration is finished.")
self.should_interrupt = True
def terminate(self) -> None:
"""Sends terminate signal to the engine, so that it terminates completely the run. The run is
terminated after the event on which ``terminate`` method was called. The following events are triggered:
- ...
- Terminating event
- :attr:`~ignite.engine.events.Events.TERMINATE`
- :attr:`~ignite.engine.events.Events.COMPLETED`
Examples:
.. testcode::
from ignite.engine import Engine, Events
def func(engine, batch):
print(engine.state.epoch, engine.state.iteration, " | ", batch)
max_epochs = 4
data = range(10)
engine = Engine(func)
@engine.on(Events.ITERATION_COMPLETED(once=14))
def terminate():
print(f"-> terminate at iteration: {engine.state.iteration}")
engine.terminate()
print("Start engine run:")
state = engine.run(data, max_epochs=max_epochs)
print("1 Engine run is terminated at ", state.epoch, state.iteration)
state = engine.run(data, max_epochs=max_epochs)
print("2 Engine ended the run at ", state.epoch, state.iteration)
.. dropdown:: Output
.. testoutput::
Start engine run:
1 1 | 0
1 2 | 1
1 3 | 2
1 4 | 3
1 5 | 4
1 6 | 5
1 7 | 6
1 8 | 7
1 9 | 8
1 10 | 9
2 11 | 0
2 12 | 1
2 13 | 2
2 14 | 3
-> terminate at iteration: 14
1 Engine run is terminated at 2 14
3 15 | 0
3 16 | 1
3 17 | 2
3 18 | 3
3 19 | 4
3 20 | 5
3 21 | 6
3 22 | 7
3 23 | 8
3 24 | 9
4 25 | 0
4 26 | 1
4 27 | 2
4 28 | 3
4 29 | 4
4 30 | 5
4 31 | 6
4 32 | 7
4 33 | 8
4 34 | 9
2 Engine ended the run at 4 34
.. versionchanged:: 0.4.10
Behaviour changed, for details see https://github.com/pytorch/ignite/issues/2669
"""
self.logger.info("Terminate signaled. Engine will stop after current iteration is finished.")
self.should_terminate = True
def terminate_epoch(self) -> None:
"""Sends terminate signal to the engine, so that it terminates the current epoch. The run
continues from the next epoch. The following events are triggered:
- ...
- Event on which ``terminate_epoch`` method is called
- :attr:`~ignite.engine.events.Events.TERMINATE_SINGLE_EPOCH`
- :attr:`~ignite.engine.events.Events.EPOCH_COMPLETED`
- :attr:`~ignite.engine.events.Events.EPOCH_STARTED`
- ...
"""
self.logger.info(
"Terminate current epoch is signaled. "
"Current epoch iteration will stop after current iteration is finished."
)
self.should_terminate_single_epoch = True
def _handle_exception(self, e: BaseException) -> None:
if Events.EXCEPTION_RAISED in self._event_handlers:
self._fire_event(Events.EXCEPTION_RAISED, e)
else:
raise e
@property
def state_dict_user_keys(self) -> List:
return self._state_dict_user_keys
def state_dict(self) -> OrderedDict:
"""Returns a dictionary containing engine's state: "epoch_length", "max_epochs" and "iteration" and
other state values defined by `engine.state_dict_user_keys`
.. code-block:: python
engine = Engine(...)
engine.state_dict_user_keys.append("alpha")
engine.state_dict_user_keys.append("beta")
...
@engine.on(Events.STARTED)
def init_user_value(_):
engine.state.alpha = 0.1
engine.state.beta = 1.0
@engine.on(Events.COMPLETED)
def save_engine(_):
state_dict = engine.state_dict()
assert "alpha" in state_dict and "beta" in state_dict
torch.save(state_dict, "/tmp/engine.pt")
Returns:
OrderedDict:
a dictionary containing engine's state
"""
keys: Tuple[str, ...] = self._state_dict_all_req_keys + (self._state_dict_one_of_opt_keys[0],)
keys += tuple(self._state_dict_user_keys)
return OrderedDict([(k, getattr(self.state, k)) for k in keys])
def load_state_dict(self, state_dict: Mapping) -> None:
"""Setups engine from `state_dict`.
State dictionary should contain keys: `iteration` or `epoch`, `max_epochs` and `epoch_length`.
If `engine.state_dict_user_keys` contains keys, they should be also present in the state dictionary.
Iteration and epoch values are 0-based: the first iteration or epoch is zero.
This method does not remove any custom attributes added by user.
Args:
state_dict: a dict with parameters
.. code-block:: python
# Restore from the 4rd epoch
state_dict = {"epoch": 3, "max_epochs": 100, "epoch_length": len(data_loader)}
# or 500th iteration
# state_dict = {"iteration": 499, "max_epochs": 100, "epoch_length": len(data_loader)}
trainer = Engine(...)
trainer.load_state_dict(state_dict)
trainer.run(data)
"""
super(Engine, self).load_state_dict(state_dict)
for k in self._state_dict_user_keys:
if k not in state_dict:
raise ValueError(
f"Required user state attribute '{k}' is absent in provided state_dict '{state_dict.keys()}'"
)
self.state.max_epochs = state_dict["max_epochs"]
self.state.epoch_length = state_dict["epoch_length"]
for k in self._state_dict_user_keys:
setattr(self.state, k, state_dict[k])
if "iteration" in state_dict:
self.state.iteration = state_dict["iteration"]
self.state.epoch = 0
if self.state.epoch_length is not None:
self.state.epoch = self.state.iteration // self.state.epoch_length
elif "epoch" in state_dict:
self.state.epoch = state_dict["epoch"]
if self.state.epoch_length is None:
raise ValueError(
"If epoch is provided in the state dict, epoch_length should not be None. "
f"Input state_dict: {state_dict}"
)
self.state.iteration = self.state.epoch_length * self.state.epoch
@staticmethod
def _is_done(state: State) -> bool:
is_done_iters = state.max_iters is not None and state.iteration >= state.max_iters
is_done_count = (
state.epoch_length is not None
and state.max_epochs is not None
and state.iteration >= state.epoch_length * state.max_epochs
)
is_done_epochs = state.max_epochs is not None and state.epoch >= state.max_epochs
return is_done_iters or is_done_count or is_done_epochs
def set_data(self, data: Union[Iterable, DataLoader]) -> None:
"""Method to set data. After calling the method the next batch passed to `processing_function` is
from newly provided data. Please, note that epoch length is not modified.
Args:
data: Collection of batches allowing repeated iteration (e.g., list or `DataLoader`).
Examples:
User can switch data provider during the training:
.. code-block:: python
data1 = ...
data2 = ...
switch_iteration = 5000
def train_step(e, batch):
# when iteration <= switch_iteration
# batch is from data1
# when iteration > switch_iteration
# batch is from data2
...
trainer = Engine(train_step)
@trainer.on(Events.ITERATION_COMPLETED(once=switch_iteration))
def switch_dataloader():
trainer.set_data(data2)
trainer.run(data1, max_epochs=100)
"""
self.state.dataloader = data
self._dataloader_iter = iter(self.state.dataloader)
def run(
self,
data: Optional[Iterable] = None,
max_epochs: Optional[int] = None,
max_iters: Optional[int] = None,
epoch_length: Optional[int] = None,
) -> State:
"""Runs the ``process_function`` over the passed data.
Engine has a state and the following logic is applied in this function:
- At the first call, new state is defined by `max_epochs`, `max_iters`, `epoch_length`, if provided.
A timer for total and per-epoch time is initialized when Events.STARTED is handled.
- If state is already defined such that there are iterations to run until `max_epochs` and no input arguments
provided, state is kept and used in the function.
- If state is defined and engine is "done" (no iterations to run until `max_epochs`), a new state is defined.
- If state is defined, engine is NOT "done", then input arguments if provided override defined state.
Args:
data: Collection of batches allowing repeated iteration (e.g., list or `DataLoader`). If not provided, then
``epoch_length`` is required and ``batch`` argument of ``process_function`` will be ``None``.
max_epochs: Max epochs to run for (default: None).
If a new state should be created (first run or run again from ended engine), it's default value is 1.
If run is resuming from a state, provided `max_epochs` will be taken into account and should be larger
than `engine.state.max_epochs`.
epoch_length: Number of iterations to count as one epoch. By default, it can be set as
`len(data)`. If `data` is an iterator and `epoch_length` is not set, then it will be automatically
determined as the iteration on which data iterator raises `StopIteration`.
This argument should not change if run is resuming from a state.
max_iters: Number of iterations to run for.
`max_iters` and `max_epochs` are mutually exclusive; only one of the two arguments should be provided.
Returns:
State: output state.
Note:
User can dynamically preprocess input batch at :attr:`~ignite.engine.events.Events.ITERATION_STARTED` and
store output batch in `engine.state.batch`. Latter is passed as usually to `process_function` as argument:
.. code-block:: python
trainer = ...
@trainer.on(Events.ITERATION_STARTED)
def switch_batch(engine):
engine.state.batch = preprocess_batch(engine.state.batch)
Restart the training from the beginning. User can reset `max_epochs = None`:
.. code-block:: python
# ...
trainer.run(train_loader, max_epochs=5)
# Reset model weights etc. and restart the training
trainer.state.max_epochs = None
trainer.run(train_loader, max_epochs=2)
"""
if data is not None and not isinstance(data, Iterable):
raise TypeError("Argument data should be iterable")
if self.state.max_epochs is not None:
# Check and apply overridden parameters
if max_epochs is not None:
if max_epochs < self.state.epoch:
raise ValueError(
"Argument max_epochs should be greater than or equal to the start "
f"epoch defined in the state: {max_epochs} vs {self.state.epoch}. "
"Please, set engine.state.max_epochs = None "
"before calling engine.run() in order to restart the training from the beginning."
)
self.state.max_epochs = max_epochs
if epoch_length is not None:
if epoch_length != self.state.epoch_length:
raise ValueError(
"Argument epoch_length should be same as in the state, "
f"but given {epoch_length} vs {self.state.epoch_length}"
)
if self.state.max_epochs is None or (self._is_done(self.state) and self._internal_run_generator is None):
# Create new state
if epoch_length is None:
if data is None:
raise ValueError("epoch_length should be provided if data is None")
epoch_length = self._get_data_length(data)
if epoch_length is not None and epoch_length < 1:
raise ValueError("Input data has zero size. Please provide non-empty data")
if max_iters is None:
if max_epochs is None:
max_epochs = 1
else:
if max_epochs is not None:
raise ValueError(
"Arguments max_iters and max_epochs are mutually exclusive."
"Please provide only max_epochs or max_iters."
)
if epoch_length is not None:
max_epochs = math.ceil(max_iters / epoch_length)
self.state.iteration = 0
self.state.epoch = 0
self.state.max_epochs = max_epochs
self.state.max_iters = max_iters
self.state.epoch_length = epoch_length
# Reset generator if previously used
self._internal_run_generator = None
self.logger.info(f"Engine run starting with max_epochs={max_epochs}.")
else:
self.logger.info(
f"Engine run resuming from iteration {self.state.iteration}, "
f"epoch {self.state.epoch} until {self.state.max_epochs} epochs"
)
if self.state.epoch_length is None and data is None:
raise ValueError("epoch_length should be provided if data is None")
if self.should_terminate:
# If engine was terminated and now is resuming from terminated state
# we need to initialize iter_counter as 0
self._init_iter = 0
if self._dataloader_iter is None:
self.state.dataloader = data
if self.interrupt_resume_enabled:
return self._internal_run()
else:
return self._internal_run_legacy()
@staticmethod
def _init_timers(state: State) -> None:
state.times[Events.EPOCH_COMPLETED.name] = 0.0
state.times[Events.COMPLETED.name] = 0.0
def _get_data_length(self, data: Iterable) -> Optional[int]:
try:
if hasattr(data, "__len__"):
return len(data) # type: ignore[arg-type]
except TypeError:
# _InfiniteConstantSampler can raise a TypeError on DataLoader length of a IterableDataset
pass
return None
def _setup_dataloader_iter(self) -> None:
if self.state.dataloader is None:
if self.state.epoch_length is None:
raise RuntimeError(
"Internal error, self.state.epoch_length is None. "
"Please, file an issue if you encounter this error."
)
self._dataloader_iter = _get_none_data_iter(self.state.epoch_length)
else:
self._dataloader_iter = iter(self.state.dataloader)
def _setup_engine(self) -> None:
self._setup_dataloader_iter()
if self._init_iter is None:
iteration = self.state.iteration
# Below we define initial counter value for _run_once_on_dataset to measure a single epoch
if self.state.epoch_length is not None:
iteration %= self.state.epoch_length
self._init_iter = iteration
def _internal_run(self) -> State:
if self._internal_run_generator is None:
self._internal_run_generator = self._internal_run_as_gen()
try:
return next(self._internal_run_generator)
except StopIteration as out:
self._internal_run_generator = None
return out.value
def _internal_run_as_gen(self) -> Generator:
self.should_terminate = self.should_terminate_single_epoch = self.should_interrupt = False
self._init_timers(self.state)
try:
try:
start_time = time.time()
self._fire_event(Events.STARTED)
yield from self._maybe_terminate_or_interrupt()
while not self._is_done(self.state) and not self.should_terminate:
self.state.epoch += 1
handlers_start_time = time.time()
self._fire_event(Events.EPOCH_STARTED)
epoch_time_taken = time.time() - handlers_start_time
yield from self._maybe_terminate_or_interrupt()
if self._dataloader_iter is None:
self._setup_engine()
epoch_time_taken += yield from self._run_once_on_dataset_as_gen()
# time is available for handlers but must be updated after fire
self.state.times[Events.EPOCH_COMPLETED.name] = epoch_time_taken
handlers_start_time = time.time()
self._fire_event(Events.EPOCH_COMPLETED)
epoch_time_taken += time.time() - handlers_start_time
# update time wrt handlers
self.state.times[Events.EPOCH_COMPLETED.name] = epoch_time_taken
yield from self._maybe_terminate_or_interrupt()
hours, mins, secs = _to_hours_mins_secs(epoch_time_taken)
self.logger.info(
f"Epoch[{self.state.epoch}] Complete. Time taken: {hours:02d}:{mins:02d}:{secs:06.3f}"
)
except _EngineTerminateException:
self._fire_event(Events.TERMINATE)
time_taken = time.time() - start_time
# time is available for handlers but must be updated after fire
self.state.times[Events.COMPLETED.name] = time_taken
handlers_start_time = time.time()
self._fire_event(Events.COMPLETED)
time_taken += time.time() - handlers_start_time
# update time wrt handlers
self.state.times[Events.COMPLETED.name] = time_taken
hours, mins, secs = _to_hours_mins_secs(time_taken)
self.logger.info(f"Engine run complete. Time taken: {hours:02d}:{mins:02d}:{secs:06.3f}")
except BaseException as e:
self._dataloader_iter = None
self.logger.error(f"Engine run is terminating due to exception: {e}")
self._handle_exception(e)
self._dataloader_iter = None
return self.state
def _maybe_terminate_or_interrupt(self) -> Generator:
if self.should_terminate:
raise _EngineTerminateException()
if self.should_terminate_single_epoch:
raise _EngineTerminateSingleEpochException()
if self.should_interrupt:
self._fire_event(Events.INTERRUPT)
self.should_interrupt = False
yield self.state
def _run_once_on_dataset_as_gen(self) -> Generator[State, None, float]:
start_time = time.time()
# We need to setup iter_counter > 0 if we resume from an iteration
iter_counter = 0 if self._init_iter is None else self._init_iter
self._init_iter = None
should_exit = False
try:
if self._dataloader_iter is None:
raise RuntimeError(
"Internal error, self._dataloader_iter is None. "
"Please, file an issue if you encounter this error."
)
while True:
self.state.batch = self.state.output = None
try:
# Avoid Events.GET_BATCH_STARTED triggered twice when data iter is restarted
if self.last_event_name != Events.DATALOADER_STOP_ITERATION:
self._fire_event(Events.GET_BATCH_STARTED)
yield from self._maybe_terminate_or_interrupt()
self.state.batch = next(self._dataloader_iter)
self._fire_event(Events.GET_BATCH_COMPLETED)
yield from self._maybe_terminate_or_interrupt()
iter_counter += 1
should_exit = False
except StopIteration:
# Define self.state.epoch_length if it is not yet set
if self.state.epoch_length is None:
# Define epoch length and stop the epoch
self.state.epoch_length = iter_counter
if self.state.max_iters is not None:
self.state.max_epochs = math.ceil(self.state.max_iters / self.state.epoch_length)
break
# Should exit while loop if we can not iterate
if should_exit:
if not self._is_done(self.state):
total_iters = (
self.state.epoch_length * self.state.max_epochs
if self.state.max_epochs is not None
else self.state.max_iters
)
warnings.warn(
"Data iterator can not provide data anymore but required total number of "
"iterations to run is not reached. "
f"Current iteration: {self.state.iteration} vs Total iterations to run : {total_iters}"
)
break
self._fire_event(Events.DATALOADER_STOP_ITERATION)
yield from self._maybe_terminate_or_interrupt()
self._setup_dataloader_iter()
should_exit = True
continue
self.state.iteration += 1
self._fire_event(Events.ITERATION_STARTED)
yield from self._maybe_terminate_or_interrupt()
self.state.output = self._process_function(self, self.state.batch)
self._fire_event(Events.ITERATION_COMPLETED)
yield from self._maybe_terminate_or_interrupt()
if self.state.epoch_length is not None and iter_counter == self.state.epoch_length:
break
if self.state.max_iters is not None and self.state.iteration == self.state.max_iters:
self.should_terminate = True
raise _EngineTerminateException()
except _EngineTerminateSingleEpochException:
self._fire_event(Events.TERMINATE_SINGLE_EPOCH, iter_counter=iter_counter)
self.should_terminate_single_epoch = False
self._setup_dataloader_iter()
except _EngineTerminateException as e:
# we need to reraise this exception such that it is not handled
# as a general exception by the code below
raise e
except Exception as e:
self.logger.error(f"Current run is terminating due to exception: {e}")
self._handle_exception(e)
return time.time() - start_time
def _maybe_terminate_legacy(self) -> None:
if self.should_terminate:
raise _EngineTerminateException()
if self.should_terminate_single_epoch:
raise _EngineTerminateSingleEpochException()
def _internal_run_legacy(self) -> State:
# internal_run without generator for BC
self.should_terminate = self.should_terminate_single_epoch = self.should_interrupt = False
self._init_timers(self.state)
try:
try:
start_time = time.time()
self._fire_event(Events.STARTED)
self._maybe_terminate_legacy()
while not self._is_done(self.state) and not self.should_terminate:
self.state.epoch += 1
handlers_start_time = time.time()
self._fire_event(Events.EPOCH_STARTED)
epoch_time_taken = time.time() - handlers_start_time
self._maybe_terminate_legacy()
if self._dataloader_iter is None:
self._setup_engine()
epoch_time_taken += self._run_once_on_dataset_legacy()
# time is available for handlers but must be updated after fire
self.state.times[Events.EPOCH_COMPLETED.name] = epoch_time_taken
handlers_start_time = time.time()
self._fire_event(Events.EPOCH_COMPLETED)
epoch_time_taken += time.time() - handlers_start_time
# update time wrt handlers
self.state.times[Events.EPOCH_COMPLETED.name] = epoch_time_taken
self._maybe_terminate_legacy()
hours, mins, secs = _to_hours_mins_secs(epoch_time_taken)
self.logger.info(
f"Epoch[{self.state.epoch}] Complete. Time taken: {hours:02d}:{mins:02d}:{secs:06.3f}"
)
except _EngineTerminateException:
self._fire_event(Events.TERMINATE)
time_taken = time.time() - start_time
# time is available for handlers but must be updated after fire
self.state.times[Events.COMPLETED.name] = time_taken
handlers_start_time = time.time()
self._fire_event(Events.COMPLETED)
time_taken += time.time() - handlers_start_time
# update time wrt handlers
self.state.times[Events.COMPLETED.name] = time_taken
hours, mins, secs = _to_hours_mins_secs(time_taken)
self.logger.info(f"Engine run complete. Time taken: {hours:02d}:{mins:02d}:{secs:06.3f}")
except BaseException as e:
self._dataloader_iter = None
self.logger.error(f"Engine run is terminating due to exception: {e}")
self._handle_exception(e)
self._dataloader_iter = None
return self.state
def _run_once_on_dataset_legacy(self) -> float:
start_time = time.time()
# We need to setup iter_counter > 0 if we resume from an iteration
iter_counter = 0 if self._init_iter is None else self._init_iter
self._init_iter = None
should_exit = False
try:
if self._dataloader_iter is None:
raise RuntimeError(
"Internal error, self._dataloader_iter is None. "
"Please, file an issue if you encounter this error."
)
while True:
self.state.batch = self.state.output = None
try:
# Avoid Events.GET_BATCH_STARTED triggered twice when data iter is restarted
if self.last_event_name != Events.DATALOADER_STOP_ITERATION:
self._fire_event(Events.GET_BATCH_STARTED)
self._maybe_terminate_legacy()
self.state.batch = next(self._dataloader_iter)
self._fire_event(Events.GET_BATCH_COMPLETED)
self._maybe_terminate_legacy()
iter_counter += 1
should_exit = False
except StopIteration:
# Define self.state.epoch_length if it is not yet set
if self.state.epoch_length is None:
# Define epoch length and stop the epoch
self.state.epoch_length = iter_counter
if self.state.max_iters is not None:
self.state.max_epochs = math.ceil(self.state.max_iters / self.state.epoch_length)
break
# Should exit while loop if we can not iterate
if should_exit:
if not self._is_done(self.state):
total_iters = (
self.state.epoch_length * self.state.max_epochs
if self.state.max_epochs is not None
else self.state.max_iters
)
warnings.warn(
"Data iterator can not provide data anymore but required total number of "
"iterations to run is not reached. "
f"Current iteration: {self.state.iteration} vs Total iterations to run : {total_iters}"
)
break
self._fire_event(Events.DATALOADER_STOP_ITERATION)
self._maybe_terminate_legacy()
self._setup_dataloader_iter()
should_exit = True
continue
self.state.iteration += 1
self._fire_event(Events.ITERATION_STARTED)
self._maybe_terminate_legacy()
self.state.output = self._process_function(self, self.state.batch)
self._fire_event(Events.ITERATION_COMPLETED)
self._maybe_terminate_legacy()
if self.state.epoch_length is not None and iter_counter == self.state.epoch_length:
break
if self.state.max_iters is not None and self.state.iteration == self.state.max_iters:
self.should_terminate = True
raise _EngineTerminateException()
except _EngineTerminateSingleEpochException:
self._fire_event(Events.TERMINATE_SINGLE_EPOCH, iter_counter=iter_counter)
self.should_terminate_single_epoch = False
self._setup_dataloader_iter()
except _EngineTerminateException as e:
# we need to reraise this exception such that it is not handled
# as a general exception by the code below
raise e
except Exception as e:
self.logger.error(f"Current run is terminating due to exception: {e}")
self._handle_exception(e)
return time.time() - start_time
def _get_none_data_iter(size: int) -> Iterator:
# Sized iterator for data as None
for _ in range(size):
yield None
class _EngineTerminateSingleEpochException(Exception):
"""
Exception associated with Terminate Single Epoch event
"""
pass
class _EngineTerminateException(Exception):
"""
Exception associated with Terminate event
"""
pass
|
import inspect
from typing import Any, Callable, Tuple, Union
def _check_signature(fn: Callable, fn_description: str, *args: Any, **kwargs: Any) -> None:
# if handler with filter, check the handler rather than the decorator
if hasattr(fn, "_parent"):
signature = inspect.signature(fn._parent())
else:
signature = inspect.signature(fn)
try: # try without engine
signature.bind(*args, **kwargs)
except TypeError as exc:
fn_params = list(signature.parameters)
exception_msg = str(exc)
passed_params = list(args) + list(kwargs)
raise ValueError(
f"Error adding {fn} '{fn_description}': "
f"takes parameters {fn_params} but will be called with {passed_params}"
f"({exception_msg})."
)
def _to_hours_mins_secs(time_taken: Union[float, int]) -> Tuple[int, int, float]:
"""Convert seconds to hours, mins, seconds and milliseconds."""
mins, secs = divmod(time_taken, 60)
hours, mins = divmod(mins, 60)
return round(hours), round(mins), secs
|
import warnings
from copy import deepcopy
from typing import Optional, Union
import torch.nn as nn
from ignite.engine import CallableEventWithFilter, Engine, Events, EventsList
from ignite.handlers.param_scheduler import BaseParamScheduler
from ignite.handlers.state_param_scheduler import LambdaStateScheduler
__all__ = ["EMAHandler"]
class EMAWarmUp:
def __init__(self, momentum_warmup: float, warmup_iters: int, momentum: float) -> None:
self.momentum_warmup = momentum_warmup
self.warmup_iters = warmup_iters
self.momentum = momentum
def __call__(self, event_index: int) -> float:
denominator = max(1, self.warmup_iters - 1)
curr_momentum = self.momentum_warmup + (self.momentum - self.momentum_warmup) * (event_index - 1) / denominator
if self.momentum >= self.momentum_warmup:
return min(self.momentum, curr_momentum)
else:
return max(self.momentum, curr_momentum)
class EMAHandler:
r"""Exponential moving average (EMA) handler can be used to compute a smoothed version of model.
The EMA model is updated as follows:
.. math:: \theta_{\text{EMA}, t+1} = (1 - \lambda) \cdot \theta_{\text{EMA}, t} + \lambda \cdot \theta_{t}
where :math:`\theta_{\text{EMA}, t}` and :math:`\theta_{t}` are the EMA weights and online model weights at
:math:`t`-th iteration, respectively; :math:`\lambda` is the update momentum. Current momentum can be retrieved
from ``Engine.state.ema_momentum``.
Args:
model: the online model for which an EMA model will be computed. If ``model`` is ``DataParallel`` or
``DistributedDataParallel``, the EMA smoothing will be applied to ``model.module`` .
momentum: the update momentum after warmup phase, should be float in range :math:`\left(0, 1 \right)`.
momentum_warmup: the initial update momentum during warmup phase.
warmup_iters: iterations of warmup.
handle_buffers: how to handle model buffers during training. There are three options: 1. "copy" means
copying the buffers of the online model; 2. "update" means applying EMA to the buffers of the online
model; 3. "ema_train" means set the EMA model to ``train`` mode and skip copying or updating the buffers.
Attributes:
ema_model: the exponential moving averaged model.
model: the online model that is tracked by EMAHandler. It is ``model.module`` if ``model`` in
the initialization method is an instance of ``DistributedDataParallel``.
momentum: the update momentum.
handle_buffers: how to handle model buffers during training.
Note:
The EMA model is already in ``eval`` mode if ``handle_buffers`` is "copy" or "update". If model in the
arguments is an ``nn.Module`` or ``DistributedDataParallel``, the EMA model is an ``nn.Module`` and it is on
the same device as the online model. If the model is an ``nn.DataParallel``, then the EMA model is an
``nn.DataParallel``.
Note:
It is recommended to initialize and use an EMA handler in following steps:
1. Initialize ``model`` (``nn.Module`` or ``DistributedDataParallel``) and ``ema_handler`` (``EMAHandler``).
2. Build ``trainer`` (``ignite.engine.Engine``).
3. Resume from checkpoint for ``model`` and ``ema_handler.ema_model``.
4. Attach ``ema_handler`` to ``trainer``.
Examples:
.. code-block:: python
device = torch.device("cuda:0")
model = nn.Linear(2, 1).to(device)
# update the ema every 5 iterations
ema_handler = EMAHandler(model, momentum=0.0002)
# get the ema model, which is an instance of nn.Module
ema_model = ema_handler.ema_model
trainer = Engine(train_step_fn)
to_load = {"model": model, "ema_model", ema_model, "trainer", trainer}
if resume_from is not None:
Checkpoint.load_objects(to_load, checkpoint=resume_from)
# update the EMA model every 5 iterations
ema_handler.attach(trainer, name="ema_momentum", event=Events.ITERATION_COMPLETED(every=5))
# add other handlers
to_save = to_load
ckpt_handler = Checkpoint(to_save, DiskSaver(...), ...)
trainer.add_event_handler(Events.EPOCH_COMPLETED, ckpt_handler)
# current momentum can be retrieved from engine.state,
# the attribute name is the `name` parameter used in the attach function
@trainer.on(Events.ITERATION_COMPLETED):
def print_ema_momentum(engine):
print(f"current momentum: {engine.state.ema_momentum}"
# use ema model for validation
val_step_fn = get_val_step_fn(ema_model)
evaluator = Engine(val_step_fn)
@trainer.on(Events.EPOCH_COMPLETED)
def run_validation(engine):
engine.run(val_data_loader)
trainer.run(...)
The following example shows how to perform warm-up to the EMA momentum:
.. code-block:: python
device = torch.device("cuda:0")
model = nn.Linear(2, 1).to(device)
# linearly change the EMA momentum from 0.2 to 0.002 in the first 100 iterations,
# then keep a constant EMA momentum of 0.002 afterwards
ema_handler = EMAHandler(model, momentum=0.002, momentum_warmup=0.2, warmup_iters=100)
engine = Engine(step_fn)
ema_handler.attach(engine, name="ema_momentum")
engine.run(...)
The following example shows how to attach two handlers to the same trainer:
.. code-block:: python
generator = build_generator(...)
discriminator = build_discriminator(...)
gen_handler = EMAHandler(generator)
disc_handler = EMAHandler(discriminator)
step_fn = get_step_fn(...)
engine = Engine(step_fn)
# update EMA model of generator every 1 iteration
gen_handler.attach(engine, "gen_ema_momentum", event=Events.ITERATION_COMPLETED)
# update EMA model of discriminator every 2 iteration
disc_handler.attach(engine, "dis_ema_momentum", event=Events.ITERATION_COMPLETED(every=2))
@engine.on(Events.ITERATION_COMPLETED)
def print_ema_momentum(engine):
print(f"current momentum for generator: {engine.state.gen_ema_momentum}")
print(f"current momentum for discriminator: {engine.state.disc_ema_momentum}")
engine.run(...)
.. versionadded:: 0.4.6
"""
def __init__(
self,
model: nn.Module,
momentum: float = 0.0002,
momentum_warmup: Optional[float] = None,
warmup_iters: Optional[int] = None,
handle_buffers: str = "copy",
) -> None:
if not 0 < momentum < 1:
raise ValueError(f"Invalid momentum: {momentum}")
self.momentum = momentum
self._momentum_lambda_obj: Optional[EMAWarmUp] = None
if momentum_warmup is not None and warmup_iters is not None:
self.momentum_scheduler: Optional[BaseParamScheduler] = None
self._momentum_lambda_obj = EMAWarmUp(momentum_warmup, warmup_iters, momentum)
if not isinstance(model, nn.Module):
raise ValueError(
f"model should be an instance of nn.Module or its subclasses, but got"
f"model: {model.__class__.__name__}"
)
if isinstance(model, nn.parallel.DistributedDataParallel):
model = model.module
self.model = model
self.ema_model = deepcopy(self.model)
for param in self.ema_model.parameters():
param.detach_()
if handle_buffers not in ("copy", "update", "ema_train"):
raise ValueError(
f"handle_buffers can only be one of 'copy', 'update', 'ema_train', " f"but got {handle_buffers}"
)
self.handle_buffers = handle_buffers
if self.handle_buffers == "ema_train":
self.ema_model.train()
else:
self.ema_model.eval()
def _update_ema_model(self, engine: Engine, name: str) -> None:
"""Update weights of ema model"""
momentum = getattr(engine.state, name)
for ema_p, model_p in zip(self.ema_model.parameters(), self.model.parameters()):
ema_p.mul_(1.0 - momentum).add_(model_p.data, alpha=momentum)
if self.handle_buffers == "update":
for ema_b, model_b in zip(self.ema_model.buffers(), self.model.buffers()):
try:
ema_b.mul_(1.0 - momentum).add_(model_b.data, alpha=momentum)
except RuntimeError:
# Handle the case where ema_b is torch.int64, torch.int32 etc.,
# where a runtime error will be thrown when performing the in-place operations with floats.
# In this case, just copy the data
ema_b.data = model_b.data
elif self.handle_buffers == "copy":
# assign the buffers
for ema_b, model_b in zip(self.ema_model.buffers(), self.model.buffers()):
ema_b.data = model_b.data
else:
pass
def attach(
self,
engine: Engine,
name: str = "ema_momentum",
warn_if_exists: bool = True,
event: Union[str, Events, CallableEventWithFilter, EventsList] = Events.ITERATION_COMPLETED,
) -> None:
"""Attach the handler to engine. After the handler is attached, the ``Engine.state`` will add an new attribute
with name ``name`` if the attribute does not exist. Then, the current momentum can be retrieved from
``Engine.state`` when the engine runs.
Note:
There are two cases where a momentum with name ``name`` already exists: 1. the engine has loaded its
state dict after resuming. In this case, there is no need to initialize the momentum again, and users
can set ``warn_if_exists`` to False to suppress the warning message; 2. another handler has created
a state attribute with the same name. In this case, users should choose another name for the ema momentum.
Args:
engine: trainer to which the handler will be attached.
name: attribute name for retrieving EMA momentum from ``Engine.state``. It should be a unique name since a
trainer can have multiple EMA handlers.
warn_if_exists: if True, a warning will be thrown if the momentum with name ``name`` already exists.
event: event when the EMA momentum and EMA model are updated.
"""
if hasattr(engine.state, name):
if warn_if_exists:
warnings.warn(
f"Attribute '{name}' already exists in Engine.state. It might because 1. the engine has loaded its "
f"state dict or 2. {name} is already created by other handlers. Turn off this warning by setting"
f"warn_if_exists to False.",
category=UserWarning,
)
else:
setattr(engine.state, name, self.momentum)
if self._momentum_lambda_obj is not None:
self.momentum_scheduler = LambdaStateScheduler(self._momentum_lambda_obj, param_name="ema_momentum")
# first update the momentum and then update the EMA model
self.momentum_scheduler.attach(engine, event)
engine.add_event_handler(event, self._update_ema_model, name)
|
import itertools
import math
import numbers
import tempfile
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from copy import copy
from pathlib import Path
from typing import Any, cast, Dict, List, Mapping, Optional, Sequence, Tuple, Type, Union
import torch
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, ReduceLROnPlateau
from torch.optim.optimizer import Optimizer
# https://github.com/pytorch/ignite/issues/2773
try:
from torch.optim.lr_scheduler import LRScheduler as PyTorchLRScheduler
except ImportError:
from torch.optim.lr_scheduler import _LRScheduler as PyTorchLRScheduler
from ignite.engine import Engine
class BaseParamScheduler(metaclass=ABCMeta):
r"""An abstract class for updating an engine state or optimizer's parameter value during
training.
Args:
param_name: name of engine state or optimizer's parameter to update.
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
.. versionadded:: 0.4.7
"""
def __init__(self, param_name: str, save_history: bool = False):
self.param_name = param_name
self.event_index = 0
self._save_history = save_history
self._state_attrs = ["event_index", "param_name", "save_history"]
@property
def save_history(self) -> bool:
return self._save_history
@save_history.setter
def save_history(self, value: bool) -> None:
self._save_history = value
def state_dict(self) -> Dict[str, Any]:
"""Returns a dictionary containing a whole state of BaseParamScheduler.
Returns:
dict:
a dictionary containing a whole state of BaseParamScheduler
"""
destination = OrderedDict()
for name in self._state_attrs:
if hasattr(self, name):
val = getattr(self, name)
if hasattr(val, "state_dict"):
val = val.state_dict()
destination[name] = copy(val)
return destination
def load_state_dict(self, state_dict: Mapping) -> None:
"""Copies parameters from :attr:`state_dict` into this BaseParamScheduler.
Args:
state_dict: a dict containing parameters.
"""
if not isinstance(state_dict, Mapping):
raise TypeError(f"Argument state_dict should be a dictionary, but given {type(state_dict)}")
for name in self._state_attrs:
if name not in state_dict:
raise ValueError(
f"Required state attribute '{name}' is absent in provided state_dict '{state_dict.keys()}'"
)
val = state_dict[name]
obj = getattr(self, name)
if isinstance(val, Mapping) and hasattr(obj, "load_state_dict"):
obj.load_state_dict(val)
else:
setattr(self, name, val)
@abstractmethod
def get_param(self) -> Union[List[float], float]:
"""Method to get current parameter values
Returns:
list of params, or scalar param
"""
pass
@classmethod
@abstractmethod
def simulate_values(cls, num_events: int, **scheduler_kwargs: Any) -> List[List[int]]:
"""Method to simulate scheduled values during `num_events` events.
Args:
num_events: number of events during the simulation.
scheduler_kwargs: parameter scheduler configuration kwargs.
Returns:
event_index, value
"""
pass
@classmethod
def plot_values(cls, num_events: int, **scheduler_kwargs: Mapping) -> Any:
"""Method to plot simulated scheduled values during `num_events` events.
This class requires `matplotlib package <https://matplotlib.org/>`_ to be installed:
.. code-block:: bash
pip install matplotlib
Args:
num_events: number of events during the simulation.
scheduler_kwargs: parameter scheduler configuration kwargs.
Returns:
matplotlib.lines.Line2D
Examples:
.. code-block:: python
import matplotlib.pylab as plt
plt.figure(figsize=(10, 7))
LinearCyclicalScheduler.plot_values(num_events=50, param_name='lr',
start_value=1e-1, end_value=1e-3, cycle_size=10))
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ModuleNotFoundError(
"This method requires matplotlib to be installed. "
"Please install it with command: \n pip install matplotlib"
)
values = cls.simulate_values(num_events=num_events, **scheduler_kwargs)
label = scheduler_kwargs.get("param_name", "learning rate")
ax = plt.plot([e for e, _ in values], [v for _, v in values], label=label)
plt.legend()
plt.grid(which="both")
return ax
class ParamScheduler(BaseParamScheduler):
"""An abstract class for updating an optimizer's parameter value during
training.
Args:
optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
param_name: name of optimizer's parameter to update.
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
param_group_index: optimizer's parameters group to use
Note:
Parameter scheduler works independently of the internal state of the attached optimizer.
More precisely, whatever the state of the optimizer (newly created or used by another scheduler) the scheduler
sets defined absolute values.
"""
def __init__(
self,
optimizer: Optimizer,
param_name: str,
save_history: bool = False,
param_group_index: Optional[int] = None,
):
super(ParamScheduler, self).__init__(param_name, save_history)
if not (
isinstance(optimizer, Optimizer)
or (hasattr(optimizer, "param_groups") and isinstance(optimizer.param_groups, Sequence))
):
raise TypeError(
"Argument optimizer should be torch.optim.Optimizer or has attribute 'param_groups' as list/tuple, "
f"but given {type(optimizer)}"
)
self.optimizer = optimizer
self.param_group_index = param_group_index
self._state_attrs += ["param_group_index"]
def __call__(self, engine: Optional[Engine], name: Optional[str] = None) -> None:
value = self.get_param()
if isinstance(value, list):
if len(value) != len(self.optimizer_param_groups):
raise ValueError(
"size of value is different than optimizer_param_groups "
f"{len(value)} != {len(self.optimizer_param_groups)}"
)
for i, param_group in enumerate(self.optimizer_param_groups):
param_group[self.param_name] = value[i]
else:
for i, param_group in enumerate(self.optimizer_param_groups):
param_group[self.param_name] = value
if name is None:
name = self.param_name
if self.save_history and engine:
if not hasattr(engine.state, "param_history") or engine.state.param_history is None:
setattr(engine.state, "param_history", {})
engine.state.param_history.setdefault(name, []) # type: ignore[attr-defined]
values = [pg[self.param_name] for pg in self.optimizer_param_groups]
engine.state.param_history[name].append(values) # type: ignore[attr-defined]
self.event_index += 1
@property
def optimizer_param_groups(self) -> List[Dict[str, Any]]:
if self.param_group_index is None:
return self.optimizer.param_groups
return [self.optimizer.param_groups[self.param_group_index]]
@classmethod
def simulate_values(cls, num_events: int, **scheduler_kwargs: Any) -> List[List[int]]:
"""Method to simulate scheduled values during `num_events` events.
Args:
num_events: number of events during the simulation.
scheduler_kwargs: parameter scheduler configuration kwargs.
Returns:
event_index, value
Examples:
.. code-block:: python
lr_values = np.array(LinearCyclicalScheduler.simulate_values(num_events=50, param_name='lr',
start_value=1e-1, end_value=1e-3,
cycle_size=10))
plt.plot(lr_values[:, 0], lr_values[:, 1], label="learning rate")
plt.xlabel("events")
plt.ylabel("values")
plt.legend()
"""
keys_to_remove = ["optimizer", "save_history"]
for key in keys_to_remove:
if key in scheduler_kwargs:
del scheduler_kwargs[key]
values = []
scheduler = cls(optimizer=_get_fake_optimizer(), save_history=False, **scheduler_kwargs)
for i in range(num_events):
scheduler(engine=None)
values.append([i, scheduler.optimizer_param_groups[0][scheduler.param_name]])
return values
class CyclicalScheduler(ParamScheduler):
"""An abstract class for updating an optimizer's parameter value over a
cycle of some size.
Args:
optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
param_name: name of optimizer's parameter to update.
start_value: value at start of cycle.
end_value: value at the middle of the cycle.
cycle_size: length of cycle, value should be larger than 1.
cycle_mult: ratio by which to change the cycle_size.
at the end of each cycle (default=1.0).
start_value_mult: ratio by which to change the start value at the
end of each cycle (default=1.0).
end_value_mult: ratio by which to change the end value at the
end of each cycle (default=1.0).
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
param_group_index: optimizer's parameters group to use.
Note:
If the scheduler is bound to an 'ITERATION_*' event, 'cycle_size' should
usually be the number of batches in an epoch.
.. versionadded:: 0.4.5
"""
def __init__(
self,
optimizer: Optimizer,
param_name: str,
start_value: float,
end_value: float,
cycle_size: int,
cycle_mult: float = 1.0,
start_value_mult: float = 1.0,
end_value_mult: float = 1.0,
save_history: bool = False,
param_group_index: Optional[int] = None,
):
super(CyclicalScheduler, self).__init__(
optimizer, param_name, save_history=save_history, param_group_index=param_group_index
)
self.start_value = start_value
self.end_value = end_value
self.cycle_size = int(cycle_size) # Ensure cycle_size is integer
self.cycle_mult = cycle_mult
self.cycle = 0
self.start_value_mult = start_value_mult
self.end_value_mult = end_value_mult
if self.cycle_size < 2:
raise ValueError(f"Argument cycle_size should be positive and larger than 1, but given {cycle_size}")
self._state_attrs += [
"start_value",
"end_value",
"cycle_size",
"cycle_mult",
"cycle",
"start_value_mult",
"end_value_mult",
]
def __call__(self, engine: Optional[Engine], name: Optional[str] = None) -> None:
if self.event_index != 0 and self.event_index % self.cycle_size == 0:
self.event_index = 0
self.cycle_size = int(self.cycle_size * self.cycle_mult)
self.cycle += 1
self.start_value *= self.start_value_mult
self.end_value *= self.end_value_mult
return super(CyclicalScheduler, self).__call__(engine, name)
class LinearCyclicalScheduler(CyclicalScheduler):
"""Linearly adjusts param value to 'end_value' for a half-cycle, then linearly
adjusts it back to 'start_value' for a half-cycle.
Args:
optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
param_name: name of optimizer's parameter to update.
start_value: value at start of cycle.
end_value: value at the middle of the cycle.
cycle_size: length of cycle.
cycle_mult: ratio by which to change the cycle_size
at the end of each cycle (default=1).
start_value_mult: ratio by which to change the start value at the
end of each cycle (default=1.0).
end_value_mult: ratio by which to change the end value at the
end of each cycle (default=1.0).
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
param_group_index: optimizer's parameters group to use.
Note:
If the scheduler is bound to an 'ITERATION_*' event, 'cycle_size' should
usually be the number of batches in an epoch.
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode:: 1
default_trainer = get_default_trainer()
# Linearly increases the learning rate from 0.0 to 1.0 and back to 0.0
# over a cycle of 4 iterations
scheduler = LinearCyclicalScheduler(default_optimizer, "lr", 0.0, 1.0, 4)
default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
@default_trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(default_optimizer.param_groups[0]["lr"])
default_trainer.run([0] * 9, max_epochs=1)
.. testoutput:: 1
0.0
0.5
1.0
0.5
...
.. testcode:: 2
default_trainer = get_default_trainer()
optimizer = torch.optim.SGD(
[
{"params": default_model.base.parameters(), "lr": 0.001},
{"params": default_model.fc.parameters(), "lr": 0.01},
]
)
# Linearly increases the learning rate from 0.0 to 1.0 and back to 0.0
# over a cycle of 4 iterations
scheduler1 = LinearCyclicalScheduler(optimizer, "lr (base)", 0.0, 1.0, 4, param_group_index=0)
# Linearly increases the learning rate from 0.0 to 0.1 and back to 0.0
# over a cycle of 4 iterations
scheduler2 = LinearCyclicalScheduler(optimizer, "lr (fc)", 0.0, 0.1, 4, param_group_index=1)
default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler1)
default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler2)
@default_trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(optimizer.param_groups[0]["lr (base)"],
optimizer.param_groups[1]["lr (fc)"])
default_trainer.run([0] * 9, max_epochs=1)
.. testoutput:: 2
0.0 0.0
0.5 0.05
1.0 0.1
0.5 0.05
...
.. versionadded:: 0.4.5
"""
def get_param(self) -> float:
cycle_progress = self.event_index / self.cycle_size
return self.end_value + (self.start_value - self.end_value) * abs(cycle_progress - 0.5) * 2
class CosineAnnealingScheduler(CyclicalScheduler):
"""Anneals 'start_value' to 'end_value' over each cycle.
The annealing takes the form of the first half of a cosine
wave (as suggested in [Smith17]_).
Args:
optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
param_name: name of optimizer's parameter to update.
start_value: value at start of cycle.
end_value: value at the end of the cycle.
cycle_size: length of cycle.
cycle_mult: ratio by which to change the cycle_size
at the end of each cycle (default=1).
start_value_mult: ratio by which to change the start value at the
end of each cycle (default=1.0).
end_value_mult: ratio by which to change the end value at the
end of each cycle (default=1.0).
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
param_group_index: optimizer's parameters group to use.
Note:
If the scheduler is bound to an 'ITERATION_*' event, 'cycle_size' should
usually be the number of batches in an epoch.
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode:: 1
default_trainer = get_default_trainer()
# CosineAnnealing increases the learning rate from 0.0 to 1.0
# over a cycle of 4 iterations
scheduler = CosineAnnealingScheduler(default_optimizer, "lr", 0.0, 1.0, 4)
default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
@default_trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(default_optimizer.param_groups[0]["lr"])
default_trainer.run([0] * 9, max_epochs=1)
.. testoutput:: 1
0.0
0.1464...
0.4999...
0.8535...
...
.. testcode:: 2
default_trainer = get_default_trainer()
optimizer = torch.optim.SGD(
[
{"params": default_model.base.parameters(), "lr": 0.001},
{"params": default_model.fc.parameters(), "lr": 0.01},
]
)
# CosineAnnealing increases the learning rate from 0.0 to 1.0
# over a cycle of 4 iterations
scheduler_1 = CosineAnnealingScheduler(optimizer, "lr (base)", 0.0, 1.0, 4, param_group_index=0)
# CosineAnnealing increases the learning rate from 0.0 to 0.1
# over a cycle of 4 iterations
scheduler_2 = CosineAnnealingScheduler(optimizer, "lr (fc)", 0.0, 0.1, 4, param_group_index=1)
default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler_1)
default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler_2)
@default_trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(optimizer.param_groups[0]["lr (base)"],
optimizer.param_groups[1]["lr (fc)"])
default_trainer.run([0] * 9, max_epochs=1)
.. testoutput:: 2
0.0 0.0
0.1464... 0.01464...
0.4999... 0.04999...
0.8535... 0.08535...
...
.. [Smith17] Smith, Leslie N. "Cyclical learning rates for training neural networks."
Applications of Computer Vision (WACV), 2017 IEEE Winter Conference on. IEEE, 2017
.. versionadded:: 0.4.5
"""
def get_param(self) -> float:
"""Method to get current optimizer's parameter value"""
cycle_progress = self.event_index / self.cycle_size
return self.start_value + ((self.end_value - self.start_value) / 2) * (1 - math.cos(math.pi * cycle_progress))
class ConcatScheduler(ParamScheduler):
"""Concat a list of parameter schedulers.
The `ConcatScheduler` goes through a list of schedulers given by `schedulers`. Duration of each
scheduler is defined by `durations` list of integers.
Args:
schedulers: list of parameter schedulers.
durations: list of number of events that lasts a parameter scheduler from schedulers.
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
default_trainer = get_default_trainer()
scheduler_1 = LinearCyclicalScheduler(default_optimizer, "lr", 0.0, 1.0, 8)
scheduler_2 = CosineAnnealingScheduler(default_optimizer, "lr", 1.0, 0.2, 4)
# Sets the Learning rate linearly from 0.0 to 1.0 over 4 iterations. Then
# starts an annealing schedule from 1.0 to 0.2 over the next 4 iterations.
# The annealing cycles are repeated indefinitely.
combined_scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=[4, ])
default_trainer.add_event_handler(Events.ITERATION_STARTED, combined_scheduler)
@default_trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(default_optimizer.param_groups[0]["lr"])
default_trainer.run([0] * 8, max_epochs=1)
.. testoutput::
0.0
0.25
0.5
0.75
1.0
0.8828...
0.6000...
0.3171...
.. versionadded:: 0.4.5
"""
def __init__(self, schedulers: List[ParamScheduler], durations: List[int], save_history: bool = False):
if not isinstance(schedulers, Sequence):
raise TypeError(f"Argument schedulers should be a sequence, but given {schedulers}")
if len(schedulers) < 2:
raise ValueError(
f"Argument schedulers should be of more than one parameter schedulers, but given {schedulers}"
)
if not isinstance(durations, (list, tuple)):
raise TypeError(f"Argument durations should be list/tuple, but given {durations}")
if not all([isinstance(t, numbers.Integral) for t in durations]):
raise ValueError(f"Argument durations should be list/tuple of integers, but given {durations}")
if len(schedulers) != len(durations) + 1:
raise ValueError(
"Incorrect number schedulers or duration values, " f"given {len(schedulers)} and {len(durations)}"
)
for i, scheduler in enumerate(schedulers):
if not isinstance(scheduler, ParamScheduler) and not isinstance(scheduler, ParamGroupScheduler):
raise TypeError(
f"Value at index {i} of schedulers should be a parameter scheduler, but given {type(scheduler)}"
)
self.schedulers = schedulers
self.durations = durations
tmp_optimizers = [s.optimizer for s in self.schedulers]
tmp_list_optimizers = [s if isinstance(s, list) else [s] for s in tmp_optimizers]
param_optimizers = list(itertools.chain(*tmp_list_optimizers))
optimizer = list(set(param_optimizers))
if len(optimizer) != 1:
raise ValueError("schedulers should be related to same optimizer")
tmp_param_names = [s.param_name for s in self.schedulers]
tmp_list_param_names = [s if isinstance(s, list) else [s] for s in tmp_param_names]
param_names = list(itertools.chain(*tmp_list_param_names))
param_name = list(set(param_names))
if len(param_name) != 1:
raise ValueError("schedulers should be related to same param_name")
# schedulers should have save_history sync with ParamGroupScheduler
for s in schedulers:
s.save_history = save_history
super(ConcatScheduler, self).__init__(
optimizer=optimizer[0], param_name=param_name[0], save_history=save_history
)
self._scheduler_index = 0
self._setup_scheduler()
self._state_attrs += ["_current_duration", "durations", "_scheduler_index"]
def state_dict(self) -> Dict[str, Any]:
"""Returns a dictionary containing a whole state of ConcatScheduler.
Returns:
dict:
a dictionary containing a whole state of ConcatScheduler
"""
state_dict = super(ConcatScheduler, self).state_dict()
state_dict["schedulers"] = []
for s in self.schedulers:
state_dict["schedulers"].append(s.state_dict())
return state_dict
def load_state_dict(self, state_dict: Mapping) -> None:
"""Copies parameters from :attr:`state_dict` into this ConcatScheduler.
Args:
state_dict: a dict containing parameters.
"""
if not isinstance(state_dict, Mapping):
raise TypeError(f"Argument state_dict should be a dictionary, but given {type(state_dict)}")
if "schedulers" not in state_dict:
raise ValueError(
f"Required state attribute 'schedulers' is absent in provided state_dict '{state_dict.keys()}'"
)
sds = state_dict["schedulers"]
if len(sds) != len(self.schedulers):
raise ValueError(
f"Input state_dict contains {len(sds)} state_dicts of concatenated schedulers, "
f"but {len(self.schedulers)} needed"
)
for s, sd in zip(self.schedulers, sds):
s.load_state_dict(sd)
super(ConcatScheduler, self).load_state_dict(state_dict)
self._setup_scheduler()
def _setup_scheduler(self) -> None:
self._current_scheduler = self.schedulers[self._scheduler_index]
self._current_duration = (
self.durations[self._scheduler_index] if self._scheduler_index < len(self.durations) else -1
)
def __call__(self, engine: Optional[Engine], name: Optional[str] = None) -> None:
if self._current_duration == 0:
self._scheduler_index += 1
self._setup_scheduler()
self._current_scheduler(engine, name)
self._current_duration -= 1
@property
def optimizer_param_groups(self) -> List[Dict[str, Any]]:
# We need to setup optimizer_param_groups as property
# to synchonize with the latest _current_scheduler and its internal optimizer_param_groups
return self._current_scheduler.optimizer_param_groups
@property
def save_history(self) -> bool:
return self._current_scheduler.save_history
@save_history.setter
def save_history(self, value: bool) -> None:
for s in self.schedulers:
s.save_history = value
def get_param(self) -> Union[List[float], float]:
return self._current_scheduler.get_param()
@classmethod
def simulate_values( # type: ignore[override]
cls,
num_events: int,
schedulers: List[ParamScheduler],
durations: List[int],
param_names: Optional[Union[List[str], Tuple[str]]] = None,
) -> List[List[int]]:
"""Method to simulate scheduled values during num_events events.
Args:
num_events: number of events during the simulation.
schedulers: list of parameter schedulers.
durations: list of number of events that lasts a parameter scheduler from schedulers.
param_names: parameter name or list of parameter names to simulate values.
By default, the first scheduler's parameter name is taken.
Returns:
list:
list of [event_index, value_0, value_1, ...], where values correspond to `param_names`.
"""
if param_names is not None:
if not isinstance(param_names, (list, tuple)):
raise TypeError(f"Argument param_names should be list or tuple, but given {type(param_names)}")
if not all(isinstance(item, str) for item in param_names):
raise ValueError(f"Argument param_names should be list or tuple of strings, but given {param_names}")
tmp_param_optimizers = [s.optimizer for s in schedulers]
tmp_list_param_optimizers = [s if isinstance(s, list) else [s] for s in tmp_param_optimizers]
param_optimizers = list(itertools.chain(*tmp_list_param_optimizers))
tmp_optimizer = list(set(param_optimizers))
if len(tmp_optimizer) != 1:
raise ValueError("schedulers should be related to same optimizer")
optimizer = tmp_optimizer[0]
# This scheduler uses `ParamScheduler` which
# should be replicated in order to simulate LR values and
# not perturb original scheduler.
with tempfile.TemporaryDirectory() as tmpdirname:
cache_filepath = Path(tmpdirname) / "ignite_lr_scheduler_cache.pt"
objs = {f"lr_scheduler_{i}": s.state_dict() for i, s in enumerate(schedulers)}
# all schedulers should be related to the same optimizer
objs["optimizer"] = optimizer.state_dict()
torch.save(objs, cache_filepath.as_posix())
# do not save_history
for s in schedulers:
s.save_history = False
output = []
scheduler = cls(schedulers=schedulers, save_history=False, durations=durations)
if param_names is None:
param_names = [scheduler.param_name]
for i in range(num_events):
scheduler(engine=None)
values = [i]
for param_name in param_names:
params = [p[param_name] for p in scheduler.optimizer_param_groups]
values = values + params
output.append(values)
objs = torch.load(cache_filepath.as_posix())
for i, s in enumerate(schedulers):
s.load_state_dict(objs[f"lr_scheduler_{i}"])
optimizer.load_state_dict(objs["optimizer"])
return output
class _CosineAnnealingWarmRestarts:
def __init__(self, lr_scheduler: CosineAnnealingWarmRestarts):
self._lr_scheduler = lr_scheduler
@property
def last_epoch(self) -> int:
return self._lr_scheduler.last_epoch
@last_epoch.setter
def last_epoch(self, value: int) -> None:
self._lr_scheduler.last_epoch = value
@property
def optimizer(self) -> torch.optim.Optimizer:
return self._lr_scheduler.optimizer
def get_lr(self, epoch: Optional[int] = None) -> List[float]:
T_mult = self._lr_scheduler.T_mult
eta_min = self._lr_scheduler.eta_min
if epoch is None and self.last_epoch < 0:
epoch = 0
if epoch is None:
epoch = self.last_epoch + 1
self._lr_scheduler.T_cur = self._lr_scheduler.T_cur + 1
if self._lr_scheduler.T_cur >= self._lr_scheduler.T_i:
self._lr_scheduler.T_cur = self._lr_scheduler.T_cur - self._lr_scheduler.T_i
self._lr_scheduler.T_i = self._lr_scheduler.T_i * T_mult
else:
if epoch < 0:
raise ValueError("Expected non-negative epoch, but got {}".format(epoch))
if epoch >= self._lr_scheduler.T_0:
if T_mult == 1:
self._lr_scheduler.T_cur = epoch % self._lr_scheduler.T_0
else:
n = int(math.log((epoch / self._lr_scheduler.T_0 * (T_mult - 1) + 1), T_mult))
self._lr_scheduler.T_cur = epoch - self._lr_scheduler.T_0 * (T_mult**n - 1) / (T_mult - 1)
self._lr_scheduler.T_i = self._lr_scheduler.T_0 * T_mult**n
else:
self._lr_scheduler.T_i = self._lr_scheduler.T_0
self._lr_scheduler.T_cur = epoch
self.last_epoch = math.floor(epoch)
return [
eta_min
+ (base_lr - eta_min) * (1 + math.cos(math.pi * self._lr_scheduler.T_cur / self._lr_scheduler.T_i)) / 2
for base_lr in self._lr_scheduler.base_lrs
]
class LRScheduler(ParamScheduler):
"""A wrapper class to call `torch.optim.lr_scheduler` objects as `ignite` handlers.
Args:
lr_scheduler: lr_scheduler object to wrap.
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
use_legacy: if True, scheduler should be attached to ``Events.ITERATION_COMPLETED``, (default=False).
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
default_trainer = get_default_trainer()
from torch.optim.lr_scheduler import StepLR
torch_lr_scheduler = StepLR(default_optimizer, step_size=3, gamma=0.1)
scheduler = LRScheduler(torch_lr_scheduler)
default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
@default_trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(default_optimizer.param_groups[0]["lr"])
default_trainer.run([0] * 8, max_epochs=1)
.. testoutput::
0.1
0.1
0.1
0.010...
0.010...
0.010...
0.001...
0.001...
.. versionadded:: 0.4.5
.. versionchanged:: 0.4.9
added `use_legacy` argument
"""
def __init__(
self,
lr_scheduler: PyTorchLRScheduler,
save_history: bool = False,
use_legacy: bool = False,
):
if not isinstance(lr_scheduler, PyTorchLRScheduler):
raise TypeError(
"Argument lr_scheduler should be a subclass of "
f"torch.optim.lr_scheduler.{PyTorchLRScheduler.__name__}, "
f"but given {type(lr_scheduler)}"
)
self.lr_scheduler: Union[PyTorchLRScheduler, _CosineAnnealingWarmRestarts] = lr_scheduler
if isinstance(lr_scheduler, CosineAnnealingWarmRestarts):
self.lr_scheduler = _CosineAnnealingWarmRestarts(lr_scheduler)
super(LRScheduler, self).__init__(
optimizer=self.lr_scheduler.optimizer,
param_name="lr",
save_history=save_history,
)
if use_legacy:
warnings.warn(
"Please make sure to attach scheduler to Events.ITERATION_COMPLETED "
"instead of Events.ITERATION_STARTED to make sure to use "
"the first lr value from the optimizer, otherwise it will be skipped"
)
self.lr_scheduler.last_epoch += 1
self._state_attrs += ["lr_scheduler"]
def __call__(self, engine: Optional[Engine], name: Optional[str] = None) -> None:
super(LRScheduler, self).__call__(engine, name)
self.lr_scheduler.last_epoch += 1
def get_param(self) -> Union[float, List[float]]:
"""Method to get current optimizer's parameter value"""
# Emulate context manager for pytorch>=1.4
self.lr_scheduler._get_lr_called_within_step = True # type: ignore[union-attr]
lr_list = cast(List[float], self.lr_scheduler.get_lr())
self.lr_scheduler._get_lr_called_within_step = False # type: ignore[union-attr]
if len(lr_list) == 1:
return lr_list[0]
else:
return lr_list
@classmethod
def simulate_values( # type: ignore[override]
cls, num_events: int, lr_scheduler: PyTorchLRScheduler, **kwargs: Any
) -> List[List[int]]:
"""Method to simulate scheduled values during num_events events.
Args:
num_events: number of events during the simulation.
lr_scheduler: lr_scheduler object to wrap.
Returns:
event_index, value
"""
if not isinstance(lr_scheduler, PyTorchLRScheduler):
raise TypeError(
"Argument lr_scheduler should be a subclass of "
f"torch.optim.lr_scheduler.{PyTorchLRScheduler.__name__}, "
f"but given {type(lr_scheduler)}"
)
# This scheduler uses `torch.optim.lr_scheduler.LRScheduler` which
# should be replicated in order to simulate LR values and
# not perturb original scheduler.
with tempfile.TemporaryDirectory() as tmpdirname:
cache_filepath = Path(tmpdirname) / "ignite_lr_scheduler_cache.pt"
obj = {
"lr_scheduler": lr_scheduler.state_dict(),
"optimizer": lr_scheduler.optimizer.state_dict(),
}
torch.save(obj, cache_filepath.as_posix())
values = []
scheduler = cls(save_history=False, lr_scheduler=lr_scheduler, **kwargs)
for i in range(num_events):
scheduler(engine=None)
params = [p[scheduler.param_name] for p in scheduler.optimizer_param_groups]
values.append([i] + params)
obj = torch.load(cache_filepath.as_posix())
lr_scheduler.load_state_dict(obj["lr_scheduler"])
lr_scheduler.optimizer.load_state_dict(obj["optimizer"])
return values
def create_lr_scheduler_with_warmup(
lr_scheduler: Union[ParamScheduler, PyTorchLRScheduler],
warmup_start_value: float,
warmup_duration: int,
warmup_end_value: Optional[float] = None,
save_history: bool = False,
output_simulated_values: Optional[List] = None,
) -> "ConcatScheduler":
"""
Helper method to create a learning rate scheduler with a linear warm-up.
Args:
lr_scheduler: learning rate scheduler after the warm-up.
warmup_start_value: learning rate start value of the warm-up phase.
warmup_duration: warm-up phase duration, number of events.
warmup_end_value: learning rate end value of the warm-up phase, (default=None). If None,
warmup_end_value is set to optimizer initial lr.
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
output_simulated_values: optional output of simulated learning rate values.
If output_simulated_values is a list of None, e.g. `[None] * 100`, after the execution it will be filled
by 100 simulated learning rate values.
Returns:
ConcatScheduler
Note:
If the first learning rate value provided by `lr_scheduler` is different from `warmup_end_value`, an additional
event is added after the warm-up phase such that the warm-up ends with `warmup_end_value` value and then
`lr_scheduler` provides its learning rate values as normally.
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
from torch.optim.lr_scheduler import ExponentialLR
torch_lr_scheduler = ExponentialLR(optimizer=default_optimizer, gamma=0.98)
default_trainer = get_default_trainer()
scheduler = create_lr_scheduler_with_warmup(torch_lr_scheduler,
warmup_start_value=0.0,
warmup_end_value=0.1,
warmup_duration=3)
default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
@default_trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(default_optimizer.param_groups[0]["lr"])
default_trainer.run([0] * 8, max_epochs=1)
.. testoutput::
0.0
0.05
0.1
0.098
0.09604
0.09411...
0.09223...
0.09039...
.. versionadded:: 0.4.5
"""
if not isinstance(lr_scheduler, (ParamScheduler, PyTorchLRScheduler)):
raise TypeError(
"Argument lr_scheduler should be a subclass of "
f"torch.optim.lr_scheduler.{PyTorchLRScheduler.__name__} or ParamScheduler, "
f"but given {type(lr_scheduler)}"
)
if not isinstance(warmup_duration, numbers.Integral):
raise TypeError(f"Argument warmup_duration should be integer, but given {warmup_duration}")
if not (warmup_duration > 1):
raise ValueError(f"Argument warmup_duration should be at least 2 events, but given {warmup_duration}")
warmup_schedulers: List[ParamScheduler] = []
for param_group_index, param_group in enumerate(lr_scheduler.optimizer.param_groups):
if warmup_end_value is None:
param_group_warmup_end_value = param_group["lr"]
else:
param_group_warmup_end_value = warmup_end_value
milestones_values = [(0, warmup_start_value), (warmup_duration - 1, param_group_warmup_end_value)]
if isinstance(lr_scheduler, PyTorchLRScheduler):
init_lr = param_group["lr"]
if init_lr != param_group_warmup_end_value:
milestones_values.append((warmup_duration, init_lr))
# We need to advance torch lr_scheduler to avoid duplicated lr value
# given by PiecewiseLinear and LRScheduler.
# We suggest to attach output scheduler on ITERATION_STARTED but
# torch lr_scheduler works with ITERATION_COMPLETED
# See also https://github.com/pytorch/ignite/pull/2496#issuecomment-1065984440
lr_scheduler.last_epoch += 1
lr_scheduler = LRScheduler(lr_scheduler, save_history=save_history)
else:
init_lr = lr_scheduler.get_param()
if init_lr == param_group_warmup_end_value:
if warmup_duration > 2:
d = (param_group_warmup_end_value - warmup_start_value) / (warmup_duration - 1)
milestones_values[-1] = (warmup_duration - 2, param_group_warmup_end_value - d)
else:
milestones_values.pop(-1)
warmup_schedulers.append(
PiecewiseLinear(
lr_scheduler.optimizer,
param_name="lr",
milestones_values=milestones_values,
param_group_index=param_group_index,
save_history=save_history,
)
)
warmup_scheduler = ParamGroupScheduler(warmup_schedulers, save_history=save_history)
schedulers: List[Union[ParamScheduler, ParamGroupScheduler, PyTorchLRScheduler]] = [
warmup_scheduler,
lr_scheduler,
]
durations = [milestones_values[-1][0] + 1]
combined_scheduler = ConcatScheduler(schedulers, durations=durations, save_history=save_history)
if output_simulated_values is not None:
if not isinstance(output_simulated_values, list):
raise TypeError(
"Argument output_simulated_values should be a list of None, e.g. `[None] * 100`, "
f"but given {type(output_simulated_values)}."
)
num_events = len(output_simulated_values)
result = ConcatScheduler.simulate_values(num_events=num_events, schedulers=schedulers, durations=durations)
for i in range(num_events):
output_simulated_values[i] = result[i]
return combined_scheduler
class PiecewiseLinear(ParamScheduler):
"""
Piecewise linear parameter scheduler
Args:
optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
param_name: name of optimizer's parameter to update.
milestones_values: list of tuples (event index, parameter value)
represents milestones and parameter. Milestones should be increasing integers.
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
param_group_index: optimizer's parameters group to use.
.. code-block:: python
scheduler = PiecewiseLinear(optimizer, "lr",
milestones_values=[(10, 0.5), (20, 0.45), (21, 0.3), (30, 0.1), (40, 0.1)])
# Attach to the trainer
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
#
# Sets the learning rate to 0.5 over the first 10 iterations, then decreases linearly from 0.5 to 0.45 between
# 10th and 20th iterations. Next there is a jump to 0.3 at the 21st iteration and LR decreases linearly
# from 0.3 to 0.1 between 21st and 30th iterations and remains 0.1 until the end of the iterations.
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode:: 1
default_trainer = get_default_trainer()
milestones_values = [(1, 1.0), (3, 0.8), (5, 0.2)]
scheduler = PiecewiseLinear(
default_optimizer, "lr", milestones_values=milestones_values)
# Sets lr equal to 1 for till the first iteration
# Then linearly reduces lr from 1 to 0.8 till the third iteration
# Then linearly reduces lr from 0.8 to 0.5 till the fifth iteration
default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
@default_trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(default_optimizer.param_groups[0]["lr"])
default_trainer.run([0] * 6, max_epochs=1)
.. testoutput:: 1
1.0
1.0
0.9
0.8
0.5
0.2
.. testcode:: 2
default_trainer = get_default_trainer()
optimizer = torch.optim.SGD(
[
{"params": default_model.base.parameters(), "lr": 0.1},
{"params": default_model.fc.parameters(), "lr": 1.0},
]
)
milestones_values1 = [(1, 0.1), (3, 0.08), (5, 0.02)]
scheduler2 = PiecewiseLinear(
optimizer, "lr", milestones_values=milestones_values1, param_group_index=0)
# Sets lr equal to 0.1 for till the first iteration
# Then linearly reduces lr from 0.1 to 0.08 till the third iteration
# Then linearly reduces lr from 0.08 to 0.05 till the fifth iteration
milestones_values2 = [(1, 1.0), (3, 0.8), (5, 0.2)]
scheduler1 = PiecewiseLinear(
optimizer, "lr", milestones_values=milestones_values2, param_group_index=1)
# Sets lr equal to 1 for till the first iteration
# Then linearly reduces lr from 1 to 0.8 till the third iteration
# Then linearly reduces lr from 0.8 to 0.5 till the fifth iteration
default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler1)
default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler2)
@default_trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(optimizer.param_groups[0]["lr"],
optimizer.param_groups[1]["lr"])
default_trainer.run([0] * 6, max_epochs=1)
.. testoutput:: 2
0.1 1.0
0.1 1.0
0.09 0.9
0.08 0.8
0.05 0.5
0.02 0.2
.. versionadded:: 0.4.5
"""
def __init__(
self,
optimizer: Optimizer,
param_name: str,
milestones_values: List[Tuple[int, float]],
save_history: bool = False,
param_group_index: Optional[int] = None,
):
super(PiecewiseLinear, self).__init__(optimizer, param_name, save_history, param_group_index=param_group_index)
if not isinstance(milestones_values, Sequence):
raise TypeError(
f"Argument milestones_values should be a list or tuple, but given {type(milestones_values)}"
)
if len(milestones_values) < 1:
raise ValueError(
f"Argument milestones_values should be with at least one value, but given {milestones_values}"
)
values: List[float] = []
milestones: List[int] = []
for pair in milestones_values:
if not isinstance(pair, tuple) or len(pair) != 2:
raise ValueError("Argument milestones_values should be a list of pairs (milestone, param_value)")
if not isinstance(pair[0], numbers.Integral):
raise TypeError(f"Value of a milestone should be integer, but given {type(pair[0])}")
if len(milestones) > 0 and pair[0] < milestones[-1]:
raise ValueError(
f"Milestones should be increasing integers, but given {pair[0]} is smaller "
f"than the previous milestone {milestones[-1]}"
)
milestones.append(pair[0])
values.append(pair[1])
self.values = values
self.milestones = milestones
self._index = 0
self._state_attrs += ["values", "milestones", "_index"]
def _get_start_end(self) -> Tuple[int, int, float, float]:
if self.milestones[0] > self.event_index:
return self.event_index - 1, self.event_index, self.values[0], self.values[0]
elif self.milestones[-1] <= self.event_index:
return (self.event_index, self.event_index + 1, self.values[-1], self.values[-1])
elif self.milestones[self._index] <= self.event_index < self.milestones[self._index + 1]:
return (
self.milestones[self._index],
self.milestones[self._index + 1],
self.values[self._index],
self.values[self._index + 1],
)
else:
self._index += 1
return self._get_start_end()
def get_param(self) -> float:
start_index, end_index, start_value, end_value = self._get_start_end()
return start_value + (end_value - start_value) * (self.event_index - start_index) / (end_index - start_index)
class ParamGroupScheduler:
"""
Scheduler helper to group multiple schedulers into one.
Args:
schedulers: list/tuple of parameter schedulers.
names: list of names of schedulers.
save_history: whether to save history or not.
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
default_trainer = get_default_trainer()
optimizer = torch.optim.SGD(
[
{"params": default_model.base.parameters(), "lr": 0.001},
{"params": default_model.fc.parameters(), "lr": 0.01},
]
)
# CosineAnnealing increases the learning rate from 0.0 to 1.0
# over a cycle of 4 iterations
scheduler_1 = CosineAnnealingScheduler(optimizer, "lr", 0.0, 1.0, 4, param_group_index=0)
# CosineAnnealing increases the learning rate from 0.0 to 0.1
# over a cycle of 4 iterations
scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", 0.0, 0.1, 4, param_group_index=1)
scheduler = ParamGroupScheduler(schedulers=[scheduler_1, scheduler_2],
names=["lr (base)", "lr (fc)"])
default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
@default_trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(optimizer.param_groups[0]["lr"],
optimizer.param_groups[1]["lr"])
default_trainer.run([0] * 8, max_epochs=1)
.. testoutput::
0.0 0.0
0.1464... 0.01464...
0.4999... 0.04999...
0.8535... 0.08535...
...
.. versionadded:: 0.4.5
"""
def __init__(self, schedulers: List[ParamScheduler], names: Optional[List[str]] = None, save_history: bool = False):
if not isinstance(schedulers, Sequence):
raise TypeError(f"Argument schedulers should be a list/tuple, but given {schedulers}")
if not all(isinstance(scheduler, ParamScheduler) for scheduler in schedulers):
raise ValueError(
f"Argument schedulers should be a list/tuple of parameter schedulers, but given {schedulers}"
)
if names is None:
names = [s.param_name for s in schedulers]
if not isinstance(names, (list, tuple)):
raise TypeError(f"Argument names should be a list/tuple, but given {names}")
if not all(isinstance(n, str) for n in names):
raise ValueError(f"Argument names should be a list/tuple of parameter scheduler's names, but given {names}")
if len(names) != len(schedulers):
raise ValueError(f"{len(schedulers)} should be equal {len(names)}")
self.schedulers = schedulers
self.names = names
# schedulers should have save_history sync with ParamGroupScheduler
for s in schedulers:
s.save_history = save_history
self.optimizer = [s.optimizer for s in self.schedulers]
self.param_name = [s.param_name for s in self.schedulers]
def __call__(self, engine: Optional[Engine], name: Optional[str] = None) -> None:
for scheduler, name in zip(self.schedulers, self.names):
scheduler(engine, name)
@property
def optimizer_param_groups(self) -> List[Dict[str, Any]]:
return [pg for scheduler in self.schedulers for pg in scheduler.optimizer_param_groups]
@property
def save_history(self) -> bool:
return self.schedulers[0].save_history
@save_history.setter
def save_history(self, value: bool) -> None:
for s in self.schedulers:
s.save_history = value
def state_dict(self) -> Dict[str, List[Any]]:
"""Returns a dictionary containing a whole state of ParamGroupScheduler.
Returns:
dict:
a dictionary containing a whole state of ParamGroupScheduler
"""
state_dict: Dict[str, List[Any]] = OrderedDict()
state_dict["schedulers"] = []
for n, s in zip(self.names, self.schedulers):
state_dict["schedulers"].append((n, s.state_dict()))
return state_dict
def load_state_dict(self, state_dict: Mapping) -> None:
"""Copies parameters from :attr:`state_dict` into this ParamScheduler.
Args:
state_dict: a dict containing parameters.
"""
if not isinstance(state_dict, Mapping):
raise TypeError(f"Argument state_dict should be a dictionary, but given {type(state_dict)}")
if "schedulers" not in state_dict:
raise ValueError(
f"Required state attribute '{'schedulers'}' is absent in provided state_dict '{state_dict.keys()}'"
)
sds = state_dict["schedulers"]
if len(sds) != len(self.schedulers):
raise ValueError(
f"Input state_dict contains {len(sds)} state_dicts of param group schedulers, "
f"but {len(self.schedulers)} needed"
)
for req_n, s, (n, sd) in zip(self.names, self.schedulers, sds):
if req_n != n:
raise ValueError(
f"Name of scheduler from input state dict does not correspond to required one, {n} vs {req_n}"
)
s.load_state_dict(sd)
@classmethod
def simulate_values(
cls, num_events: int, schedulers: List[ParamScheduler], **kwargs: Any
) -> List[List[Union[List[float], float, int]]]:
"""Method to simulate scheduled values during num_events events.
Args:
num_events: number of events during the simulation.
schedulers: lr_scheduler object to wrap.
kwargs: kwargs passed to construct an instance of
:class:`ignite.handlers.param_scheduler.ParamGroupScheduler`.
Returns:
list:
list of [event_index, scheduler_0_value, scheduler_1_value, ...], where scheduler_i_value
corresponds to the simulated param of scheduler i at 'event_index'th event.
"""
# This scheduler uses `torch.optim.lr_scheduler.LRScheduler` which
# should be replicated in order to simulate LR values and
# not perturb original scheduler.
with tempfile.TemporaryDirectory() as tmpdirname:
cache_filepath = Path(tmpdirname) / "ignite_lr_scheduler_cache.pt"
objs = {f"lr_scheduler_{i}": s.state_dict() for i, s in enumerate(schedulers)}
# all schedulers should be related to the same optimizer
objs["optimizer"] = schedulers[0].optimizer.state_dict()
torch.save(objs, cache_filepath.as_posix())
values = []
scheduler = cls(schedulers=schedulers, **kwargs)
for i in range(num_events):
params = [scheduler.get_param() for scheduler in schedulers]
values.append([i] + params)
scheduler(engine=None)
objs = torch.load(cache_filepath.as_posix())
for i, s in enumerate(schedulers):
s.load_state_dict(objs[f"lr_scheduler_{i}"])
s.optimizer.load_state_dict(objs["optimizer"])
return values
def get_param(self) -> List[Union[float, List[float]]]:
"""
Method to get current `schedulers`' parameter values
.. versionadded:: 0.4.11
"""
return [scheduler.get_param() for scheduler in self.schedulers]
class ReduceLROnPlateauScheduler(ParamScheduler):
"""Reduce LR when a metric stops improving.
Wrapper of `torch.optim.lr_scheduler.ReduceLROnPlateau
<https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.ReduceLROnPlateau.html>`_.
Args:
optimizer: Wrapped optimizer.
metric_name: metric whose improvement is monitored.
Must be attached to the same engine.
trainer: Trainer engine to log LR history in its
`state.output.param_history`. Is used if `save_history`
is true. Default: None.
save_history: Whether to save history or not. If true,
history will be logged in `trainer`'s `state.output.param_history`.
Default: False.
param_group_index: `optimizer`'s parameters group
to use. Default: None. Use all `optimizer`'s paramater groups.
scheduler_kwargs: Keyword arguments to be passed to the wrapped ``ReduceLROnPlateau``.
Examples:
.. code-block:: python
# Metric "accuracy" should increase the best value by
# more than 1 unit after at most 2 epochs, otherwise LR
# would get multiplied by 0.5 .
scheduler = ReduceLROnPlateauScheduler(
default_optimizer,
metric_name="accuracy", mode="max",
factor=0.5, patience=1, threshold_mode='abs',
threshold=1, trainer=trainer
)
metric = Accuracy()
default_evaluator.attach(metric, "accuracy")
default_evaluator.add_event_handler(Events.COMPLETED, scheduler)
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
default_trainer = get_default_trainer()
# Metric "loss" should decrease more than
# 0.1 of best loss after at most
# three iterations. Then best loss would get
# updated, otherwise lr is multiplied by 0.5
scheduler = ReduceLROnPlateauScheduler(
default_optimizer, "loss",
save_history=True, mode="min",
factor=0.5, patience=3, threshold_mode='rel',
threshold=0.1, trainer=default_trainer
)
metric_values = iter([10, 5, 3, 4, 4, 4, 5, 1])
default_evaluator.state.metrics = {"loss": None}
@default_trainer.on(Events.ITERATION_COMPLETED)
def set_metric_val():
default_evaluator.state.metrics["loss"] = next(metric_values)
default_evaluator.add_event_handler(Events.COMPLETED, scheduler)
@default_trainer.on(Events.ITERATION_COMPLETED)
def trigger_eval():
default_evaluator.run([0.])
default_trainer.run([0.] * 8)
print(default_trainer.state.param_history["lr"])
.. testoutput::
[[0.1], [0.1], [0.1], [0.1], [0.1], [0.1], [0.05], [0.05]]
.. versionadded:: 0.4.9
"""
def __init__(
self,
optimizer: Optimizer,
metric_name: str,
trainer: Optional[Engine] = None,
save_history: bool = False,
param_group_index: Optional[int] = None,
**scheduler_kwargs: Any,
):
super(ReduceLROnPlateauScheduler, self).__init__(
optimizer, "lr", save_history=save_history, param_group_index=param_group_index
)
self.metric_name = metric_name
self.trainer = trainer
self.optimizer = optimizer
if "min_lr" in scheduler_kwargs and param_group_index is not None:
min_lr = scheduler_kwargs["min_lr"]
if not isinstance(min_lr, float):
raise TypeError(f"When param_group_index is given, min_lr should be a float, but given {type(min_lr)}")
_min_lr = min_lr
min_lr = [0] * len(optimizer.param_groups)
min_lr[param_group_index] = _min_lr
else:
min_lr = 0
_scheduler_kwargs = scheduler_kwargs.copy()
_scheduler_kwargs["min_lr"] = min_lr
if "verbose" in _scheduler_kwargs:
warnings.warn(
"Found verbose=True in provided scheduler_kwargs. "
"It would be set to False. Please use save_history instead."
)
_scheduler_kwargs["verbose"] = False
self.scheduler = ReduceLROnPlateau(optimizer, **_scheduler_kwargs)
self.scheduler._reduce_lr = self._reduce_lr # type: ignore[attr-defined]
self._state_attrs += ["metric_name", "scheduler"]
def __call__(self, engine: Engine, name: Optional[str] = None) -> None: # type: ignore[override]
if not hasattr(engine.state, "metrics") or self.metric_name not in engine.state.metrics:
raise ValueError(
"Argument engine should have in its 'state', attribute 'metrics' "
f"which itself has the metric {self.metric_name}."
)
self.scheduler.step(engine.state.metrics[self.metric_name])
super().__call__(self.trainer, name)
def get_param(self) -> Union[float, List[float]]:
lrs = [pg["lr"] for pg in self.optimizer_param_groups]
return lrs[0] if len(lrs) == 1 else lrs
def _reduce_lr(self, epoch: int) -> None:
for i, param_group in enumerate(self.optimizer_param_groups):
old_lr = float(param_group["lr"])
new_lr = max(old_lr * self.scheduler.factor, self.scheduler.min_lrs[i])
if old_lr - new_lr > self.scheduler.eps:
param_group["lr"] = new_lr
@classmethod
def simulate_values( # type: ignore[override]
cls, num_events: int, metric_values: List[float], init_lr: float, **scheduler_kwargs: Any
) -> List[List[int]]:
"""Method to simulate scheduled values during num_events events.
Args:
num_events: number of events during the simulation.
metric_values: values to change LR based on.
init_lr: initial LR to start with.
scheduler_kwargs: kwargs passed to construct an instance of
:class:`ignite.handlers.param_scheduler.ReduceLROnPlateauScheduler`.
Returns:
event_index, value
"""
if len(metric_values) != num_events:
raise ValueError(
"Length of argument metric_values should be equal to num_events. "
f"{len(metric_values)} != {num_events}"
)
keys_to_remove = ["optimizer", "metric_name", "save_history"]
for key in keys_to_remove:
if key in scheduler_kwargs:
del scheduler_kwargs[key]
values = []
scheduler = cls(
optimizer=_get_fake_optimizer(torch.optim.SGD, lr=init_lr),
metric_name="metric",
save_history=False,
**scheduler_kwargs,
)
engine = Engine(lambda _, __: None)
for i in range(num_events):
engine.state.metrics["metric"] = metric_values[i]
scheduler(engine=engine)
values.append([i, scheduler.optimizer_param_groups[0][scheduler.param_name]])
return values
def _get_fake_optimizer(
optimizer_cls: Optional[Union[Type[Optimizer], Type[torch.optim.SGD]]] = None, **kwargs: Any
) -> Union[Optimizer, torch.optim.SGD]:
t = torch.zeros([1], requires_grad=True)
if optimizer_cls is None:
optimizer_cls = torch.optim.SGD
kwargs["lr"] = 0.01
return optimizer_cls([t], **kwargs)
|
import collections.abc as collections
import numbers
import os
import stat
import tempfile
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from pathlib import Path
from typing import Any, Callable, Dict, List, Mapping, NamedTuple, Optional, Tuple, Union
import torch
import torch.nn as nn
from packaging.version import Version
if Version(torch.__version__) >= Version("1.9.0"):
from torch.distributed.optim import ZeroRedundancyOptimizer
HAVE_ZERO = True
else:
HAVE_ZERO = False
import ignite.distributed as idist
from ignite.base import Serializable
from ignite.engine import Engine, Events
__all__ = ["Checkpoint", "DiskSaver", "ModelCheckpoint", "BaseSaveHandler"]
class BaseSaveHandler(metaclass=ABCMeta):
"""Base class for save handlers
Methods to override:
- :meth:`~ignite.handlers.checkpoint.BaseSaveHandler.__call__`
- :meth:`~ignite.handlers.checkpoint.BaseSaveHandler.remove`
Note:
In derived class, please, make sure that in distributed configuration overridden methods are called by a single
process. Distributed configuration on XLA devices should be treated slightly differently: for saving checkpoint
with `xm.save() <https://pytorch.org/xla/release/1.5/index.html#torch_xla.core.xla_model.save>`_ all processes
should pass into the function. Otherwise, application gets stuck.
"""
@abstractmethod
def __call__(self, checkpoint: Mapping, filename: str, metadata: Optional[Mapping] = None) -> None:
"""Method to save `checkpoint` with `filename`. Additionally, metadata dictionary is provided.
Metadata contains:
- `basename`: file prefix (if provided) with checkpoint name, e.g. `epoch_checkpoint`.
- `score_name`: score name if provided, e.g `val_acc`.
- `priority`: checkpoint priority value (higher is better), e.g. `12` or `0.6554435`
Args:
checkpoint: checkpoint dictionary to save.
filename: filename associated with checkpoint.
metadata: metadata on checkpoint to save.
"""
@abstractmethod
def remove(self, filename: str) -> None:
"""Method to remove saved checkpoint.
Args:
filename: filename associated with checkpoint.
"""
class Checkpoint(Serializable):
"""Checkpoint handler can be used to periodically save and load objects which have attribute
``state_dict/load_state_dict``. This class can use specific save handlers to store on the disk or a cloud
storage, etc. The Checkpoint handler (if used with :class:`~ignite.handlers.DiskSaver`) also handles automatically
moving data on TPU to CPU before writing the checkpoint.
Args:
to_save: Dictionary with the objects to save. Objects should have implemented ``state_dict`` and
``load_state_dict`` methods. If contains objects of type torch `DistributedDataParallel`_ or
`DataParallel`_, their internal wrapped model is automatically saved (to avoid additional key ``module.`` in
the state dictionary).
save_handler: String, function or callable object.
used to save engine and other provided objects. Function receives two objects: checkpoint as a dictionary
and filename. If ``save_handler`` is callable class, it can
inherit of :class:`~ignite.handlers.checkpoint.BaseSaveHandler` and optionally implement ``remove`` method
to keep a fixed number of saved checkpoints. In case if user needs to save engine's checkpoint on a disk,
``save_handler`` can be defined with :class:`~ignite.handlers.DiskSaver` or a string specifying
directory name can be passed to ``save_handler``.
filename_prefix: Prefix for the file name to which objects will be saved. See Note for details.
score_function: If not None, it should be a function taking a single argument,
:class:`~ignite.engine.engine.Engine` object, and returning a score (`float`). Objects with highest scores
will be retained.
score_name: If ``score_function`` not None, it is possible to store its value using
``score_name``. If ``score_function`` is None, ``score_name`` can be used alone to define ``score_function``
as ``Checkpoint.get_default_score_fn(score_name)`` by default.
n_saved: Number of objects that should be kept on disk. Older files will be removed. If set to
`None`, all objects are kept.
global_step_transform: global step transform function to output a desired global step.
Input of the function is ``(engine, event_name)``. Output of function should be an integer.
Default is None, global_step based on attached engine. If provided, uses function output as global_step.
To setup global step from another engine, please use :meth:`~ignite.handlers.global_step_from_engine`.
filename_pattern: If ``filename_pattern`` is provided, this pattern will be used to render
checkpoint filenames. If the pattern is not defined, the default pattern would be used. See Note for
details.
include_self: Whether to include the `state_dict` of this object in the checkpoint. If `True`, then
there must not be another object in ``to_save`` with key ``checkpointer``.
greater_or_equal: if `True`, the latest equally scored model is stored. Otherwise, the first model.
Default, `False`.
save_on_rank: Which rank to save the objects on, in the distributed configuration. If ``save_handler`` is
string or :class:`~pathlib.Path`, this is also used to instantiate a :class:`~ignite.handlers.DiskSaver`.
.. _DistributedDataParallel: https://pytorch.org/docs/stable/generated/
torch.nn.parallel.DistributedDataParallel.html
.. _DataParallel: https://pytorch.org/docs/stable/generated/torch.nn.DataParallel.html
Note:
This class stores a single file as a dictionary of provided objects to save.
The filename is defined by ``filename_pattern`` and by default has the following
structure: ``{filename_prefix}_{name}_{suffix}.{ext}`` where
- ``filename_prefix`` is the argument passed to the constructor,
- `name` is the key in ``to_save`` if a single object is to store, otherwise `name` is "checkpoint".
- `suffix` is composed as following ``{global_step}_{score_name}={score}``.
+----------------+------------+-----------------------+----------------------------------------------+
| score_function | score_name | global_step_transform | suffix |
+================+============+=======================+==============================================+
| None | None | None | ``{engine.state.iteration}`` |
+----------------+------------+-----------------------+----------------------------------------------+
| X | None | None | ``{score}`` |
+----------------+------------+-----------------------+----------------------------------------------+
| X | None | X | ``{global_step}_{score}`` |
+----------------+------------+-----------------------+----------------------------------------------+
| X | X | X | ``{global_step}_{score_name}={score}`` |
+----------------+------------+-----------------------+----------------------------------------------+
| None | None | X | ``{global_step}`` |
+----------------+------------+-----------------------+----------------------------------------------+
| X | X | None | ``{score_name}={score}`` |
+----------------+------------+-----------------------+----------------------------------------------+
Above `global_step` defined by the output of `global_step_transform` and `score` defined by the output
of `score_function`.
By default, none of ``score_function``, ``score_name``, ``global_step_transform`` is defined, then suffix is
setup by attached engine's current iteration. The filename will be
`{filename_prefix}_{name}_{engine.state.iteration}.{ext}`.
For example, ``score_name="neg_val_loss"`` and ``score_function`` that returns `-loss` (as objects with highest
scores will be retained), then saved filename will be ``{filename_prefix}_{name}_neg_val_loss=-0.1234.pt``.
Note:
If ``filename_pattern`` is given, it will be used to render the filenames. ``filename_pattern`` is a string
that can contain ``{filename_prefix}``, ``{name}``, ``{score}``, ``{score_name}`` and ``{global_step}`` as
templates.
For example, let ``filename_pattern="{global_step}-{name}-{score}.pt"`` then the saved filename will be
``30000-checkpoint-94.pt``
**Warning:** Please, keep in mind that if filename collide with already used one to saved a checkpoint,
new checkpoint will replace the older one. This means that filename like ``checkpoint.pt`` will be saved
every call and will always be overwritten by newer checkpoints.
Note:
To get the last stored filename, handler exposes attribute ``last_checkpoint``:
.. code-block:: python
handler = Checkpoint(...)
...
print(handler.last_checkpoint)
> checkpoint_12345.pt
Note:
This class is distributed configuration-friendly: it is not required to instantiate the class in rank 0 only
process. This class supports automatically distributed configuration and if used with
:class:`~ignite.handlers.DiskSaver`, checkpoint is stored by rank 0 process.
.. warning::
When running on XLA devices or using :class:`~torch.distributed.optim.ZeroRedundancyOptimizer`, it
should be run in all processes, otherwise application can get stuck while saving the checkpoint.
.. code-block:: python
# Wrong:
# if idist.get_rank() == 0:
# handler = Checkpoint(...)
# trainer.add_event_handler(Events.ITERATION_COMPLETED(every=1000), handler)
# Correct:
handler = Checkpoint(...)
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=1000), handler)
Examples:
Attach the handler to make checkpoints during training:
.. code-block:: python
from ignite.engine import Engine, Events
from ignite.handlers import Checkpoint
trainer = ...
model = ...
optimizer = ...
lr_scheduler = ...
to_save = {'model': model, 'optimizer': optimizer, 'lr_scheduler': lr_scheduler, 'trainer': trainer}
if (checkpoint_iters):
# A: Output is "checkpoint_<iteration>.pt"
handler = Checkpoint(
to_save, '/tmp/models', n_saved=2
)
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=1000), handler)
else:
# B:Output is "checkpoint_<epoch>.pt"
gst = lambda *_: trainer.state.epoch
handler = Checkpoint(
to_save, '/tmp/models', n_saved=2, global_step_transform=gst
)
trainer.add_event_handler(Events.EPOCH_COMPLETED, handler)
trainer.run(data_loader, max_epochs=6)
> A: ["checkpoint_7000.pt", "checkpoint_8000.pt", ]
> B: ["checkpoint_5.pt", "checkpoint_6.pt", ]
Attach the handler to an evaluator to save best model during the training
according to computed validation metric:
.. code-block:: python
from ignite.engine import Engine, Events
from ignite.handlers import Checkpoint, global_step_from_engine
trainer = ...
evaluator = ...
# Setup Accuracy metric computation on evaluator.
# evaluator.state.metrics contain 'accuracy',
# which will be used to define ``score_function`` automatically.
# Run evaluation on epoch completed event
# ...
to_save = {'model': model}
handler = Checkpoint(
to_save, '/tmp/models',
n_saved=2, filename_prefix='best',
score_name="accuracy",
global_step_transform=global_step_from_engine(trainer)
)
evaluator.add_event_handler(Events.COMPLETED, handler)
trainer.run(data_loader, max_epochs=10)
> ["best_model_9_accuracy=0.77.pt", "best_model_10_accuracy=0.78.pt", ]
Customise the ``save_handler``:
.. code-block:: python
handler = Checkpoint(
to_save, save_handler=DiskSaver('/tmp/models', create_dir=True, **kwargs), n_saved=2
)
.. versionchanged:: 0.4.3
- Checkpoint can save model with same filename.
- Added ``greater_or_equal`` argument.
.. versionchanged:: 0.4.7
- `score_name` can be used to define `score_function` automatically without providing `score_function`.
- `save_handler` automatically saves to disk if path to directory is provided.
- `save_on_rank` saves objects on this rank in a distributed configuration.
"""
Item = NamedTuple("Item", [("priority", int), ("filename", str)])
_state_dict_all_req_keys = ("saved",)
def __init__(
self,
to_save: Mapping,
save_handler: Union[str, Path, Callable, BaseSaveHandler],
filename_prefix: str = "",
score_function: Optional[Callable] = None,
score_name: Optional[str] = None,
n_saved: Union[int, None] = 1,
global_step_transform: Optional[Callable] = None,
filename_pattern: Optional[str] = None,
include_self: bool = False,
greater_or_equal: bool = False,
save_on_rank: int = 0,
):
if not isinstance(to_save, collections.Mapping):
raise TypeError(f"Argument `to_save` should be a dictionary, but given {type(to_save)}")
self._check_objects(to_save, "state_dict")
if include_self:
if not isinstance(to_save, collections.MutableMapping):
raise TypeError(
f"If `include_self` is True, then `to_save` must be mutable, but given {type(to_save)}."
)
if "checkpointer" in to_save:
raise ValueError(f"Cannot have key 'checkpointer' if `include_self` is True: {to_save}")
if not (
isinstance(save_handler, str)
or isinstance(save_handler, Path)
or callable(save_handler)
or isinstance(save_handler, BaseSaveHandler)
):
raise TypeError(
"Argument `save_handler` should be a string or Path object or callable or inherit from BaseSaveHandler"
)
if global_step_transform is not None and not callable(global_step_transform):
raise TypeError(f"global_step_transform should be a function, got {type(global_step_transform)} instead.")
self.to_save = to_save
self.filename_prefix = filename_prefix
if isinstance(save_handler, str) or isinstance(save_handler, Path):
self.save_handler = DiskSaver(save_handler, create_dir=True, save_on_rank=save_on_rank)
else:
self.save_handler = save_handler # type: ignore
self.score_function = score_function
self.score_name = score_name
if self.score_name is not None and self.score_function is None:
self.score_function = self.get_default_score_fn(self.score_name)
self.n_saved = n_saved
self.ext = "pt"
self.global_step_transform = global_step_transform
self.filename_pattern = filename_pattern
self._saved: List["Checkpoint.Item"] = []
self.include_self = include_self
self.greater_or_equal = greater_or_equal
self.save_on_rank = save_on_rank
def _get_filename_pattern(self, global_step: Optional[int]) -> str:
if self.filename_pattern is None:
filename_pattern = self.setup_filename_pattern(
with_prefix=len(self.filename_prefix) > 0,
with_score=self.score_function is not None,
with_score_name=self.score_name is not None,
with_global_step=global_step is not None,
)
else:
filename_pattern = self.filename_pattern
return filename_pattern
def reset(self) -> None:
"""Method to reset saved checkpoint names.
Use this method if the engine will independently run multiple times:
.. code-block:: python
from ignite.handlers import Checkpoint
trainer = ...
checkpointer = Checkpoint(...)
trainer.add_event_handler(Events.COMPLETED, checkpointer)
trainer.add_event_handler(Events.STARTED, checkpointer.reset)
# fold 0
trainer.run(data0, max_epochs=max_epochs)
print("Last checkpoint:", checkpointer.last_checkpoint)
# fold 1
trainer.run(data1, max_epochs=max_epochs)
print("Last checkpoint:", checkpointer.last_checkpoint)
.. versionadded:: 0.4.3
"""
self._saved = []
@property
def last_checkpoint(self) -> Optional[Union[str, Path]]:
if len(self._saved) < 1:
return None
if not isinstance(self.save_handler, DiskSaver):
return self._saved[-1].filename
return self.save_handler.dirname / self._saved[-1].filename
def _check_lt_n_saved(self, or_equal: bool = False) -> bool:
if self.n_saved is None:
return True
return len(self._saved) < self.n_saved + int(or_equal)
def _compare_fn(self, new: Union[int, float]) -> bool:
if self.greater_or_equal:
return new >= self._saved[0].priority
else:
return new > self._saved[0].priority
def __call__(self, engine: Engine) -> None:
global_step = None
if self.global_step_transform is not None:
global_step = self.global_step_transform(engine, engine.last_event_name)
if self.score_function is not None:
priority = self.score_function(engine)
if not isinstance(priority, numbers.Number):
raise ValueError("Output of score_function should be a number")
else:
if global_step is None:
global_step = engine.state.get_event_attrib_value(Events.ITERATION_COMPLETED)
priority = global_step
if self._check_lt_n_saved() or self._compare_fn(priority):
priority_str = f"{priority}" if isinstance(priority, numbers.Integral) else f"{priority:.4f}"
checkpoint = self._setup_checkpoint()
name = "checkpoint"
if len(checkpoint) == 1:
for k in checkpoint:
name = k
checkpoint = checkpoint[name]
filename_pattern = self._get_filename_pattern(global_step)
filename_dict = {
"filename_prefix": self.filename_prefix,
"ext": self.ext,
"name": name,
"score_name": self.score_name,
"score": priority_str if (self.score_function is not None) else None,
"global_step": global_step,
}
filename = filename_pattern.format(**filename_dict)
metadata = {
"basename": f"{self.filename_prefix}{'_' * int(len(self.filename_prefix) > 0)}{name}",
"score_name": self.score_name,
"priority": priority,
}
try:
index = list(map(lambda it: it.filename == filename, self._saved)).index(True)
to_remove = True
except ValueError:
index = 0
to_remove = not self._check_lt_n_saved()
if to_remove:
item = self._saved.pop(index)
if isinstance(self.save_handler, BaseSaveHandler):
self.save_handler.remove(item.filename)
self._saved.append(Checkpoint.Item(priority, filename))
self._saved.sort(key=lambda it: it[0])
if self.include_self:
# Now that we've updated _saved, we can add our own state_dict.
checkpoint["checkpointer"] = self.state_dict()
try:
self.save_handler(checkpoint, filename, metadata)
except TypeError:
self.save_handler(checkpoint, filename)
def _setup_checkpoint(self) -> Dict[str, Dict[Any, Any]]:
checkpoint = {}
if self.to_save is not None:
for k, obj in self.to_save.items():
if isinstance(obj, (nn.DataParallel, nn.parallel.DistributedDataParallel)):
obj = obj.module
elif HAVE_ZERO and isinstance(obj, ZeroRedundancyOptimizer):
obj.consolidate_state_dict(to=self.save_on_rank)
if self.save_on_rank != idist.get_rank():
continue
checkpoint[k] = obj.state_dict()
return checkpoint
@staticmethod
def setup_filename_pattern(
with_prefix: bool = True, with_score: bool = True, with_score_name: bool = True, with_global_step: bool = True
) -> str:
"""Helper method to get the default filename pattern for a checkpoint.
Args:
with_prefix: If True, the ``filename_prefix`` is added to the filename pattern:
``{filename_prefix}_{name}...``. Default, True.
with_score: If True, ``score`` is added to the filename pattern: ``..._{score}.{ext}``.
Default, True. At least one of ``with_score`` and ``with_global_step`` should be True.
with_score_name: If True, ``score_name`` is added to the filename pattern:
``..._{score_name}={score}.{ext}``. If activated, argument ``with_score`` should be
also True, otherwise an error is raised. Default, True.
with_global_step: If True, ``{global_step}`` is added to the
filename pattern: ``...{name}_{global_step}...``.
At least one of ``with_score`` and ``with_global_step`` should be True.
Examples:
.. code-block:: python
from ignite.handlers import Checkpoint
filename_pattern = Checkpoint.setup_filename_pattern()
print(filename_pattern)
> "{filename_prefix}_{name}_{global_step}_{score_name}={score}.{ext}"
.. versionadded:: 0.4.3
"""
filename_pattern = "{name}"
if not (with_global_step or with_score):
raise ValueError("At least one of with_score and with_global_step should be True.")
if with_global_step:
filename_pattern += "_{global_step}"
if with_score_name and with_score:
filename_pattern += "_{score_name}={score}"
elif with_score:
filename_pattern += "_{score}"
elif with_score_name:
raise ValueError("If with_score_name is True, with_score should be also True")
if with_prefix:
filename_pattern = "{filename_prefix}_" + filename_pattern
filename_pattern += ".{ext}"
return filename_pattern
@staticmethod
def _check_objects(objs: Mapping, attr: str) -> None:
for k, obj in objs.items():
if not hasattr(obj, attr):
raise TypeError(f"Object {type(obj)} should have `{attr}` method")
@staticmethod
def load_objects(to_load: Mapping, checkpoint: Union[str, Mapping, Path], **kwargs: Any) -> None:
"""Helper method to apply ``load_state_dict`` on the objects from ``to_load`` using states from ``checkpoint``.
Args:
to_load: a dictionary with objects, e.g. `{"model": model, "optimizer": optimizer, ...}`
checkpoint: a path, a string filepath or a dictionary with state_dicts to load, e.g.
`{"model": model_state_dict, "optimizer": opt_state_dict}`. If `to_load` contains a single key,
then checkpoint can contain directly corresponding state_dict.
kwargs: Keyword arguments accepted for `nn.Module.load_state_dict()`. Passing `strict=False` enables
the user to load part of the pretrained model (useful for example, in Transfer Learning)
Examples:
.. code-block:: python
import tempfile
from pathlib import Path
import torch
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint, Checkpoint
trainer = Engine(lambda engine, batch: None)
with tempfile.TemporaryDirectory() as tmpdirname:
handler = ModelCheckpoint(tmpdirname, 'myprefix', n_saved=None, create_dir=True)
model = torch.nn.Linear(3, 3)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
to_save = {"weights": model, "optimizer": optimizer}
trainer.add_event_handler(Events.EPOCH_COMPLETED(every=2), handler, to_save)
trainer.run(torch.randn(10, 1), 5)
to_load = to_save
checkpoint_fp = Path(tmpdirname) / 'myprefix_checkpoint_40.pt'
checkpoint = torch.load(checkpoint_fp)
Checkpoint.load_objects(to_load=to_load, checkpoint=checkpoint)
# or using a string for checkpoint filepath
to_load = to_save
checkpoint_fp = Path(tmpdirname) / 'myprefix_checkpoint_40.pt'
Checkpoint.load_objects(to_load=to_load, checkpoint=checkpoint_fp)
Note:
If ``to_load`` contains objects of type torch `DistributedDataParallel`_ or
`DataParallel`_, method ``load_state_dict`` will applied to their internal wrapped model (``obj.module``).
.. _DistributedDataParallel: https://pytorch.org/docs/stable/generated/
torch.nn.parallel.DistributedDataParallel.html
.. _DataParallel: https://pytorch.org/docs/stable/generated/torch.nn.DataParallel.html
"""
if isinstance(checkpoint, (str, Path)):
checkpoint_obj = torch.load(checkpoint)
else:
checkpoint_obj = checkpoint
Checkpoint._check_objects(to_load, "load_state_dict")
if not isinstance(checkpoint, (collections.Mapping, str, Path)):
raise TypeError(f"Argument checkpoint should be a string or a dictionary, but given {type(checkpoint)}")
if len(kwargs) > 1 or any(k for k in kwargs if k not in ["strict"]):
warnings.warn("kwargs contains keys other than strict and these will be ignored")
is_state_dict_strict = kwargs.get("strict", True)
def _load_object(obj: Any, chkpt_obj: Any) -> None:
if isinstance(obj, (nn.DataParallel, nn.parallel.DistributedDataParallel)):
obj = obj.module
if isinstance(obj, torch.nn.Module):
obj.load_state_dict(chkpt_obj, strict=is_state_dict_strict)
else:
obj.load_state_dict(chkpt_obj)
if len(to_load) == 1:
# single object and checkpoint is directly a state_dict
key, obj = list(to_load.items())[0]
if key not in checkpoint_obj:
_load_object(obj, checkpoint_obj)
return
# multiple objects to load
for k, obj in to_load.items():
if k not in checkpoint_obj:
raise ValueError(f"Object labeled by '{k}' from `to_load` is not found in the checkpoint")
_load_object(obj, checkpoint_obj[k])
def reload_objects(self, to_load: Mapping, load_kwargs: Optional[Dict] = None, **filename_components: Any) -> None:
"""Helper method to apply ``load_state_dict`` on the objects from ``to_load``. Filename components such as
name, score and global state can be configured.
Args:
to_load: a dictionary with objects, e.g. `{"model": model, "optimizer": optimizer, ...}`
load_kwargs: Keyword arguments accepted for `nn.Module.load_state_dict()`. Passing `strict=False` enables
the user to load part of the pretrained model (useful for example, in Transfer Learning)
filename_components: Filename components used to define the checkpoint file path.
Keyword arguments accepted are `name`, `score` and `global_state`.
Examples:
.. code-block:: python
import tempfile
import torch
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint
trainer = Engine(lambda engine, batch: None)
with tempfile.TemporaryDirectory() as tmpdirname:
checkpoint = ModelCheckpoint(tmpdirname, 'myprefix', n_saved=None, create_dir=True)
model = torch.nn.Linear(3, 3)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
to_save = {"weights": model, "optimizer": optimizer}
trainer.add_event_handler(Events.EPOCH_COMPLETED(every=2), checkpoint, to_save)
trainer.run(torch.randn(10, 1), 5)
to_load = to_save
# load checkpoint myprefix_checkpoint_40.pt
checkpoint.reload_objects(to_load=to_load, global_step=40)
Note:
If ``to_load`` contains objects of type torch `DistributedDataParallel`_ or
`DataParallel`_, method ``load_state_dict`` will applied to their internal wrapped model (``obj.module``).
Note:
This method works only when the ``save_handler`` is of types string,
:class:`~pathlib.Path` or :class:`~ignite.handlers.checkpoint.DiskSaver`.
.. _DistributedDataParallel: https://pytorch.org/docs/stable/generated/
torch.nn.parallel.DistributedDataParallel.html
.. _DataParallel: https://pytorch.org/docs/stable/generated/torch.nn.DataParallel.html
"""
if not isinstance(self.save_handler, DiskSaver):
raise AttributeError(
f"Checkpoint's `save_handler` should be of type `DiskSaver`, given {type(self.save_handler)}"
)
global_step = filename_components.get("global_step", None)
filename_pattern = self._get_filename_pattern(global_step)
checkpoint = self._setup_checkpoint()
name = "checkpoint"
if len(checkpoint) == 1:
for k in checkpoint:
name = k
name = filename_components.get("name", name)
score = filename_components.get("score", None)
filename_dict = {
"filename_prefix": self.filename_prefix,
"ext": self.ext,
"name": name,
"score_name": self.score_name,
"score": score,
"global_step": global_step,
}
checkpoint_fp = filename_pattern.format(**filename_dict)
path = self.save_handler.dirname / checkpoint_fp
load_kwargs = {} if load_kwargs is None else load_kwargs
Checkpoint.load_objects(to_load=to_load, checkpoint=path, **load_kwargs)
def state_dict(self) -> "OrderedDict[str, List[Tuple[int, str]]]":
"""Method returns state dict with saved items: list of ``(priority, filename)`` pairs.
Can be used to save internal state of the class.
"""
return OrderedDict([("saved", [(p, f) for p, f in self._saved])])
def load_state_dict(self, state_dict: Mapping) -> None:
"""Method replaces internal state of the class with provided state dict data.
Args:
state_dict: a dict with "saved" key and list of ``(priority, filename)`` pairs as values.
"""
super().load_state_dict(state_dict)
self._saved = [Checkpoint.Item(p, f) for p, f in state_dict["saved"]]
@staticmethod
def get_default_score_fn(metric_name: str, score_sign: float = 1.0) -> Callable:
"""Helper method to get default score function based on the metric name.
Args:
metric_name: metric name to get the value from ``engine.state.metrics``.
Engine is the one to which :class:`~ignite.handlers.checkpoint.Checkpoint` handler is added.
score_sign: sign of the score: 1.0 or -1.0. For error-like metrics, e.g. smaller is better,
a negative score sign should be used (objects with larger score are retained). Default, 1.0.
Examples:
.. code-block:: python
from ignite.handlers import Checkpoint
best_acc_score = Checkpoint.get_default_score_fn("accuracy")
best_model_handler = Checkpoint(
to_save, save_handler, score_name="val_accuracy", score_function=best_acc_score
)
evaluator.add_event_handler(Events.COMPLETED, best_model_handler)
Usage with error-like metric:
.. code-block:: python
from ignite.handlers import Checkpoint
neg_loss_score = Checkpoint.get_default_score_fn("loss", -1.0)
best_model_handler = Checkpoint(
to_save, save_handler, score_name="val_neg_loss", score_function=neg_loss_score
)
evaluator.add_event_handler(Events.COMPLETED, best_model_handler)
.. versionadded:: 0.4.3
"""
if score_sign not in (1.0, -1.0):
raise ValueError("Argument score_sign should be 1 or -1")
def wrapper(engine: Engine) -> float:
return score_sign * engine.state.metrics[metric_name]
return wrapper
class DiskSaver(BaseSaveHandler):
"""Handler that saves input checkpoint on a disk.
Args:
dirname: Directory path where the checkpoint will be saved
atomic: if True, checkpoint is serialized to a temporary file, and then
moved to final destination, so that files are guaranteed to not be damaged
(for example if exception occurs during saving).
create_dir: if True, will create directory ``dirname`` if it doesnt exist.
require_empty: If True, will raise exception if there are any files in the
directory ``dirname``.
save_on_rank: The rank on which the checkpoint will be saved. Used in distributed
configuration.
kwargs: Accepted keyword arguments for `torch.save` or `xm.save`.
.. versionchanged:: 0.4.2
Accept ``kwargs`` for `torch.save` or `xm.save`.
.. versionchanged:: 0.4.10
Argument ``save_on_rank`` was added to specify the rank on which checkpoint should be saved.
"""
def __init__(
self,
dirname: Union[str, Path],
atomic: bool = True,
create_dir: bool = True,
require_empty: bool = True,
save_on_rank: int = 0,
**kwargs: Any,
):
self.dirname = Path(dirname).expanduser()
self._atomic = atomic
self.save_on_rank = save_on_rank
if idist.get_rank() == save_on_rank:
self._check_and_setup(self.dirname, create_dir, require_empty)
self.kwargs = kwargs
@staticmethod
def _check_and_setup(dirname: Path, create_dir: bool, require_empty: bool) -> None:
if create_dir:
if not dirname.exists():
dirname.mkdir(parents=True)
# Ensure that dirname exists
if not dirname.exists():
raise ValueError(f"Directory path '{dirname}' is not found")
if require_empty:
matched = [fname for fname in os.listdir(dirname) if fname.endswith(".pt")]
if len(matched) > 0:
raise ValueError(
f"Files {matched} with extension '.pt' are already present "
f"in the directory {dirname}. If you want to use this "
"directory anyway, pass `require_empty=False`."
""
)
def __call__(self, checkpoint: Mapping, filename: str, metadata: Optional[Mapping] = None) -> None:
path = self.dirname / filename
if idist.has_xla_support:
import torch_xla.core.xla_model as xm
# all tpu procs should enter here as internally performs sync across device
self._save_func(checkpoint, path, xm.save)
elif self.save_on_rank == idist.get_rank():
self._save_func(checkpoint, path, torch.save)
def _save_func(self, checkpoint: Mapping, path: Path, func: Callable) -> None:
if not self._atomic:
func(checkpoint, path, **self.kwargs)
else:
tmp = tempfile.NamedTemporaryFile(delete=False, dir=self.dirname)
tmp_file = tmp.file
tmp_name = tmp.name
try:
func(checkpoint, tmp_file, **self.kwargs)
except BaseException:
tmp.close()
os.remove(tmp_name)
raise
else:
tmp.close()
os.replace(tmp.name, path)
# append group/others read mode
os.chmod(path, os.stat(path).st_mode | stat.S_IRGRP | stat.S_IROTH)
def remove(self, filename: str) -> None:
if idist.get_rank() == self.save_on_rank:
path = self.dirname / filename
path.unlink()
class ModelCheckpoint(Checkpoint):
"""ModelCheckpoint handler, inherits from :class:`~ignite.handlers.checkpoint.Checkpoint`, can be used
to periodically save objects to disk only. If needed to store checkpoints to
another storage type, please consider :class:`~ignite.handlers.checkpoint.Checkpoint`.
It also provides `last_checkpoint` attribute to show the last saved checkpoint.
This handler expects two arguments:
- an :class:`~ignite.engine.engine.Engine` object
- a `dict` mapping names (`str`) to objects that should be saved to disk.
See Examples for further details.
.. warning::
Behaviour of this class has been changed since v0.3.0.
There is no more internal counter that has been used to indicate the number of save actions. User could
see its value `step_number` in the filename, e.g. `{filename_prefix}_{name}_{step_number}.pt`. Actually,
`step_number` is replaced by current engine's epoch if `score_function` is specified and current iteration
otherwise.
A single `pt` file is created instead of multiple files.
Args:
dirname: Directory path where objects will be saved.
filename_prefix: Prefix for the file names to which objects will be saved. See Notes of
:class:`~ignite.handlers.checkpoint.Checkpoint` for more details.
score_function: if not None, it should be a function taking a single argument, an
:class:`~ignite.engine.engine.Engine` object, and return a score (`float`). Objects with highest scores
will be retained.
score_name: if ``score_function`` not None, it is possible to store its value using
`score_name`. See Examples of :class:`~ignite.handlers.checkpoint.Checkpoint` for more details.
n_saved: Number of objects that should be kept on disk. Older files will be removed. If set to
`None`, all objects are kept.
atomic: If True, objects are serialized to a temporary file, and then moved to final
destination, so that files are guaranteed to not be damaged (for example if exception
occurs during saving).
require_empty: If True, will raise exception if there are any files starting with
``filename_prefix`` in the directory ``dirname``.
create_dir: If True, will create directory ``dirname`` if it does not exist.
global_step_transform: global step transform function to output a desired global step.
Input of the function is `(engine, event_name)`. Output of function should be an integer.
Default is None, global_step based on attached engine. If provided, uses function output as global_step.
To setup global step from another engine, please use :meth:`~ignite.handlers.global_step_from_engine`.
filename_pattern: If ``filename_pattern`` is provided, this pattern will be used to render
checkpoint filenames. If the pattern is not defined, the default pattern would be used.
See :class:`~ignite.handlers.checkpoint.Checkpoint` for details.
include_self: Whether to include the `state_dict` of this object in the checkpoint. If `True`, then
there must not be another object in ``to_save`` with key ``checkpointer``.
greater_or_equal: if `True`, the latest equally scored model is stored. Otherwise, the first model.
Default, `False`.
save_on_rank: Which rank to save the objects on, in the distributed configuration. Used to
instantiate a :class:`~ignite.handlers.DiskSaver` and is also passed to the parent class.
kwargs: Accepted keyword arguments for `torch.save` or `xm.save` in `DiskSaver`.
.. versionchanged:: 0.4.2
Accept ``kwargs`` for `torch.save` or `xm.save`
.. versionchanged:: 0.4.9
Accept ``filename_pattern`` and ``greater_or_equal`` for parity
with :class:`~ignite.handlers.checkpoint.Checkpoint`
.. versionchanged:: 0.4.10
Added `save_on_rank` arg to save objects on this rank in a distributed configuration
Examples:
.. testcode:: python
import os
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint
from torch import nn
trainer = Engine(lambda engine, batch: None)
handler = ModelCheckpoint('/tmp/models', 'myprefix', n_saved=2, create_dir=True, require_empty=False)
model = nn.Linear(3, 3)
trainer.add_event_handler(Events.EPOCH_COMPLETED(every=2), handler, {'mymodel': model})
trainer.run([0, 1, 2, 3, 4], max_epochs=6)
print(sorted(os.listdir('/tmp/models')))
print(handler.last_checkpoint)
.. testoutput:: python
['myprefix_mymodel_20.pt', 'myprefix_mymodel_30.pt']
/tmp/models/myprefix_mymodel_30.pt
"""
def __init__(
self,
dirname: Union[str, Path],
filename_prefix: str = "",
score_function: Optional[Callable] = None,
score_name: Optional[str] = None,
n_saved: Union[int, None] = 1,
atomic: bool = True,
require_empty: bool = True,
create_dir: bool = True,
global_step_transform: Optional[Callable] = None,
filename_pattern: Optional[str] = None,
include_self: bool = False,
greater_or_equal: bool = False,
save_on_rank: int = 0,
**kwargs: Any,
):
disk_saver = DiskSaver(
dirname,
atomic=atomic,
create_dir=create_dir,
require_empty=require_empty,
save_on_rank=save_on_rank,
**kwargs,
)
super(ModelCheckpoint, self).__init__(
to_save={},
save_handler=disk_saver,
filename_prefix=filename_prefix,
score_function=score_function,
score_name=score_name,
n_saved=n_saved,
global_step_transform=global_step_transform,
filename_pattern=filename_pattern,
include_self=include_self,
greater_or_equal=greater_or_equal,
save_on_rank=save_on_rank,
)
@property
def last_checkpoint(self) -> Optional[Union[str, Path]]:
if len(self._saved) < 1:
return None
if not isinstance(self.save_handler, DiskSaver):
raise RuntimeError(f"Internal error, save_handler should be DiskSaver, but has {type(self.save_handler)}.")
return self.save_handler.dirname / self._saved[-1].filename
def __call__(self, engine: Engine, to_save: Mapping): # type: ignore
if len(to_save) == 0:
raise RuntimeError("No objects to checkpoint found.")
self._check_objects(to_save, "state_dict")
self.to_save = to_save
super(ModelCheckpoint, self).__call__(engine)
|
import logging
import numbers
from typing import Callable, Union
import torch
from ignite.engine import Engine
from ignite.utils import apply_to_type, setup_logger
__all__ = ["TerminateOnNan"]
class TerminateOnNan:
"""TerminateOnNan handler can be used to stop the training if the `process_function`'s output
contains a NaN or infinite number or `torch.tensor`.
The output can be of type: number, tensor or collection of them. The training is stopped if
there is at least a single number/tensor have NaN or Infinite value. For example, if the output is
`[1.23, torch.tensor(...), torch.tensor(float('nan'))]` the handler will stop the training.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into a number or `torch.tensor`
or collection of them. This can be useful if, for example, you have a multi-output model and
you want to check one or multiple values of the output.
Examples:
.. code-block:: python
trainer.add_event_handler(Events.ITERATION_COMPLETED, TerminateOnNan())
"""
def __init__(self, output_transform: Callable = lambda x: x):
self.logger = setup_logger(__name__ + "." + self.__class__.__name__)
self.logger.addHandler(logging.StreamHandler())
self._output_transform = output_transform
def __call__(self, engine: Engine) -> None:
output = self._output_transform(engine.state.output)
def raise_error(x: Union[float, torch.Tensor]) -> None:
if isinstance(x, numbers.Number):
x = torch.tensor(x)
if isinstance(x, torch.Tensor) and not bool(torch.isfinite(x).all()):
raise RuntimeError("Infinite or NaN tensor found.")
try:
apply_to_type(output, (numbers.Number, torch.Tensor), raise_error)
except RuntimeError:
self.logger.warning(f"{self.__class__.__name__}: Output '{output}' contains NaN or Inf. Stop training")
engine.terminate()
|
# coding: utf-8
import contextlib
import logging
import tempfile
import warnings
from math import ceil
from pathlib import Path
from typing import Any, Callable, Dict, List, Mapping, Optional, Union
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
import ignite.distributed as idist
from ignite.engine import Engine, Events
from ignite.handlers import Checkpoint
from ignite.handlers.param_scheduler import LRScheduler, ParamGroupScheduler, PiecewiseLinear
class FastaiLRFinder:
"""Learning rate finder handler for supervised trainers.
While attached, the handler increases the learning rate in between two
boundaries in a linear or exponential manner. It provides valuable
information on how well the network can be trained over a range of learning
rates and what can be an optimal learning rate.
Examples:
.. code-block:: python
from ignite.handlers import FastaiLRFinder
trainer = ...
model = ...
optimizer = ...
lr_finder = FastaiLRFinder()
to_save = {"model": model, "optimizer": optimizer}
with lr_finder.attach(trainer, to_save=to_save) as trainer_with_lr_finder:
trainer_with_lr_finder.run(dataloader)
# Get lr_finder results
lr_finder.get_results()
# Plot lr_finder results (requires matplotlib)
lr_finder.plot()
# get lr_finder suggestion for lr
lr_finder.lr_suggestion()
Note:
When context manager is exited all LR finder's handlers are removed.
Note:
Please, also keep in mind that all other handlers attached the trainer will be executed during LR finder's run.
Note:
This class may require `matplotlib` package to be installed to plot learning rate range test:
.. code-block:: bash
pip install matplotlib
References:
Cyclical Learning Rates for Training Neural Networks:
https://arxiv.org/abs/1506.01186
fastai/lr_find: https://github.com/fastai/fastai
.. versionadded:: 0.4.6
"""
_lr_schedule: Union[LRScheduler, PiecewiseLinear, ParamGroupScheduler]
def __init__(self) -> None:
self._diverge_flag = False
self._history: Dict[str, List[Any]] = {}
self._best_loss = None
self.logger = logging.getLogger(__name__ + "." + self.__class__.__name__)
def _run(
self,
trainer: Engine,
optimizer: Optimizer,
output_transform: Callable,
num_iter: int,
start_lrs: List[float],
end_lrs: List[float],
step_mode: str,
smooth_f: float,
diverge_th: float,
) -> None:
self._history = {"lr": [], "loss": []}
self._best_loss = None
self._diverge_flag = False
# attach LRScheduler to trainer.
if num_iter is None:
num_iter = trainer.state.epoch_length * trainer.state.max_epochs
else:
max_iter = trainer.state.epoch_length * trainer.state.max_epochs # type: ignore[operator]
if max_iter < num_iter:
max_iter = num_iter
trainer.state.max_iters = num_iter
trainer.state.max_epochs = ceil(num_iter / trainer.state.epoch_length) # type: ignore[operator]
if not trainer.has_event_handler(self._reached_num_iterations):
trainer.add_event_handler(Events.ITERATION_COMPLETED, self._reached_num_iterations, num_iter)
# attach loss and lr logging
if not trainer.has_event_handler(self._log_lr_and_loss):
trainer.add_event_handler(
Events.ITERATION_COMPLETED, self._log_lr_and_loss, output_transform, smooth_f, diverge_th
)
self.logger.debug(f"Running LR finder for {num_iter} iterations")
# Initialize the proper learning rate policy
if step_mode.lower() == "exp":
self._lr_schedule = LRScheduler(_ExponentialLR(optimizer, start_lrs, end_lrs, num_iter))
else:
if len(start_lrs) == 1:
self._lr_schedule = PiecewiseLinear(
optimizer,
param_name="lr",
milestones_values=[(0, start_lrs[0]), (num_iter, end_lrs[0])],
)
else:
self._lr_schedule = ParamGroupScheduler(
[
PiecewiseLinear(
optimizer,
param_name="lr",
milestones_values=[(0, start_lrs[i]), (num_iter, end_lrs[i])],
param_group_index=i,
)
for i in range(len(optimizer.param_groups))
]
)
if not trainer.has_event_handler(self._lr_schedule):
trainer.add_event_handler(Events.ITERATION_COMPLETED, self._lr_schedule, num_iter)
def _reset(self, trainer: Engine) -> None:
self.logger.debug("Completed LR finder run")
trainer.remove_event_handler(self._lr_schedule, Events.ITERATION_COMPLETED)
trainer.remove_event_handler(self._log_lr_and_loss, Events.ITERATION_COMPLETED)
trainer.remove_event_handler(self._reached_num_iterations, Events.ITERATION_COMPLETED)
def _log_lr_and_loss(self, trainer: Engine, output_transform: Callable, smooth_f: float, diverge_th: float) -> None:
output = trainer.state.output
loss = output_transform(output)
if not isinstance(loss, float):
if isinstance(loss, torch.Tensor):
if (loss.ndimension() == 0) or (loss.ndimension() == 1 and len(loss) == 1):
loss = loss.item()
else:
raise ValueError(
"if output of the engine is torch.Tensor, then "
"it must be 0d torch.Tensor or 1d torch.Tensor with 1 element, "
f"but got torch.Tensor of shape {loss.shape}"
)
else:
raise TypeError(
"output of the engine should be of type float or 0d torch.Tensor "
"or 1d torch.Tensor with 1 element, "
f"but got output of type {type(loss).__name__}"
)
loss = idist.all_reduce(loss)
lr = self._lr_schedule.get_param()
self._history["lr"].append(lr)
if trainer.state.iteration == 1:
self._best_loss = loss
else:
if smooth_f > 0:
loss = smooth_f * loss + (1 - smooth_f) * self._history["loss"][-1]
if loss < self._best_loss:
self._best_loss = loss
self._history["loss"].append(loss)
# Check if the loss has diverged; if it has, stop the trainer
if self._history["loss"][-1] > diverge_th * self._best_loss: # type: ignore[operator]
self._diverge_flag = True
self.logger.info("Stopping early, the loss has diverged")
trainer.terminate()
def _reached_num_iterations(self, trainer: Engine, num_iter: int) -> None:
if trainer.state.iteration > num_iter:
trainer.terminate()
def _warning(self, _: Any) -> None:
if not self._diverge_flag:
warnings.warn(
"Run completed without loss diverging, increase end_lr, decrease diverge_th or look"
" at lr_finder.plot()",
UserWarning,
)
def _detach(self, trainer: Engine) -> None:
"""
Detaches lr_finder from trainer.
Args:
trainer: the trainer to detach form.
"""
if trainer.has_event_handler(self._run, Events.STARTED):
trainer.remove_event_handler(self._run, Events.STARTED)
if trainer.has_event_handler(self._warning, Events.COMPLETED):
trainer.remove_event_handler(self._warning, Events.COMPLETED)
if trainer.has_event_handler(self._reset, Events.COMPLETED):
trainer.remove_event_handler(self._reset, Events.COMPLETED)
def get_results(self) -> Dict[str, List[Any]]:
"""
Returns:
Dictionary with loss and lr logs from the previous run
"""
return self._history
def plot(
self,
skip_start: int = 10,
skip_end: int = 5,
log_lr: bool = True,
display_suggestion: bool = True,
ax: Optional[Any] = None,
**kwargs: Any,
) -> None:
"""Plots the learning rate range test.
This method requires ``matplotlib`` package to be installed:
.. code-block:: bash
pip install matplotlib
Args:
skip_start: number of batches to trim from the start.
Default: 10.
skip_end: number of batches to trim from the start.
Default: 5.
log_lr: True to plot the learning rate in a logarithmic
scale; otherwise, plotted in a linear scale. Default: True.
display_suggestion: if True, red dot shows the suggested learning rate.
ax: Pre-existing axes for the plot. Default: None.
kwargs: optional kwargs passed to ``plt.subplots`` if ``ax`` is not provided.
.. code-block:: python
ax = lr_finder.plot(skip_end=0)
ax.figure.savefig("output.jpg")
"""
try:
from matplotlib import pyplot as plt
except ImportError:
raise ModuleNotFoundError(
"This method requires matplotlib to be installed. "
"Please install it with command: \n pip install matplotlib"
)
if not self._history:
raise RuntimeError("learning rate finder didn't run yet so results can't be plotted")
if skip_start < 0:
raise ValueError("skip_start cannot be negative")
if skip_end < 0:
raise ValueError("skip_end cannot be negative")
# Get the data to plot from the history dictionary.
lrs = self._history["lr"]
losses = self._history["loss"]
num_groups = len(lrs[0]) if isinstance(lrs[0], list) else 1
legends = [f"suggested lr for param_groups {i}" for i in range(num_groups)]
if ax is None:
fig, ax = plt.subplots(**kwargs)
# Check to show the suggested learning rate
if display_suggestion:
sug_lr = self.lr_suggestion()
idx = self._history["lr"].index(sug_lr)
if skip_start >= idx:
warnings.warn(
"skip_start is larger than the suggested LR found"
" and it will not be visible on the plot. Please, make the value smaller.",
UserWarning,
)
corresponding_loss = self._history["loss"][int(idx)]
# Check if optimizer has multiple param_groups
if not isinstance(sug_lr, list):
sug_lr = [
sug_lr,
]
for lr in sug_lr:
ax.scatter(
lr, corresponding_loss, color="red" if len(sug_lr) == 1 else None, s=75, marker="o", zorder=3
)
# handle skip_end=0 properly
if skip_end == 0:
lrs = lrs[skip_start:]
losses = losses[skip_start:]
else:
lrs = lrs[skip_start:-skip_end]
losses = losses[skip_start:-skip_end]
plt.legend(legends)
# Plot loss as a function of the learning rate
ax.plot(lrs, losses)
if log_lr:
ax.set_xscale("log")
lr_min = min(lrs[0]) if isinstance(lrs[0], list) else lrs[0]
lr_max = max(lrs[-1]) if isinstance(lrs[-1], list) else lrs[-1]
ax.set_xlim([lr_min, lr_max])
ax.set_xlabel("Learning rate")
ax.set_ylabel("Loss")
plt.show()
return ax
def lr_suggestion(self) -> Any:
"""
Returns:
Learning rate at the minimum numerical gradient
(ignoring the increasing part of the curve)
"""
if not self._history:
raise RuntimeError("learning rate finder didn't run yet so lr_suggestion can't be returned")
loss = self._history["loss"]
min_loss_idx = torch.tensor(loss).argmin()
# Ignore the increasing part of the curve
decreasing_losses = self._history["loss"][: int(min_loss_idx.item()) + 1]
if len(decreasing_losses) < 3:
raise RuntimeError(
"FastaiLRFinder got unexpected curve shape, the curve should be somehow U-shaped, "
"please decrease start_lr or increase end_lr to resolve this issue."
)
losses = torch.tensor(decreasing_losses)
grads = torch.tensor([0.5 * (losses[i + 1] - losses[i - 1]) for i in range(1, len(losses) - 1)])
min_grad_idx = grads.argmin() + 1
return self._history["lr"][int(min_grad_idx)]
def apply_suggested_lr(self, optimizer: Optimizer) -> None:
"""
Applying the suggested learning rate(s) on the given optimizer.
Args:
optimizer: the optimizer to apply the suggested learning rate(s) on.
Note:
The given optimizer must be the same as the one we before found the suggested learning rate for.
"""
sug_lr = self.lr_suggestion()
if not isinstance(sug_lr, list):
sug_lr = [
sug_lr,
]
if len(sug_lr) != len(optimizer.param_groups):
raise RuntimeError(
"The number of parameter groups does not match between "
"given optimizer and the one used for estimating the "
f"learning rate: {len(sug_lr)} vs {len(optimizer.param_groups)}"
)
for i, lr in enumerate(sug_lr):
optimizer.param_groups[i]["lr"] = lr
@contextlib.contextmanager
def attach(
self,
trainer: Engine,
to_save: Mapping,
output_transform: Callable = lambda output: output,
num_iter: Optional[int] = None,
start_lr: Optional[Union[float, List[float]]] = None,
end_lr: Optional[Union[float, List[float]]] = 10.0,
step_mode: str = "exp",
smooth_f: float = 0.05,
diverge_th: float = 5.0,
) -> Any:
"""Attaches lr_finder to a given trainer. It also resets model and optimizer at the end of the run.
Args:
trainer: lr_finder is attached to this trainer. Please, keep in mind that all attached handlers
will be executed.
to_save: dictionary with optimizer and other objects that needs to be restored after running
the LR finder. For example, ``to_save={'optimizer': optimizer, 'model': model}``.
It should contain "optimizer" key for the optimizer.
Also all objects should implement ``state_dict`` and ``load_state_dict`` methods.
output_transform: function that transforms the trainer's ``state.output`` after each
iteration. It must return the loss of that iteration.
num_iter: number of iterations for lr schedule between base lr and end_lr. Default, it will
run for ``trainer.state.epoch_length * trainer.state.max_epochs``.
start_lr: lower bound for lr search. Default, Learning Rate specified with the optimizer.
end_lr: upper bound for lr search. Default, 10.0.
step_mode: "exp" or "linear", which way should the lr be increased from ``start_lr``
to ``end_lr``. Default, "exp".
smooth_f: loss smoothing factor in range ``[0, 1)``. Default, 0.05
diverge_th: Used for stopping the search when ``current loss > diverge_th * best_loss``.
Default, 5.0.
Returns:
trainer_with_lr_finder (trainer used for finding the lr)
Examples:
.. code-block:: python
to_save = {"model": model, "optimizer": optimizer}
with lr_finder.attach(trainer, to_save=to_save) as trainer_with_lr_finder:
trainer_with_lr_finder.run(dataloader)
Note:
lr_finder cannot be attached to more than one trainer at a time.
"""
if not isinstance(to_save, Mapping):
raise TypeError(f"Argument to_save should be a mapping, but given {type(to_save)}")
Checkpoint._check_objects(to_save, "state_dict")
Checkpoint._check_objects(to_save, "load_state_dict")
if "optimizer" not in to_save:
raise ValueError("Mapping to_save should contain 'optimizer' key")
if not isinstance(to_save["optimizer"], torch.optim.Optimizer):
raise TypeError(
f"Object to_save['optimizer'] should be torch optimizer, but given {type(to_save['optimizer'])}"
)
if smooth_f < 0 or smooth_f >= 1:
raise ValueError("smooth_f is outside the range [0, 1]")
if diverge_th < 1:
raise ValueError("diverge_th should be larger than 1")
if step_mode not in ["exp", "linear"]:
raise ValueError(f"step_mode should be 'exp' or 'linear', but given {step_mode}")
if num_iter is not None:
if not isinstance(num_iter, int):
raise TypeError(f"if provided, num_iter should be an integer, but give {num_iter}")
if num_iter <= 0:
raise ValueError(f"if provided, num_iter should be positive, but give {num_iter}")
optimizer = to_save["optimizer"]
if start_lr is None:
start_lrs = [pg["lr"] for pg in optimizer.param_groups]
elif isinstance(start_lr, float):
start_lrs = [start_lr] * len(optimizer.param_groups)
elif isinstance(start_lr, list):
if len(start_lr) != len(optimizer.param_groups):
raise ValueError(
"Number of values of start_lr should be equal to optimizer values."
f"start_lr values:{len(start_lr)} optimizer values: {len(optimizer.param_groups)}"
)
start_lrs = start_lr
else:
raise TypeError(f"start_lr should be a float or list of floats, but given {type(start_lr)}")
if isinstance(end_lr, float):
end_lrs = [end_lr] * len(optimizer.param_groups)
elif isinstance(end_lr, list):
if len(end_lr) != len(optimizer.param_groups):
raise ValueError(
"Number of values of end_lr should be equal to optimizer values."
f"end_lr values:{len(end_lr)} optimizer values: {len(optimizer.param_groups)}"
)
end_lrs = end_lr
else:
raise TypeError(f"end_lr should be a float or list of floats, but given {type(end_lr)}")
for start, end in zip(start_lrs, end_lrs):
if start >= end:
raise ValueError(f"start_lr must be less than end_lr, start_lr={start_lr} vs end_lr={end_lr}")
# store to_save
with tempfile.TemporaryDirectory() as tmpdirname:
obj = {k: o.state_dict() for k, o in to_save.items()}
# add trainer
obj["trainer"] = trainer.state_dict()
cache_filepath = Path(tmpdirname) / "ignite_lr_finder_cache.pt"
torch.save(obj, cache_filepath.as_posix())
# Attach handlers
if not trainer.has_event_handler(self._run):
trainer.add_event_handler(
Events.STARTED,
self._run,
optimizer,
output_transform,
num_iter,
start_lrs,
end_lrs,
step_mode,
smooth_f,
diverge_th,
)
if not trainer.has_event_handler(self._warning):
trainer.add_event_handler(Events.COMPLETED, self._warning)
if not trainer.has_event_handler(self._reset):
trainer.add_event_handler(Events.COMPLETED, self._reset)
yield trainer
self._detach(trainer)
# restore to_save and reset trainer's state
obj = torch.load(cache_filepath.as_posix())
trainer.load_state_dict(obj["trainer"])
for k, o in obj.items():
if k in to_save:
to_save[k].load_state_dict(o)
class _ExponentialLR(_LRScheduler):
"""Exponentially increases the learning rate between two boundaries over a number of
iterations.
Args:
optimizer: wrapped optimizer.
start_lrs: the initial learning rate for parameter groups.
end_lrs: the final learning rate for parameter groups.
num_iter: the number of iterations over which the test
occurs. Default: 100.
last_epoch: the index of last epoch. Default: -1.
"""
def __init__(
self, optimizer: Optimizer, start_lrs: List[float], end_lrs: List[float], num_iter: int, last_epoch: int = -1
):
self.end_lrs = end_lrs
self.num_iter = num_iter
super(_ExponentialLR, self).__init__(optimizer, last_epoch)
# override base_lrs
self.base_lrs = start_lrs
def get_lr(self) -> List[float]: # type: ignore[override]
curr_iter = self.last_epoch + 1
r = curr_iter / self.num_iter
return [base_lr * (end_lr / base_lr) ** r for end_lr, base_lr in zip(self.end_lrs, self.base_lrs)]
|
from typing import Any, Callable, Optional
from ignite.engine import Engine
from ignite.engine.events import Events
from ignite.handlers.checkpoint import Checkpoint, DiskSaver, ModelCheckpoint
from ignite.handlers.early_stopping import EarlyStopping
from ignite.handlers.ema_handler import EMAHandler
from ignite.handlers.lr_finder import FastaiLRFinder
from ignite.handlers.param_scheduler import (
BaseParamScheduler,
ConcatScheduler,
CosineAnnealingScheduler,
create_lr_scheduler_with_warmup,
CyclicalScheduler,
LinearCyclicalScheduler,
LRScheduler,
ParamGroupScheduler,
ParamScheduler,
PiecewiseLinear,
ReduceLROnPlateauScheduler,
)
from ignite.handlers.state_param_scheduler import (
ExpStateScheduler,
LambdaStateScheduler,
MultiStepStateScheduler,
PiecewiseLinearStateScheduler,
StateParamScheduler,
StepStateScheduler,
)
from ignite.handlers.stores import EpochOutputStore
from ignite.handlers.terminate_on_nan import TerminateOnNan
from ignite.handlers.time_limit import TimeLimit
from ignite.handlers.time_profilers import BasicTimeProfiler, HandlersTimeProfiler
from ignite.handlers.timing import Timer
__all__ = [
"ModelCheckpoint",
"Checkpoint",
"DiskSaver",
"Timer",
"EarlyStopping",
"TerminateOnNan",
"global_step_from_engine",
"TimeLimit",
"EpochOutputStore",
"ConcatScheduler",
"CosineAnnealingScheduler",
"LinearCyclicalScheduler",
"LRScheduler",
"ParamGroupScheduler",
"ParamScheduler",
"PiecewiseLinear",
"CyclicalScheduler",
"create_lr_scheduler_with_warmup",
"FastaiLRFinder",
"EMAHandler",
"BasicTimeProfiler",
"HandlersTimeProfiler",
"BaseParamScheduler",
"StateParamScheduler",
"LambdaStateScheduler",
"PiecewiseLinearStateScheduler",
"ExpStateScheduler",
"StepStateScheduler",
"MultiStepStateScheduler",
"ReduceLROnPlateauScheduler",
]
def global_step_from_engine(engine: Engine, custom_event_name: Optional[Events] = None) -> Callable:
"""Helper method to setup `global_step_transform` function using another engine.
This can be helpful for logging trainer epoch/iteration while output handler is attached to an evaluator.
Args:
engine: engine which state is used to provide the global step
custom_event_name: registered event name. Optional argument, event name to use.
Returns:
global step based on provided engine
"""
def wrapper(_: Any, event_name: Events) -> int:
if custom_event_name is not None:
event_name = custom_event_name
return engine.state.get_event_attrib_value(event_name)
return wrapper
|
from typing import Any, Callable, List, Optional
from ignite.engine import Engine, Events
class EpochOutputStore:
"""EpochOutputStore handler to save output prediction and target history
after every epoch, could be useful for e.g., visualization purposes.
Note:
This can potentially lead to a memory error if the output data is
larger than available RAM.
Args:
output_transform: a callable that is used to
transform the :class:`~ignite.engine.engine.Engine`'s
``process_function``'s output , e.g., lambda x: x[0]
Attributes:
data: a list of :class:`~ignite.engine.engine.Engine` outputs,
optionally transformed by `output_transform`.
Examples:
.. code-block:: python
eos = EpochOutputStore()
trainer = create_supervised_trainer(model, optimizer, loss)
train_evaluator = create_supervised_evaluator(model, metrics)
eos.attach(train_evaluator, 'output')
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
train_evaluator.run(train_loader)
output = train_evaluator.state.output
# output = [(y_pred0, y0), (y_pred1, y1), ...]
# do something with output, e.g., plotting
.. versionadded:: 0.4.5
.. versionchanged:: 0.4.5
`attach` now accepts an optional argument `name`
"""
def __init__(self, output_transform: Callable = lambda x: x):
self.data: List[Any] = []
self.output_transform = output_transform
def reset(self) -> None:
"""Reset the attribute data to empty list."""
self.data = []
def update(self, engine: Engine) -> None:
"""Append the output of Engine to attribute data."""
output = self.output_transform(engine.state.output)
self.data.append(output)
def store(self, engine: Engine) -> None:
"""Store `self.data` on `engine.state.{self.name}`"""
setattr(engine.state, self.name, self.data)
def attach(self, engine: Engine, name: Optional[str] = None) -> None:
"""Attaching `reset` method at EPOCH_STARTED and
`update` method at ITERATION_COMPLETED.
If `name` is passed, will store `self.data` on `engine.state`
under `name`.
"""
engine.add_event_handler(Events.EPOCH_STARTED, self.reset)
engine.add_event_handler(Events.ITERATION_COMPLETED, self.update)
if name:
self.name = name
engine.add_event_handler(Events.EPOCH_COMPLETED, self.store)
|
import functools
from collections import OrderedDict
from typing import Any, Callable, cast, Dict, List, Mapping, Sequence, Tuple, Union
import torch
from ignite.engine import Engine, EventEnum, Events
from ignite.handlers.timing import Timer
class BasicTimeProfiler:
"""
BasicTimeProfiler can be used to profile the handlers,
events, data loading and data processing times.
Examples:
.. code-block:: python
from ignite.handlers import BasicTimeProfiler
trainer = Engine(train_updater)
# Create an object of the profiler and attach an engine to it
profiler = BasicTimeProfiler()
profiler.attach(trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def log_intermediate_results():
profiler.print_results(profiler.get_results())
trainer.run(dataloader, max_epochs=3)
profiler.write_results('path_to_dir/time_profiling.csv')
.. versionadded:: 0.4.6
"""
events_to_ignore = [
Events.EXCEPTION_RAISED,
Events.TERMINATE,
Events.TERMINATE_SINGLE_EPOCH,
Events.DATALOADER_STOP_ITERATION,
Events.INTERRUPT,
]
def __init__(self) -> None:
self._dataflow_timer = Timer()
self._processing_timer = Timer()
self._event_handlers_timer = Timer()
self.dataflow_times = torch.zeros(1)
self.processing_times = torch.zeros(1)
self.event_handlers_times: Dict[EventEnum, torch.Tensor] = {}
self._events = [
Events.EPOCH_STARTED,
Events.EPOCH_COMPLETED,
Events.ITERATION_STARTED,
Events.ITERATION_COMPLETED,
Events.GET_BATCH_STARTED,
Events.GET_BATCH_COMPLETED,
Events.COMPLETED,
]
self._fmethods = [
self._as_first_epoch_started,
self._as_first_epoch_completed,
self._as_first_iter_started,
self._as_first_iter_completed,
self._as_first_get_batch_started,
self._as_first_get_batch_completed,
self._as_first_completed,
]
self._lmethods = [
self._as_last_epoch_started,
self._as_last_epoch_completed,
self._as_last_iter_started,
self._as_last_iter_completed,
self._as_last_get_batch_started,
self._as_last_get_batch_completed,
self._as_last_completed,
]
def _reset(self, num_epochs: int, total_num_iters: int) -> None:
self.dataflow_times = torch.zeros(total_num_iters)
self.processing_times = torch.zeros(total_num_iters)
self.event_handlers_times = {
Events.STARTED: torch.zeros(1),
Events.COMPLETED: torch.zeros(1),
Events.EPOCH_STARTED: torch.zeros(num_epochs),
Events.EPOCH_COMPLETED: torch.zeros(num_epochs),
Events.ITERATION_STARTED: torch.zeros(total_num_iters),
Events.ITERATION_COMPLETED: torch.zeros(total_num_iters),
Events.GET_BATCH_COMPLETED: torch.zeros(total_num_iters),
Events.GET_BATCH_STARTED: torch.zeros(total_num_iters),
}
def _as_first_started(self, engine: Engine) -> None:
if hasattr(engine.state.dataloader, "__len__"):
num_iters_per_epoch = len(engine.state.dataloader) # type: ignore[arg-type]
else:
if engine.state.epoch_length is None:
raise ValueError(
"As epoch_length is not set, we can not use BasicTimeProfiler in this case."
"Please, set trainer.run(..., epoch_length=epoch_length) in order to fix this."
)
num_iters_per_epoch = engine.state.epoch_length
self.max_epochs = cast(int, engine.state.max_epochs)
self.total_num_iters = self.max_epochs * num_iters_per_epoch
self._reset(self.max_epochs, self.total_num_iters)
self.event_handlers_names = {
e: [
h.__qualname__ if hasattr(h, "__qualname__") else h.__class__.__name__
for (h, _, _) in engine._event_handlers[e]
if "BasicTimeProfiler." not in repr(h) # avoid adding internal handlers into output
]
for e in Events
if e not in self.events_to_ignore
}
# Setup all other handlers:
engine._event_handlers[Events.STARTED].append((self._as_last_started, (engine,), {}))
for e, m in zip(self._events, self._fmethods):
engine._event_handlers[e].insert(0, (m, (engine,), {}))
for e, m in zip(self._events, self._lmethods):
engine._event_handlers[e].append((m, (engine,), {}))
# Let's go
self._event_handlers_timer.reset()
def _as_last_started(self, engine: Engine) -> None:
self.event_handlers_times[Events.STARTED][0] = self._event_handlers_timer.value()
def _as_first_epoch_started(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_epoch_started(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
e = engine.state.epoch - 1
self.event_handlers_times[Events.EPOCH_STARTED][e] = t
def _as_first_get_batch_started(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
self._dataflow_timer.reset()
def _as_last_get_batch_started(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.GET_BATCH_STARTED][i] = t
def _as_first_get_batch_completed(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_get_batch_completed(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.GET_BATCH_COMPLETED][i] = t
d = self._dataflow_timer.value()
self.dataflow_times[i] = d
self._dataflow_timer.reset()
def _as_first_iter_started(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_iter_started(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.ITERATION_STARTED][i] = t
self._processing_timer.reset()
def _as_first_iter_completed(self, engine: Engine) -> None:
t = self._processing_timer.value()
i = engine.state.iteration - 1
self.processing_times[i] = t
self._event_handlers_timer.reset()
def _as_last_iter_completed(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.ITERATION_COMPLETED][i] = t
def _as_first_epoch_completed(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_epoch_completed(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
e = engine.state.epoch - 1
self.event_handlers_times[Events.EPOCH_COMPLETED][e] = t
def _as_first_completed(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_completed(self, engine: Engine) -> None:
self.event_handlers_times[Events.COMPLETED][0] = self._event_handlers_timer.value()
# Remove added handlers:
engine.remove_event_handler(self._as_last_started, Events.STARTED)
for e, m in zip(self._events, self._fmethods):
engine.remove_event_handler(m, e)
for e, m in zip(self._events, self._lmethods):
engine.remove_event_handler(m, e)
def attach(self, engine: Engine) -> None:
"""Attach BasicTimeProfiler to the given engine.
Args:
engine: the instance of Engine to attach
"""
if not isinstance(engine, Engine):
raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")
if not engine.has_event_handler(self._as_first_started):
engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {}))
@staticmethod
def _compute_basic_stats(data: torch.Tensor) -> Dict[str, Union[str, float, Tuple[float, float]]]:
# compute on non-zero data:
data = data[data > 0]
out: List[Tuple[str, Union[str, float, Tuple[float, float]]]] = [
("total", torch.sum(data).item() if len(data) > 0 else "not yet triggered")
]
if len(data) > 1:
out.extend(
[
("min/index", (torch.min(data).item(), torch.argmin(data).item())),
("max/index", (torch.max(data).item(), torch.argmax(data).item())),
("mean", torch.mean(data).item()),
("std", torch.std(data).item()),
]
)
return OrderedDict(out)
def get_results(self) -> Dict[str, Dict[str, Any]]:
"""
Method to fetch the aggregated profiler results after the engine is run
.. code-block:: python
results = profiler.get_results()
"""
total_eh_time: Union[int, torch.Tensor] = sum(
[(self.event_handlers_times[e]).sum() for e in Events if e not in self.events_to_ignore]
)
event_handlers_stats = dict(
[
(str(e.name).replace(".", "_"), self._compute_basic_stats(self.event_handlers_times[e]))
for e in Events
if e not in self.events_to_ignore
]
+ [("total_time", total_eh_time)]
)
return OrderedDict(
[
("processing_stats", self._compute_basic_stats(self.processing_times)),
("dataflow_stats", self._compute_basic_stats(self.dataflow_times)),
("event_handlers_stats", event_handlers_stats),
(
"event_handlers_names",
{str(e.name).replace(".", "_") + "_names": v for e, v in self.event_handlers_names.items()},
),
]
)
def write_results(self, output_path: str) -> None:
"""
Method to store the unaggregated profiling results to a csv file
Args:
output_path: file output path containing a filename
.. code-block:: python
profiler.write_results('path_to_dir/awesome_filename.csv')
Examples:
.. code-block:: text
-----------------------------------------------------------------
epoch iteration processing_stats dataflow_stats Event_STARTED ...
1.0 1.0 0.00003 0.252387 0.125676
1.0 2.0 0.00029 0.252342 0.125123
"""
try:
import pandas as pd
except ImportError:
raise ModuleNotFoundError("Need pandas to write results as files")
iters_per_epoch = self.total_num_iters // self.max_epochs
epochs = torch.arange(self.max_epochs, dtype=torch.float32).repeat_interleave(iters_per_epoch) + 1
iterations = torch.arange(self.total_num_iters, dtype=torch.float32) + 1
processing_stats = self.processing_times
dataflow_stats = self.dataflow_times
event_started = self.event_handlers_times[Events.STARTED].repeat_interleave(self.total_num_iters)
event_completed = self.event_handlers_times[Events.COMPLETED].repeat_interleave(self.total_num_iters)
event_epoch_started = self.event_handlers_times[Events.EPOCH_STARTED].repeat_interleave(iters_per_epoch)
event_epoch_completed = self.event_handlers_times[Events.EPOCH_COMPLETED].repeat_interleave(iters_per_epoch)
event_iter_started = self.event_handlers_times[Events.ITERATION_STARTED]
event_iter_completed = self.event_handlers_times[Events.ITERATION_COMPLETED]
event_batch_started = self.event_handlers_times[Events.GET_BATCH_STARTED]
event_batch_completed = self.event_handlers_times[Events.GET_BATCH_COMPLETED]
results_dump = torch.stack(
[
epochs,
iterations,
processing_stats,
dataflow_stats,
event_started,
event_completed,
event_epoch_started,
event_epoch_completed,
event_iter_started,
event_iter_completed,
event_batch_started,
event_batch_completed,
],
dim=1,
).numpy()
results_df = pd.DataFrame(
data=results_dump,
columns=[
"epoch",
"iteration",
"processing_stats",
"dataflow_stats",
"Event_STARTED",
"Event_COMPLETED",
"Event_EPOCH_STARTED",
"Event_EPOCH_COMPLETED",
"Event_ITERATION_STARTED",
"Event_ITERATION_COMPLETED",
"Event_GET_BATCH_STARTED",
"Event_GET_BATCH_COMPLETED",
],
)
results_df.to_csv(output_path, index=False)
@staticmethod
def print_results(results: Dict) -> str:
"""
Method to print the aggregated results from the profiler
Args:
results: the aggregated results from the profiler
.. code-block:: python
profiler.print_results(results)
Examples:
.. code-block:: text
----------------------------------------------------
| Time profiling stats (in seconds): |
----------------------------------------------------
total | min/index | max/index | mean | std
Processing function:
157.46292 | 0.01452/1501 | 0.26905/0 | 0.07730 | 0.01258
Dataflow:
6.11384 | 0.00008/1935 | 0.28461/1551 | 0.00300 | 0.02693
Event handlers:
2.82721
- Events.STARTED: []
0.00000
- Events.EPOCH_STARTED: []
0.00006 | 0.00000/0 | 0.00000/17 | 0.00000 | 0.00000
- Events.ITERATION_STARTED: ['PiecewiseLinear']
0.03482 | 0.00001/188 | 0.00018/679 | 0.00002 | 0.00001
- Events.ITERATION_COMPLETED: ['TerminateOnNan']
0.20037 | 0.00006/866 | 0.00089/1943 | 0.00010 | 0.00003
- Events.EPOCH_COMPLETED: ['empty_cuda_cache', 'training.<locals>.log_elapsed_time', ]
2.57860 | 0.11529/0 | 0.14977/13 | 0.12893 | 0.00790
- Events.COMPLETED: []
not yet triggered
"""
def to_str(v: Union[str, tuple]) -> str:
if isinstance(v, str):
return v
elif isinstance(v, tuple):
return f"{v[0]:.5f}/{v[1]}"
return f"{v:.5f}"
def odict_to_str(d: Mapping) -> str:
out = " | ".join([to_str(v) for v in d.values()])
return out
others = {
k: odict_to_str(v) if isinstance(v, OrderedDict) else v for k, v in results["event_handlers_stats"].items()
}
others.update(results["event_handlers_names"])
output_message = """
----------------------------------------------------
| Time profiling stats (in seconds): |
----------------------------------------------------
total | min/index | max/index | mean | std
Processing function:
{processing_stats}
Dataflow:
{dataflow_stats}
Event handlers:
{total_time:.5f}
- Events.STARTED: {STARTED_names}
{STARTED}
- Events.EPOCH_STARTED: {EPOCH_STARTED_names}
{EPOCH_STARTED}
- Events.ITERATION_STARTED: {ITERATION_STARTED_names}
{ITERATION_STARTED}
- Events.ITERATION_COMPLETED: {ITERATION_COMPLETED_names}
{ITERATION_COMPLETED}
- Events.EPOCH_COMPLETED: {EPOCH_COMPLETED_names}
{EPOCH_COMPLETED}
- Events.COMPLETED: {COMPLETED_names}
{COMPLETED}
""".format(
processing_stats=odict_to_str(results["processing_stats"]),
dataflow_stats=odict_to_str(results["dataflow_stats"]),
**others,
)
print(output_message)
return output_message
class HandlersTimeProfiler:
"""
HandlersTimeProfiler can be used to profile the handlers,
data loading and data processing times. Custom events are also
profiled by this profiler
Examples:
.. code-block:: python
from ignite.handlers import HandlersTimeProfiler
trainer = Engine(train_updater)
# Create an object of the profiler and attach an engine to it
profiler = HandlersTimeProfiler()
profiler.attach(trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def log_intermediate_results():
profiler.print_results(profiler.get_results())
trainer.run(dataloader, max_epochs=3)
profiler.write_results('path_to_dir/time_profiling.csv')
.. versionadded:: 0.4.6
"""
EVENT_FILTER_THESHOLD_TIME = 0.0001
def __init__(self) -> None:
self._dataflow_timer = Timer()
self._processing_timer = Timer()
self._event_handlers_timer = Timer()
self.dataflow_times: List[float] = []
self.processing_times: List[float] = []
self.event_handlers_times: Dict[EventEnum, Dict[str, List[float]]] = {}
@staticmethod
def _get_callable_name(handler: Callable) -> str:
# get name of the callable handler
return getattr(handler, "__qualname__", handler.__class__.__name__)
def _create_wrapped_handler(self, handler: Callable, event: EventEnum) -> Callable:
@functools.wraps(handler)
def _timeit_handler(*args: Any, **kwargs: Any) -> None:
self._event_handlers_timer.reset()
handler(*args, **kwargs)
t = self._event_handlers_timer.value()
hname = self._get_callable_name(handler)
# filter profiled time if the handler was attached to event with event filter
if not hasattr(handler, "_parent") or t >= self.EVENT_FILTER_THESHOLD_TIME:
self.event_handlers_times[event][hname].append(t)
# required to revert back to original handler after profiling
setattr(_timeit_handler, "_profiler_original", handler)
return _timeit_handler
def _timeit_processing(self) -> None:
# handler used for profiling processing times
t = self._processing_timer.value()
self.processing_times.append(t)
def _timeit_dataflow(self) -> None:
# handler used for profiling dataflow times
t = self._dataflow_timer.value()
self.dataflow_times.append(t)
def _reset(self, event_handlers_names: Mapping[EventEnum, List[str]]) -> None:
# reset the variables used for profiling
self.dataflow_times = []
self.processing_times = []
self.event_handlers_times = {e: {h: [] for h in event_handlers_names[e]} for e in event_handlers_names}
@staticmethod
def _is_internal_handler(handler: Callable) -> bool:
# checks whether the handler is internal
return any(n in repr(handler) for n in ["HandlersTimeProfiler.", "Timer."])
def _detach_profiler_handlers(self, engine: Engine) -> None:
# reverts handlers to original handlers
for e in engine._event_handlers:
for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]):
if hasattr(func, "_profiler_original"):
engine._event_handlers[e][i] = (func._profiler_original, args, kwargs)
def _as_first_started(self, engine: Engine) -> None:
# wraps original handlers for profiling
self.event_handlers_names = {
e: [
self._get_callable_name(h)
for (h, _, _) in engine._event_handlers[e]
if not self._is_internal_handler(h)
]
for e in engine._allowed_events
}
self._reset(self.event_handlers_names)
for e in engine._allowed_events:
for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]):
if not self._is_internal_handler(func):
engine._event_handlers[e][i] = (self._create_wrapped_handler(func, e), args, kwargs)
# processing timer
engine.add_event_handler(Events.ITERATION_STARTED, self._processing_timer.reset)
engine._event_handlers[Events.ITERATION_COMPLETED].insert(0, (self._timeit_processing, (), {}))
# dataflow timer
engine.add_event_handler(Events.GET_BATCH_STARTED, self._dataflow_timer.reset)
engine._event_handlers[Events.GET_BATCH_COMPLETED].insert(0, (self._timeit_dataflow, (), {}))
# revert back the wrapped handlers with original handlers at the end
engine.add_event_handler(Events.COMPLETED, self._detach_profiler_handlers)
def attach(self, engine: Engine) -> None:
"""Attach HandlersTimeProfiler to the given engine.
Args:
engine: the instance of Engine to attach
"""
if not isinstance(engine, Engine):
raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")
if not engine.has_event_handler(self._as_first_started):
engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {}))
def get_results(self) -> List[List[Union[str, float, Tuple[Union[str, float], Union[str, float]]]]]:
"""
Method to fetch the aggregated profiler results after the engine is run
.. code-block:: python
results = profiler.get_results()
"""
total_eh_time = sum(
[
sum(self.event_handlers_times[e][h])
for e in self.event_handlers_times
for h in self.event_handlers_times[e]
]
)
total_eh_time = round(float(total_eh_time), 5)
def compute_basic_stats(
times: Union[Sequence, torch.Tensor]
) -> List[Union[str, float, Tuple[Union[str, float], Union[str, float]]]]:
data = torch.as_tensor(times, dtype=torch.float32)
# compute on non-zero data:
data = data[data > 0]
total: Union[str, float] = round(torch.sum(data).item(), 5) if len(data) > 0 else "not triggered"
min_index: Tuple[Union[str, float], Union[str, float]] = ("None", "None")
max_index: Tuple[Union[str, float], Union[str, float]] = ("None", "None")
mean: Union[str, float] = "None"
std: Union[str, float] = "None"
if len(data) > 0:
min_index = (round(torch.min(data).item(), 5), torch.argmin(data).item())
max_index = (round(torch.max(data).item(), 5), torch.argmax(data).item())
mean = round(torch.mean(data).item(), 5)
if len(data) > 1:
std = round(torch.std(data).item(), 5)
return [total, min_index, max_index, mean, std]
event_handler_stats = [
[
h,
getattr(e, "name", str(e)),
*compute_basic_stats(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32)),
]
for e in self.event_handlers_times
for h in self.event_handlers_times[e]
]
event_handler_stats.append(["Total", "", total_eh_time, "", "", "", ""])
event_handler_stats.append(["Processing", "None", *compute_basic_stats(self.processing_times)])
event_handler_stats.append(["Dataflow", "None", *compute_basic_stats(self.dataflow_times)])
return event_handler_stats
def write_results(self, output_path: str) -> None:
"""
Method to store the unaggregated profiling results to a csv file
Args:
output_path: file output path containing a filename
.. code-block:: python
profiler.write_results('path_to_dir/awesome_filename.csv')
Examples:
.. code-block:: text
-----------------------------------------------------------------
# processing_stats dataflow_stats training.<locals>.log_elapsed_time (EPOCH_COMPLETED) ...
1 0.00003 0.252387 0.125676
2 0.00029 0.252342 0.125123
"""
try:
import pandas as pd
except ImportError:
raise ModuleNotFoundError("Need pandas to write results as files")
processing_stats = torch.tensor(self.processing_times, dtype=torch.float32)
dataflow_stats = torch.tensor(self.dataflow_times, dtype=torch.float32)
cols = [processing_stats, dataflow_stats]
headers = ["processing_stats", "dataflow_stats"]
for e in self.event_handlers_times:
for h in self.event_handlers_times[e]:
headers.append(f"{h} ({getattr(e, 'name', str(e))})")
cols.append(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32))
# Determine maximum length
max_len = max([x.numel() for x in cols])
count_col = torch.arange(max_len, dtype=torch.float32) + 1
cols.insert(0, count_col)
headers.insert(0, "#")
# pad all tensors to have same length
cols = [torch.nn.functional.pad(x, pad=(0, max_len - x.numel()), mode="constant", value=0) for x in cols]
results_dump = torch.stack(cols, dim=1).numpy()
results_df = pd.DataFrame(data=results_dump, columns=headers)
results_df.to_csv(output_path, index=False)
@staticmethod
def print_results(results: List[List[Union[str, float]]]) -> None:
"""
Method to print the aggregated results from the profiler
Args:
results: the aggregated results from the profiler
.. code-block:: python
profiler.print_results(results)
Examples:
.. code-block:: text
----------------------------------------- ----------------------- -------------- ...
Handler Event Name Total(s)
----------------------------------------- ----------------------- --------------
run.<locals>.log_training_results EPOCH_COMPLETED 19.43245
run.<locals>.log_validation_results EPOCH_COMPLETED 2.55271
run.<locals>.log_time EPOCH_COMPLETED 0.00049
run.<locals>.log_intermediate_results EPOCH_COMPLETED 0.00106
run.<locals>.log_training_loss ITERATION_COMPLETED 0.059
run.<locals>.log_time COMPLETED not triggered
----------------------------------------- ----------------------- --------------
Total 22.04571
----------------------------------------- ----------------------- --------------
Processing took total 11.29543s [min/index: 0.00393s/1875, max/index: 0.00784s/0,
mean: 0.00602s, std: 0.00034s]
Dataflow took total 16.24365s [min/index: 0.00533s/1874, max/index: 0.01129s/937,
mean: 0.00866s, std: 0.00113s]
"""
# adopted implementation of torch.autograd.profiler.build_table
handler_column_width = max([len(item[0]) for item in results]) + 4 # type: ignore[arg-type]
event_column_width = max([len(item[1]) for item in results]) + 4 # type: ignore[arg-type]
DEFAULT_COLUMN_WIDTH = 14
headers = [
"Handler",
"Event Name",
"Total(s)",
"Min(s)/IDX",
"Max(s)/IDX",
"Mean(s)",
"Std(s)",
]
# Have to use a list because nonlocal is Py3 only...
SPACING_SIZE = 2
row_format_lst = [""]
header_sep_lst = [""]
line_length_lst = [-SPACING_SIZE]
def add_column(padding: int, text_dir: str = ">") -> None:
row_format_lst[0] += "{: " + text_dir + str(padding) + "}" + (" " * SPACING_SIZE)
header_sep_lst[0] += "-" * padding + (" " * SPACING_SIZE)
line_length_lst[0] += padding + SPACING_SIZE
add_column(handler_column_width, text_dir="<")
add_column(event_column_width, text_dir="<")
for _ in headers[2:]:
add_column(DEFAULT_COLUMN_WIDTH)
row_format = row_format_lst[0]
header_sep = header_sep_lst[0]
result = []
def append(s: str) -> None:
result.append(s)
result.append("\n")
result.append("\n")
append(header_sep)
append(row_format.format(*headers))
append(header_sep)
for row in results[:-3]:
# format min/idx and max/idx
row[3] = "{}/{}".format(*row[3]) # type: ignore[misc]
row[4] = "{}/{}".format(*row[4]) # type: ignore[misc]
append(row_format.format(*row))
append(header_sep)
# print total handlers time row
append(row_format.format(*results[-3]))
append(header_sep)
summary_format = "{} took total {}s [min/index: {}, max/index: {}, mean: {}s, std: {}s]"
for row in results[-2:]:
row[3] = "{}s/{}".format(*row[3]) # type: ignore[misc]
row[4] = "{}s/{}".format(*row[4]) # type: ignore[misc]
del row[1]
append(summary_format.format(*row))
print("".join(result))
|
import numbers
import warnings
from bisect import bisect_right
from typing import Any, List, Sequence, Tuple, Union
from ignite.engine import CallableEventWithFilter, Engine, Events, EventsList
from ignite.handlers.param_scheduler import BaseParamScheduler
class StateParamScheduler(BaseParamScheduler):
"""An abstract class for updating an engine state parameter values during training.
Args:
param_name: name of parameter to update.
save_history: whether to log the parameter values to ``engine.state.param_history``, (default=False).
create_new: whether to create ``param_name`` on ``engine.state`` taking into account whether ``param_name``
attribute already exists or not. Overrides existing attribute by default, (default=False).
Note:
Parameter scheduler works independently of the internal state of the attached engine.
More precisely, whatever the state of the engine (newly created or used by another scheduler) the scheduler
sets defined absolute values.
.. versionadded:: 0.4.7
"""
def __init__(self, param_name: str, save_history: bool = False, create_new: bool = False):
super(StateParamScheduler, self).__init__(param_name, save_history)
self.create_new = create_new
def attach(
self,
engine: Engine,
event: Union[str, Events, CallableEventWithFilter, EventsList] = Events.ITERATION_COMPLETED,
) -> None:
"""Attach the handler to the engine. Once the handler is attached, the ``Engine.state`` will have a new
attribute with the name ``param_name``. Then the current value of the parameter can be retrieved from
``Engine.state`` when the engine is running.
Args:
engine: trainer to which the handler will be attached.
event: trigger ``param_name`` value update.
"""
if hasattr(engine.state, self.param_name):
if self.create_new:
raise ValueError(
f"Attribute '{self.param_name}' already exists in the engine.state. "
f"This may be a conflict between multiple handlers. "
f"Please choose another name."
)
else:
if not self.create_new:
warnings.warn(
f"Attribute '{self.param_name}' is not defined in the engine.state. "
f"{type(self).__name__} will create it. Remove this warning by setting create_new=True."
)
setattr(engine.state, self.param_name, None)
if self.save_history:
if not hasattr(engine.state, "param_history") or engine.state.param_history is None:
setattr(engine.state, "param_history", {})
engine.state.param_history.setdefault(self.param_name, []) # type: ignore[attr-defined]
engine.add_event_handler(event, self)
def __call__(self, engine: Engine) -> None:
self.event_index += 1
value = self.get_param()
setattr(engine.state, self.param_name, value)
if self.save_history:
engine.state.param_history[self.param_name].append(value) # type: ignore[attr-defined]
@classmethod
def simulate_values(cls, num_events: int, **scheduler_kwargs: Any) -> List[List[int]]:
"""Method to simulate scheduled engine state parameter values during `num_events` events.
Args:
num_events: number of events during the simulation.
scheduler_kwargs: parameter scheduler configuration kwargs.
Returns:
event_index, value
Examples:
.. code-block:: python
import matplotlib.pyplot as plt
import numpy as np
step_state_param_values = np.array(
StepStateScheduler.simulate_values(
num_events=20, param_name="step_scheduled_param", initial_value=10, gamma=0.99, step_size=5
)
)
plt.plot(step_state_param_values[:, 0], step_state_param_values[:, 1], label="learning rate")
plt.xlabel("events")
plt.ylabel("values")
plt.legend()
"""
for key in ["save_history"]:
if key in scheduler_kwargs:
del scheduler_kwargs[key]
values = []
scheduler = cls(save_history=False, **scheduler_kwargs)
engine = Engine(lambda e, b: None)
for i in range(num_events):
scheduler(engine=engine)
values.append([i, getattr(engine.state, scheduler_kwargs["param_name"])])
return values
class LambdaStateScheduler(StateParamScheduler):
"""Update a parameter during training by using a user defined callable object.
User defined callable object is taking an event index as input and returns parameter value.
Args:
lambda_obj: user defined callable object.
param_name: name of parameter to update.
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
create_new: whether to create ``param_name`` on
``engine.state`` taking into account whether
``param_name`` attribute already exists or not.
Overrides existing attribute by default, (default=False).
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
default_trainer = get_default_trainer()
class LambdaState:
def __init__(self, initial_value, gamma):
self.initial_value = initial_value
self.gamma = gamma
def __call__(self, event_index):
return self.initial_value * self.gamma ** (event_index % 9)
param_scheduler = LambdaStateScheduler(
param_name="param", lambda_obj=LambdaState(1, 0.9), create_new=True
)
# parameter is param, initial_value sets param to 1 and in this example gamma = 1
# using class 'LambdaState' user defined callable object can be created
# update a parameter during training by using a user defined callable object
# user defined callable object is taking an event index as input and returns parameter value
# in this example, we update as initial_value * gamma ** (event_endex % 9)
# in every Epoch the parameter is updated as 1 * 0.9 ** (Epoch % 9)
# In Epoch 3, parameter param = 1 * 0.9 ** (3 % 9) = 0.729
# In Epoch 10, parameter param = 1 * 0.9 ** (10 % 9) = 0.9
param_scheduler.attach(default_trainer, Events.EPOCH_COMPLETED)
@default_trainer.on(Events.EPOCH_COMPLETED)
def print_param():
print(default_trainer.state.param)
default_trainer.run([0], max_epochs=10)
.. testoutput::
0.9
0.81
0.7290...
0.6561
0.5904...
0.5314...
0.4782...
0.4304...
1.0
0.9
.. versionadded:: 0.4.7
"""
def __init__(self, lambda_obj: Any, param_name: str, save_history: bool = False, create_new: bool = False):
super(LambdaStateScheduler, self).__init__(param_name, save_history, create_new)
if not callable(lambda_obj):
raise ValueError("Expected lambda_obj to be callable.")
self.lambda_obj = lambda_obj
self._state_attrs += ["lambda_obj"]
def get_param(self) -> Union[List[float], float]:
return self.lambda_obj(self.event_index)
class PiecewiseLinearStateScheduler(StateParamScheduler):
"""Piecewise linear state parameter scheduler.
Args:
milestones_values: list of tuples (event index, parameter value)
represents milestones and parameter values. Milestones should be increasing integers.
param_name: name of parameter to update.
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
create_new: whether to create ``param_name`` on
``engine.state`` taking into account whether
``param_name`` attribute already exists or not.
Overrides existing attribute by default, (default=False).
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
default_trainer = get_default_trainer()
param_scheduler = PiecewiseLinearStateScheduler(
param_name="param", milestones_values=[(5, 1.0), (10, 0.8), (15, 0.6)], create_new=True
)
# parameter is param, milestone (5, 1.0) sets param to 1.0
# milestone is (5, 1.0), param=1 for Epoch 1 to 5,
# next milestone is (10, 0.8), param linearly reduces from 1.0 to 0.8
# Epoch 10, param = 0.8
# next milestone is (15,0.6), param linearly reduces from 0.8 to 0.6
# Epoch 15, param = 0.6
param_scheduler.attach(default_trainer, Events.EPOCH_COMPLETED)
@default_trainer.on(Events.EPOCH_COMPLETED)
def print_param():
print(default_trainer.state.param)
default_trainer.run([0], max_epochs=15)
.. testoutput::
1.0
1.0
1.0
1.0
1.0
0.96
0.92
0.88
0.8400...
0.8
0.76
0.72
0.68
0.64
0.6
.. versionadded:: 0.4.7
"""
def __init__(
self,
milestones_values: List[Tuple[int, float]],
param_name: str,
save_history: bool = False,
create_new: bool = False,
):
super(PiecewiseLinearStateScheduler, self).__init__(param_name, save_history, create_new)
if not isinstance(milestones_values, Sequence):
raise TypeError(
f"Argument milestones_values should be a list or tuple, but given {type(milestones_values)}"
)
if len(milestones_values) < 1:
raise ValueError(
f"Argument milestones_values should be with at least one value, but given {milestones_values}"
)
values: List[float] = []
milestones: List[int] = []
for pair in milestones_values:
if not isinstance(pair, tuple) or len(pair) != 2:
raise ValueError("Argument milestones_values should be a list of pairs (milestone, param_value)")
if not isinstance(pair[0], numbers.Integral):
raise TypeError(f"Value of a milestone should be integer, but given {type(pair[0])}")
if len(milestones) > 0 and pair[0] < milestones[-1]:
raise ValueError(
f"Milestones should be increasing integers, but given {pair[0]} is smaller "
f"than the previous milestone {milestones[-1]}"
)
milestones.append(pair[0])
values.append(pair[1])
self.values = values
self.milestones = milestones
self._index = 0
self._state_attrs += ["values", "milestones", "_index"]
def _get_start_end(self) -> Tuple[int, int, float, float]:
if self.milestones[0] > self.event_index:
return self.event_index - 1, self.event_index, self.values[0], self.values[0]
elif self.milestones[-1] <= self.event_index:
return (self.event_index, self.event_index + 1, self.values[-1], self.values[-1])
elif self.milestones[self._index] <= self.event_index < self.milestones[self._index + 1]:
return (
self.milestones[self._index],
self.milestones[self._index + 1],
self.values[self._index],
self.values[self._index + 1],
)
else:
self._index += 1
return self._get_start_end()
def get_param(self) -> Union[List[float], float]:
start_index, end_index, start_value, end_value = self._get_start_end()
return start_value + (end_value - start_value) * (self.event_index - start_index) / (end_index - start_index)
class ExpStateScheduler(StateParamScheduler):
"""Update a parameter during training by using exponential function.
The function decays the parameter value by gamma every step.
Based on the closed form of ExponentialLR from PyTorch
https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.ExponentialLR.html
Args:
initial_value: Starting value of the parameter.
gamma: Multiplicative factor of parameter value decay.
param_name: name of parameter to update.
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
create_new: whether to create ``param_name`` on
``engine.state`` taking into account whether
``param_name`` attribute already exists or not.
Overrides existing attribute by default, (default=False).
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
default_trainer = get_default_trainer()
param_scheduler = ExpStateScheduler(
param_name="param", initial_value=1, gamma=0.9, create_new=True
)
# parameter is param, initial_value sets param to 1, gamma is set as 0.9
# Epoch 1, param changes from 1 to 1*0.9, param = 0.9
# Epoch 2, param changes from 0.9 to 0.9*0.9, param = 0.81
# Epoch 3, param changes from 0.81 to 0.81*0.9, param = 0.729
# Epoch 4, param changes from 0.81 to 0.729*0.9, param = 0.6561
param_scheduler.attach(default_trainer, Events.EPOCH_COMPLETED)
@default_trainer.on(Events.EPOCH_COMPLETED)
def print_param():
print(default_trainer.state.param)
default_trainer.run([0], max_epochs=4)
.. testoutput::
0.9
0.81
0.7290...
0.6561
.. versionadded:: 0.4.7
"""
def __init__(
self, initial_value: float, gamma: float, param_name: str, save_history: bool = False, create_new: bool = False
):
super(ExpStateScheduler, self).__init__(param_name, save_history, create_new)
self.initial_value = initial_value
self.gamma = gamma
self._state_attrs += ["initial_value", "gamma"]
def get_param(self) -> Union[List[float], float]:
return self.initial_value * self.gamma**self.event_index
class StepStateScheduler(StateParamScheduler):
"""Update a parameter during training by using a step function.
This function decays the parameter value by gamma every step_size.
Based on StepLR from PyTorch.
https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.StepLR.html
Args:
initial_value: Starting value of the parameter.
gamma: Multiplicative factor of parameter value decay.
step_size: Period of parameter value decay.
param_name: name of parameter to update.
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
create_new: whether to create ``param_name`` on
``engine.state`` taking into account whether
``param_name`` attribute already exists or not.
Overrides existing attribute by default, (default=False).
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
default_trainer = get_default_trainer()
param_scheduler = StepStateScheduler(
param_name="param", initial_value=1, gamma=0.9, step_size=5, create_new=True
)
# parameter is param, initial_value sets param to 1, gamma is set as 0.9
# Epoch 1 to 4, param does not change as step size is 5,
# Epoch 5, param changes from 1 to 1*0.9, param = 0.9
# Epoch 5 to 9, param = 0.9 as step size is 5,
# Epoch 10, param changes from 0.9 to 0.9*0.9, param = 0.81
# Epoch 10 to 14, param = 0.81, as step size is 5
# Epoch 15, param changes from 0.81 to 0.81*0.9, param = 0.729
# and so on ... the param change at Epoch = 5, 10, 15, 20, . . .
param_scheduler.attach(default_trainer, Events.EPOCH_COMPLETED)
@default_trainer.on(Events.EPOCH_COMPLETED(every=5))
def print_param():
print(default_trainer.state.param)
default_trainer.run([0], max_epochs=25)
.. testoutput::
0.9
0.81
0.7290...
0.6561
0.5904...
.. versionadded:: 0.4.7
"""
def __init__(
self,
initial_value: float,
gamma: float,
step_size: int,
param_name: str,
save_history: bool = False,
create_new: bool = False,
):
super(StepStateScheduler, self).__init__(param_name, save_history, create_new)
self.initial_value = initial_value
self.gamma = gamma
self.step_size = step_size
self._state_attrs += ["initial_value", "gamma", "step_size"]
def get_param(self) -> Union[List[float], float]:
return self.initial_value * self.gamma ** (self.event_index // self.step_size)
class MultiStepStateScheduler(StateParamScheduler):
"""Update a parameter during training by using a multi step function.
The function decays the parameter value by gamma once the number of steps reaches one of the milestones.
Based on MultiStepLR from PyTorch.
https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.MultiStepLR.html
Args:
initial_value: Starting value of the parameter.
gamma: Multiplicative factor of parameter value decay.
milestones: List of step indices. Must be increasing.
param_name: name of parameter to update.
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
create_new: whether to create ``param_name`` on
``engine.state`` taking into account whether
``param_name`` attribute already exists or not.
Overrides existing attribute by default, (default=False).
Examples:
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
default_trainer = get_default_trainer()
param_scheduler = MultiStepStateScheduler(
param_name="param", initial_value=1, gamma=0.9, milestones=[3, 6, 9, 12], create_new=True
)
# parameter is param, initial_value sets param to 1, gamma is set as 0.9
# Epoch 1 to 2, param does not change as milestone is 3
# Epoch 3, param changes from 1 to 1*0.9, param = 0.9
# Epoch 3 to 5, param does not change as milestone is 6
# Epoch 6, param changes from 0.9 to 0.9*0.9, param = 0.81
# Epoch 6 to 8, param does not change as milestone is 9
# Epoch 9, param changes from 0.81 to 0.81*0.9, param = 0.729
# Epoch 9 to 11, param does not change as milestone is 12
# Epoch 12, param changes from 0.729 to 0.729*0.9, param = 0.6561
param_scheduler.attach(default_trainer, Events.EPOCH_COMPLETED)
@default_trainer.on(Events.EPOCH_COMPLETED)
def print_param():
print(default_trainer.state.param)
default_trainer.run([0], max_epochs=12)
.. testoutput::
1.0
1.0
0.9
0.9
0.9
0.81
0.81
0.81
0.7290...
0.7290...
0.7290...
0.6561
.. versionadded:: 0.4.7
"""
def __init__(
self,
initial_value: float,
gamma: float,
milestones: List[int],
param_name: str,
save_history: bool = False,
create_new: bool = False,
):
super(MultiStepStateScheduler, self).__init__(param_name, save_history, create_new)
self.initial_value = initial_value
self.gamma = gamma
self.milestones = milestones
self._state_attrs += ["initial_value", "gamma", "milestones"]
def get_param(self) -> Union[List[float], float]:
return self.initial_value * self.gamma ** bisect_right(self.milestones, self.event_index)
|
import time
from typing import Optional
from ignite.engine import Engine
__all__ = ["TimeLimit"]
from ignite.utils import setup_logger
class TimeLimit:
"""TimeLimit handler can be used to control training time for computing environments where session time is limited.
Timer starts when handler is created and not training started.
This handler gracefully terminates the training if time passed in the training exceeds a limit.
Args:
limit_sec: Maximum time before training terminates (in seconds). Defaults to 28800.
Examples:
.. code-block:: python
from ignite.engine import Events
from ignite.handlers import TimeLimit
handler = TimeLimit() # 8 hours of training
trainer.add_event_handler(Events.ITERATION_COMPLETED, handler)
.. versionadded:: 0.4.3
"""
def __init__(self, limit_sec: Optional[int] = 28800):
if not isinstance(limit_sec, int):
raise TypeError("Argument limit_sec should be an integer.")
if limit_sec <= 0:
raise ValueError("Argument limit_sec should be a positive integer.")
self.limit_sec = limit_sec
self.start_time = time.time()
self.logger = setup_logger(__name__ + "." + self.__class__.__name__)
def __call__(self, engine: Engine) -> None:
elapsed_time = time.time() - self.start_time
if elapsed_time > self.limit_sec:
self.logger.info("Reached the time limit: {} sec. Stop training".format(self.limit_sec))
engine.terminate()
|
from time import perf_counter
from typing import Any, Optional
from ignite.engine import Engine, Events
__all__ = ["Timer"]
class Timer:
"""Timer object can be used to measure (average) time between events.
Args:
average: if True, then when ``.value()`` method is called, the returned value
will be equal to total time measured, divided by the value of internal counter.
Attributes:
total (float): total time elapsed when the Timer was running (in seconds).
step_count (int): internal counter, useful to measure average time, e.g. of processing a single batch.
Incremented with the ``.step()`` method.
running (bool): flag indicating if timer is measuring time.
Note:
When using ``Timer(average=True)`` do not forget to call ``timer.step()`` every time an event occurs. See
the examples below.
Examples:
Measuring total time of the epoch:
.. code-block:: python
from ignite.handlers import Timer
import time
work = lambda : time.sleep(0.1)
idle = lambda : time.sleep(0.1)
t = Timer(average=False)
for _ in range(10):
work()
idle()
t.value()
# 2.003073937026784
Measuring average time of the epoch:
.. code-block:: python
t = Timer(average=True)
for _ in range(10):
work()
idle()
t.step()
t.value()
# 0.2003182829997968
Measuring average time it takes to execute a single ``work()`` call:
.. code-block:: python
t = Timer(average=True)
for _ in range(10):
t.resume()
work()
t.pause()
idle()
t.step()
t.value()
# 0.10016545779653825
Using the Timer to measure average time it takes to process a single batch of examples:
.. code-block:: python
from ignite.engine import Engine, Events
from ignite.handlers import Timer
trainer = Engine(training_update_function)
timer = Timer(average=True)
timer.attach(
trainer,
start=Events.STARTED,
resume=Events.ITERATION_STARTED,
pause=Events.ITERATION_COMPLETED,
step=Events.ITERATION_COMPLETED
)
"""
def __init__(self, average: bool = False):
self._average = average
self.reset()
def attach(
self,
engine: Engine,
start: Events = Events.STARTED,
pause: Events = Events.COMPLETED,
resume: Optional[Events] = None,
step: Optional[Events] = None,
) -> "Timer":
"""Register callbacks to control the timer.
Args:
engine: Engine that this timer will be attached to.
start: Event which should start (reset) the timer.
pause: Event which should pause the timer.
resume: Event which should resume the timer.
step: Event which should call the `step` method of the counter.
Returns:
this timer
"""
engine.add_event_handler(start, self.reset)
engine.add_event_handler(pause, self.pause)
if resume is not None:
engine.add_event_handler(resume, self.resume)
if step is not None:
engine.add_event_handler(step, self.step)
return self
def reset(self, *args: Any) -> "Timer":
"""Reset the timer to zero."""
self._t0 = perf_counter()
self.total = 0.0
self.step_count = 0.0
self.running = True
return self
def pause(self, *args: Any) -> None:
"""Pause the current running timer."""
if self.running:
self.total += self._elapsed()
self.running = False
def resume(self, *args: Any) -> None:
"""Resume the current running timer."""
if not self.running:
self.running = True
self._t0 = perf_counter()
def value(self) -> float:
"""Return the average timer value."""
total = self.total
if self.running:
total += self._elapsed()
if self._average:
denominator = max(self.step_count, 1.0)
else:
denominator = 1.0
return total / denominator
def step(self, *args: Any) -> None:
"""Increment the timer."""
self.step_count += 1.0
def _elapsed(self) -> float:
return perf_counter() - self._t0
|
from collections import OrderedDict
from typing import Callable, cast, Mapping, Optional
from ignite.base import Serializable
from ignite.engine import Engine
from ignite.utils import setup_logger
__all__ = ["EarlyStopping"]
class EarlyStopping(Serializable):
"""EarlyStopping handler can be used to stop the training if no improvement after a given number of events.
Args:
patience: Number of events to wait if no improvement and then stop the training.
score_function: It should be a function taking a single argument, an :class:`~ignite.engine.engine.Engine`
object, and return a score `float`. An improvement is considered if the score is higher.
trainer: Trainer engine to stop the run if no improvement.
min_delta: A minimum increase in the score to qualify as an improvement,
i.e. an increase of less than or equal to `min_delta`, will count as no improvement.
cumulative_delta: It True, `min_delta` defines an increase since the last `patience` reset, otherwise,
it defines an increase after the last event. Default value is False.
Examples:
.. code-block:: python
from ignite.engine import Engine, Events
from ignite.handlers import EarlyStopping
def score_function(engine):
val_loss = engine.state.metrics['nll']
return -val_loss
handler = EarlyStopping(patience=10, score_function=score_function, trainer=trainer)
# Note: the handler is attached to an *Evaluator* (runs one epoch on validation dataset).
evaluator.add_event_handler(Events.COMPLETED, handler)
"""
_state_dict_all_req_keys = (
"counter",
"best_score",
)
def __init__(
self,
patience: int,
score_function: Callable,
trainer: Engine,
min_delta: float = 0.0,
cumulative_delta: bool = False,
):
if not callable(score_function):
raise TypeError("Argument score_function should be a function.")
if patience < 1:
raise ValueError("Argument patience should be positive integer.")
if min_delta < 0.0:
raise ValueError("Argument min_delta should not be a negative number.")
if not isinstance(trainer, Engine):
raise TypeError("Argument trainer should be an instance of Engine.")
self.score_function = score_function
self.patience = patience
self.min_delta = min_delta
self.cumulative_delta = cumulative_delta
self.trainer = trainer
self.counter = 0
self.best_score: Optional[float] = None
self.logger = setup_logger(__name__ + "." + self.__class__.__name__)
def __call__(self, engine: Engine) -> None:
score = self.score_function(engine)
if self.best_score is None:
self.best_score = score
elif score <= self.best_score + self.min_delta:
if not self.cumulative_delta and score > self.best_score:
self.best_score = score
self.counter += 1
self.logger.debug("EarlyStopping: %i / %i" % (self.counter, self.patience))
if self.counter >= self.patience:
self.logger.info("EarlyStopping: Stop training")
self.trainer.terminate()
else:
self.best_score = score
self.counter = 0
def state_dict(self) -> "OrderedDict[str, float]":
"""Method returns state dict with ``counter`` and ``best_score``.
Can be used to save internal state of the class.
"""
return OrderedDict([("counter", self.counter), ("best_score", cast(float, self.best_score))])
def load_state_dict(self, state_dict: Mapping) -> None:
"""Method replace internal state of the class with provided state dict data.
Args:
state_dict: a dict with "counter" and "best_score" keys/values.
"""
super().load_state_dict(state_dict)
self.counter = state_dict["counter"]
self.best_score = state_dict["best_score"]
|
from collections import OrderedDict
from collections.abc import Mapping
from typing import Tuple
class Serializable:
_state_dict_all_req_keys: Tuple = ()
_state_dict_one_of_opt_keys: Tuple = ()
def state_dict(self) -> OrderedDict:
raise NotImplementedError
def load_state_dict(self, state_dict: Mapping) -> None:
if not isinstance(state_dict, Mapping):
raise TypeError(f"Argument state_dict should be a dictionary, but given {type(state_dict)}")
for k in self._state_dict_all_req_keys:
if k not in state_dict:
raise ValueError(
f"Required state attribute '{k}' is absent in provided state_dict '{state_dict.keys()}'"
)
opts = [k in state_dict for k in self._state_dict_one_of_opt_keys]
if len(opts) > 0 and ((not any(opts)) or (all(opts))):
raise ValueError(f"state_dict should contain only one of '{self._state_dict_one_of_opt_keys}' keys")
|
from ignite.base.mixins import Serializable
|
# Needed to collect coverage data
|
import logging
import sys
from collections import namedtuple
import pytest
import torch
from packaging.version import Version
from ignite.engine import Engine, Events
from ignite.utils import convert_tensor, deprecated, hash_checkpoint, setup_logger, to_onehot
def test_convert_tensor():
x = torch.tensor([0.0])
tensor = convert_tensor(x)
assert torch.is_tensor(tensor)
x = torch.tensor([0.0])
tensor = convert_tensor(x, device="cpu", non_blocking=True)
assert torch.is_tensor(tensor)
x = torch.tensor([0.0])
tensor = convert_tensor(x, device="cpu", non_blocking=False)
assert torch.is_tensor(tensor)
x = [torch.tensor([0.0]), torch.tensor([0.0])]
list_ = convert_tensor(x)
assert isinstance(list_, list)
assert torch.is_tensor(list_[0])
assert torch.is_tensor(list_[1])
x = (torch.tensor([0.0]), torch.tensor([0.0]))
tuple_ = convert_tensor(x)
assert isinstance(tuple_, tuple)
assert torch.is_tensor(tuple_[0])
assert torch.is_tensor(tuple_[1])
Point = namedtuple("Point", ["x", "y"])
x = Point(torch.tensor([0.0]), torch.tensor([0.0]))
tuple_ = convert_tensor(x)
assert isinstance(tuple_, Point)
assert torch.is_tensor(tuple_[0])
assert torch.is_tensor(tuple_[1])
x = {"a": torch.tensor([0.0]), "b": torch.tensor([0.0])}
dict_ = convert_tensor(x)
assert isinstance(dict_, dict)
assert torch.is_tensor(dict_["a"])
assert torch.is_tensor(dict_["b"])
assert convert_tensor("a") == "a"
with pytest.raises(TypeError):
convert_tensor(12345)
def test_to_onehot():
indices = torch.tensor([0, 1, 2, 3], dtype=torch.long)
actual = to_onehot(indices, 4)
expected = torch.eye(4, dtype=torch.uint8)
assert actual.equal(expected)
y = torch.randint(0, 21, size=(1000,))
y_ohe = to_onehot(y, num_classes=21)
y2 = torch.argmax(y_ohe, dim=1)
assert y.equal(y2)
y = torch.randint(0, 21, size=(4, 250, 255))
y_ohe = to_onehot(y, num_classes=21)
y2 = torch.argmax(y_ohe, dim=1)
assert y.equal(y2)
y = torch.randint(0, 21, size=(4, 150, 155, 4, 6))
y_ohe = to_onehot(y, num_classes=21)
y2 = torch.argmax(y_ohe, dim=1)
assert y.equal(y2)
# Test with `TorchScript`
x = torch.tensor([0, 1, 2, 3])
# Test the raw `to_onehot` function
scripted_to_onehot = torch.jit.script(to_onehot)
assert scripted_to_onehot(x, 4).allclose(to_onehot(x, 4))
# Test inside `torch.nn.Module`
class SLP(torch.nn.Module):
def __init__(self):
super(SLP, self).__init__()
self.linear = torch.nn.Linear(4, 1)
def forward(self, x):
x = to_onehot(x, 4)
return self.linear(x.to(torch.float))
eager_model = SLP()
scripted_model = torch.jit.script(eager_model)
assert eager_model(x).allclose(scripted_model(x))
def test_dist_setup_logger():
logger = setup_logger("trainer", level=logging.CRITICAL, distributed_rank=1)
assert logger.level != logging.CRITICAL
def test_setup_logger(capsys, dirname):
trainer = Engine(lambda e, b: None)
evaluator = Engine(lambda e, b: None)
assert len(trainer.logger.handlers) == 0
trainer.logger.addHandler(logging.NullHandler())
trainer.logger.addHandler(logging.NullHandler())
trainer.logger.addHandler(logging.NullHandler())
fp = dirname / "log"
def _test(stream):
trainer.logger = setup_logger("trainer", stream=stream, filepath=fp, reset=True)
evaluator.logger = setup_logger("evaluator", stream=stream, filepath=fp, reset=True)
assert len(trainer.logger.handlers) == 2
assert len(evaluator.logger.handlers) == 2
@trainer.on(Events.EPOCH_COMPLETED)
def _(_):
evaluator.run([0, 1, 2])
trainer.run([0, 1, 2, 3, 4, 5], max_epochs=5)
captured = capsys.readouterr()
if stream is sys.stdout:
err = captured.out.split("\n")
else:
err = captured.err.split("\n")
with open(fp, "r") as h:
data = h.readlines()
for source in [err, data]:
assert "trainer INFO: Engine run starting with max_epochs=5." in source[0]
assert "evaluator INFO: Engine run starting with max_epochs=1." in source[1]
_test(stream=None)
_test(stream=sys.stderr)
_test(stream=sys.stdout)
# Needed by windows to release FileHandler in the loggers
logging.shutdown()
def _setup_a_logger_and_dump(name, message):
logger = setup_logger(name)
logger.info(message)
def test_override_setup_logger(capsys):
_setup_a_logger_and_dump(__name__, "test_override_setup_logger")
source = capsys.readouterr().err.split("\n")
assert "tests.ignite.test_utils INFO: test_override_setup_logger" in source[0]
# change the logger level of _setup_a_logger_and_dump
setup_logger(name=__name__, level=logging.WARNING, reset=True)
_setup_a_logger_and_dump(__name__, "test_override_setup_logger")
source = capsys.readouterr().err.split("\n")
assert source[0] == ""
# Needed by windows to release FileHandler in the loggers
logging.shutdown()
def test_deprecated():
# Test on function without docs, @deprecated without reasons
@deprecated("0.4.2", "0.6.0")
def func_no_docs():
return 24
assert func_no_docs.__doc__ == "**Deprecated function**.\n\n .. deprecated:: 0.4.2"
# Test on function with docs, @deprecated without reasons
@deprecated("0.4.2", "0.6.0")
def func_no_reasons():
"""Docs are cool"""
return 24
assert func_no_reasons.__doc__ == "**Deprecated function**.\n\n Docs are cool.. deprecated:: 0.4.2"
# Test on function with docs, @deprecated with reasons
@deprecated("0.4.2", "0.6.0", reasons=("r1", "r2"))
def func_no_warnings():
"""Docs are very cool"""
return 24
assert (
func_no_warnings.__doc__
== "**Deprecated function**.\n\n Docs are very cool.. deprecated:: 0.4.2\n\n\t\n\t- r1\n\t- r2"
)
# Tests that the function emits DeprecationWarning
@deprecated("0.4.2", "0.6.0", reasons=("r1", "r2"))
def func_check_warning():
"""Docs are very ..."""
return 24
with pytest.deprecated_call():
assert func_check_warning() == 24
with pytest.warns(
DeprecationWarning,
match="This function has been deprecated since version 0.4.2 and will be removed in version 0.6.0."
+ "\n Please refer to the documentation for more details.",
):
# Trigger a warning.
func_check_warning()
# Test that the function raises Exception
@deprecated("0.4.2", "0.6.0", reasons=("reason1", "reason2"), raise_exception=True)
def func_with_everything():
return 1
with pytest.raises(Exception) as exec_info:
func_with_everything()
assert (
str(exec_info.value)
== "This function has been deprecated since version 0.4.2 and will be removed in version 0.6.0."
+ "\n Please refer to the documentation for more details."
)
def test_smoke__utils():
from ignite._utils import apply_to_tensor, apply_to_type, convert_tensor, to_onehot # noqa: F401
@pytest.mark.skipif(Version(torch.__version__) < Version("1.5.0"), reason="Skip if < 1.5.0")
def test_hash_checkpoint(tmp_path):
# download lightweight model
from torchvision.models import squeezenet1_0
model = squeezenet1_0()
torch.hub.download_url_to_file(
"https://download.pytorch.org/models/squeezenet1_0-b66bff10.pth", f"{tmp_path}/squeezenet1_0.pt"
)
hash_checkpoint_path, sha_hash = hash_checkpoint(f"{tmp_path}/squeezenet1_0.pt", str(tmp_path))
model.load_state_dict(torch.load(str(hash_checkpoint_path), "cpu"), True)
assert sha_hash[:8] == "b66bff10"
assert hash_checkpoint_path.name == f"squeezenet1_0-{sha_hash[:8]}.pt"
# test non-existent checkpoint_path
with pytest.raises(FileNotFoundError, match=r"not_found.pt does not exist in *"):
hash_checkpoint(f"{tmp_path}/not_found.pt", tmp_path)
|
import functools
import os
import shutil
import sys
import tempfile
import time
from pathlib import Path
import pytest
import torch
import torch.distributed as dist
import ignite.distributed as idist
@pytest.fixture(
params=[
"cpu",
pytest.param("cuda", marks=pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no CUDA support")),
]
)
def available_device(request):
return request.param
@pytest.fixture()
def dirname():
path = Path(tempfile.mkdtemp())
yield path
shutil.rmtree(path)
@pytest.fixture()
def get_fixed_dirname(worker_id):
# multi-proc friendly fixed tmp dirname
path = "/tmp/fixed_tmp_dirname_"
lrank = int(worker_id.replace("gw", "")) if "gw" in worker_id else 0
def getter(name="test"):
nonlocal path
path += name
time.sleep(0.5 * lrank)
os.makedirs(path, exist_ok=True)
return path
yield getter
time.sleep(1.0 * lrank + 1.0)
if Path(path).exists():
shutil.rmtree(path)
# sort of sync
time.sleep(1.0)
@pytest.fixture()
def get_rank_zero_dirname(dirname):
def func():
import ignite.distributed as idist
zero_rank_dirname = Path(idist.all_gather(str(dirname))[0])
return zero_rank_dirname
yield func
@pytest.fixture(scope="module")
def local_rank(worker_id):
"""use a different account in each xdist worker"""
if "gw" in worker_id:
lrank = int(worker_id.replace("gw", ""))
elif "master" == worker_id:
lrank = 0
else:
raise RuntimeError(f"Can not get rank from worker_id={worker_id}")
os.environ["LOCAL_RANK"] = f"{lrank}"
yield lrank
del os.environ["LOCAL_RANK"]
@pytest.fixture(scope="module")
def world_size():
remove_env_var = False
if "WORLD_SIZE" not in os.environ:
os.environ["WORLD_SIZE"] = "1"
remove_env_var = True
yield int(os.environ["WORLD_SIZE"])
if remove_env_var:
del os.environ["WORLD_SIZE"]
@pytest.fixture()
def clean_env():
for k in ["RANK", "LOCAL_RANK", "WORLD_SIZE"]:
if k in os.environ:
del os.environ[k]
def _create_dist_context(dist_info, lrank):
dist.init_process_group(**dist_info)
dist.barrier()
if torch.cuda.is_available():
torch.cuda.set_device(lrank)
return {"local_rank": lrank, "world_size": dist_info["world_size"], "rank": dist_info["rank"]}
def _destroy_dist_context():
if dist.get_rank() == 0:
# To support Python 3.7; Otherwise we could do `.unlink(missing_ok=True)`
try:
Path("/tmp/free_port").unlink()
except FileNotFoundError:
pass
dist.barrier()
dist.destroy_process_group()
from ignite.distributed.utils import _SerialModel, _set_model
# We need to set synced model to initial state
_set_model(_SerialModel())
def _find_free_port():
# Taken from https://github.com/facebookresearch/detectron2/blob/master/detectron2/engine/launch.py
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 0))
port = sock.getsockname()[1]
sock.close()
return port
def _setup_free_port(local_rank):
port_file = "/tmp/free_port"
if local_rank == 0:
port = _find_free_port()
with open(port_file, "w") as h:
h.write(str(port))
return port
else:
counter = 10
while counter > 0:
counter -= 1
time.sleep(1)
if not Path(port_file).exists():
continue
with open(port_file, "r") as h:
port = h.readline()
return int(port)
raise RuntimeError(f"Failed to fetch free port on local rank {local_rank}")
@pytest.fixture()
def distributed_context_single_node_nccl(local_rank, world_size):
free_port = _setup_free_port(local_rank)
dist_info = {
"backend": "nccl",
"world_size": world_size,
"rank": local_rank,
"init_method": f"tcp://localhost:{free_port}",
}
yield _create_dist_context(dist_info, local_rank)
_destroy_dist_context()
@pytest.fixture()
def distributed_context_single_node_gloo(local_rank, world_size):
from datetime import timedelta
if sys.platform.startswith("win"):
temp_file = tempfile.NamedTemporaryFile(delete=False)
# can't use backslashes in f-strings
backslash = "\\"
init_method = f'file:///{temp_file.name.replace(backslash, "/")}'
else:
free_port = _setup_free_port(local_rank)
init_method = f"tcp://localhost:{free_port}"
temp_file = None
dist_info = {
"backend": "gloo",
"world_size": world_size,
"rank": local_rank,
"init_method": init_method,
"timeout": timedelta(seconds=60),
}
yield _create_dist_context(dist_info, local_rank)
_destroy_dist_context()
if temp_file:
temp_file.close()
@pytest.fixture()
def multi_node_conf(local_rank):
assert "node_id" in os.environ
assert "nnodes" in os.environ
assert "nproc_per_node" in os.environ
node_id = int(os.environ["node_id"])
nnodes = int(os.environ["nnodes"])
nproc_per_node = int(os.environ["nproc_per_node"])
out = {
"world_size": nnodes * nproc_per_node,
"rank": local_rank + node_id * nproc_per_node,
"local_rank": local_rank,
}
return out
def _create_mnodes_dist_context(dist_info, mnodes_conf):
dist.init_process_group(**dist_info)
dist.barrier()
if torch.cuda.is_available():
torch.cuda.device(mnodes_conf["local_rank"])
return mnodes_conf
def _destroy_mnodes_dist_context():
dist.barrier()
dist.destroy_process_group()
from ignite.distributed.utils import _SerialModel, _set_model
# We need to set synced model to initial state
_set_model(_SerialModel())
@pytest.fixture()
def distributed_context_multi_node_gloo(multi_node_conf):
assert "MASTER_ADDR" in os.environ
assert "MASTER_PORT" in os.environ
dist_info = {
"backend": "gloo",
"init_method": "env://",
"world_size": multi_node_conf["world_size"],
"rank": multi_node_conf["rank"],
}
yield _create_mnodes_dist_context(dist_info, multi_node_conf)
_destroy_mnodes_dist_context()
@pytest.fixture()
def distributed_context_multi_node_nccl(multi_node_conf):
assert "MASTER_ADDR" in os.environ
assert "MASTER_PORT" in os.environ
os.environ["MASTER_PORT"] = str(int(os.getenv("MASTER_PORT")) + 1)
dist_info = {
"backend": "nccl",
"init_method": "env://",
"world_size": multi_node_conf["world_size"],
"rank": multi_node_conf["rank"],
}
yield _create_mnodes_dist_context(dist_info, multi_node_conf)
_destroy_mnodes_dist_context()
def _xla_template_worker_task(index, fn, args):
import torch_xla.core.xla_model as xm
xm.rendezvous("init")
fn(index, *args)
def _xla_execute(fn, args, nprocs):
import torch_xla.distributed.xla_multiprocessing as xmp
spawn_kwargs = {}
if "COLAB_TPU_ADDR" in os.environ:
spawn_kwargs["start_method"] = "fork"
try:
xmp.spawn(_xla_template_worker_task, args=(fn, args), nprocs=nprocs, **spawn_kwargs)
except SystemExit as ex_:
assert ex_.code == 0, "Didn't successfully exit in XLA test"
@pytest.fixture()
def xmp_executor():
yield _xla_execute
@pytest.fixture()
def mock_gpu_is_not_available():
from unittest.mock import patch
with patch("torch.cuda") as mock_cuda:
mock_cuda.is_available.return_value = False
yield mock_cuda
def _hvd_task_with_init(func, args):
import horovod.torch as hvd
hvd.init()
lrank = hvd.local_rank()
if torch.cuda.is_available():
torch.cuda.set_device(lrank)
func(*args)
# Added a sleep to avoid flaky failures on circle ci
# Sometimes a rank is terminated before final collective
# op is finished.
# https://github.com/pytorch/ignite/pull/2357
time.sleep(2)
hvd.shutdown()
def _gloo_hvd_execute(func, args, np=1, do_init=False):
try:
# old API
from horovod.run.runner import run
except ImportError:
# new API: https://github.com/horovod/horovod/pull/2099
from horovod import run
kwargs = dict(use_gloo=True, num_proc=np)
if do_init:
return run(_hvd_task_with_init, args=(func, args), **kwargs)
return run(func, args=args, **kwargs)
@pytest.fixture()
def gloo_hvd_executor():
yield _gloo_hvd_execute
skip_if_no_gpu = pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
skip_if_has_not_native_dist_support = pytest.mark.skipif(
not idist.has_native_dist_support, reason="Skip if no native dist support"
)
skip_if_has_not_xla_support = pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
skip_if_has_not_horovod_support = pytest.mark.skipif(
not idist.has_hvd_support, reason="Skip if no Horovod dist support"
)
# Unlike other backends, Horovod and multi-process XLA run user code by
# providing a utility function which accepts user code as a callable argument.
# To keep distributed tests backend-agnostic, we mark Horovod and multi-process XLA
# tests during fixture preparation and replace their function with the proper one
# just before running the test. PyTest stash is a safe way to share state between
# different stages of tool runtime and we use it to mark the tests.
is_horovod_stash_key = pytest.StashKey[bool]()
is_xla_stash_key = pytest.StashKey[bool]()
is_xla_single_device_stash_key = pytest.StashKey[bool]()
@pytest.fixture(
params=[
pytest.param("nccl", marks=[pytest.mark.distributed, skip_if_has_not_native_dist_support, skip_if_no_gpu]),
pytest.param("gloo_cpu", marks=[pytest.mark.distributed, skip_if_has_not_native_dist_support]),
pytest.param("gloo", marks=[pytest.mark.distributed, skip_if_has_not_native_dist_support, skip_if_no_gpu]),
pytest.param(
"horovod",
marks=[
pytest.mark.distributed,
skip_if_has_not_horovod_support,
pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc"),
],
),
pytest.param(
"single_device_xla",
marks=[
pytest.mark.tpu,
skip_if_has_not_xla_support,
pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars"),
],
),
pytest.param(
"xla_nprocs",
marks=[
pytest.mark.tpu,
skip_if_has_not_xla_support,
pytest.mark.skipif(
"NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars"
),
],
),
],
)
def distributed(request, local_rank, world_size):
if request.param in ("nccl", "gloo_cpu", "gloo"):
if "gloo" in request.param and sys.platform.startswith("win"):
temp_file = tempfile.NamedTemporaryFile(delete=False)
# can't use backslashes in f-strings
backslash = "\\"
init_method = f'file:///{temp_file.name.replace(backslash, "/")}'
else:
temp_file = None
free_port = _setup_free_port(local_rank)
init_method = f"tcp://localhost:{free_port}"
dist_info = {
"world_size": world_size,
"rank": local_rank,
"init_method": init_method,
}
if request.param == "nccl":
dist_info["backend"] = "nccl"
else:
dist_info["backend"] = "gloo"
from datetime import timedelta
dist_info["timeout"] = timedelta(seconds=60)
yield _create_dist_context(dist_info, local_rank)
_destroy_dist_context()
if temp_file:
temp_file.close()
elif request.param == "horovod":
request.node.stash[is_horovod_stash_key] = True
yield None
elif request.param in ("single_device_xla", "xla_nprocs"):
request.node.stash[is_xla_stash_key] = True
request.node.stash[is_xla_single_device_stash_key] = request.param == "single_device_xla"
yield {"xla_index": -1} if request.param == "xla_nprocs" else None
else:
raise RuntimeError(f"Invalid parameter value for `distributed` fixture, given {request.param}")
@pytest.hookimpl
def pytest_pyfunc_call(pyfuncitem: pytest.Function) -> None:
if pyfuncitem.stash.get(is_horovod_stash_key, False):
def testfunc_wrapper(test_func, **kwargs):
def hvd_worker():
import horovod.torch as hvd
hvd.init()
lrank = hvd.local_rank()
if torch.cuda.is_available():
torch.cuda.set_device(lrank)
test_func(**kwargs)
hvd.shutdown()
try:
# old API
from horovod.run.runner import run
except ImportError:
# new API: https://github.com/horovod/horovod/pull/2099
from horovod import run
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
hvd_kwargs = dict(use_gloo=True, num_proc=nproc)
run(hvd_worker, **hvd_kwargs)
pyfuncitem.obj = functools.partial(testfunc_wrapper, pyfuncitem.obj)
elif pyfuncitem.stash.get(is_xla_stash_key, False) and not pyfuncitem.stash[is_xla_single_device_stash_key]:
def testfunc_wrapper(testfunc, **kwargs):
def xla_worker(index, fn):
import torch_xla.core.xla_model as xm
kwargs["distributed"]["xla_index"] = index
xm.rendezvous("init")
fn(**kwargs)
import torch_xla.distributed.xla_multiprocessing as xmp
spawn_kwargs = {"nprocs": int(os.environ["NUM_TPU_WORKERS"])}
if "COLAB_TPU_ADDR" in os.environ:
spawn_kwargs["start_method"] = "fork"
try:
xmp.spawn(xla_worker, args=(testfunc,), **spawn_kwargs)
except SystemExit as ex_:
assert ex_.code == 0, "Didn't successfully exit in XLA test"
pyfuncitem.obj = functools.partial(testfunc_wrapper, pyfuncitem.obj)
|
import torch
def cpu_and_maybe_cuda():
return ("cpu",) + (("cuda",) if torch.cuda.is_available() else ())
|
import warnings
from functools import partial
from itertools import accumulate
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.engine import Engine, Events
from ignite.metrics import Accuracy, RunningAverage
from ignite.metrics.metric import RunningBatchWise, RunningEpochWise, SingleEpochRunningBatchWise
def test_wrong_input_args():
with pytest.raises(TypeError, match=r"Argument src should be a Metric or None."):
RunningAverage(src=[12, 34])
with pytest.raises(ValueError, match=r"Argument alpha should be a float between"):
RunningAverage(alpha=-1.0)
with pytest.raises(ValueError, match=r"Argument output_transform should be None if src is a Metric"):
RunningAverage(Accuracy(), output_transform=lambda x: x[0])
with pytest.raises(ValueError, match=r"Argument output_transform should not be None if src corresponds"):
RunningAverage()
with pytest.raises(ValueError, match=r"Argument device should be None if src is a Metric"):
RunningAverage(Accuracy(), device="cpu")
with pytest.warns(UserWarning, match=r"`epoch_bound` is deprecated and will be removed in the future."):
m = RunningAverage(Accuracy(), epoch_bound=True)
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize("epoch_bound, usage", [(False, RunningBatchWise()), (True, SingleEpochRunningBatchWise())])
def test_epoch_bound(epoch_bound, usage):
with warnings.catch_warnings():
metric = RunningAverage(output_transform=lambda _: _, epoch_bound=epoch_bound)
e1 = Engine(lambda _, __: None)
e2 = Engine(lambda _, __: None)
metric.attach(e1, "")
metric.epoch_bound = None
metric.attach(e2, "", usage)
e1._event_handlers == e2._event_handlers
@pytest.mark.parametrize("usage", [RunningBatchWise(), SingleEpochRunningBatchWise()])
def test_integration_batchwise(usage):
torch.manual_seed(10)
alpha = 0.98
n_iters = 10
batch_size = 10
n_classes = 10
max_epochs = 3
data = list(range(n_iters))
loss = torch.arange(n_iters, dtype=torch.float)
y_true = torch.randint(0, n_classes, size=(n_iters, batch_size))
y_pred = torch.rand(n_iters, batch_size, n_classes)
accuracy_running_averages = torch.tensor(
list(
accumulate(
map(
lambda y_yp: torch.sum(y_yp[1].argmax(dim=-1) == y_yp[0]).item() / y_yp[0].size(0),
zip(
y_true if isinstance(usage, SingleEpochRunningBatchWise) else y_true.repeat(max_epochs, 1),
y_pred if isinstance(usage, SingleEpochRunningBatchWise) else y_pred.repeat(max_epochs, 1, 1),
),
),
lambda ra, acc: ra * alpha + (1 - alpha) * acc,
)
)
)
if isinstance(usage, SingleEpochRunningBatchWise):
accuracy_running_averages = accuracy_running_averages.repeat(max_epochs)
loss_running_averages = torch.tensor(
list(
accumulate(
loss if isinstance(usage, SingleEpochRunningBatchWise) else loss.repeat(max_epochs),
lambda ra, loss_item: ra * alpha + (1 - alpha) * loss_item,
)
)
)
if isinstance(usage, SingleEpochRunningBatchWise):
loss_running_averages = loss_running_averages.repeat(max_epochs)
def update_fn(_, i):
loss_value = loss[i]
y_true_batch = y_true[i]
y_pred_batch = y_pred[i]
return loss_value, y_pred_batch, y_true_batch
trainer = Engine(update_fn)
acc_metric = RunningAverage(Accuracy(output_transform=lambda x: [x[1], x[2]]), alpha=alpha)
acc_metric.attach(trainer, "running_avg_accuracy", usage)
avg_output = RunningAverage(output_transform=lambda x: x[0], alpha=alpha)
avg_output.attach(trainer, "running_avg_loss", usage)
metric_acc_running_averages = []
metric_loss_running_averages = []
@trainer.on(Events.ITERATION_COMPLETED)
def _(engine):
metric_acc_running_averages.append(engine.state.metrics["running_avg_accuracy"])
metric_loss_running_averages.append(engine.state.metrics["running_avg_loss"])
trainer.run(data, max_epochs=3)
assert (torch.tensor(metric_acc_running_averages) == accuracy_running_averages).all()
assert (torch.tensor(metric_loss_running_averages) == loss_running_averages).all()
def test_integration_epochwise():
torch.manual_seed(10)
alpha = 0.98
n_iters = 10
batch_size = 10
n_classes = 10
max_epochs = 3
data = list(range(n_iters))
y_true = torch.randint(0, n_classes, size=(n_iters, batch_size))
y_pred = torch.rand(max_epochs, n_iters, batch_size, n_classes)
accuracy_running_averages = torch.tensor(
list(
accumulate(
map(
lambda y_pred_epoch: torch.sum(y_pred_epoch.argmax(dim=-1) == y_true).item() / y_true.numel(),
y_pred,
),
lambda ra, acc: ra * alpha + (1 - alpha) * acc,
)
)
)
def update_fn(engine, i):
y_true_batch = y_true[i]
y_pred_batch = y_pred[engine.state.epoch - 1, i]
return y_pred_batch, y_true_batch
trainer = Engine(update_fn)
acc_metric = RunningAverage(Accuracy(), alpha=alpha)
acc_metric.attach(trainer, "running_avg_accuracy", RunningEpochWise())
metric_acc_running_averages = []
@trainer.on(Events.EPOCH_COMPLETED)
def _(engine):
metric_acc_running_averages.append(engine.state.metrics["running_avg_accuracy"])
trainer.run(data, max_epochs=3)
assert (torch.tensor(metric_acc_running_averages) == accuracy_running_averages).all()
@pytest.mark.parametrize("usage", [RunningBatchWise(), SingleEpochRunningBatchWise(), RunningEpochWise()])
def test_multiple_attach(usage):
n_iters = 100
errD_values = iter(np.random.rand(n_iters))
errG_values = iter(np.random.rand(n_iters))
D_x_values = iter(np.random.rand(n_iters))
D_G_z1 = iter(np.random.rand(n_iters))
D_G_z2 = iter(np.random.rand(n_iters))
def update_fn(engine, batch):
return {
"errD": next(errD_values),
"errG": next(errG_values),
"D_x": next(D_x_values),
"D_G_z1": next(D_G_z1),
"D_G_z2": next(D_G_z2),
}
trainer = Engine(update_fn)
alpha = 0.98
# attach running average
monitoring_metrics = ["errD", "errG", "D_x", "D_G_z1", "D_G_z2"]
for metric in monitoring_metrics:
foo = partial(lambda x, metric: x[metric], metric=metric)
RunningAverage(alpha=alpha, output_transform=foo).attach(trainer, metric, usage)
@trainer.on(usage.COMPLETED)
def check_values(engine):
values = []
for metric in monitoring_metrics:
values.append(engine.state.metrics[metric])
values = set(values)
assert len(values) == len(monitoring_metrics)
data = list(range(n_iters))
trainer.run(data)
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize("epoch_bound", [True, False, None])
@pytest.mark.parametrize("src", [Accuracy(), None])
@pytest.mark.parametrize("usage", [RunningBatchWise(), SingleEpochRunningBatchWise(), RunningEpochWise()])
def test_detach(epoch_bound, src, usage):
with warnings.catch_warnings():
m = RunningAverage(src, output_transform=(lambda _: _) if src is None else None, epoch_bound=epoch_bound)
e = Engine(lambda _, __: None)
m.attach(e, "m", usage)
for event_handlers in e._event_handlers.values():
assert len(event_handlers) != 0
m.detach(e, usage)
for event_handlers in e._event_handlers.values():
assert len(event_handlers) == 0
def test_output_is_tensor():
m = RunningAverage(output_transform=lambda x: x)
m.update(torch.rand(10, requires_grad=True).mean())
v = m.compute()
assert isinstance(v, torch.Tensor)
assert not v.requires_grad
m.update(torch.rand(10, requires_grad=True).mean())
v = m.compute()
assert isinstance(v, torch.Tensor)
assert not v.requires_grad
m.update(torch.rand(10, requires_grad=True).mean())
v = m.compute()
assert isinstance(v, torch.Tensor)
assert not v.requires_grad
@pytest.mark.parametrize("usage", [RunningBatchWise(), SingleEpochRunningBatchWise()])
def test_distrib_on_output(distributed, usage):
device = idist.device()
rank = idist.get_rank()
n_iters = 10
n_epochs = 3
# Data per rank
data = list(range(n_iters))
rank_loss_count = n_epochs * n_iters
all_loss_values = torch.arange(0, rank_loss_count * idist.get_world_size(), dtype=torch.float64).to(device)
loss_values = iter(all_loss_values[rank_loss_count * rank : rank_loss_count * (rank + 1)])
def update_fn(engine, batch):
loss_value = next(loss_values)
return loss_value.item()
trainer = Engine(update_fn)
alpha = 0.98
metric_device = device if device.type != "xla" else "cpu"
avg_output = RunningAverage(output_transform=lambda x: x, alpha=alpha, device=metric_device)
avg_output.attach(trainer, "running_avg_output", usage)
@trainer.on(usage.STARTED)
def reset_running_avg_output(engine):
engine.state.running_avg_output = None
@trainer.on(usage.ITERATION_COMPLETED)
def running_avg_output_update(engine):
i = engine.state.iteration - 1
o = sum([all_loss_values[i + r * rank_loss_count] for r in range(idist.get_world_size())]).item()
o /= idist.get_world_size()
if engine.state.running_avg_output is None:
engine.state.running_avg_output = o
else:
engine.state.running_avg_output = engine.state.running_avg_output * alpha + (1.0 - alpha) * o
@trainer.on(usage.COMPLETED)
def assert_equal_running_avg_output_values(engine):
it = engine.state.iteration
assert (
engine.state.running_avg_output == engine.state.metrics["running_avg_output"]
), f"{it}: {engine.state.running_avg_output} vs {engine.state.metrics['running_avg_output']}"
trainer.run(data, max_epochs=3)
@pytest.mark.parametrize("usage", [RunningBatchWise(), SingleEpochRunningBatchWise(), RunningEpochWise()])
def test_distrib_on_metric(distributed, usage):
device = idist.device()
rank = idist.get_rank()
n_iters = 10
n_epochs = 3
batch_size = 10
n_classes = 10
def _test(metric_device):
data = list(range(n_iters))
np.random.seed(12)
all_y_true_batch_values = np.random.randint(
0, n_classes, size=(idist.get_world_size(), n_epochs * n_iters, batch_size)
)
all_y_pred_batch_values = np.random.rand(idist.get_world_size(), n_epochs * n_iters, batch_size, n_classes)
y_true_batch_values = iter(all_y_true_batch_values[rank, ...])
y_pred_batch_values = iter(all_y_pred_batch_values[rank, ...])
def update_fn(engine, batch):
y_true_batch = next(y_true_batch_values)
y_pred_batch = next(y_pred_batch_values)
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
trainer = Engine(update_fn)
alpha = 0.98
acc_metric = RunningAverage(Accuracy(device=metric_device), alpha=alpha)
acc_metric.attach(trainer, "running_avg_accuracy", usage)
running_avg_acc = [
None,
]
true_acc_metric = Accuracy(device=metric_device)
@trainer.on(Events.ITERATION_COMPLETED)
def manual_running_avg_acc(engine):
iteration = engine.state.iteration
if not isinstance(usage, RunningEpochWise) or ((iteration - 1) % n_iters) == 0:
true_acc_metric.reset()
if ((iteration - 1) % n_iters) == 0 and isinstance(usage, SingleEpochRunningBatchWise):
running_avg_acc[0] = None
for j in range(idist.get_world_size()):
output = (
torch.from_numpy(all_y_pred_batch_values[j, iteration - 1, :, :]),
torch.from_numpy(all_y_true_batch_values[j, iteration - 1, :]),
)
true_acc_metric.update(output)
if not isinstance(usage, RunningEpochWise) or (iteration % n_iters) == 0:
batch_acc = true_acc_metric._num_correct.item() * 1.0 / true_acc_metric._num_examples
if running_avg_acc[0] is None:
running_avg_acc[0] = batch_acc
else:
running_avg_acc[0] = running_avg_acc[0] * alpha + (1.0 - alpha) * batch_acc
engine.state.running_avg_acc = running_avg_acc[0]
@trainer.on(Events.ITERATION_COMPLETED)
def assert_equal_running_avg_acc_values(engine):
print(engine.state.iteration)
if not isinstance(usage, RunningEpochWise) or (
(engine.state.iteration > 1) and ((engine.state.iteration % n_iters) == 1)
):
assert (
engine.state.running_avg_acc == engine.state.metrics["running_avg_accuracy"]
), f"{engine.state.running_avg_acc} vs {engine.state.metrics['running_avg_accuracy']}"
trainer.run(data, max_epochs=3)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def test_distrib_accumulator_device(distributed):
device = idist.device()
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
# Don't test the src=Metric case because compute() returns a scalar,
# so the metric doesn't accumulate on the device specified
avg = RunningAverage(output_transform=lambda x: x, device=metric_device)
assert avg._device == metric_device
# Value is None until the first update then compute call
for _ in range(3):
avg.update(torch.tensor(1.0, device=device))
avg.compute()
assert (
avg._value.device == metric_device
), f"{type(avg._value.device)}:{avg._value.device} vs {type(metric_device)}:{metric_device}"
|
from typing import Sequence, Union
import numpy as np
import pytest
import torch
from skimage.metrics import structural_similarity as ski_ssim
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import SSIM
def test_zero_div():
ssim = SSIM(data_range=1.0)
with pytest.raises(NotComputableError):
ssim.compute()
def test_invalid_ssim():
y_pred = torch.rand(1, 1, 4, 4)
y = y_pred + 0.125
with pytest.raises(ValueError, match=r"Expected kernel_size to have odd positive number."):
ssim = SSIM(data_range=1.0, kernel_size=2)
ssim.update((y_pred, y))
ssim.compute()
with pytest.raises(ValueError, match=r"Expected kernel_size to have odd positive number."):
ssim = SSIM(data_range=1.0, kernel_size=-1)
ssim.update((y_pred, y))
ssim.compute()
with pytest.raises(ValueError, match=r"Argument kernel_size should be either int or a sequence of int."):
ssim = SSIM(data_range=1.0, kernel_size=1.0)
ssim.update((y_pred, y))
ssim.compute()
with pytest.raises(ValueError, match=r"Argument sigma should be either float or a sequence of float."):
ssim = SSIM(data_range=1.0, sigma=-1)
ssim.update((y_pred, y))
ssim.compute()
with pytest.raises(ValueError, match=r"Expected sigma to have positive number."):
ssim = SSIM(data_range=1.0, sigma=(-1, -1))
ssim.update((y_pred, y))
ssim.compute()
with pytest.raises(ValueError, match=r"Argument sigma should be either float or a sequence of float."):
ssim = SSIM(data_range=1.0, sigma=1)
ssim.update((y_pred, y))
ssim.compute()
with pytest.raises(ValueError, match=r"Expected y_pred and y to have the same shape."):
y = y.squeeze(dim=0)
ssim = SSIM(data_range=1.0)
ssim.update((y_pred, y))
ssim.compute()
with pytest.raises(ValueError, match=r"Expected y_pred and y to have BxCxHxW shape."):
y = y.squeeze(dim=0)
ssim = SSIM(data_range=1.0)
ssim.update((y, y))
ssim.compute()
with pytest.raises(TypeError, match=r"Expected y_pred and y to have the same data type."):
y = y.double()
ssim = SSIM(data_range=1.0)
ssim.update((y_pred, y))
ssim.compute()
@pytest.mark.parametrize(
"shape, kernel_size, gaussian, use_sample_covariance",
[[(8, 3, 224, 224), 7, False, True], [(12, 3, 28, 28), 11, True, False]],
)
def test_ssim(available_device, shape, kernel_size, gaussian, use_sample_covariance):
y_pred = torch.rand(shape, device=available_device)
y = y_pred * 0.8
compare_ssim_ignite_skiimg(
y_pred,
y,
available_device,
kernel_size=kernel_size,
gaussian=gaussian,
use_sample_covariance=use_sample_covariance,
)
def compare_ssim_ignite_skiimg(
y_pred: torch.Tensor,
y: torch.Tensor,
device: torch.device,
precision: float = 2e-5, # default to float32 expected precision
*,
skimg_y_pred: Union[np.ndarray, None] = None,
skimg_y: Union[np.ndarray, None] = None,
data_range: float = 1.0,
kernel_size: Union[int, Sequence[int]] = 11,
gaussian: bool = True,
use_sample_covariance: bool = False,
):
sigma = 1.5
ssim = SSIM(data_range=data_range, sigma=sigma, device=device)
ssim.update((y_pred, y))
ignite_ssim = ssim.compute()
if y_pred.dtype == torch.bfloat16:
y_pred = y_pred.to(dtype=torch.float16)
if skimg_y_pred is None:
skimg_y_pred = y_pred.cpu().numpy()
if skimg_y is None:
skimg_y = skimg_y_pred * 0.8
skimg_ssim = ski_ssim(
skimg_y_pred,
skimg_y,
win_size=kernel_size,
sigma=sigma,
channel_axis=1,
gaussian_weights=gaussian,
data_range=data_range,
use_sample_covariance=use_sample_covariance,
)
assert isinstance(ignite_ssim, float)
assert np.allclose(ignite_ssim, skimg_ssim, atol=precision)
@pytest.mark.parametrize(
"metric_device, y_pred_device",
[
[torch.device("cpu"), torch.device("cpu")],
[torch.device("cpu"), torch.device("cuda")],
[torch.device("cuda"), torch.device("cpu")],
[torch.device("cuda"), torch.device("cuda")],
],
)
def test_ssim_device(available_device, metric_device, y_pred_device):
if available_device == "cpu":
pytest.skip("This test requires a cuda device.")
data_range = 1.0
sigma = 1.5
shape = (12, 5, 256, 256)
ssim = SSIM(data_range=data_range, sigma=sigma, device=metric_device)
y_pred = torch.rand(shape, device=y_pred_device)
y = y_pred * 0.8
if metric_device == torch.device("cuda") and y_pred_device == torch.device("cpu"):
with pytest.warns(UserWarning):
ssim.update((y_pred, y))
else:
ssim.update((y_pred, y))
if metric_device == torch.device("cuda") or y_pred_device == torch.device("cuda"):
# A tensor will always have the device index set
excepted_device = torch.device("cuda:0")
else:
excepted_device = torch.device("cpu")
assert ssim._kernel.device == excepted_device
def test_ssim_variable_batchsize(available_device):
# Checks https://github.com/pytorch/ignite/issues/2532
sigma = 1.5
data_range = 1.0
ssim = SSIM(data_range=data_range, sigma=sigma)
y_preds = [
torch.rand(12, 3, 28, 28, device=available_device),
torch.rand(12, 3, 28, 28, device=available_device),
torch.rand(8, 3, 28, 28, device=available_device),
torch.rand(16, 3, 28, 28, device=available_device),
torch.rand(1, 3, 28, 28, device=available_device),
torch.rand(30, 3, 28, 28, device=available_device),
]
y_true = [v * 0.8 for v in y_preds]
for y_pred, y in zip(y_preds, y_true):
ssim.update((y_pred, y))
out = ssim.compute()
ssim.reset()
ssim.update((torch.cat(y_preds), torch.cat(y_true)))
expected = ssim.compute()
assert np.allclose(out, expected)
def test_ssim_variable_channel(available_device):
y_preds = [
torch.rand(12, 5, 28, 28, device=available_device),
torch.rand(12, 4, 28, 28, device=available_device),
torch.rand(12, 7, 28, 28, device=available_device),
torch.rand(12, 3, 28, 28, device=available_device),
torch.rand(12, 11, 28, 28, device=available_device),
torch.rand(12, 6, 28, 28, device=available_device),
]
y_true = [v * 0.8 for v in y_preds]
for y_pred, y in zip(y_preds, y_true):
compare_ssim_ignite_skiimg(y_pred, y, available_device)
@pytest.mark.parametrize(
"dtype, precision", [(torch.bfloat16, 2e-3), (torch.float16, 4e-4), (torch.float32, 2e-5), (torch.float64, 2e-5)]
)
def test_cuda_ssim_dtypes(available_device, dtype, precision):
# Checks https://github.com/pytorch/ignite/pull/3034
if available_device == "cpu" and dtype in [torch.float16, torch.bfloat16]:
pytest.skip(reason=f"Unsupported dtype {dtype} on CPU device")
shape = (12, 3, 28, 28)
y_pred = torch.rand(shape, device=available_device, dtype=dtype)
y = y_pred * 0.8
compare_ssim_ignite_skiimg(y_pred, y, available_device, precision)
@pytest.mark.parametrize(
"shape, kernel_size, gaussian, use_sample_covariance",
[[(8, 3, 224, 224), 7, False, True], [(12, 3, 28, 28), 11, True, False]],
)
def test_ssim_uint8(available_device, shape, kernel_size, gaussian, use_sample_covariance):
y_pred = torch.randint(0, 255, shape, device=available_device, dtype=torch.uint8)
y = (y_pred * 0.8).to(dtype=torch.uint8)
sigma = 1.5
data_range = 255
ssim = SSIM(data_range=data_range, sigma=sigma, device=available_device)
ssim.update((y_pred, y))
ignite_ssim = ssim.compute()
skimg_pred = y_pred.cpu().numpy()
skimg_y = (skimg_pred * 0.8).astype(np.uint8)
skimg_ssim = ski_ssim(
skimg_pred,
skimg_y,
win_size=kernel_size,
sigma=sigma,
channel_axis=1,
gaussian_weights=gaussian,
data_range=data_range,
use_sample_covariance=use_sample_covariance,
)
assert isinstance(ignite_ssim, float)
assert np.allclose(ignite_ssim, skimg_ssim, atol=1e-5)
@pytest.mark.parametrize("metric_device", ["cpu", "process_device"])
def test_distrib_integration(distributed, metric_device):
from ignite.engine import Engine
rank = idist.get_rank()
torch.manual_seed(12 + rank)
n_iters = 100
batch_size = 10
device = idist.device()
if metric_device == "process_device":
metric_device = device if device.type != "xla" else "cpu"
y_pred = torch.rand(n_iters * batch_size, 3, 28, 28, dtype=torch.float, device=device)
y = y_pred * 0.65
def update(engine, i):
return (
y_pred[i * batch_size : (i + 1) * batch_size, ...],
y[i * batch_size : (i + 1) * batch_size, ...],
)
engine = Engine(update)
SSIM(data_range=1.0, device=metric_device).attach(engine, "ssim")
data = list(range(n_iters))
engine.run(data=data, max_epochs=1)
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
assert "ssim" in engine.state.metrics
res = engine.state.metrics["ssim"]
np_pred = y_pred.cpu().numpy()
np_true = np_pred * 0.65
true_res = ski_ssim(
np_pred,
np_true,
win_size=11,
sigma=1.5,
channel_axis=1,
gaussian_weights=True,
data_range=1.0,
use_sample_covariance=False,
)
tol = 1e-3 if device.type == "xla" else 1e-4 # Isn't better to ask `distributed` about backend info?
assert pytest.approx(res, abs=tol) == true_res
engine = Engine(update)
SSIM(data_range=1.0, gaussian=False, kernel_size=7, device=metric_device).attach(engine, "ssim")
data = list(range(n_iters))
engine.run(data=data, max_epochs=1)
assert "ssim" in engine.state.metrics
res = engine.state.metrics["ssim"]
np_pred = y_pred.cpu().numpy()
np_true = np_pred * 0.65
true_res = ski_ssim(np_pred, np_true, win_size=7, channel_axis=1, gaussian_weights=False, data_range=1.0)
assert pytest.approx(res, abs=tol) == true_res
@pytest.mark.parametrize("metric_device", [torch.device("cpu"), "process_device"])
def test_distrib_accumulator_device(distributed, metric_device):
device = idist.device()
if metric_device == "process_device":
metric_device = torch.device(device if device.type != "xla" else "cpu")
ssim = SSIM(data_range=1.0, device=metric_device)
assert ssim._kernel is None
assert isinstance(ssim._kernel_2d, torch.Tensor)
for dev in [ssim._device, ssim._kernel_2d.device]:
assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
y_pred = torch.rand(2, 3, 28, 28, dtype=torch.float, device=device)
y = y_pred * 0.65
ssim.update((y_pred, y))
dev = ssim._sum_of_ssim.device
assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
|
import numbers
import os
from unittest.mock import MagicMock
import numpy as np
import pytest
import torch
from pytest import approx, raises
from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
import ignite.distributed as idist
from ignite.engine import Engine, Events, State
from ignite.metrics import ConfusionMatrix, Precision, Recall
from ignite.metrics.metric import (
BatchFiltered,
BatchWise,
EpochWise,
Metric,
reinit__is_reduced,
RunningBatchWise,
RunningEpochWise,
SingleEpochRunningBatchWise,
sync_all_reduce,
)
class DummyMetric1(Metric):
def __init__(self, true_output, output_transform=lambda x: x):
super(DummyMetric1, self).__init__(output_transform=output_transform)
self.true_output = true_output
def reset(self):
pass
def compute(self):
pass
def update(self, output):
assert output == self.true_output
def test_no_transform():
y_pred = torch.tensor([[2.0], [-2.0]])
y = torch.zeros(2)
metric = DummyMetric1(true_output=(y_pred, y))
state = State(output=(y_pred, y))
engine = MagicMock(state=state)
metric.iteration_completed(engine)
def test_transform():
y_pred = torch.tensor([[2.0], [-2.0]])
y = torch.zeros(2)
def transform(output):
pred_dict, target_dict = output
return pred_dict["y"], target_dict["y"]
metric = DummyMetric1(true_output=(y_pred, y), output_transform=transform)
state = State(output=({"y": y_pred}, {"y": y}))
engine = MagicMock(state=state)
metric.iteration_completed(engine)
def test_output_as_mapping_wrong_keys():
metric = DummyMetric1(true_output=(0, 1))
state = State(output=({"y1": 0, "y2": 1}))
engine = MagicMock(state=state)
with pytest.raises(
ValueError, match=r"When transformed engine's output is a mapping, " r"it should contain \('y_pred', 'y'\) keys"
):
metric.iteration_completed(engine)
def test_output_as_mapping_keys_is_none():
class DummyMetric(Metric):
required_output_keys = None
def reset(self):
pass
def compute(self):
pass
def update(self, output):
pass
metric = DummyMetric()
assert metric.required_output_keys is None
state = State(output=({"y1": 0, "y2": 1}))
engine = MagicMock(state=state)
with pytest.raises(TypeError, match=r"Transformed engine output for DummyMetric metric should be a tuple/list"):
metric.iteration_completed(engine)
def test_output_as_mapping():
y_pred = torch.tensor([[2.0], [-2.0]])
y = torch.zeros(2)
metric = DummyMetric1(true_output=(y_pred, y))
state = State(output=({"y_pred": y_pred, "y": y}))
engine = MagicMock(state=state)
metric.iteration_completed(engine)
def test_no_grad():
y_pred = torch.zeros(4, requires_grad=True)
y = torch.zeros(4, requires_grad=False)
class DummyMetric(Metric):
def reset(self):
pass
def compute(self):
pass
def update(self, output):
y_pred, y = output
mse = torch.pow(y_pred - y.view_as(y_pred), 2)
assert y_pred.requires_grad
assert not mse.requires_grad
metric = DummyMetric()
state = State(output=(y_pred, y))
engine = MagicMock(state=state)
metric.iteration_completed(engine)
def test_arithmetics():
class ListGatherMetric(Metric):
def __init__(self, index):
self.index = index
super(ListGatherMetric, self).__init__()
def reset(self):
self.list_ = []
def update(self, output):
self.list_ = output
def compute(self):
return self.list_[self.index]
m0 = ListGatherMetric(0)
m1 = ListGatherMetric(1)
m2 = ListGatherMetric(2)
# __add__
m0_plus_m1 = m0 + m1
m0.update([1, 10, 100])
m1.update([1, 10, 100])
assert m0_plus_m1.compute() == 11
m0.update([2, 20, 200])
m1.update([2, 20, 200])
assert m0_plus_m1.compute() == 22
m2_plus_2 = m2 + 2
m2.update([1, 10, 100])
assert m2_plus_2.compute() == 102
m2_plus_2 = 2 + m2
m2.update([1, 10, 100])
assert m2_plus_2.compute() == 102
# __sub__
m0_minus_m1 = m0 - m1
m0.update([1, 10, 100])
m1.update([1, 10, 100])
assert m0_minus_m1.compute() == -9
m0.update([2, 20, 200])
m1.update([2, 20, 200])
assert m0_minus_m1.compute() == -18
m2_minus_2 = m2 - 2
m2.update([1, 10, 100])
assert m2_minus_2.compute() == 98
m2_minus_2 = 2 - m2
m2.update([1, 10, 100])
assert m2_minus_2.compute() == -98
# __mul__
m0_times_m1 = m0 * m1
m0.update([1, 10, 100])
m1.update([1, 10, 100])
assert m0_times_m1.compute() == 10
m0.update([2, 20, 200])
m1.update([2, 20, 200])
assert m0_times_m1.compute() == 40
m2_times_2 = m2 * 2
m2.update([1, 10, 100])
assert m2_times_2.compute() == 200
m2_times_2 = 2 * m2
m2.update([1, 10, 100])
assert m2_times_2.compute() == 200
# __pow__
m0_pow_m1 = m0**m1
m0.update([1, 10, 100])
m1.update([1, 10, 100])
assert m0_pow_m1.compute() == 1
m0.update([2, 20, 200])
m1.update([2, 20, 200])
assert m0_pow_m1.compute() == 2**20
m2_pow_2 = m2**2
m2.update([1, 10, 100])
assert m2_pow_2.compute() == 10000
m2_pow_2 = 0.99**m2
m2.update([1, 10, 100])
assert m2_pow_2.compute() == 0.3660323412732292
# __mod__
m0_mod_m1 = m0 % m1
m0.update([1, 10, 100])
m1.update([1, 10, 100])
assert m0_mod_m1.compute() == 1
m0.update([2, 20, 200])
m1.update([2, 20, 200])
assert m0_mod_m1.compute() == 2
m2_mod_2 = m2 % 2
m2.update([1, 10, 100])
assert m2_mod_2.compute() == 0
# __truediv__
m0_truediv_m1 = m0 / m1
m0.update([1, 10, 100])
m1.update([1, 10, 100])
assert m0_truediv_m1.compute() == approx(0.1)
m0.update([2, 20, 200])
m1.update([2, 20, 200])
assert m0_truediv_m1.compute() == approx(0.1)
m2_truediv_2 = m2 / 2
m2.update([1, 10, 100])
assert m2_truediv_2.compute() == approx(50.0)
m2_truediv_2 = 200 / m2
m2.update([1, 10, 100])
assert m2_truediv_2.compute() == approx(2.0)
m0_truediv_m1 = m0.__truediv__(m1)
m0.update([1, 10, 100])
m1.update([1, 10, 100])
assert m0_truediv_m1.compute() == approx(0.1)
m0.update([2, 20, 200])
m1.update([2, 20, 200])
assert m0_truediv_m1.compute() == approx(0.1)
m2_truediv_2 = m2.__truediv__(2)
m2.update([1, 10, 100])
assert m2_truediv_2.compute() == approx(50.0)
m2_truediv_2 = m2.__rtruediv__(200)
m2.update([1, 10, 100])
assert m2_truediv_2.compute() == approx(2.0)
# __floordiv__
m0_floordiv_m1 = m0 // m1
m0.update([1, 10, 100])
m1.update([1, 10, 100])
assert m0_floordiv_m1.compute() == 0
m0.update([2, 20, 200])
m1.update([2, 20, 200])
assert m0_floordiv_m1.compute() == 0
m2_floordiv_2 = m2 // 2
m2.update([1, 10, 100])
assert m2_floordiv_2.compute() == 50
def test_attach():
class CountMetric(Metric):
def __init__(self, value):
self.reset_count = 0
super(CountMetric, self).__init__()
self.reset_count = 0
self.compute_count = 0
self.update_count = 0
self.value = value
def reset(self):
self.reset_count += 1
def compute(self):
self.compute_count += 1
return self.value
def update(self, output):
self.update_count += 1
def process_function(*args, **kwargs):
return 1
engine = Engine(process_function)
m1 = CountMetric(123)
m2 = CountMetric(456)
m1.attach(engine, "m1")
m2.attach(engine, "m2_1")
m2.attach(engine, "m2_2")
engine.run(range(10), 5)
assert engine.state.metrics["m1"] == 123
assert engine.state.metrics["m2_1"] == 456
assert engine.state.metrics["m2_2"] == 456
assert m1.reset_count == 5
assert m1.compute_count == 5
assert m1.update_count == 50
assert m2.reset_count == 5
assert m2.compute_count == 10
assert m2.update_count == 50
assert m1.is_attached(engine)
assert m2.is_attached(engine)
def test_detach():
class DummyMetric(Metric):
required_output_keys = None
def reset(self):
pass
def compute(self):
pass
def update(self, output):
pass
def process_function(*args, **kwargs):
return 1
engine = Engine(process_function)
m1 = DummyMetric()
m2 = DummyMetric()
m1.attach(engine, "m1")
m2.attach(engine, "m2_1")
m2.attach(engine, "m2_2")
m1.detach(engine)
m2.detach(engine)
engine.run(range(10), 5)
assert "m1" not in engine.state.metrics
assert "m2_1" not in engine.state.metrics
assert "m2_2" not in engine.state.metrics
assert not m1.is_attached(engine)
assert not m2.is_attached(engine)
def test_integration():
np.random.seed(1)
n_iters = 10
batch_size = 10
n_classes = 10
y_true = np.arange(0, n_iters * batch_size, dtype="int64") % n_classes
y_pred = 0.2 * np.random.rand(n_iters * batch_size, n_classes)
for i in range(n_iters * batch_size):
if np.random.rand() > 0.4:
y_pred[i, y_true[i]] = 1.0
else:
j = np.random.randint(0, n_classes)
y_pred[i, j] = 0.7
y_true_batch_values = iter(y_true.reshape(n_iters, batch_size))
y_pred_batch_values = iter(y_pred.reshape(n_iters, batch_size, n_classes))
def update_fn(engine, batch):
y_true_batch = next(y_true_batch_values)
y_pred_batch = next(y_pred_batch_values)
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
evaluator = Engine(update_fn)
precision = Precision(average=False)
recall = Recall(average=False)
F1 = precision * recall * 2 / (precision + recall)
precision.attach(evaluator, "precision")
recall.attach(evaluator, "recall")
F1.attach(evaluator, "f1")
data = list(range(n_iters))
state = evaluator.run(data, max_epochs=1)
precision_true = precision_score(y_true, np.argmax(y_pred, axis=-1), average=None)
recall_true = recall_score(y_true, np.argmax(y_pred, axis=-1), average=None)
f1_true = f1_score(y_true, np.argmax(y_pred, axis=-1), average=None)
precision = state.metrics["precision"].numpy()
recall = state.metrics["recall"].numpy()
f1 = state.metrics["f1"].numpy()
assert precision_true == approx(precision), f"{precision_true} vs {precision}"
assert recall_true == approx(recall), f"{recall_true} vs {recall}"
assert f1_true == approx(f1), f"{f1_true} vs {f1}"
def test_abstract_class():
with raises(TypeError):
Metric()
def test_pytorch_operators():
def _test(composed_metric, metric_name, compute_true_value_fn):
metrics = {
metric_name: composed_metric,
}
y_pred = torch.rand(15, 10, 5).float()
y = torch.randint(0, 5, size=(15, 10)).long()
def update_fn(engine, batch):
y_pred, y = batch
return y_pred, y
validator = Engine(update_fn)
for name, metric in metrics.items():
metric.attach(validator, name)
def data(y_pred, y):
for i in range(y_pred.shape[0]):
yield (y_pred[i], y[i])
d = data(y_pred, y)
state = validator.run(d, max_epochs=1, epoch_length=y_pred.shape[0])
assert set(state.metrics.keys()) == set([metric_name])
np_y_pred = np.argmax(y_pred.numpy(), axis=-1).ravel()
np_y = y.numpy().ravel()
assert state.metrics[metric_name] == approx(compute_true_value_fn(np_y_pred, np_y))
precision_1 = Precision(average=False)
precision_2 = Precision(average=False)
norm_summed_precision = (precision_1 + precision_2).norm(p=10)
def compute_true_norm_summed_precision(y_pred, y):
p1 = precision_score(y, y_pred, average=None)
p2 = precision_score(y, y_pred, average=None)
return np.linalg.norm(p1 + p2, ord=10)
_test(norm_summed_precision, "mean summed precision", compute_true_value_fn=compute_true_norm_summed_precision)
precision = Precision(average=False)
recall = Recall(average=False)
sum_precision_recall = (precision + recall).sum()
def compute_sum_precision_recall(y_pred, y):
p = precision_score(y, y_pred, average=None)
r = recall_score(y, y_pred, average=None)
return np.sum(p + r)
_test(sum_precision_recall, "sum precision recall", compute_true_value_fn=compute_sum_precision_recall)
precision = Precision(average=False)
recall = Recall(average=False)
f1 = (precision * recall * 2 / (precision + recall + 1e-20)).mean()
def compute_f1(y_pred, y):
f1 = f1_score(y, y_pred, average="macro")
return f1
_test(f1, "f1", compute_true_value_fn=compute_f1)
def test_indexing_metric():
def _test(ignite_metric, sklearn_metic, sklearn_args, index, num_classes=5):
y_pred = torch.rand(15, 10, num_classes).float()
y = torch.randint(0, num_classes, size=(15, 10)).long()
def update_fn(engine, batch):
y_pred, y = batch
return y_pred, y
metrics = {"metric": ignite_metric[index], "metric_wo_index": ignite_metric}
validator = Engine(update_fn)
for name, metric in metrics.items():
metric.attach(validator, name)
def data(y_pred, y):
for i in range(y_pred.shape[0]):
yield (y_pred[i], y[i])
d = data(y_pred, y)
state = validator.run(d, max_epochs=1, epoch_length=y_pred.shape[0])
sklearn_output = sklearn_metic(
y.view(-1).numpy(), y_pred.view(-1, num_classes).argmax(dim=1).numpy(), **sklearn_args
)
assert (state.metrics["metric_wo_index"][index] == state.metrics["metric"]).all()
assert np.allclose(state.metrics["metric"].numpy(), sklearn_output)
num_classes = 5
labels = list(range(0, num_classes, 2))
_test(Precision(), precision_score, {"labels": labels, "average": None}, index=labels)
labels = list(range(num_classes - 1, 0, -2))
_test(Precision(), precision_score, {"labels": labels, "average": None}, index=labels)
labels = [1]
_test(Precision(), precision_score, {"labels": labels, "average": None}, index=labels)
labels = list(range(0, num_classes, 2))
_test(Recall(), recall_score, {"labels": labels, "average": None}, index=labels)
labels = list(range(num_classes - 1, 0, -2))
_test(Recall(), recall_score, {"labels": labels, "average": None}, index=labels)
labels = [1]
_test(Recall(), recall_score, {"labels": labels, "average": None}, index=labels)
# np.ix_ is used to allow for a 2D slice of a matrix. This is required to get accurate result from
# ConfusionMatrix. ConfusionMatrix must be sliced the same row-wise and column-wise.
labels = list(range(0, num_classes, 2))
_test(ConfusionMatrix(num_classes), confusion_matrix, {"labels": labels}, index=np.ix_(labels, labels))
labels = list(range(num_classes - 1, 0, -2))
_test(ConfusionMatrix(num_classes), confusion_matrix, {"labels": labels}, index=np.ix_(labels, labels))
labels = [1]
_test(ConfusionMatrix(num_classes), confusion_matrix, {"labels": labels}, index=np.ix_(labels, labels))
class DummyMetric2(Metric):
@reinit__is_reduced
def reset(self):
pass
def compute(self):
pass
@reinit__is_reduced
def update(self, output):
pass
def _test_compute_with_sync_all_reduce_doesnt_change_attributes(device):
class DummyMetric3(Metric):
@reinit__is_reduced
def reset(self):
self.a = torch.tensor(0.0, device=self._device)
self.b = 0.0
def update(self, output):
self.a += torch.tensor(1.0)
self.b += 1.0
@sync_all_reduce("a", "b")
def compute(self):
return self.a.item(), self.b
metric_device = device if torch.device(device).type != "xla" else "cpu"
metric = DummyMetric3(device=metric_device)
metric.update(None)
assert metric.a.item() == metric.b == 1.0
metric.compute()
assert metric.a.item() == metric.b == 1.0
def _test_invalid_sync_all_reduce(device):
class InvalidMetric(Metric):
@reinit__is_reduced
def reset(self):
self.a = torch.tensor([0.0, 1.0, 2.0, 3.0], requires_grad=False)
self.c = 0.0
self.n = 0
self.m = -1
self.d = "a string"
def compute(self):
pass
def update(self):
pass
@sync_all_reduce("a:sum")
def invalid_reduction_op_1(self):
pass
@sync_all_reduce("c:MaX")
def invalid_reduction_op_2(self):
pass
@sync_all_reduce("n:MINN")
def invalid_reduction_op_3(self):
pass
@sync_all_reduce("m:PROduCT")
def invalid_reduction_op_4(self):
pass
@sync_all_reduce("missingattr")
def invalid_reduction_op_5(self):
pass
@sync_all_reduce("d")
def invalid_reduction_op_6(self):
pass
metric_device = device if torch.device(device).type != "xla" else "cpu"
m = InvalidMetric(device=metric_device)
m.reset()
if idist.get_world_size() > 1:
with pytest.raises(ValueError, match=r"Reduction operation is not valid"):
m.invalid_reduction_op_1()
with pytest.raises(ValueError, match=r"Reduction operation is not valid"):
m.invalid_reduction_op_2()
with pytest.raises(ValueError, match=r"Reduction operation is not valid"):
m.invalid_reduction_op_3()
with pytest.raises(ValueError, match=r"Reduction operation is not valid"):
m.invalid_reduction_op_4()
with pytest.raises(ValueError, match=r"has no attribute named `missingattr`."):
m.invalid_reduction_op_5()
with pytest.raises(
TypeError, match=r"Attribute provided to sync_all_reduce should be a number or tensor but `d`"
):
m.invalid_reduction_op_6()
def _test_distrib_sync_all_reduce_decorator(device):
class DummyMetric(Metric):
@reinit__is_reduced
def reset(self):
# SUM op
self.a = torch.tensor([0.0, 1.0, 2.0, 3.0], device=self._device, requires_grad=False)
self.a_nocomp = self.a.clone().to("cpu")
self.b = torch.tensor(1.0, dtype=torch.float64, device=self._device, requires_grad=False)
self.b_nocomp = self.b.clone().to("cpu")
self.c = 0.0
self.c_nocomp = self.c
self.n = 0
self.n_nocomp = self.n
# MAX op
self.m = -1
# MIN op
self.k = 10000
# initialize number of updates to test (MAX, MIN) ops
self.num_updates = 0
# PRODUCT op
self.prod = torch.tensor([2.0, 3.0], device=self._device, requires_grad=False)
self.prod_nocomp = self.prod.clone().to("cpu")
@sync_all_reduce("a", "b", "c", "n:SUM", "m:MAX", "k:MIN", "prod:PRODUCT")
def compute(self):
assert (self.a.cpu() == (self.a_nocomp + 10) * idist.get_world_size()).all()
assert (self.b.cpu() == (self.b_nocomp - 5) * idist.get_world_size()).all()
assert self.c == pytest.approx((self.c_nocomp + 1.23456) * idist.get_world_size())
assert self.n == (self.n_nocomp + 1) * idist.get_world_size()
assert self.m == self.num_updates * (idist.get_world_size() - 1) - 1
assert self.k == 10000 - self.num_updates * (idist.get_world_size() - 1)
temp_prod_nocomp = 5 * self.prod_nocomp # new variable for the recomputing
temp_prod_nocomp = temp_prod_nocomp.pow(idist.get_world_size())
assert (self.prod.cpu() == temp_prod_nocomp).all()
@reinit__is_reduced
def update(self, output):
# SUM op
self.n += 1
self.c += 1.23456
self.a += 10.0
self.b -= 5.0
# MAX op
self.m += idist.get_rank()
# MIN op
self.k -= idist.get_rank()
# numper of updates for (MAX, MIN) ops
self.num_updates += 1
# PRODUCT op
self.prod *= 5
metric_device = device if torch.device(device).type != "xla" else "cpu"
m = DummyMetric(device=metric_device)
m.update(None)
m.compute()
# check if attributes are restored to their original values after previous `compute`
m.compute()
def _test_creating_on_xla_fails(device):
with pytest.raises(ValueError, match=r"Cannot create metric on an XLA device. Use device='cpu' instead."):
DummyMetric2(device=device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_sync_all_reduce_decorator(device)
_test_invalid_sync_all_reduce(device)
_test_compute_with_sync_all_reduce_doesnt_change_attributes(device)
_test_distrib_state_dict(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_sync_all_reduce_decorator(device)
_test_invalid_sync_all_reduce(device)
_test_compute_with_sync_all_reduce_doesnt_change_attributes(device)
_test_distrib_state_dict(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = "cpu" if not torch.cuda.is_available() else "cuda"
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_sync_all_reduce_decorator, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_invalid_sync_all_reduce, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_compute_with_sync_all_reduce_doesnt_change_attributes, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_sync_all_reduce_decorator(device)
_test_invalid_sync_all_reduce(device)
_test_compute_with_sync_all_reduce_doesnt_change_attributes(device)
_test_distrib_state_dict(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_sync_all_reduce_decorator(device)
_test_invalid_sync_all_reduce(device)
_test_compute_with_sync_all_reduce_doesnt_change_attributes(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_sync_all_reduce_decorator(device)
_test_creating_on_xla_fails(device)
_test_invalid_sync_all_reduce(device)
_test_compute_with_sync_all_reduce_doesnt_change_attributes(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_sync_all_reduce_decorator(device)
_test_creating_on_xla_fails(device)
_test_invalid_sync_all_reduce(device)
_test_compute_with_sync_all_reduce_doesnt_change_attributes(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
def test_completed():
class DummyMetric(Metric):
def reset(self):
pass
def compute(self):
pass
def update(self, output):
pass
m = DummyMetric()
# tensor
engine = MagicMock(state=State(metrics={}))
m.compute = MagicMock(return_value=torch.tensor(1.0))
m.completed(engine, "metric")
assert engine.state.metrics == {"metric": 1.0}
assert isinstance(engine.state.metrics["metric"], numbers.Number)
# mapping
engine = MagicMock(state=State(metrics={}))
metrics = {"foo": 1, "bar": torch.tensor(2.0), "baz": {"qux": "quux"}}
m.compute = MagicMock(return_value=metrics)
with pytest.raises(ValueError, match=r"Argument name 'foo' is conflicting with mapping keys"):
m.completed(engine, "foo")
m.completed(engine, "metric")
metrics["metric"] = metrics
assert engine.state.metrics == metrics
# other
engine = MagicMock(state=State(metrics={}))
m.compute = MagicMock(return_value="foo")
m.completed(engine, "metric")
assert engine.state.metrics == {"metric": "foo"}
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_completed_on_cuda():
# Checks https://github.com/pytorch/ignite/issues/1635#issuecomment-863026919
class DummyMetric(Metric):
def reset(self):
pass
def compute(self):
return torch.tensor([1.0, 2.0, 3.0], device="cuda")
def update(self, output):
pass
m = DummyMetric()
# tensor
engine = MagicMock(state=State(metrics={}))
m.completed(engine, "metric")
assert "metric" in engine.state.metrics
assert isinstance(engine.state.metrics["metric"], torch.Tensor)
assert engine.state.metrics["metric"].device.type == "cpu"
def test_usage_exception():
engine = Engine(lambda e, b: b)
m = DummyMetric2()
with pytest.raises(TypeError, match=r"Unhandled usage type"):
m.attach(engine, "dummy", usage=1)
with pytest.raises(
ValueError,
match=r"usage should be '\(Running\)EpochWise.usage_name' or '\(\(SingleEpoch\)Running\)BatchWise.usage_name'",
):
m.attach(engine, "dummy", usage="fake")
class DummyAccumulateInListMetric(Metric):
def __init__(self):
super(DummyAccumulateInListMetric, self).__init__()
self.value = []
def reset(self):
self.value = []
def compute(self):
return self.value
def update(self, output):
self.value.append(output)
@pytest.mark.parametrize("usage", ["epoch_wise", EpochWise.usage_name, EpochWise()])
def test_epochwise_usage(usage):
engine = Engine(lambda e, b: b)
m = DummyAccumulateInListMetric()
m.attach(engine, "ewm", usage=usage)
@engine.on(Events.EPOCH_COMPLETED)
def _():
ewm = engine.state.metrics["ewm"]
assert len(ewm) == 3
assert ewm == [0, 1, 2]
engine.run([0, 1, 2], max_epochs=10)
m.detach(engine, usage=usage)
class DummyAccumulateMetric(Metric):
def __init__(self):
super(DummyAccumulateMetric, self).__init__()
self.value = 0
def reset(self):
self.value = 0
def compute(self):
return self.value
def update(self, output):
self.value += output
@pytest.mark.parametrize("usage", ["running_epoch_wise", RunningEpochWise.usage_name, RunningEpochWise()])
def test_running_epochwise_usage(usage):
engine = Engine(lambda e, b: e.state.metrics["ewm"])
engine.state.metrics["ewm"] = 0
@engine.on(Events.EPOCH_STARTED)
def _():
engine.state.metrics["ewm"] += 1
m = DummyAccumulateMetric()
m.attach(engine, "rewm", usage=usage)
@engine.on(Events.EPOCH_COMPLETED)
def _():
assert engine.state.metrics["rewm"] == sum(range(engine.state.epoch + 1))
engine.run([0, 1, 2], max_epochs=10)
m.detach(engine, usage=usage)
@pytest.mark.parametrize("usage", ["batch_wise", BatchWise.usage_name, BatchWise()])
def test_batchwise_usage(usage):
engine = Engine(lambda e, b: b)
m = DummyAccumulateInListMetric()
m.attach(engine, "bwm", usage=usage)
@engine.on(Events.ITERATION_COMPLETED)
def _():
bwm = engine.state.metrics["bwm"]
assert len(bwm) == 1
assert bwm[0] == (engine.state.iteration - 1) % 3
engine.run([0, 1, 2], max_epochs=10)
m.detach(engine, usage=usage)
@pytest.mark.parametrize("usage", ["running_batch_wise", RunningBatchWise.usage_name, RunningBatchWise()])
def test_running_batchwise_usage(usage):
engine = Engine(lambda e, b: b)
m = DummyAccumulateMetric()
m.attach(engine, "rbwm", usage=usage)
@engine.on(Events.EPOCH_COMPLETED)
def _():
assert engine.state.metrics["rbwm"] == 6 * engine.state.epoch
engine.run([0, 1, 2, 3], max_epochs=10)
m.detach(engine, usage=usage)
@pytest.mark.parametrize(
"usage", ["single_epoch_running_batch_wise", SingleEpochRunningBatchWise.usage_name, SingleEpochRunningBatchWise()]
)
def test_single_epoch_running_batchwise_usage(usage):
engine = Engine(lambda e, b: b)
m = DummyAccumulateMetric()
m.attach(engine, "rbwm", usage=usage)
@engine.on(Events.EPOCH_COMPLETED)
def _():
assert engine.state.metrics["rbwm"] == 6
engine.run([0, 1, 2, 3], max_epochs=10)
m.detach(engine, usage=usage)
def test_batchfiltered_usage():
class MyMetric(Metric):
def __init__(self):
super(MyMetric, self).__init__()
self.value = []
def reset(self):
self.value = []
def compute(self):
return self.value
def update(self, output):
self.value.append(output)
engine = Engine(lambda e, b: b)
m = MyMetric()
usage = BatchFiltered(every=2)
m.attach(engine, "bfm", usage=usage)
@engine.on(Events.EPOCH_COMPLETED)
def _():
bfm = engine.state.metrics["bfm"]
assert len(bfm) == 2
assert bfm[0] == 1
engine.run([0, 1, 2, 3], max_epochs=10)
def test_override_required_output_keys():
# https://discuss.pytorch.org/t/how-access-inputs-in-custom-ignite-metric/91221/5
import torch.nn as nn
from ignite.engine import create_supervised_evaluator
counter = [0]
class CustomMetric(Metric):
required_output_keys = ("y_pred", "y", "x")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def update(self, output):
y_pred, y, x = output
assert y_pred.shape == (4, 3)
assert y.shape == (4,)
assert x.shape == (4, 10)
assert x.equal(data[counter[0]][0])
assert y.equal(data[counter[0]][1])
counter[0] += 1
def reset(self):
pass
def compute(self):
pass
model = nn.Linear(10, 3)
metrics = {"Precision": Precision(), "CustomMetric": CustomMetric()}
evaluator = create_supervised_evaluator(
model, metrics=metrics, output_transform=lambda x, y, y_pred: {"x": x, "y": y, "y_pred": y_pred}
)
data = [
(torch.rand(4, 10), torch.randint(0, 3, size=(4,))),
(torch.rand(4, 10), torch.randint(0, 3, size=(4,))),
(torch.rand(4, 10), torch.randint(0, 3, size=(4,))),
]
evaluator.run(data)
@pytest.mark.parametrize("shapes", [[(10,), ()], [(5, 32, 32), (5, 32, 32)]])
def test_list_of_tensors_and_numbers(shapes):
def check_fn(output):
assert len(output) == 2
assert isinstance(output[0], torch.Tensor)
assert isinstance(output[1], torch.Tensor)
assert output[0].shape == (1,) + shapes[0]
assert output[1].shape == (1,) + shapes[1]
def get_data(gt_as_scalar=False):
return [
(
[torch.rand(shapes[0]) for _ in range(3 + i)], # predictions
[
torch.rand(shapes[1]).item() if gt_as_scalar else torch.rand(shapes[1]) for _ in range(3 + i)
], # ground truth
)
for i in range(5)
]
class MyMetric(Metric):
def __init__(self, check_fn):
super(MyMetric, self).__init__()
self.check_fn = check_fn
def reset(self):
pass
def compute(self):
pass
def update(self, output):
self.check_fn(output)
engine = Engine(lambda e, b: b)
m = MyMetric(check_fn)
m.attach(engine, "m")
data = get_data()
engine.run(data)
if len(shapes[1]) == 0:
data = get_data(gt_as_scalar=True)
engine.run(data)
def test_list_of_tensors_and_numbers_unsupported_output():
class MyMetric(Metric):
def reset(self):
pass
def compute(self):
pass
def update(self, output):
pass
engine = Engine(lambda e, b: ([0, 1, 2], [0, 1, 2], [0, 1, 2]))
m = MyMetric()
m.attach(engine, "m")
with pytest.raises(ValueError, match=r"Output should have 2 items of the same length"):
engine.run([0] * 10)
engine = Engine(lambda e, b: ([0, 1, 2], [0, 1, 2, 4]))
m = MyMetric()
m.attach(engine, "m")
with pytest.raises(ValueError, match=r"Output should have 2 items of the same length"):
engine.run([0] * 10)
class DummyMetric4(Metric):
_state_dict_all_req_keys = ("dnumber", "fnumber", "tensor")
def __init__(self, value: int):
super().reset()
self.dnumber = value
self.fnumber = float(value + 1)
self.tensor = torch.tensor([value + 2])
def reset(self):
self.dnumber = -1
self.fnumber = -2.0
self.tensor = torch.tensor([-3])
def update(self, output):
pass
def compute(self):
pass
def test_wrong_state_dict():
class WrongMetric(Metric):
_state_dict_all_req_keys = ("object",)
def __init__(self, value):
super().__init__()
self.object = {"a": [value]}
def reset(self):
pass
def update(self, output):
pass
def compute(self):
pass
metric = WrongMetric(2)
with pytest.raises(TypeError, match="Currently, only numeric or tensor-typed attributes of the metric"):
metric.state_dict()
delattr(metric, "object")
with pytest.raises(ValueError, match="Found a value in _state_dict_all_req_keys that is not among"):
metric.state_dict()
def test_state_dict():
metric = DummyMetric4(1)
state = metric.state_dict()
assert state.keys() == {"dnumber", "fnumber", "tensor"}
metric.reset()
metric.load_state_dict(state)
assert metric.dnumber == 1
assert metric.fnumber == 2
assert metric.tensor == torch.tensor([3])
def _test_distrib_state_dict(device):
rank = idist.get_local_rank()
metric = DummyMetric4(rank)
state = metric.state_dict()
assert isinstance(state["dnumber"][rank], int)
assert isinstance(state["fnumber"][rank], float)
metric.reset()
metric.load_state_dict(state)
assert metric.dnumber == rank and isinstance(metric.dnumber, int)
assert metric.fnumber == rank + 1 and isinstance(metric.fnumber, float)
assert metric.tensor == torch.tensor([rank + 2])
|
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import RootMeanSquaredError
def test_zero_sample():
rmse = RootMeanSquaredError()
with pytest.raises(
NotComputableError, match=r"MeanSquaredError must have at least one example before it can be computed"
):
rmse.compute()
@pytest.fixture(params=[0, 1, 2, 3])
def test_data(request):
return [
(torch.empty(10).uniform_(0, 10), torch.empty(10).uniform_(0, 10), 1),
(torch.empty(10, 1).uniform_(-10, 10), torch.empty(10, 1).uniform_(-10, 10), 1),
# updated batches
(torch.empty(50).uniform_(0, 10), torch.empty(50).uniform_(0, 10), 16),
(torch.empty(50, 1).uniform_(-10, 10), torch.empty(50, 1).uniform_(-10, 10), 16),
][request.param]
@pytest.mark.parametrize("n_times", range(3))
def test_compute(n_times, test_data):
rmse = RootMeanSquaredError()
y_pred, y, batch_size = test_data
rmse.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
rmse.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
rmse.update((y_pred, y))
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
np_res = np.sqrt(np.power((np_y - np_y_pred), 2.0).sum() / np_y.shape[0])
res = rmse.compute()
assert isinstance(res, float)
assert pytest.approx(res) == np_res
def _test_distrib_integration(device, tol=1e-6):
from ignite.engine import Engine
rank = idist.get_rank()
def _test(metric_device):
n_iters = 2
batch_size = 3
torch.manual_seed(12 + rank)
y_true = torch.arange(0, n_iters * batch_size, dtype=torch.float).to(device)
y_preds = (rank + 1) * torch.ones(n_iters * batch_size, dtype=torch.float).to(device)
def update(engine, i):
return y_preds[i * batch_size : (i + 1) * batch_size], y_true[i * batch_size : (i + 1) * batch_size]
engine = Engine(update)
m = RootMeanSquaredError(device=metric_device)
m.attach(engine, "rmse")
data = list(range(n_iters))
engine.run(data=data, max_epochs=1)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "rmse" in engine.state.metrics
res = engine.state.metrics["rmse"]
true_res = np.sqrt(np.mean(np.square((y_true - y_preds).cpu().numpy())))
assert pytest.approx(res, rel=tol) == true_res
_test("cpu")
if device.type != "xla":
_test(idist.device())
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_integration(device, tol=1e-4)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_integration(device, tol=1e-4)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import dill
from ignite.metrics import Metric
class Accumulation(Metric):
def __init__(self):
self.value = 0
super(Accumulation, self).__init__()
def reset(self):
self.value = 0
def compute(self):
return self.value
def update(self, output):
self.value += output
def test_metric():
def _test(m, values, e):
for v in values:
m.update(v)
assert m.compute() == e
metric = Accumulation()
m1 = dill.loads(dill.dumps(metric))
values = list(range(10))
expected = sum(values)
_test(m1, values, expected)
metric.update(5)
m2 = dill.loads(dill.dumps(metric))
_test(m2, values, expected + 5)
|
import json
import os
import pytest
import torch
import ignite.distributed as idist
from ignite.engine import Engine
from ignite.metrics.classification_report import ClassificationReport
def _test_integration_multiclass(device, output_dict):
rank = idist.get_rank()
def _test(metric_device, n_classes, labels=None):
classification_report = ClassificationReport(device=metric_device, output_dict=output_dict, labels=labels)
n_iters = 80
batch_size = 16
y_true = torch.randint(0, n_classes, size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(n_iters * batch_size, n_classes).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size, :],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
classification_report.attach(engine, "cr")
data = list(range(n_iters))
engine.run(data=data)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "cr" in engine.state.metrics
res = engine.state.metrics["cr"]
res2 = classification_report.compute()
assert res == res2
assert isinstance(res, dict if output_dict else str)
if not output_dict:
res = json.loads(res)
from sklearn.metrics import classification_report as sklearn_classification_report
sklearn_result = sklearn_classification_report(
y_true.cpu().numpy(), torch.argmax(y_preds, dim=1).cpu().numpy(), output_dict=True, zero_division=1
)
for i in range(n_classes):
label_i = labels[i] if labels else str(i)
assert sklearn_result[str(i)]["precision"] == pytest.approx(res[label_i]["precision"])
assert sklearn_result[str(i)]["f1-score"] == pytest.approx(res[label_i]["f1-score"])
assert sklearn_result[str(i)]["recall"] == pytest.approx(res[label_i]["recall"])
assert sklearn_result["macro avg"]["precision"] == pytest.approx(res["macro avg"]["precision"])
assert sklearn_result["macro avg"]["recall"] == pytest.approx(res["macro avg"]["recall"])
assert sklearn_result["macro avg"]["f1-score"] == pytest.approx(res["macro avg"]["f1-score"])
for i in range(5):
torch.manual_seed(12 + rank + i)
# check multiple random inputs as random exact occurencies are rare
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
_test(metric_device, 2, ["label0", "label1"])
_test(metric_device, 2)
_test(metric_device, 3, ["label0", "label1", "label2"])
_test(metric_device, 3)
_test(metric_device, 4, ["label0", "label1", "label2", "label3"])
_test(metric_device, 4)
def _test_integration_multilabel(device, output_dict):
rank = idist.get_rank()
def _test(metric_device, n_epochs, labels=None):
classification_report = ClassificationReport(device=metric_device, output_dict=output_dict, is_multilabel=True)
n_iters = 10
batch_size = 16
n_classes = 7
y_true = torch.randint(0, 2, size=(n_iters * batch_size, n_classes, 6, 8)).to(device)
y_preds = torch.randint(0, 2, size=(n_iters * batch_size, n_classes, 6, 8)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size, ...],
y_true[i * batch_size : (i + 1) * batch_size, ...],
)
engine = Engine(update)
classification_report.attach(engine, "cr")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "cr" in engine.state.metrics
res = engine.state.metrics["cr"]
res2 = classification_report.compute()
assert res == res2
assert isinstance(res, dict if output_dict else str)
if not output_dict:
res = json.loads(res)
np_y_preds = to_numpy_multilabel(y_preds)
np_y_true = to_numpy_multilabel(y_true)
from sklearn.metrics import classification_report as sklearn_classification_report
sklearn_result = sklearn_classification_report(np_y_true, np_y_preds, output_dict=True, zero_division=1)
for i in range(n_classes):
torch.manual_seed(12 + rank + i)
label_i = labels[i] if labels else str(i)
assert sklearn_result[str(i)]["precision"] == pytest.approx(res[label_i]["precision"])
assert sklearn_result[str(i)]["f1-score"] == pytest.approx(res[label_i]["f1-score"])
assert sklearn_result[str(i)]["recall"] == pytest.approx(res[label_i]["recall"])
assert sklearn_result["macro avg"]["precision"] == pytest.approx(res["macro avg"]["precision"])
assert sklearn_result["macro avg"]["recall"] == pytest.approx(res["macro avg"]["recall"])
assert sklearn_result["macro avg"]["f1-score"] == pytest.approx(res["macro avg"]["f1-score"])
for _ in range(3):
# check multiple random inputs as random exact occurencies are rare
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
_test(metric_device, 1)
_test(metric_device, 2)
_test(metric_device, 1, ["0", "1", "2", "3", "4", "5", "6"])
_test(metric_device, 2, ["0", "1", "2", "3", "4", "5", "6"])
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_integration_multiclass(device, True)
_test_integration_multiclass(device, False)
_test_integration_multilabel(device, True)
_test_integration_multilabel(device, False)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(local_rank, distributed_context_single_node_gloo):
device = idist.device()
_test_integration_multiclass(device, True)
_test_integration_multiclass(device, False)
_test_integration_multilabel(device, True)
_test_integration_multilabel(device, False)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_integration_multiclass, (device, True), np=nproc, do_init=True)
gloo_hvd_executor(_test_integration_multiclass, (device, False), np=nproc, do_init=True)
gloo_hvd_executor(_test_integration_multilabel, (device, True), np=nproc, do_init=True)
gloo_hvd_executor(_test_integration_multilabel, (device, False), np=nproc, do_init=True)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_integration_multiclass(device, True)
_test_integration_multiclass(device, False)
_test_integration_multilabel(device, True)
_test_integration_multilabel(device, False)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
def to_numpy_multilabel(y):
# reshapes input array to (N x ..., C)
y = y.transpose(1, 0).cpu().numpy()
num_classes = y.shape[0]
y = y.reshape((num_classes, -1)).transpose(1, 0)
return y
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_integration_multiclass(device, True)
_test_integration_multiclass(device, False)
_test_integration_multilabel(device, True)
_test_integration_multilabel(device, False)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_integration_multiclass(device, True)
_test_integration_multiclass(device, False)
_test_integration_multilabel(device, True)
_test_integration_multilabel(device, False)
|
import os
import numpy as np
import pytest
import torch
from sklearn.metrics import accuracy_score, confusion_matrix, precision_score, recall_score
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import ConfusionMatrix, IoU, JaccardIndex, mIoU
from ignite.metrics.confusion_matrix import cmAccuracy, cmPrecision, cmRecall, DiceCoefficient
torch.manual_seed(12)
def test_no_update():
cm = ConfusionMatrix(10)
with pytest.raises(NotComputableError, match=r"Confusion matrix must have at least one example before it "):
cm.compute()
def test_num_classes_wrong_input():
with pytest.raises(ValueError, match="Argument num_classes needs to be > 1"):
ConfusionMatrix(num_classes=1)
def test_multiclass_wrong_inputs():
cm = ConfusionMatrix(10)
with pytest.raises(
ValueError, match=r"y_pred must have shape \(batch_size, num_classes " r"\(currently set to 10\), ...\)"
):
cm.update((torch.rand(10), torch.randint(0, 2, size=(10,)).long()))
with pytest.raises(ValueError, match=r"y_pred does not have correct number of classes:"):
cm.update((torch.rand(10, 5, 4), torch.randint(0, 2, size=(10,)).long()))
with pytest.raises(
ValueError,
match=r"y_pred must have shape \(batch_size, num_classes "
r"\(currently set to 10\), ...\) "
r"and y must have ",
):
cm.update((torch.rand(4, 10, 12, 12), torch.randint(0, 10, size=(10,)).long()))
with pytest.raises(ValueError, match=r"y and y_pred must have compatible shapes."):
cm.update((torch.rand(4, 10, 12, 14), torch.randint(0, 10, size=(4, 5, 6)).long()))
with pytest.raises(ValueError, match=r"Argument average can None or one of"):
ConfusionMatrix(num_classes=10, average="abc")
with pytest.raises(ValueError, match=r"Argument average should be one of 'samples', 'recall', 'precision'"):
ConfusionMatrix.normalize(None, None)
@pytest.fixture(params=[item for item in range(10)])
def test_data(request):
return [
# Multiclass input data of shape (N, )
(torch.rand(10, 4), torch.randint(0, 4, size=(10,)).long(), 4, 1),
(torch.rand(4, 10), torch.randint(0, 10, size=(4,)).long(), 10, 1),
(torch.rand(4, 2), torch.randint(0, 2, size=(4,)).long(), 2, 1),
(torch.rand(100, 5), torch.randint(0, 5, size=(100,)).long(), 5, 16),
# Multiclass input data of shape (N, L)
(torch.rand(10, 4, 5), torch.randint(0, 4, size=(10, 5)).long(), 4, 1),
(torch.rand(4, 10, 5), torch.randint(0, 10, size=(4, 5)).long(), 10, 1),
(torch.rand(100, 9, 7), torch.randint(0, 9, size=(100, 7)).long(), 9, 16),
# Multiclass input data of shape (N, H, W, ...)
(torch.rand(4, 5, 12, 10), torch.randint(0, 5, size=(4, 12, 10)).long(), 5, 1),
(torch.rand(4, 5, 10, 12, 8), torch.randint(0, 5, size=(4, 10, 12, 8)).long(), 5, 1),
(torch.rand(100, 3, 8, 8), torch.randint(0, 3, size=(100, 8, 8)).long(), 3, 16),
][request.param]
@pytest.mark.parametrize("n_times", range(5))
def test_multiclass_input(n_times, test_data):
y_pred, y, num_classes, batch_size = test_data
cm = ConfusionMatrix(num_classes=num_classes)
cm.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
cm.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
cm.update((y_pred, y))
np_y_pred = y_pred.numpy().argmax(axis=1).ravel()
np_y = y.numpy().ravel()
assert np.all(confusion_matrix(np_y, np_y_pred, labels=list(range(num_classes))) == cm.compute().numpy())
def test_ignored_out_of_num_classes_indices():
num_classes = 21
cm = ConfusionMatrix(num_classes=num_classes)
y_pred = torch.rand(4, num_classes, 12, 10)
y = torch.randint(0, 255, size=(4, 12, 10)).long()
cm.update((y_pred, y))
np_y_pred = y_pred.numpy().argmax(axis=1).ravel()
np_y = y.numpy().ravel()
assert np.all(confusion_matrix(np_y, np_y_pred, labels=list(range(num_classes))) == cm.compute().numpy())
def get_y_true_y_pred():
# Generate an image with labels 0 (background), 1, 2
# 3 classes:
y_true = np.zeros((30, 30), dtype=np.int32)
y_true[1:11, 1:11] = 1
y_true[15:25, 15:25] = 2
y_pred = np.zeros((30, 30), dtype=np.int32)
y_pred[5:15, 1:11] = 1
y_pred[20:30, 20:30] = 2
return y_true, y_pred
def compute_th_y_true_y_logits(y_true, y_pred):
# Create torch.tensor from numpy
th_y_true = torch.from_numpy(y_true).unsqueeze(0)
# Create logits torch.tensor:
num_classes = max(np.max(y_true), np.max(y_pred)) + 1
y_probas = np.ones((num_classes,) + y_true.shape) * -10
for i in range(num_classes):
y_probas[i, (y_pred == i)] = 720
th_y_logits = torch.from_numpy(y_probas).unsqueeze(0)
return th_y_true, th_y_logits
def test_multiclass_images():
num_classes = 3
cm = ConfusionMatrix(num_classes=num_classes)
y_true, y_pred = get_y_true_y_pred()
# Compute confusion matrix with sklearn
true_res = confusion_matrix(y_true.reshape(-1), y_pred.reshape(-1))
th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = cm.compute().numpy()
assert np.all(true_res == res)
# Another test on batch of 2 images
num_classes = 3
cm = ConfusionMatrix(num_classes=num_classes)
# Create a batch of two images:
th_y_true1 = torch.from_numpy(y_true).reshape(1, 30, 30)
th_y_true2 = torch.from_numpy(y_true.transpose()).reshape(1, 30, 30)
th_y_true = torch.cat([th_y_true1, th_y_true2], dim=0)
# Create a batch of 2 logits tensors
y_probas = np.ones((3, 30, 30)) * -10
y_probas[0, (y_pred == 0)] = 720
y_probas[1, (y_pred == 1)] = 720
y_probas[2, (y_pred == 2)] = 768
th_y_logits1 = torch.from_numpy(y_probas).reshape(1, 3, 30, 30)
y_probas = np.ones((3, 30, 30)) * -10
y_probas[0, (y_pred.transpose() == 0)] = 720
y_probas[1, (y_pred.transpose() == 2)] = 720
y_probas[2, (y_pred.transpose() == 1)] = 768
th_y_logits2 = torch.from_numpy(y_probas).reshape(1, 3, 30, 30)
th_y_logits = torch.cat([th_y_logits1, th_y_logits2], dim=0)
# Update metric & compute
output = (th_y_logits, th_y_true)
cm.update(output)
res = cm.compute().numpy()
# Compute confusion matrix with sklearn
true_res = confusion_matrix(th_y_true.numpy().reshape(-1), np.argmax(th_y_logits.numpy(), axis=1).reshape(-1))
assert np.all(true_res == res)
def test_iou_wrong_input():
with pytest.raises(TypeError, match="Argument cm should be instance of ConfusionMatrix"):
IoU(None)
cm = ConfusionMatrix(num_classes=10)
with pytest.raises(ValueError, match=r"ignore_index should be integer and in the range of \[0, 10\), but given -1"):
IoU(cm, ignore_index=-1)
with pytest.raises(ValueError, match=r"ignore_index should be integer and in the range of \[0, 10\), but given a"):
IoU(cm, ignore_index="a")
with pytest.raises(ValueError, match=r"ignore_index should be integer and in the range of \[0, 10\), but given 10"):
IoU(cm, ignore_index=10)
with pytest.raises(ValueError, match=r"ignore_index should be integer and in the range of \[0, 10\), but given 11"):
IoU(cm, ignore_index=11)
@pytest.mark.parametrize("average", [None, "samples"])
def test_iou(average):
y_true, y_pred = get_y_true_y_pred()
th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)
true_res = [0, 0, 0]
for index in range(3):
bin_y_true = y_true == index
bin_y_pred = y_pred == index
intersection = bin_y_true & bin_y_pred
union = bin_y_true | bin_y_pred
true_res[index] = intersection.sum() / union.sum()
cm = ConfusionMatrix(num_classes=3, average=average)
iou_metric = IoU(cm)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = iou_metric.compute().numpy()
assert np.all(res == true_res)
for ignore_index in range(3):
cm = ConfusionMatrix(num_classes=3)
iou_metric = IoU(cm, ignore_index=ignore_index)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = iou_metric.compute().numpy()
true_res_ = true_res[:ignore_index] + true_res[ignore_index + 1 :]
assert np.all(res == true_res_), f"{ignore_index}: {res} vs {true_res_}"
with pytest.raises(ValueError, match=r"ConfusionMatrix should have average attribute either"):
cm = ConfusionMatrix(num_classes=3, average="precision")
IoU(cm)
def test_miou():
y_true, y_pred = get_y_true_y_pred()
th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)
true_res = [0, 0, 0]
for index in range(3):
bin_y_true = y_true == index
bin_y_pred = y_pred == index
intersection = bin_y_true & bin_y_pred
union = bin_y_true | bin_y_pred
true_res[index] = intersection.sum() / union.sum()
true_res_ = np.mean(true_res)
cm = ConfusionMatrix(num_classes=3)
iou_metric = mIoU(cm)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = iou_metric.compute().numpy()
assert res == true_res_
for ignore_index in range(3):
cm = ConfusionMatrix(num_classes=3)
iou_metric = mIoU(cm, ignore_index=ignore_index)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = iou_metric.compute().numpy()
true_res_ = np.mean(true_res[:ignore_index] + true_res[ignore_index + 1 :])
assert res == true_res_, f"{ignore_index}: {res} vs {true_res_}"
def test_cm_accuracy():
y_true, y_pred = get_y_true_y_pred()
th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)
true_acc = accuracy_score(y_true.reshape(-1), y_pred.reshape(-1))
cm = ConfusionMatrix(num_classes=3)
acc_metric = cmAccuracy(cm)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = acc_metric.compute().numpy()
assert pytest.approx(res) == true_acc
def test_cm_precision():
y_true, y_pred = np.random.randint(0, 10, size=(1000,)), np.random.randint(0, 10, size=(1000,))
th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)
true_pr = precision_score(y_true.reshape(-1), y_pred.reshape(-1), average="macro")
cm = ConfusionMatrix(num_classes=10)
pr_metric = cmPrecision(cm, average=True)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = pr_metric.compute().numpy()
assert pytest.approx(res) == true_pr
true_pr = precision_score(y_true.reshape(-1), y_pred.reshape(-1), average=None)
cm = ConfusionMatrix(num_classes=10)
pr_metric = cmPrecision(cm, average=False)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = pr_metric.compute().numpy()
assert np.all(res == true_pr)
def test_cm_recall():
y_true, y_pred = np.random.randint(0, 10, size=(1000,)), np.random.randint(0, 10, size=(1000,))
th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)
true_re = recall_score(y_true.reshape(-1), y_pred.reshape(-1), average="macro")
cm = ConfusionMatrix(num_classes=10)
re_metric = cmRecall(cm, average=True)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = re_metric.compute().numpy()
assert pytest.approx(res) == true_re
true_re = recall_score(y_true.reshape(-1), y_pred.reshape(-1), average=None)
cm = ConfusionMatrix(num_classes=10)
re_metric = cmRecall(cm, average=False)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = re_metric.compute().numpy()
assert np.all(res == true_re)
def test_cm_with_average():
num_classes = 5
y_pred = torch.rand(40, num_classes)
y = torch.randint(0, num_classes, size=(40,)).long()
np_y_pred = y_pred.numpy().argmax(axis=1).ravel()
np_y = y.numpy().ravel()
cm = ConfusionMatrix(num_classes=num_classes, average="samples")
cm.update((y_pred, y))
true_res = confusion_matrix(np_y, np_y_pred, labels=list(range(num_classes))) * 1.0 / len(np_y)
res = cm.compute().numpy()
np.testing.assert_almost_equal(true_res, res)
cm = ConfusionMatrix(num_classes=num_classes, average="recall")
cm.update((y_pred, y))
true_re = recall_score(np_y, np_y_pred, average=None, labels=list(range(num_classes)))
res = cm.compute().numpy().diagonal()
np.testing.assert_almost_equal(true_re, res)
res = cm.compute().numpy()
true_res = confusion_matrix(np_y, np_y_pred, normalize="true")
np.testing.assert_almost_equal(true_res, res)
cm = ConfusionMatrix(num_classes=num_classes, average="precision")
cm.update((y_pred, y))
true_pr = precision_score(np_y, np_y_pred, average=None, labels=list(range(num_classes)))
res = cm.compute().numpy().diagonal()
np.testing.assert_almost_equal(true_pr, res)
res = cm.compute().numpy()
true_res = confusion_matrix(np_y, np_y_pred, normalize="pred")
np.testing.assert_almost_equal(true_res, res)
def test_dice_coefficient_wrong_input():
with pytest.raises(TypeError, match="Argument cm should be instance of ConfusionMatrix"):
DiceCoefficient(None)
cm = ConfusionMatrix(num_classes=10)
with pytest.raises(ValueError, match=r"ignore_index should be integer and in the range of \[0, 10\), but given -1"):
DiceCoefficient(cm, ignore_index=-1)
with pytest.raises(ValueError, match=r"ignore_index should be integer and in the range of \[0, 10\), but given a"):
DiceCoefficient(cm, ignore_index="a")
with pytest.raises(ValueError, match=r"ignore_index should be integer and in the range of \[0, 10\), but given 10"):
DiceCoefficient(cm, ignore_index=10)
with pytest.raises(ValueError, match=r"ignore_index should be integer and in the range of \[0, 10\), but given 11"):
DiceCoefficient(cm, ignore_index=11)
def test_dice_coefficient():
y_true, y_pred = get_y_true_y_pred()
th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)
true_res = [0, 0, 0]
for index in range(3):
bin_y_true = y_true == index
bin_y_pred = y_pred == index
# dice coefficient: 2*intersection(x, y) / (|x| + |y|)
# union(x, y) = |x| + |y| - intersection(x, y)
intersection = bin_y_true & bin_y_pred
union = bin_y_true | bin_y_pred
true_res[index] = 2.0 * intersection.sum() / (union.sum() + intersection.sum())
cm = ConfusionMatrix(num_classes=3)
dice_metric = DiceCoefficient(cm)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = dice_metric.compute().numpy()
np.testing.assert_allclose(res, true_res)
for ignore_index in range(3):
cm = ConfusionMatrix(num_classes=3)
dice_metric = DiceCoefficient(cm, ignore_index=ignore_index)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = dice_metric.compute().numpy()
true_res_ = true_res[:ignore_index] + true_res[ignore_index + 1 :]
assert np.all(res == true_res_), f"{ignore_index}: {res} vs {true_res_}"
def _test_distrib_multiclass_images(device):
def _test(metric_device):
num_classes = 3
cm = ConfusionMatrix(num_classes=num_classes, device=metric_device)
y_true, y_pred = get_y_true_y_pred()
# Compute confusion matrix with sklearn
true_res = confusion_matrix(y_true.reshape(-1), y_pred.reshape(-1))
th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)
th_y_true = th_y_true.to(device)
th_y_logits = th_y_logits.to(device)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = cm.compute().cpu().numpy() / idist.get_world_size()
assert np.all(true_res == res)
# Another test on batch of 2 images
num_classes = 3
cm = ConfusionMatrix(num_classes=num_classes, device=metric_device)
# Create a batch of two images:
th_y_true1 = torch.from_numpy(y_true).reshape(1, 30, 30)
th_y_true2 = torch.from_numpy(y_true.transpose()).reshape(1, 30, 30)
th_y_true = torch.cat([th_y_true1, th_y_true2], dim=0)
th_y_true = th_y_true.to(device)
# Create a batch of 2 logits tensors
y_probas = np.ones((3, 30, 30)) * -10
y_probas[0, (y_pred == 0)] = 720
y_probas[1, (y_pred == 1)] = 720
y_probas[2, (y_pred == 2)] = 768
th_y_logits1 = torch.from_numpy(y_probas).reshape(1, 3, 30, 30)
y_probas = np.ones((3, 30, 30)) * -10
y_probas[0, (y_pred.transpose() == 0)] = 720
y_probas[1, (y_pred.transpose() == 2)] = 720
y_probas[2, (y_pred.transpose() == 1)] = 768
th_y_logits2 = torch.from_numpy(y_probas).reshape(1, 3, 30, 30)
th_y_logits = torch.cat([th_y_logits1, th_y_logits2], dim=0)
# check update if input is on another device
th_y_logits = th_y_logits.to(device)
# Update metric & compute
output = (th_y_logits, th_y_true)
cm.update(output)
res = cm.compute().cpu().numpy()
# Compute confusion matrix with sklearn
th_y_true = idist.all_gather(th_y_true)
th_y_logits = idist.all_gather(th_y_logits)
np_y_true = th_y_true.cpu().numpy().reshape(-1)
np_y_pred = np.argmax(th_y_logits.cpu().numpy(), axis=1).reshape(-1)
true_res = confusion_matrix(np_y_true, np_y_pred)
assert np.all(true_res == res)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_accumulator_device(device):
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
cm = ConfusionMatrix(num_classes=3, device=metric_device)
assert cm._device == metric_device
assert (
cm.confusion_matrix.device == metric_device
), f"{type(cm.confusion_matrix.device)}:{cm._num_correct.device} vs {type(metric_device)}:{metric_device}"
y_true, y_pred = get_y_true_y_pred()
th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)
cm.update((th_y_logits, th_y_true))
assert (
cm.confusion_matrix.device == metric_device
), f"{type(cm.confusion_matrix.device)}:{cm._num_correct.device} vs {type(metric_device)}:{metric_device}"
@pytest.mark.parametrize("average", [None, "samples"])
def test_jaccard_index(average):
y_true, y_pred = get_y_true_y_pred()
th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)
true_res = [0, 0, 0]
for index in range(3):
bin_y_true = y_true == index
bin_y_pred = y_pred == index
intersection = bin_y_true & bin_y_pred
union = bin_y_true | bin_y_pred
true_res[index] = intersection.sum() / union.sum()
cm = ConfusionMatrix(num_classes=3, average=average)
jaccard_index = JaccardIndex(cm)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = jaccard_index.compute().numpy()
assert np.all(res == true_res)
for ignore_index in range(3):
cm = ConfusionMatrix(num_classes=3)
jaccard_index_metric = JaccardIndex(cm, ignore_index=ignore_index)
# Update metric
output = (th_y_logits, th_y_true)
cm.update(output)
res = jaccard_index_metric.compute().numpy()
true_res_ = true_res[:ignore_index] + true_res[ignore_index + 1 :]
assert np.all(res == true_res_), f"{ignore_index}: {res} vs {true_res_}"
with pytest.raises(ValueError, match=r"ConfusionMatrix should have average attribute either"):
cm = ConfusionMatrix(num_classes=3, average="precision")
JaccardIndex(cm)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_multiclass_images(device)
_test_distrib_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_multiclass_images(device)
_test_distrib_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_multiclass_images, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_accumulator_device, (device,), np=nproc, do_init=True)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_multiclass_images(device)
_test_distrib_accumulator_device(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_multiclass_images(device)
_test_distrib_accumulator_device(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_multiclass_images(device)
_test_distrib_accumulator_device(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_multiclass_images(device)
_test_distrib_accumulator_device(device)
|
import warnings
import pytest
import torch
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.metrics import precision_score
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import Precision
torch.manual_seed(12)
def test_no_update():
precision = Precision()
assert precision._updated is False
with pytest.raises(NotComputableError, match=r"Precision must have at least one example before it can be computed"):
precision.compute()
assert precision._updated is False
def test_average_parameter():
with pytest.raises(ValueError, match="Argument average should be None or a boolean or one of values"):
Precision(average=1)
pr = Precision(average="samples")
with pytest.raises(
ValueError, match=r"Argument average='samples' is incompatible with binary and multiclass input data."
):
pr.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10,)).long()))
assert pr._updated is False
pr = Precision(average="samples")
with pytest.raises(
ValueError, match=r"Argument average='samples' is incompatible with binary and multiclass input data."
):
pr.update((torch.rand(10, 3), torch.randint(0, 3, size=(10,)).long()))
assert pr._updated is False
pr = Precision(average=True)
assert pr._average == "macro"
def test_binary_wrong_inputs():
pr = Precision()
assert pr._updated is False
with pytest.raises(ValueError, match=r"For binary cases, y must be comprised of 0's and 1's"):
# y has not only 0 or 1 values
pr.update((torch.randint(0, 2, size=(10,)).long(), torch.arange(0, 10).long()))
assert pr._updated is False
with pytest.raises(ValueError, match=r"For binary cases, y_pred must be comprised of 0's and 1's"):
# y_pred values are not thresholded to 0, 1 values
pr.update((torch.rand(10), torch.randint(0, 2, size=(10,)).long()))
assert pr._updated is False
with pytest.raises(ValueError, match=r"y must have shape of"):
# incompatible shapes
pr.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10, 5)).long()))
assert pr._updated is False
with pytest.raises(ValueError, match=r"y must have shape of"):
# incompatible shapes
pr.update((torch.randint(0, 2, size=(10, 5, 6)).long(), torch.randint(0, 2, size=(10,)).long()))
assert pr._updated is False
with pytest.raises(ValueError, match=r"y must have shape of"):
# incompatible shapes
pr.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10, 5, 6)).long()))
assert pr._updated is False
with pytest.warns(
RuntimeWarning,
match="`y` and `y_pred` should be of dtype long when entry type is binary and average!=False",
):
pr = Precision(average=None)
pr.update((torch.randint(0, 2, size=(10,)).float(), torch.randint(0, 2, size=(10,))))
with pytest.warns(
RuntimeWarning,
match="`y` and `y_pred` should be of dtype long when entry type is binary and average!=False",
):
pr = Precision(average=None)
pr.update((torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10,)).float()))
def ignite_average_to_scikit_average(average, data_type: str):
if average in [None, "micro", "samples", "weighted", "macro"]:
return average
if average is False:
if data_type == "binary":
return "binary"
else:
return None
elif average is True:
return "macro"
else:
raise ValueError(f"Wrong average parameter `{average}`")
@pytest.mark.parametrize("average", [None, False, "macro", "micro", "weighted"])
def test_binary_input(average):
pr = Precision(average=average)
assert pr._updated is False
def _test(y_pred, y, batch_size):
pr.reset()
assert pr._updated is False
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
pr.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
pr.update((y_pred, y))
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
assert pr._type == "binary"
assert pr._updated is True
assert isinstance(pr.compute(), torch.Tensor if not average else float)
pr_compute = pr.compute().numpy() if not average else pr.compute()
sk_average_parameter = ignite_average_to_scikit_average(average, "binary")
assert precision_score(
np_y, np_y_pred, average=sk_average_parameter, labels=[0, 1], zero_division=0
) == pytest.approx(pr_compute)
def get_test_cases():
test_cases = [
# Binary accuracy on input of shape (N, 1) or (N, )
(torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10,)), 1),
(torch.randint(0, 2, size=(10, 1)), torch.randint(0, 2, size=(10, 1)), 1),
# updated batches
(torch.randint(0, 2, size=(50,)), torch.randint(0, 2, size=(50,)), 16),
(torch.randint(0, 2, size=(50, 1)), torch.randint(0, 2, size=(50, 1)), 16),
# Binary accuracy on input of shape (N, L)
(torch.randint(0, 2, size=(10, 5)), torch.randint(0, 2, size=(10, 5)), 1),
(torch.randint(0, 2, size=(10, 1, 5)), torch.randint(0, 2, size=(10, 1, 5)), 1),
# updated batches
(torch.randint(0, 2, size=(50, 5)), torch.randint(0, 2, size=(50, 5)), 16),
(torch.randint(0, 2, size=(50, 1, 5)), torch.randint(0, 2, size=(50, 1, 5)), 16),
# Binary accuracy on input of shape (N, H, W)
(torch.randint(0, 2, size=(10, 12, 10)), torch.randint(0, 2, size=(10, 12, 10)), 1),
(torch.randint(0, 2, size=(10, 1, 12, 10)), torch.randint(0, 2, size=(10, 1, 12, 10)), 1),
# updated batches
(torch.randint(0, 2, size=(50, 12, 10)), torch.randint(0, 2, size=(50, 12, 10)), 16),
(torch.randint(0, 2, size=(50, 1, 12, 10)), torch.randint(0, 2, size=(50, 1, 12, 10)), 16),
# Corner case with all zeros predictions
(torch.zeros(size=(10,), dtype=torch.long), torch.randint(0, 2, size=(10,)), 1),
(torch.zeros(size=(10, 1), dtype=torch.long), torch.randint(0, 2, size=(10, 1)), 1),
]
return test_cases
for _ in range(5):
# check multiple random inputs as random exact occurencies are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def test_multiclass_wrong_inputs():
pr = Precision()
assert pr._updated is False
with pytest.raises(ValueError):
# incompatible shapes
pr.update((torch.rand(10, 5, 4), torch.randint(0, 2, size=(10,)).long()))
assert pr._updated is False
with pytest.raises(ValueError):
# incompatible shapes
pr.update((torch.rand(10, 5, 6), torch.randint(0, 5, size=(10, 5)).long()))
assert pr._updated is False
with pytest.raises(ValueError):
# incompatible shapes
pr.update((torch.rand(10), torch.randint(0, 5, size=(10, 5, 6)).long()))
assert pr._updated is False
pr = Precision(average=True)
assert pr._updated is False
with pytest.raises(ValueError):
# incompatible shapes between two updates
pr.update((torch.rand(10, 5), torch.randint(0, 5, size=(10,)).long()))
pr.update((torch.rand(10, 6), torch.randint(0, 5, size=(10,)).long()))
assert pr._updated is True
with pytest.raises(ValueError):
# incompatible shapes between two updates
pr.update((torch.rand(10, 5, 12, 14), torch.randint(0, 5, size=(10, 12, 14)).long()))
pr.update((torch.rand(10, 6, 12, 14), torch.randint(0, 5, size=(10, 12, 14)).long()))
assert pr._updated is True
pr = Precision(average=False)
assert pr._updated is False
with pytest.raises(ValueError):
# incompatible shapes between two updates
pr.update((torch.rand(10, 5), torch.randint(0, 5, size=(10,)).long()))
pr.update((torch.rand(10, 6), torch.randint(0, 5, size=(10,)).long()))
assert pr._updated is True
with pytest.raises(ValueError):
# incompatible shapes between two updates
pr.update((torch.rand(10, 5, 12, 14), torch.randint(0, 5, size=(10, 12, 14)).long()))
pr.update((torch.rand(10, 6, 12, 14), torch.randint(0, 5, size=(10, 12, 14)).long()))
assert pr._updated is True
with pytest.warns(
RuntimeWarning,
match="`y` should be of dtype long when entry type is multiclass",
):
pr = Precision()
pr.update((torch.rand(10, 5), torch.randint(0, 5, size=(10,)).float()))
@pytest.mark.parametrize("average", [None, False, "macro", "micro", "weighted"])
def test_multiclass_input(average):
pr = Precision(average=average)
assert pr._updated is False
def _test(y_pred, y, batch_size):
pr.reset()
assert pr._updated is False
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
pr.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
pr.update((y_pred, y))
num_classes = y_pred.shape[1]
np_y_pred = y_pred.argmax(dim=1).numpy().ravel()
np_y = y.numpy().ravel()
assert pr._type == "multiclass"
assert pr._updated is True
assert isinstance(pr.compute(), torch.Tensor if not average else float)
pr_compute = pr.compute().numpy() if not average else pr.compute()
sk_average_parameter = ignite_average_to_scikit_average(average, "multiclass")
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UndefinedMetricWarning)
sk_compute = precision_score(np_y, np_y_pred, labels=range(0, num_classes), average=sk_average_parameter)
assert sk_compute == pytest.approx(pr_compute)
def get_test_cases():
test_cases = [
# Multiclass input data of shape (N, ) and (N, C)
(torch.rand(10, 6), torch.randint(0, 6, size=(10,)), 1),
(torch.rand(10, 4), torch.randint(0, 4, size=(10,)), 1),
# updated batches
(torch.rand(50, 6), torch.randint(0, 6, size=(50,)), 16),
(torch.rand(50, 4), torch.randint(0, 4, size=(50,)), 16),
# Multiclass input data of shape (N, L) and (N, C, L)
(torch.rand(10, 5, 8), torch.randint(0, 5, size=(10, 8)), 1),
(torch.rand(10, 8, 12), torch.randint(0, 8, size=(10, 12)), 1),
# updated batches
(torch.rand(50, 5, 8), torch.randint(0, 5, size=(50, 8)), 16),
(torch.rand(50, 8, 12), torch.randint(0, 8, size=(50, 12)), 16),
# Multiclass input data of shape (N, H, W, ...) and (N, C, H, W, ...)
(torch.rand(10, 5, 18, 16), torch.randint(0, 5, size=(10, 18, 16)), 1),
(torch.rand(10, 7, 20, 12), torch.randint(0, 7, size=(10, 20, 12)), 1),
# updated batches
(torch.rand(50, 5, 18, 16), torch.randint(0, 5, size=(50, 18, 16)), 16),
(torch.rand(50, 7, 20, 12), torch.randint(0, 7, size=(50, 20, 12)), 16),
# Corner case with all zeros predictions
(torch.zeros(size=(10, 6)), torch.randint(0, 6, size=(10,)), 1),
(torch.zeros(size=(10, 4)), torch.randint(0, 4, size=(10,)), 1),
]
return test_cases
for _ in range(5):
# check multiple random inputs as random exact occurencies are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def test_multilabel_wrong_inputs():
pr = Precision(is_multilabel=True)
assert pr._updated is False
with pytest.raises(ValueError):
# incompatible shapes
pr.update((torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10,)).long()))
assert pr._updated is False
with pytest.raises(ValueError):
# incompatible y_pred
pr.update((torch.rand(10, 5), torch.randint(0, 2, size=(10, 5)).long()))
assert pr._updated is False
with pytest.raises(ValueError):
# incompatible y
pr.update((torch.randint(0, 5, size=(10, 5, 6)), torch.rand(10)))
assert pr._updated is False
with pytest.raises(ValueError):
# incompatible shapes between two updates
pr.update((torch.randint(0, 2, size=(20, 5)), torch.randint(0, 2, size=(20, 5)).long()))
pr.update((torch.randint(0, 2, size=(20, 6)), torch.randint(0, 2, size=(20, 6)).long()))
assert pr._updated is True
def to_numpy_multilabel(y):
# reshapes input array to (N x ..., C)
y = y.transpose(1, 0).cpu().numpy()
num_classes = y.shape[0]
y = y.reshape((num_classes, -1)).transpose(1, 0)
return y
@pytest.mark.parametrize("average", [None, False, "macro", "micro", "weighted", "samples"])
def test_multilabel_input(average):
pr = Precision(average=average, is_multilabel=True)
assert pr._updated is False
def _test(y_pred, y, batch_size):
pr.reset()
assert pr._updated is False
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
pr.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
pr.update((y_pred, y))
np_y_pred = to_numpy_multilabel(y_pred)
np_y = to_numpy_multilabel(y)
assert pr._type == "multilabel"
assert pr._updated is True
pr_compute = pr.compute().numpy() if not average else pr.compute()
sk_average_parameter = ignite_average_to_scikit_average(average, "multilabel")
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UndefinedMetricWarning)
assert precision_score(np_y, np_y_pred, average=sk_average_parameter) == pytest.approx(pr_compute)
def get_test_cases():
test_cases = [
# Multilabel input data of shape (N, C)
(torch.randint(0, 2, size=(10, 5)), torch.randint(0, 2, size=(10, 5)), 1),
(torch.randint(0, 2, size=(10, 4)), torch.randint(0, 2, size=(10, 4)), 1),
# updated batches
(torch.randint(0, 2, size=(50, 5)), torch.randint(0, 2, size=(50, 5)), 16),
(torch.randint(0, 2, size=(50, 4)), torch.randint(0, 2, size=(50, 4)), 16),
# Multilabel input data of shape (N, C, L)
(torch.randint(0, 2, size=(10, 5, 10)), torch.randint(0, 2, size=(10, 5, 10)), 1),
(torch.randint(0, 2, size=(10, 4, 10)), torch.randint(0, 2, size=(10, 4, 10)), 1),
# updated batches
(torch.randint(0, 2, size=(50, 5, 10)), torch.randint(0, 2, size=(50, 5, 10)), 16),
(torch.randint(0, 2, size=(50, 4, 10)), torch.randint(0, 2, size=(50, 4, 10)), 16),
# Multilabel input data of shape (N, C, H, W)
(torch.randint(0, 2, size=(10, 5, 18, 16)), torch.randint(0, 2, size=(10, 5, 18, 16)), 1),
(torch.randint(0, 2, size=(10, 4, 20, 23)), torch.randint(0, 2, size=(10, 4, 20, 23)), 1),
# updated batches
(torch.randint(0, 2, size=(50, 5, 18, 16)), torch.randint(0, 2, size=(50, 5, 18, 16)), 16),
(torch.randint(0, 2, size=(50, 4, 20, 23)), torch.randint(0, 2, size=(50, 4, 20, 23)), 16),
# Corner case with all zeros predictions
(torch.zeros(size=(10, 5)), torch.randint(0, 2, size=(10, 5)), 1),
(torch.zeros(size=(10, 4)), torch.randint(0, 2, size=(10, 4)), 1),
]
return test_cases
for _ in range(5):
# check multiple random inputs as random exact occurencies are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
@pytest.mark.parametrize("average", [None, False, "macro", "micro", "weighted"])
def test_incorrect_type(average):
# Tests changing of type during training
pr = Precision(average=average)
assert pr._updated is False
y_pred = torch.softmax(torch.rand(4, 4), dim=1)
y = torch.ones(4).long()
pr.update((y_pred, y))
assert pr._updated is True
y_pred = torch.randint(0, 2, size=(4,))
y = torch.ones(4).long()
with pytest.raises(RuntimeError):
pr.update((y_pred, y))
assert pr._updated is True
@pytest.mark.parametrize("average", [None, False, "macro", "micro", "weighted"])
def test_incorrect_y_classes(average):
pr = Precision(average=average)
assert pr._updated is False
y_pred = torch.randint(0, 2, size=(10, 4)).float()
y = torch.randint(4, 5, size=(10,)).long()
with pytest.raises(ValueError):
pr.update((y_pred, y))
assert pr._updated is False
def test_distrib_integration_multiclass(distributed):
from ignite.engine import Engine
rank = idist.get_rank()
torch.manual_seed(12)
def _test(average, n_epochs, metric_device):
n_iters = 60
s = 16
n_classes = 7
offset = n_iters * s
y_true = torch.randint(0, n_classes, size=(offset * idist.get_world_size(),)).to(device)
y_preds = torch.rand(offset * idist.get_world_size(), n_classes).to(device)
def update(engine, i):
return (
y_preds[i * s + rank * offset : (i + 1) * s + rank * offset, :],
y_true[i * s + rank * offset : (i + 1) * s + rank * offset],
)
engine = Engine(update)
pr = Precision(average=average, device=metric_device)
pr.attach(engine, "pr")
assert pr._updated is False
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
assert "pr" in engine.state.metrics
assert pr._updated is True
res = engine.state.metrics["pr"]
if isinstance(res, torch.Tensor):
# Fixes https://github.com/pytorch/ignite/issues/1635#issuecomment-863026919
assert res.device.type == "cpu"
res = res.cpu().numpy()
sk_average_parameter = ignite_average_to_scikit_average(average, "multiclass")
true_res = precision_score(
y_true.cpu().numpy(), torch.argmax(y_preds, dim=1).cpu().numpy(), average=sk_average_parameter
)
assert pytest.approx(res) == true_res
metric_devices = [torch.device("cpu")]
device = idist.device()
if device.type != "xla":
metric_devices.append(idist.device())
for _ in range(2):
for metric_device in metric_devices:
_test(average=False, n_epochs=1, metric_device=metric_device)
_test(average=False, n_epochs=2, metric_device=metric_device)
_test(average="macro", n_epochs=1, metric_device=metric_device)
_test(average="macro", n_epochs=2, metric_device=metric_device)
_test(average="weighted", n_epochs=1, metric_device=metric_device)
_test(average="weighted", n_epochs=2, metric_device=metric_device)
_test(average="micro", n_epochs=1, metric_device=metric_device)
_test(average="micro", n_epochs=2, metric_device=metric_device)
def test_distrib_integration_multilabel(distributed):
from ignite.engine import Engine
rank = idist.get_rank()
torch.manual_seed(12)
def _test(average, n_epochs, metric_device):
n_iters = 60
s = 16
n_classes = 7
offset = n_iters * s
y_true = torch.randint(0, 2, size=(offset * idist.get_world_size(), n_classes, 6, 8)).to(device)
y_preds = torch.randint(0, 2, size=(offset * idist.get_world_size(), n_classes, 6, 8)).to(device)
def update(engine, i):
return (
y_preds[i * s + rank * offset : (i + 1) * s + rank * offset, ...],
y_true[i * s + rank * offset : (i + 1) * s + rank * offset, ...],
)
engine = Engine(update)
pr = Precision(average=average, is_multilabel=True, device=metric_device)
pr.attach(engine, "pr")
assert pr._updated is False
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
assert "pr" in engine.state.metrics
assert pr._updated is True
res = engine.state.metrics["pr"]
res2 = pr.compute()
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
res2 = res2.cpu().numpy()
assert (res == res2).all()
else:
assert res == res2
np_y_preds = to_numpy_multilabel(y_preds)
np_y_true = to_numpy_multilabel(y_true)
assert pr._type == "multilabel"
sk_average_parameter = ignite_average_to_scikit_average(average, "multilabel")
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UndefinedMetricWarning)
assert precision_score(np_y_true, np_y_preds, average=sk_average_parameter) == pytest.approx(res)
metric_devices = ["cpu"]
device = idist.device()
if device.type != "xla":
metric_devices.append(idist.device())
for _ in range(2):
for metric_device in metric_devices:
_test(average=False, n_epochs=1, metric_device=metric_device)
_test(average=False, n_epochs=2, metric_device=metric_device)
_test(average="macro", n_epochs=1, metric_device=metric_device)
_test(average="macro", n_epochs=2, metric_device=metric_device)
_test(average="micro", n_epochs=1, metric_device=metric_device)
_test(average="micro", n_epochs=2, metric_device=metric_device)
_test(average="weighted", n_epochs=1, metric_device=metric_device)
_test(average="weighted", n_epochs=2, metric_device=metric_device)
_test(average="samples", n_epochs=1, metric_device=metric_device)
_test(average="samples", n_epochs=2, metric_device=metric_device)
def test_distrib_accumulator_device(distributed):
# Binary accuracy on input of shape (N, 1) or (N, )
def _test(average, metric_device):
pr = Precision(average=average, device=metric_device)
assert pr._device == metric_device
assert pr._updated is False
# Since the shape of the accumulated amount isn't known before the first update
# call, the internal variables aren't tensors on the right device yet.
y_pred = torch.randint(0, 2, size=(10,))
y = torch.randint(0, 2, size=(10,)).long()
pr.update((y_pred, y))
assert pr._updated is True
assert (
pr._numerator.device == metric_device
), f"{type(pr._numerator.device)}:{pr._numerator.device} vs {type(metric_device)}:{metric_device}"
if average != "samples":
# For average='samples', `_denominator` is of type `int` so it has not `device` member.
assert (
pr._denominator.device == metric_device
), f"{type(pr._denominator.device)}:{pr._denominator.device} vs {type(metric_device)}:{metric_device}"
if average == "weighted":
assert pr._weight.device == metric_device, f"{type(pr._weight.device)}:{pr._weight.device} vs "
f"{type(metric_device)}:{metric_device}"
metric_devices = [torch.device("cpu")]
device = idist.device()
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
_test(False, metric_device=metric_device)
_test("macro", metric_device=metric_device)
_test("micro", metric_device=metric_device)
_test("weighted", metric_device=metric_device)
def test_distrib_multilabel_accumulator_device(distributed):
# Multiclass input data of shape (N, ) and (N, C)
def _test(average, metric_device):
pr = Precision(is_multilabel=True, average=average, device=metric_device)
assert pr._updated is False
assert pr._device == metric_device
y_pred = torch.randint(0, 2, size=(10, 4, 20, 23))
y = torch.randint(0, 2, size=(10, 4, 20, 23)).long()
pr.update((y_pred, y))
assert pr._updated is True
assert (
pr._numerator.device == metric_device
), f"{type(pr._numerator.device)}:{pr._numerator.device} vs {type(metric_device)}:{metric_device}"
if average != "samples":
# For average='samples', `_denominator` is of type `int` so it has not `device` member.
assert (
pr._denominator.device == metric_device
), f"{type(pr._denominator.device)}:{pr._denominator.device} vs {type(metric_device)}:{metric_device}"
if average == "weighted":
assert pr._weight.device == metric_device, f"{type(pr._weight.device)}:{pr._weight.device} vs "
f"{type(metric_device)}:{metric_device}"
metric_devices = [torch.device("cpu")]
device = idist.device()
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
_test(False, metric_device=metric_device)
_test("macro", metric_device=metric_device)
_test("micro", metric_device=metric_device)
_test("weighted", metric_device=metric_device)
_test("samples", metric_device=metric_device)
|
import numpy as np
import pytest
import torch
from sklearn.metrics import multilabel_confusion_matrix
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics.multilabel_confusion_matrix import MultiLabelConfusionMatrix
torch.manual_seed(12)
def test_no_update():
cm = MultiLabelConfusionMatrix(10)
with pytest.raises(
NotComputableError, match=r"Confusion matrix must have at least one example before it can be computed"
):
cm.compute()
def test_num_classes_wrong_input():
with pytest.raises(ValueError, match="Argument num_classes needs to be > 1"):
MultiLabelConfusionMatrix(num_classes=1)
def test_multiclass_wrong_inputs():
cm = MultiLabelConfusionMatrix(10)
with pytest.raises(
ValueError, match=r"y_pred must at least have shape \(batch_size, num_classes \(currently set to 10\), ...\)"
):
cm.update((torch.rand(10), torch.randint(0, 2, size=(10, 10)).long()))
with pytest.raises(
ValueError, match=r"y must at least have shape \(batch_size, num_classes \(currently set to 10\), ...\)"
):
cm.update((torch.rand(10, 10), torch.randint(0, 2, size=(10,)).long()))
with pytest.raises(ValueError, match=r"y_pred and y have different batch size: 10 vs 8"):
cm.update((torch.rand(10, 10), torch.randint(0, 2, size=(8, 10)).long()))
with pytest.raises(ValueError, match=r"y does not have correct number of classes: 9 vs 10"):
cm.update((torch.rand(10, 10), torch.randint(0, 2, size=(10, 9)).long()))
with pytest.raises(ValueError, match=r"y_pred does not have correct number of classes: 3 vs 10"):
cm.update((torch.rand(10, 3), torch.randint(0, 2, size=(10, 10)).long()))
with pytest.raises(ValueError, match=r"y and y_pred shapes must match."):
cm.update((torch.rand(10, 10, 2), torch.randint(0, 2, size=(10, 10)).long()))
with pytest.raises(
ValueError,
match=r"y_pred must be of any type: \(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64\)",
):
cm.update((torch.rand(10, 10), torch.rand(10, 10)))
with pytest.raises(
ValueError, match=r"y must be of any type: \(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64\)"
):
cm.update((torch.rand(10, 10).type(torch.int32), torch.rand(10, 10)))
with pytest.raises(ValueError, match=r"y_pred must be a binary tensor"):
y = torch.randint(0, 2, size=(10, 10)).long()
y_pred = torch.randint(0, 2, size=(10, 10)).long()
y_pred[0, 0] = 2
cm.update((y_pred, y))
with pytest.raises(ValueError, match=r"y must be a binary tensor"):
y = torch.randint(0, 2, size=(10, 10)).long()
y_pred = torch.randint(0, 2, size=(10, 10)).long()
y[0, 0] = 2
cm.update((y_pred, y))
def get_y_true_y_pred():
# Generate an image with labels 0 (background), 1, 2
# 3 classes:
y_true = np.zeros((1, 3, 30, 30), dtype=np.int64)
y_true[0, 0, 5:17, 7:11] = 1
y_true[0, 1, 1:11, 1:11] = 1
y_true[0, 2, 15:25, 15:25] = 1
y_pred = np.zeros((1, 3, 30, 30), dtype=np.int64)
y_pred[0, 0, 0:7, 8:15] = 1
y_pred[0, 1, 5:15, 1:11] = 1
y_pred[0, 2, 20:30, 20:30] = 1
return y_true, y_pred
def test_multiclass_images():
num_classes = 3
cm = MultiLabelConfusionMatrix(num_classes=num_classes)
y_true, y_pred = get_y_true_y_pred()
# Compute confusion matrix with sklearn
sklearn_CM = multilabel_confusion_matrix(
y_true.transpose((0, 2, 3, 1)).reshape(-1, 3), y_pred.transpose((0, 2, 3, 1)).reshape(-1, 3)
)
# Update metric
output = (torch.tensor(y_pred), torch.tensor(y_true))
cm.update(output)
ignite_CM = cm.compute().cpu().numpy()
assert np.all(ignite_CM == sklearn_CM)
# Another test on batch of 2 images
cm = MultiLabelConfusionMatrix(num_classes=num_classes)
# Create a batch of two images:
th_y_true1 = torch.tensor(y_true)
th_y_true2 = torch.tensor(y_true.transpose(0, 1, 3, 2))
th_y_true = torch.cat([th_y_true1, th_y_true2], dim=0)
th_y_pred1 = torch.tensor(y_pred)
th_y_pred2 = torch.tensor(y_pred.transpose(0, 1, 3, 2))
th_y_pred = torch.cat([th_y_pred1, th_y_pred2], dim=0)
# Update metric & compute
output = (th_y_pred, th_y_true)
cm.update(output)
ignite_CM = cm.compute().cpu().numpy()
# Compute confusion matrix with sklearn
th_y_true = idist.all_gather(th_y_true)
th_y_pred = idist.all_gather(th_y_pred)
np_y_true = th_y_true.cpu().numpy().transpose((0, 2, 3, 1)).reshape(-1, 3)
np_y_pred = th_y_pred.cpu().numpy().transpose((0, 2, 3, 1)).reshape(-1, 3)
sklearn_CM = multilabel_confusion_matrix(np_y_true, np_y_pred)
assert np.all(ignite_CM == sklearn_CM)
def _test_distrib_multiclass_images(device):
def _test(metric_device):
num_classes = 3
cm = MultiLabelConfusionMatrix(num_classes=num_classes, device=metric_device)
y_true, y_pred = get_y_true_y_pred()
# Compute confusion matrix with sklearn
sklearn_CM = multilabel_confusion_matrix(
y_true.transpose((0, 2, 3, 1)).reshape(-1, 3), y_pred.transpose((0, 2, 3, 1)).reshape(-1, 3)
)
# Update metric
output = (torch.tensor(y_pred).to(device), torch.tensor(y_true).to(device))
cm.update(output)
ignite_CM = cm.compute().cpu().numpy()
assert np.all(ignite_CM == sklearn_CM)
# Another test on batch of 2 images
num_classes = 3
cm = MultiLabelConfusionMatrix(num_classes=num_classes, device=metric_device)
# Create a batch of two images:
th_y_true1 = torch.tensor(y_true)
th_y_true2 = torch.tensor(y_true.transpose(0, 1, 3, 2))
th_y_true = torch.cat([th_y_true1, th_y_true2], dim=0)
th_y_true = th_y_true.to(device)
th_y_pred1 = torch.tensor(y_pred)
th_y_pred2 = torch.tensor(y_pred.transpose(0, 1, 3, 2))
th_y_pred = torch.cat([th_y_pred1, th_y_pred2], dim=0)
th_y_pred = th_y_pred.to(device)
# Update metric & compute
output = (th_y_pred, th_y_true)
cm.update(output)
ignite_CM = cm.compute().cpu().numpy()
# Compute confusion matrix with sklearn
th_y_true = idist.all_gather(th_y_true)
th_y_pred = idist.all_gather(th_y_pred)
np_y_true = th_y_true.cpu().numpy().transpose((0, 2, 3, 1)).reshape(-1, 3)
np_y_pred = th_y_pred.cpu().numpy().transpose((0, 2, 3, 1)).reshape(-1, 3)
sklearn_CM = multilabel_confusion_matrix(np_y_true, np_y_pred)
assert np.all(ignite_CM == sklearn_CM)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_accumulator_device(device):
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
cm = MultiLabelConfusionMatrix(num_classes=3, device=metric_device)
assert cm._device == metric_device
assert (
cm.confusion_matrix.device == metric_device
), f"{type(cm.confusion_matrix.device)}:{cm._num_correct.device} vs {type(metric_device)}:{metric_device}"
y_true, y_pred = get_y_true_y_pred()
cm.update((torch.tensor(y_pred), torch.tensor(y_true)))
assert (
cm.confusion_matrix.device == metric_device
), f"{type(cm.confusion_matrix.device)}:{cm._num_correct.device} vs {type(metric_device)}:{metric_device}"
def test_simple_2D_input():
# Tests for 2D inputs with normalized = True and False
num_iters = 5
num_samples = 100
num_classes = 10
torch.manual_seed(0)
for _ in range(num_iters):
target = torch.randint(0, 2, size=(num_samples, num_classes))
prediction = torch.randint(0, 2, size=(num_samples, num_classes))
sklearn_CM = multilabel_confusion_matrix(target.numpy(), prediction.numpy())
mlcm = MultiLabelConfusionMatrix(num_classes, normalized=False)
mlcm.update([prediction, target])
ignite_CM = mlcm.compute().numpy()
assert np.all(sklearn_CM.astype(np.int64) == ignite_CM.astype(np.int64))
mlcm = MultiLabelConfusionMatrix(num_classes, normalized=True)
mlcm.update([prediction, target])
ignite_CM_normalized = mlcm.compute().numpy()
sklearn_CM_normalized = sklearn_CM / sklearn_CM.sum(axis=(1, 2))[:, None, None]
assert np.allclose(sklearn_CM_normalized, ignite_CM_normalized)
def test_simple_ND_input():
num_iters = 5
num_samples = 100
num_classes = 10
torch.manual_seed(0)
size_3d = 4
for _ in range(num_iters): # 3D tests
target = torch.randint(0, 2, size=(num_samples, num_classes, size_3d))
prediction = torch.randint(0, 2, size=(num_samples, num_classes, size_3d))
mlcm = MultiLabelConfusionMatrix(num_classes, normalized=False)
mlcm.update([prediction, target])
ignite_CM = mlcm.compute().numpy()
target_reshaped = target.permute(0, 2, 1).reshape(size_3d * num_samples, num_classes)
prediction_reshaped = prediction.permute(0, 2, 1).reshape(size_3d * num_samples, num_classes)
sklearn_CM = multilabel_confusion_matrix(target_reshaped.numpy(), prediction_reshaped.numpy())
assert np.all(sklearn_CM.astype(np.int64) == ignite_CM.astype(np.int64))
size_4d = 4
for _ in range(num_iters): # 4D tests
target = torch.randint(0, 2, size=(num_samples, num_classes, size_3d, size_4d))
prediction = torch.randint(0, 2, size=(num_samples, num_classes, size_3d, size_4d))
mlcm = MultiLabelConfusionMatrix(num_classes, normalized=False)
mlcm.update([prediction, target])
ignite_CM = mlcm.compute().numpy()
target_reshaped = target.permute(0, 2, 3, 1).reshape(size_3d * size_4d * num_samples, num_classes)
prediction_reshaped = prediction.permute(0, 2, 3, 1).reshape(size_3d * size_4d * num_samples, num_classes)
sklearn_CM = multilabel_confusion_matrix(target_reshaped.numpy(), prediction_reshaped.numpy())
assert np.all(sklearn_CM.astype(np.int64) == ignite_CM.astype(np.int64))
size_5d = 4
for _ in range(num_iters): # 5D tests
target = torch.randint(0, 2, size=(num_samples, num_classes, size_3d, size_4d, size_5d))
prediction = torch.randint(0, 2, size=(num_samples, num_classes, size_3d, size_4d, size_5d))
mlcm = MultiLabelConfusionMatrix(num_classes, normalized=False)
mlcm.update([prediction, target])
ignite_CM = mlcm.compute().numpy()
target_reshaped = target.permute(0, 2, 3, 4, 1).reshape(size_3d * size_4d * size_5d * num_samples, num_classes)
prediction_reshaped = prediction.permute(0, 2, 3, 4, 1).reshape(
size_3d * size_4d * size_5d * num_samples, num_classes
)
sklearn_CM = multilabel_confusion_matrix(target_reshaped.numpy(), prediction_reshaped.numpy())
assert np.all(sklearn_CM.astype(np.int64) == ignite_CM.astype(np.int64))
def test_simple_batched():
num_iters = 5
num_samples = 100
num_classes = 10
batch_size = 1
torch.manual_seed(0)
for _ in range(num_iters): # 2D tests
mlcm = MultiLabelConfusionMatrix(num_classes, normalized=False)
targets = torch.randint(0, 2, size=(int(num_samples / batch_size), batch_size, num_classes))
predictions = torch.randint(0, 2, size=(int(num_samples / batch_size), batch_size, num_classes))
for i in range(int(num_samples / batch_size)):
target_sample = targets[i]
prediction_sample = predictions[i]
mlcm.update([prediction_sample, target_sample])
ignite_CM = mlcm.compute().numpy()
targets_reshaped = targets.reshape(-1, num_classes)
predictions_reshaped = predictions.reshape(-1, num_classes)
sklearn_CM = multilabel_confusion_matrix(targets_reshaped.numpy(), predictions_reshaped.numpy())
assert np.all(sklearn_CM.astype(np.int64) == ignite_CM.astype(np.int64))
size_3d = 4
for _ in range(num_iters): # 3D tests
mlcm = MultiLabelConfusionMatrix(num_classes, normalized=False)
targets = torch.randint(0, 2, size=(int(num_samples / batch_size), batch_size, num_classes, size_3d))
predictions = torch.randint(0, 2, size=(int(num_samples / batch_size), batch_size, num_classes, size_3d))
for i in range(int(num_samples / batch_size)):
target_sample = targets[i]
prediction_sample = predictions[i]
mlcm.update([prediction_sample, target_sample])
ignite_CM = mlcm.compute().numpy()
targets_reshaped = targets.permute(0, 1, 3, 2).reshape(-1, num_classes)
predictions_reshaped = predictions.permute(0, 1, 3, 2).reshape(-1, num_classes)
sklearn_CM = multilabel_confusion_matrix(targets_reshaped.numpy(), predictions_reshaped.numpy())
assert np.all(sklearn_CM.astype(np.int64) == ignite_CM.astype(np.int64))
size_4d = 4
for _ in range(num_iters): # 4D tests
mlcm = MultiLabelConfusionMatrix(num_classes, normalized=False)
targets = torch.randint(0, 2, size=(int(num_samples / batch_size), batch_size, num_classes, size_3d, size_4d))
predictions = torch.randint(
0, 2, size=(int(num_samples / batch_size), batch_size, num_classes, size_3d, size_4d)
)
for i in range(int(num_samples / batch_size)):
target_sample = targets[i]
prediction_sample = predictions[i]
mlcm.update([prediction_sample, target_sample])
ignite_CM = mlcm.compute().numpy()
targets_reshaped = targets.permute(0, 1, 3, 4, 2).reshape(-1, num_classes)
predictions_reshaped = predictions.permute(0, 1, 3, 4, 2).reshape(-1, num_classes)
sklearn_CM = multilabel_confusion_matrix(targets_reshaped.numpy(), predictions_reshaped.numpy())
assert np.all(sklearn_CM.astype(np.int64) == ignite_CM.astype(np.int64))
size_5d = 4
for _ in range(num_iters): # 5D tests
mlcm = MultiLabelConfusionMatrix(num_classes, normalized=False)
targets = torch.randint(
0, 2, size=(int(num_samples / batch_size), batch_size, num_classes, size_3d, size_4d, size_5d)
)
predictions = torch.randint(
0, 2, size=(int(num_samples / batch_size), batch_size, num_classes, size_3d, size_4d, size_5d)
)
for i in range(int(num_samples / batch_size)):
target_sample = targets[i]
prediction_sample = predictions[i]
mlcm.update([prediction_sample, target_sample])
ignite_CM = mlcm.compute().numpy()
targets_reshaped = targets.permute(0, 1, 3, 4, 5, 2).reshape(-1, num_classes)
predictions_reshaped = predictions.permute(0, 1, 3, 4, 5, 2).reshape(-1, num_classes)
sklearn_CM = multilabel_confusion_matrix(targets_reshaped.numpy(), predictions_reshaped.numpy())
assert np.all(sklearn_CM.astype(np.int64) == ignite_CM.astype(np.int64))
# @pytest.mark.distributed
# @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
# @pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
# def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
# device = idist.device()
# _test_distrib_multiclass_images(device)
# _test_distrib_accumulator_device(device)
# @pytest.mark.distributed
# @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
# def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
# device = idist.device()
# _test_distrib_multiclass_images(device)
# _test_distrib_accumulator_device(device)
# @pytest.mark.distributed
# @pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
# @pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
# def test_distrib_hvd(gloo_hvd_executor):
# device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
# nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
# gloo_hvd_executor(_test_distrib_multiclass_images, (device,), np=nproc, do_init=True)
# gloo_hvd_executor(_test_distrib_accumulator_device, (device,), np=nproc, do_init=True)
# @pytest.mark.multinode_distributed
# @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
# @pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
# def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
#
# device = idist.device()
# _test_distrib_multiclass_images(device)
# _test_distrib_accumulator_device(device)
# @pytest.mark.multinode_distributed
# @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
# @pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
# def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
#
# device = idist.device()
# _test_distrib_multiclass_images(device)
# _test_distrib_accumulator_device(device)
# @pytest.mark.tpu
# @pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
# @pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
# def test_distrib_single_device_xla():
# device = idist.device()
# _test_distrib_multiclass_images(device)
# _test_distrib_accumulator_device(device)
# def _test_distrib_xla_nprocs(index):
# device = idist.device()
# _test_distrib_multiclass_images(device)
# _test_distrib_accumulator_device(device)
# @pytest.mark.tpu
# @pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
# @pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
# def test_distrib_xla_nprocs(xmp_executor):
# n = int(os.environ["NUM_TPU_WORKERS"])
# xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import MeanPairwiseDistance
def test_zero_sample():
mpd = MeanPairwiseDistance()
with pytest.raises(
NotComputableError, match=r"MeanAbsoluteError must have at least one example before it can be computed"
):
mpd.compute()
@pytest.fixture(params=[item for item in range(4)])
def test_case(request):
return [
(torch.randint(0, 10, size=(100, 1)), torch.randint(0, 10, size=(100, 1)), 1),
(torch.randint(-20, 20, size=(100, 5)), torch.randint(-20, 20, size=(100, 5)), 1),
# updated batches
(torch.randint(0, 10, size=(100, 1)), torch.randint(0, 10, size=(100, 1)), 16),
(torch.randint(-20, 20, size=(100, 5)), torch.randint(-20, 20, size=(100, 5)), 16),
][request.param]
@pytest.mark.parametrize("n_times", range(5))
def test_compute(n_times, test_case):
mpd = MeanPairwiseDistance()
y_pred, y, batch_size = test_case
mpd.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
mpd.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
mpd.update((y_pred, y))
np_res = np.mean(torch.pairwise_distance(y_pred, y, p=mpd._p, eps=mpd._eps).numpy())
assert isinstance(mpd.compute(), float)
assert pytest.approx(mpd.compute()) == np_res
def _test_distrib_integration(device):
from ignite.engine import Engine
rank = idist.get_rank()
torch.manual_seed(12 + rank)
def _test(metric_device):
n_iters = 100
batch_size = 50
y_true = torch.rand(n_iters * batch_size, 10).to(device)
y_preds = torch.rand(n_iters * batch_size, 10).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size, ...],
y_true[i * batch_size : (i + 1) * batch_size, ...],
)
engine = Engine(update)
m = MeanPairwiseDistance(device=metric_device)
m.attach(engine, "mpwd")
data = list(range(n_iters))
engine.run(data=data, max_epochs=1)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "mpwd" in engine.state.metrics
res = engine.state.metrics["mpwd"]
true_res = []
for i in range(n_iters * idist.get_world_size()):
true_res.append(
torch.pairwise_distance(
y_true[i * batch_size : (i + 1) * batch_size, ...],
y_preds[i * batch_size : (i + 1) * batch_size, ...],
p=m._p,
eps=m._eps,
)
.cpu()
.numpy()
)
true_res = np.array(true_res).ravel()
true_res = true_res.mean()
assert pytest.approx(res) == true_res
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_accumulator_device(device):
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
mpd = MeanPairwiseDistance(device=metric_device)
for dev in [mpd._device, mpd._sum_of_distances.device]:
assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
y_pred = torch.tensor([[3.0, 4.0], [-3.0, -4.0]])
y = torch.zeros(2, 2)
mpd.update((y_pred, y))
for dev in [mpd._device, mpd._sum_of_distances.device]:
assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
def test_accumulator_detached():
mpd = MeanPairwiseDistance()
y_pred = torch.tensor([[3.0, 4.0], [-3.0, -4.0]], requires_grad=True)
y = torch.zeros(2, 2)
mpd.update((y_pred, y))
assert not mpd._sum_of_distances.requires_grad
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_accumulator_device, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import pytest
import torch
import ignite.distributed as idist
from ignite.engine import Engine
from ignite.metrics import EpochMetric
from ignite.metrics.epoch_metric import EpochMetricWarning, NotComputableError
def test_epoch_metric_wrong_setup_or_input():
# Wrong compute function
with pytest.raises(TypeError, match=r"Argument compute_fn should be callable."):
EpochMetric(12345)
def compute_fn(y_preds, y_targets):
return 0.0
em = EpochMetric(compute_fn)
# Wrong input dims
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
output = (torch.tensor(0), torch.tensor(0))
em.update(output)
# Wrong input dims
with pytest.raises(ValueError, match=r"Targets should be of shape"):
output = (torch.rand(4, 3), torch.rand(4, 3, 1))
em.update(output)
# Wrong input dims
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
output = (torch.rand(4, 3, 1), torch.rand(4, 3))
em.update(output)
em.reset()
output1 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
em.update(output1)
with pytest.raises(ValueError, match=r"Incoherent types between input y_pred and stored predictions"):
output2 = (torch.randint(0, 5, size=(4, 3)), torch.randint(0, 2, size=(4, 3)))
em.update(output2)
with pytest.raises(ValueError, match=r"Incoherent types between input y and stored targets"):
output2 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3)).to(torch.int32))
em.update(output2)
with pytest.raises(
NotComputableError, match="EpochMetric must have at least one example before it can be computed"
):
em = EpochMetric(compute_fn)
em.compute()
def test_epoch_metric():
def compute_fn(y_preds, y_targets):
return 0.0
em = EpochMetric(compute_fn)
em.reset()
output1 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
em.update(output1)
output2 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
em.update(output2)
assert all([t.device.type == "cpu" for t in em._predictions + em._targets])
assert torch.equal(em._predictions[0], output1[0])
assert torch.equal(em._predictions[1], output2[0])
assert torch.equal(em._targets[0], output1[1])
assert torch.equal(em._targets[1], output2[1])
assert em.compute() == 0.0
# test when y and y_pred are (batch_size, 1) that are squeezed to (batch_size, )
em.reset()
output1 = (torch.rand(4, 1), torch.randint(0, 2, size=(4, 1), dtype=torch.long))
em.update(output1)
output2 = (torch.rand(4, 1), torch.randint(0, 2, size=(4, 1), dtype=torch.long))
em.update(output2)
assert all([t.device.type == "cpu" for t in em._predictions + em._targets])
assert torch.equal(em._predictions[0], output1[0][:, 0])
assert torch.equal(em._predictions[1], output2[0][:, 0])
assert torch.equal(em._targets[0], output1[1][:, 0])
assert torch.equal(em._targets[1], output2[1][:, 0])
assert em.compute() == 0.0
def test_mse_epoch_metric():
def compute_fn(y_preds, y_targets):
return torch.mean(((y_preds - y_targets.type_as(y_preds)) ** 2)).item()
em = EpochMetric(compute_fn)
em.reset()
output1 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
em.update(output1)
output2 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
em.update(output2)
output3 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
em.update(output3)
preds = torch.cat([output1[0], output2[0], output3[0]], dim=0)
targets = torch.cat([output1[1], output2[1], output3[1]], dim=0)
result = em.compute()
assert result == compute_fn(preds, targets)
em.reset()
output1 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
em.update(output1)
output2 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
em.update(output2)
output3 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
em.update(output3)
preds = torch.cat([output1[0], output2[0], output3[0]], dim=0)
targets = torch.cat([output1[1], output2[1], output3[1]], dim=0)
result = em.compute()
assert result == compute_fn(preds, targets)
def test_bad_compute_fn():
def compute_fn(y_preds, y_targets):
# Following will raise the error:
# The size of tensor a (3) must match the size of tensor b (4)
# at non-singleton dimension 1
return torch.mean(y_preds - y_targets).item()
em = EpochMetric(compute_fn)
em.reset()
output1 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 4), dtype=torch.long))
with pytest.warns(EpochMetricWarning, match=r"Probably, there can be a problem with `compute_fn`"):
em.update(output1)
def test_check_compute_fn():
def compute_fn(y_preds, y_targets):
raise Exception
em = EpochMetric(compute_fn, check_compute_fn=True)
em.reset()
output1 = (torch.rand(4, 3), torch.randint(0, 2, size=(4, 3), dtype=torch.long))
with pytest.warns(EpochMetricWarning, match=r"Probably, there can be a problem with `compute_fn`"):
em.update(output1)
em = EpochMetric(compute_fn, check_compute_fn=False)
em.update(output1)
def test_distrib_integration(distributed):
device = idist.device() if idist.device().type != "xla" else "cpu"
rank = idist.get_rank()
torch.manual_seed(40 + rank)
n_iters = 3
batch_size = 2
n_classes = 7
y_true = torch.randint(0, n_classes, size=(n_iters * batch_size,), device=device)
y_preds = torch.rand(n_iters * batch_size, n_classes, device=device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size, :],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
def assert_data_fn(all_preds, all_targets):
return (all_preds.argmax(dim=1) == all_targets).sum().item()
ep_metric = EpochMetric(assert_data_fn, check_compute_fn=False, device=device)
ep_metric.attach(engine, "epm")
data = list(range(n_iters))
engine.run(data=data, max_epochs=3)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
ep_metric_true = (y_preds.argmax(dim=1) == y_true).sum().item()
assert engine.state.metrics["epm"] == ep_metric_true
assert ep_metric.compute() == ep_metric_true
|
# Needed to collect coverage data
|
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import MeanAbsoluteError
def test_no_update():
mae = MeanAbsoluteError()
with pytest.raises(
NotComputableError, match=r"MeanAbsoluteError must have at least one example before it can be computed"
):
mae.compute()
@pytest.fixture(params=[item for item in range(4)])
def test_case(request):
return [
(torch.randint(0, 10, size=(100, 1)), torch.randint(0, 10, size=(100, 1)), 1),
(torch.randint(-10, 10, size=(100, 5)), torch.randint(-10, 10, size=(100, 5)), 1),
# updated batches
(torch.randint(0, 10, size=(100, 1)), torch.randint(0, 10, size=(100, 1)), 16),
(torch.randint(-20, 20, size=(100, 5)), torch.randint(-20, 20, size=(100, 5)), 16),
][request.param]
@pytest.mark.parametrize("n_times", range(5))
def test_compute(n_times, test_case):
mae = MeanAbsoluteError()
y_pred, y, batch_size = test_case
mae.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
mae.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
mae.update((y_pred, y, batch_size))
np_y = y.numpy()
np_y_pred = y_pred.numpy()
np_res = (np.abs(np_y_pred - np_y)).sum() / np_y.shape[0]
assert isinstance(mae.compute(), float)
assert mae.compute() == np_res
def _test_distrib_integration(device):
import numpy as np
from ignite.engine import Engine
rank = idist.get_rank()
def _test(metric_device):
n_iters = 80
batch_size = 50
torch.manual_seed(12 + rank)
y_true = torch.arange(0, n_iters * batch_size, dtype=torch.float).to(device)
y_preds = torch.ones(n_iters * batch_size, dtype=torch.float).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
m = MeanAbsoluteError(device=metric_device)
m.attach(engine, "mae")
data = list(range(n_iters))
engine.run(data=data, max_epochs=1)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "mae" in engine.state.metrics
res = engine.state.metrics["mae"]
true_res = np.mean(np.abs((y_true - y_preds).cpu().numpy()))
assert pytest.approx(res) == true_res
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_accumulator_device(device):
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
mae = MeanAbsoluteError(device=metric_device)
for dev in [mae._device, mae._sum_of_absolute_errors.device]:
assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
y_pred = torch.tensor([[2.0], [-2.0]])
y = torch.zeros(2)
mae.update((y_pred, y))
for dev in [mae._device, mae._sum_of_absolute_errors.device]:
assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
def test_accumulator_detached():
mae = MeanAbsoluteError()
y_pred = torch.tensor([[2.0], [-2.0]], requires_grad=True)
y = torch.zeros(2)
mae.update((y_pred, y))
assert not mae._sum_of_absolute_errors.requires_grad
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_accumulator_device, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
from pytest import approx
from sklearn.metrics import f1_score, precision_score, recall_score
import ignite.distributed as idist
from ignite.engine import Engine
from ignite.metrics import Metric, MetricsLambda, Precision, Recall
class ListGatherMetric(Metric):
def __init__(self, index):
super(ListGatherMetric, self).__init__()
self.index = index
def reset(self):
self.list_ = None
def update(self, output):
self.list_ = output
def compute(self):
return self.list_[self.index]
def test_metrics_lambda():
m0 = ListGatherMetric(0)
m1 = ListGatherMetric(1)
m2 = ListGatherMetric(2)
def process_function(engine, data):
return data
engine = Engine(process_function)
def plus(this, other):
return this + other
m0_plus_m1 = MetricsLambda(plus, m0, other=m1)
m2_plus_2 = MetricsLambda(plus, m2, 2)
m0_plus_m1.attach(engine, "m0_plus_m1")
m2_plus_2.attach(engine, "m2_plus_2")
engine.run([[1, 10, 100]])
assert engine.state.metrics["m0_plus_m1"] == 11
assert engine.state.metrics["m2_plus_2"] == 102
engine.run([[2, 20, 200]])
assert engine.state.metrics["m0_plus_m1"] == 22
assert engine.state.metrics["m2_plus_2"] == 202
# metrics are partially attached
assert not m0.is_attached(engine)
assert not m1.is_attached(engine)
assert not m2.is_attached(engine)
# a dependency is detached
m0.detach(engine)
# so the lambda metric is too
assert not m0_plus_m1.is_attached(engine)
# the lambda is attached again
m0_plus_m1.attach(engine, "m0_plus_m1")
assert m0_plus_m1.is_attached(engine)
# metrics are always partially attached
assert not m0.is_attached(engine)
m0_plus_m1.detach(engine)
assert not m0_plus_m1.is_attached(engine)
# detached (and no longer partially attached)
assert not m0.is_attached(engine)
def test_metrics_lambda_reset():
m0 = ListGatherMetric(0)
m1 = ListGatherMetric(1)
m2 = ListGatherMetric(2)
m0.update([1, 10, 100])
m1.update([1, 10, 100])
m2.update([1, 10, 100])
def fn(x, y, z, t):
return 1
m = MetricsLambda(fn, m0, m1, z=m2, t=0)
# initiating a new instance of MetricsLambda must reset
# its argument metrics
assert m0.list_ is None
assert m1.list_ is None
assert m2.list_ is None
m0.update([1, 10, 100])
m1.update([1, 10, 100])
m2.update([1, 10, 100])
m.reset()
assert m0.list_ is None
assert m1.list_ is None
assert m2.list_ is None
def test_metrics_lambda_update_and_attach_together():
y_pred = torch.randint(0, 2, size=(15, 10, 4)).float()
y = torch.randint(0, 2, size=(15, 10, 4)).long()
def update_fn(engine, batch):
y_pred, y = batch
return y_pred, y
engine = Engine(update_fn)
precision = Precision(average=False)
recall = Recall(average=False)
def Fbeta(r, p, beta):
return torch.mean((1 + beta**2) * p * r / (beta**2 * p + r)).item()
F1 = MetricsLambda(Fbeta, recall, precision, 1)
F1.attach(engine, "f1")
with pytest.raises(ValueError, match=r"MetricsLambda is already attached to an engine"):
F1.update((y_pred, y))
y_pred = torch.randint(0, 2, size=(15, 10, 4)).float()
y = torch.randint(0, 2, size=(15, 10, 4)).long()
F1 = MetricsLambda(Fbeta, recall, precision, 1)
F1.update((y_pred, y))
engine = Engine(update_fn)
with pytest.raises(ValueError, match=r"The underlying metrics are already updated"):
F1.attach(engine, "f1")
F1.reset()
F1.attach(engine, "f1")
def test_metrics_lambda_update():
"""
Test if the underlying metrics are updated
"""
y_pred = torch.randint(0, 2, size=(15, 10, 4)).float()
y = torch.randint(0, 2, size=(15, 10, 4)).long()
precision = Precision(average=False)
recall = Recall(average=False)
def Fbeta(r, p, beta):
return torch.mean((1 + beta**2) * p * r / (beta**2 * p + r)).item()
F1 = MetricsLambda(Fbeta, recall, precision, 1)
F1.update((y_pred, y))
assert precision._updated
assert recall._updated
F1.reset()
assert not precision._updated
assert not recall._updated
"""
Test multiple updates and if the inputs of
the underlying metrics are updated multiple times
"""
y_pred1 = torch.randint(0, 2, size=(15,))
y1 = torch.randint(0, 2, size=(15,))
y_pred2 = torch.randint(0, 2, size=(15,))
y2 = torch.randint(0, 2, size=(15,))
F1.update((y_pred1, y1))
F1.update((y_pred2, y2))
# Compute true_positives and positives for precision
correct1 = y1 * y_pred1
all_positives1 = y_pred1.sum(dim=0)
if correct1.sum() == 0:
true_positives1 = torch.zeros_like(all_positives1)
else:
true_positives1 = correct1.sum(dim=0)
correct2 = y2 * y_pred2
all_positives2 = y_pred2.sum(dim=0)
if correct2.sum() == 0:
true_positives2 = torch.zeros_like(all_positives2)
else:
true_positives2 = correct2.sum(dim=0)
true_positives = true_positives1 + true_positives2
positives = all_positives1 + all_positives2
assert precision._type == "binary"
assert precision._numerator == true_positives
assert precision._denominator == positives
# Computing positivies for recall is different
positives1 = y1.sum(dim=0)
positives2 = y2.sum(dim=0)
positives = positives1 + positives2
assert recall._type == "binary"
assert recall._numerator == true_positives
assert recall._denominator == positives
"""
Test compute
"""
F1.reset()
F1.update((y_pred1, y1))
F1_metrics_lambda = F1.compute()
F1_sklearn = f1_score(y1.numpy(), y_pred1.numpy())
assert pytest.approx(F1_metrics_lambda) == F1_sklearn
@pytest.mark.parametrize("attach_pr_re", [True, False])
def test_integration(attach_pr_re):
torch.manual_seed(1)
n_iters = 10
batch_size = 10
n_classes = 10
y_true = torch.arange(0, n_iters * batch_size) % n_classes
y_pred = 0.2 * torch.rand(n_iters * batch_size, n_classes)
for i in range(n_iters * batch_size):
if torch.rand(1) > 0.4:
y_pred[i, y_true[i]] = 1.0
else:
j = torch.randint(0, n_classes, size=(1,))
y_pred[i, j] = 0.7
y_true_batch_values = iter(y_true.reshape(n_iters, batch_size))
y_pred_batch_values = iter(y_pred.reshape(n_iters, batch_size, n_classes))
def update_fn(engine, batch):
y_true_batch = next(y_true_batch_values)
y_pred_batch = next(y_pred_batch_values)
return y_pred_batch, y_true_batch
evaluator = Engine(update_fn)
precision = Precision(average=False)
recall = Recall(average=False)
def Fbeta(r, p, beta):
return torch.mean((1 + beta**2) * p * r / (beta**2 * p + r)).item()
F1 = MetricsLambda(Fbeta, recall, precision, 1)
if attach_pr_re:
precision.attach(evaluator, "precision")
recall.attach(evaluator, "recall")
F1.attach(evaluator, "f1")
data = list(range(n_iters))
state = evaluator.run(data, max_epochs=1)
precision_true = precision_score(y_true, y_pred.argmax(dim=-1), average=None)
recall_true = recall_score(y_true, y_pred.argmax(dim=-1), average=None)
f1_true = f1_score(y_true, y_pred.argmax(dim=-1), average="macro")
assert f1_true == approx(state.metrics["f1"]), f"{f1_true} vs {state.metrics['f1']}"
if attach_pr_re:
precision = state.metrics["precision"].numpy()
recall = state.metrics["recall"].numpy()
assert precision_true == approx(precision), f"{precision_true} vs {precision}"
assert recall_true == approx(recall), f"{recall_true} vs {recall}"
def test_state_metrics():
y_pred = torch.randint(0, 2, size=(15, 10, 4)).float()
y = torch.randint(0, 2, size=(15, 10, 4)).long()
def update_fn(engine, batch):
y_pred, y = batch
return y_pred, y
evaluator = Engine(update_fn)
precision = Precision(average=False)
recall = Recall(average=False)
F1 = precision * recall * 2 / (precision + recall + 1e-20)
F1 = MetricsLambda(lambda t: torch.mean(t).item(), F1)
precision.attach(evaluator, "precision")
recall.attach(evaluator, "recall")
F1.attach(evaluator, "f1")
def data(y_pred, y):
for i in range(y_pred.shape[0]):
yield (y_pred[i], y[i])
d = data(y_pred, y)
state = evaluator.run(d, max_epochs=1, epoch_length=y_pred.shape[0])
assert set(state.metrics.keys()) == set(["precision", "recall", "f1"])
def test_state_metrics_ingredients_not_attached():
y_pred = torch.randint(0, 2, size=(15, 10, 4)).float()
y = torch.randint(0, 2, size=(15, 10, 4)).long()
def update_fn(engine, batch):
y_pred, y = batch
return y_pred, y
evaluator = Engine(update_fn)
precision = Precision(average=False)
recall = Recall(average=False)
F1 = precision * recall * 2 / (precision + recall + 1e-20)
F1 = MetricsLambda(lambda t: torch.mean(t).item(), F1)
F1.attach(evaluator, "F1")
def data(y_pred, y):
for i in range(y_pred.shape[0]):
yield (y_pred[i], y[i])
d = data(y_pred, y)
state = evaluator.run(d, max_epochs=1, epoch_length=y_pred.shape[0])
assert set(state.metrics.keys()) == set(["F1"])
def test_recursive_attachment():
def _test(composed_metric, metric_name, compute_true_value_fn):
metrics = {
metric_name: composed_metric,
}
y_pred = torch.randint(0, 2, size=(15, 10, 4)).float()
y = torch.randint(0, 2, size=(15, 10, 4)).long()
def update_fn(engine, batch):
y_pred, y = batch
return y_pred, y
validator = Engine(update_fn)
for name, metric in metrics.items():
metric.attach(validator, name)
def data(y_pred, y):
for i in range(y_pred.shape[0]):
yield (y_pred[i], y[i])
d = data(y_pred, y)
state = validator.run(d, max_epochs=1, epoch_length=y_pred.shape[0])
assert set(state.metrics.keys()) == set([metric_name])
np_y_pred = y_pred.numpy().ravel()
np_y = y.numpy().ravel()
assert state.metrics[metric_name] == approx(compute_true_value_fn(np_y_pred, np_y))
precision_1 = Precision()
precision_2 = Precision()
summed_precision = precision_1 + precision_2
def compute_true_summed_precision(y_pred, y):
p1 = precision_score(y, y_pred)
p2 = precision_score(y, y_pred)
return p1 + p2
_test(summed_precision, "summed precision", compute_true_value_fn=compute_true_summed_precision)
precision_1 = Precision()
precision_2 = Precision()
mean_precision = (precision_1 + precision_2) / 2
def compute_true_mean_precision(y_pred, y):
p1 = precision_score(y, y_pred)
p2 = precision_score(y, y_pred)
return (p1 + p2) * 0.5
_test(mean_precision, "mean precision", compute_true_value_fn=compute_true_mean_precision)
precision_1 = Precision()
precision_2 = Precision()
some_metric = 2.0 + 0.2 * (precision_1 * precision_2 + precision_1 - precision_2) ** 0.5
def compute_true_somemetric(y_pred, y):
p1 = precision_score(y, y_pred)
p2 = precision_score(y, y_pred)
return 2.0 + 0.2 * (p1 * p2 + p1 - p2) ** 0.5
_test(some_metric, "some metric", compute_true_somemetric)
def _test_distrib_integration(device):
rank = idist.get_rank()
n_iters = 10
batch_size = 10
n_classes = 10
def _test(metric_device):
y_true = torch.arange(0, n_iters * batch_size, dtype=torch.int64).to(device) % n_classes
y_pred = 0.2 * torch.rand(n_iters * batch_size, n_classes).to(device)
for i in range(n_iters * batch_size):
if np.random.rand() > 0.4:
y_pred[i, y_true[i]] = 1.0
else:
j = np.random.randint(0, n_classes)
y_pred[i, j] = 0.7
def update_fn(engine, i):
y_true_batch = y_true[i * batch_size : (i + 1) * batch_size, ...]
y_pred_batch = y_pred[i * batch_size : (i + 1) * batch_size, ...]
return y_pred_batch, y_true_batch
evaluator = Engine(update_fn)
precision = Precision(average=False, device=metric_device)
recall = Recall(average=False, device=metric_device)
def Fbeta(r, p, beta):
return torch.mean((1 + beta**2) * p * r / (beta**2 * p + r)).item()
F1 = MetricsLambda(Fbeta, recall, precision, 1)
F1.attach(evaluator, "f1")
another_f1 = (1.0 + precision * recall * 2 / (precision + recall + 1e-20)).mean().item()
another_f1.attach(evaluator, "ff1")
data = list(range(n_iters))
state = evaluator.run(data, max_epochs=1)
y_pred = idist.all_gather(y_pred)
y_true = idist.all_gather(y_true)
assert "f1" in state.metrics
assert "ff1" in state.metrics
f1_true = f1_score(y_true.view(-1).cpu(), y_pred.view(-1, n_classes).argmax(dim=-1).cpu(), average="macro")
assert f1_true == approx(state.metrics["f1"])
assert 1.0 + f1_true == approx(state.metrics["ff1"])
for i in range(3):
torch.manual_seed(12 + rank + i)
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_metrics_on_diff_devices(device):
n_classes = 10
n_iters = 12
batch_size = 16
rank = idist.get_rank()
torch.manual_seed(12 + rank)
y_true = torch.randint(0, n_classes, size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(n_iters * batch_size, n_classes).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size, :],
y_true[i * batch_size : (i + 1) * batch_size],
)
evaluator = Engine(update)
precision = Precision(average=False, device="cpu")
recall = Recall(average=False, device=device)
def Fbeta(r, p, beta):
return torch.mean((1 + beta**2) * p * r / (beta**2 * p + r)).item()
F1 = MetricsLambda(Fbeta, recall, precision, 1)
F1.attach(evaluator, "f1")
another_f1 = (1.0 + precision * recall * 2 / (precision + recall + 1e-20)).mean().item()
another_f1.attach(evaluator, "ff1")
data = list(range(n_iters))
state = evaluator.run(data, max_epochs=1)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "f1" in state.metrics
assert "ff1" in state.metrics
f1_true = f1_score(y_true.view(-1).cpu(), y_preds.view(-1, n_classes).argmax(dim=-1).cpu(), average="macro")
assert f1_true == approx(state.metrics["f1"])
assert 1.0 + f1_true == approx(state.metrics["ff1"])
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_metrics_on_diff_devices(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_metrics_on_diff_devices, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_metrics_on_diff_devices(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import numpy as np
import pytest
import torch
from skimage.metrics import peak_signal_noise_ratio as ski_psnr
import ignite.distributed as idist
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
from ignite.metrics import PSNR
from ignite.utils import manual_seed
def test_zero_div():
psnr = PSNR(1.0)
with pytest.raises(NotComputableError, match="PSNR must have at least one example before it can be computed"):
psnr.compute()
def test_invalid_psnr():
y_pred = torch.rand(1, 3, 8, 8)
y = torch.rand(1, 3, 8, 8)
psnr = PSNR(1.0)
with pytest.raises(TypeError, match="Expected y_pred and y to have the same data type."):
psnr.update((y_pred, y.double()))
with pytest.raises(ValueError, match="Expected y_pred and y to have the same shape."):
psnr.update((y_pred, y.squeeze(dim=0)))
@pytest.fixture(params=["float", "YCbCr", "uint8", "NHW shape"])
def test_data(request, available_device):
manual_seed(42)
if request.param == "float":
y_pred = torch.rand(8, 3, 28, 28, device=available_device)
y = y_pred * 0.8
elif request.param == "YCbCr":
y_pred = torch.randint(16, 236, (4, 1, 12, 12), dtype=torch.uint8, device=available_device)
y = torch.randint(16, 236, (4, 1, 12, 12), dtype=torch.uint8, device=available_device)
elif request.param == "uint8":
y_pred = torch.randint(0, 256, (4, 3, 16, 16), dtype=torch.uint8, device=available_device)
y = (y_pred * 0.8).to(torch.uint8)
elif request.param == "NHW shape":
y_pred = torch.rand(8, 28, 28, device=available_device)
y = y_pred * 0.8
else:
raise ValueError(f"Wrong fixture parameter, given {request.param}")
return (y_pred, y)
def test_psnr(test_data, available_device):
y_pred, y = test_data
data_range = (y.max() - y.min()).cpu().item()
psnr = PSNR(data_range=data_range, device=available_device)
psnr.update(test_data)
psnr_compute = psnr.compute()
np_y_pred = y_pred.cpu().numpy()
np_y = y.cpu().numpy()
np_psnr = 0
for np_y_pred_, np_y_ in zip(np_y_pred, np_y):
np_psnr += ski_psnr(np_y_, np_y_pred_, data_range=data_range)
assert psnr_compute > 0.0
assert isinstance(psnr_compute, float)
assert np.allclose(psnr_compute, np_psnr / np_y.shape[0])
def _test(
y_pred,
y,
data_range,
metric_device,
n_iters,
batch_size,
atol,
output_transform=lambda x: x,
compute_y_channel=False,
):
def update(engine, i):
return (
y_pred[i * batch_size : (i + 1) * batch_size],
y[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
psnr = PSNR(data_range=data_range, output_transform=output_transform, device=metric_device)
psnr.attach(engine, "psnr")
data = list(range(n_iters))
engine.run(data=data, max_epochs=1)
y = idist.all_gather(y)
y_pred = idist.all_gather(y_pred)
assert "psnr" in engine.state.metrics
result = engine.state.metrics["psnr"]
assert result > 0.0
if compute_y_channel:
np_y_pred = y_pred[:, 0, ...].cpu().numpy()
np_y = y[:, 0, ...].cpu().numpy()
else:
np_y_pred = y_pred.cpu().numpy()
np_y = y.cpu().numpy()
np_psnr = 0
for np_y_pred_, np_y_ in zip(np_y_pred, np_y):
np_psnr += ski_psnr(np_y_, np_y_pred_, data_range=data_range)
assert np.allclose(result, np_psnr / np_y.shape[0], atol=atol)
def test_distrib_input_float(distributed):
device = idist.device()
def get_test_cases():
y_pred = torch.rand(n_iters * batch_size, 2, 2, device=device)
y = y_pred * 0.65
return y_pred, y
n_iters = 100
batch_size = 10
rank = idist.get_rank()
for i in range(3):
# check multiple random inputs as random exact occurencies are rare
torch.manual_seed(42 + rank + i)
y_pred, y = get_test_cases()
_test(y_pred, y, 1, "cpu", n_iters, batch_size, atol=1e-8)
if device.type != "xla":
_test(y_pred, y, 1, idist.device(), n_iters, batch_size, atol=1e-8)
def test_distrib_multilabel_input_YCbCr(distributed):
device = idist.device()
def get_test_cases():
y_pred = torch.randint(16, 236, (n_iters * batch_size, 1, 12, 12), dtype=torch.uint8, device=device)
cbcr_pred = torch.randint(16, 241, (n_iters * batch_size, 2, 12, 12), dtype=torch.uint8, device=device)
y = torch.randint(16, 236, (n_iters * batch_size, 1, 12, 12), dtype=torch.uint8, device=device)
cbcr = torch.randint(16, 241, (n_iters * batch_size, 2, 12, 12), dtype=torch.uint8, device=device)
y_pred, y = torch.cat((y_pred, cbcr_pred), dim=1), torch.cat((y, cbcr), dim=1)
return y_pred, y
n_iters = 100
batch_size = 10
def out_fn(x):
return x[0][:, 0, ...], x[1][:, 0, ...]
rank = idist.get_rank()
for i in range(3):
# check multiple random inputs as random exact occurencies are rare
torch.manual_seed(42 + rank + i)
y_pred, y = get_test_cases()
_test(y_pred, y, 220, "cpu", n_iters, batch_size, atol=1e-8, output_transform=out_fn, compute_y_channel=True)
if device.type != "xla":
dev = idist.device()
_test(y_pred, y, 220, dev, n_iters, batch_size, atol=1e-8, output_transform=out_fn, compute_y_channel=True)
def test_distrib_multilabel_input_uint8(distributed):
device = idist.device()
def get_test_cases():
y_pred = torch.randint(0, 256, (n_iters * batch_size, 3, 16, 16), device=device, dtype=torch.uint8)
y = (y_pred * 0.65).to(torch.uint8)
return y_pred, y
n_iters = 100
batch_size = 10
rank = idist.get_rank()
for i in range(3):
# check multiple random inputs as random exact occurencies are rare
torch.manual_seed(42 + rank + i)
y_pred, y = get_test_cases()
_test(y_pred, y, 100, "cpu", n_iters, batch_size, atol=1e-8)
if device.type != "xla":
_test(y_pred, y, 100, idist.device(), n_iters, batch_size, atol=1e-8)
def test_distrib_multilabel_input_NHW(distributed):
device = idist.device()
def get_test_cases():
y_pred = torch.rand(n_iters * batch_size, 28, 28, device=device)
y = y_pred * 0.8
return y_pred, y
n_iters = 100
batch_size = 10
rank = idist.get_rank()
for i in range(3):
# check multiple random inputs as random exact occurencies are rare
torch.manual_seed(42 + rank + i)
y_pred, y = get_test_cases()
_test(y_pred, y, 10, "cpu", n_iters, batch_size, atol=1e-8)
if device.type != "xla":
_test(y_pred, y, 10, idist.device(), n_iters, batch_size, atol=1e-8)
def test_distrib_accumulator_device(distributed):
device = idist.device()
metric_devices = [torch.device("cpu")]
if torch.device(device).type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
psnr = PSNR(data_range=1.0, device=metric_device)
dev = psnr._device
assert dev == metric_device, f"{dev} vs {metric_device}"
y_pred = torch.rand(2, 3, 28, 28, dtype=torch.float, device=device)
y = y_pred * 0.65
psnr.update((y_pred, y))
dev = psnr._sum_of_batchwise_psnr.device
assert dev == metric_device, f"{dev} vs {metric_device}"
|
import os
import pytest
import torch
from sklearn.metrics import accuracy_score
import ignite.distributed as idist
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
from ignite.metrics import Accuracy
torch.manual_seed(12)
def test_no_update():
acc = Accuracy()
with pytest.raises(NotComputableError, match=r"Accuracy must have at least one example before it can be computed"):
acc.compute()
def test__check_shape():
acc = Accuracy()
with pytest.raises(ValueError, match=r"y and y_pred must have compatible shapes"):
acc._check_shape((torch.randint(0, 2, size=(10, 1, 5, 12)).long(), torch.randint(0, 2, size=(10, 5, 6)).long()))
with pytest.raises(ValueError, match=r"y and y_pred must have compatible shapes"):
acc._check_shape((torch.randint(0, 2, size=(10, 1, 6)).long(), torch.randint(0, 2, size=(10, 5, 6)).long()))
with pytest.raises(ValueError, match=r"y and y_pred must have compatible shapes"):
acc._check_shape((torch.randint(0, 2, size=(10, 1)).long(), torch.randint(0, 2, size=(10, 5)).long()))
def test__check_type():
acc = Accuracy()
with pytest.raises(RuntimeError, match=r"Invalid shapes of y"):
acc._check_type((torch.rand([1, 1, 1]), torch.rand([1])))
def test_binary_wrong_inputs():
acc = Accuracy()
with pytest.raises(ValueError, match=r"For binary cases, y must be comprised of 0's and 1's"):
# y has not only 0 or 1 values
acc.update((torch.randint(0, 2, size=(10,)).long(), torch.arange(0, 10).long()))
with pytest.raises(ValueError, match=r"For binary cases, y_pred must be comprised of 0's and 1's"):
# y_pred values are not thresholded to 0, 1 values
acc.update((torch.rand(10), torch.randint(0, 2, size=(10,)).long()))
with pytest.raises(ValueError, match=r"y must have shape of "):
# incompatible shapes
acc.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10, 5)).long()))
with pytest.raises(ValueError, match=r"y must have shape of "):
# incompatible shapes
acc.update((torch.randint(0, 2, size=(10, 5, 6)).long(), torch.randint(0, 2, size=(10,)).long()))
with pytest.raises(ValueError, match=r"y must have shape of "):
# incompatible shapes
acc.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10, 5, 6)).long()))
@pytest.fixture(params=range(12))
def test_data_binary(request):
return [
# Binary accuracy on input of shape (N, 1) or (N, )
(torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10,)).long(), 1),
(torch.randint(0, 2, size=(10, 1)).long(), torch.randint(0, 2, size=(10, 1)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50,)).long(), torch.randint(0, 2, size=(50,)).long(), 16),
(torch.randint(0, 2, size=(50, 1)).long(), torch.randint(0, 2, size=(50, 1)).long(), 16),
# Binary accuracy on input of shape (N, L)
(torch.randint(0, 2, size=(10, 5)).long(), torch.randint(0, 2, size=(10, 5)).long(), 1),
(torch.randint(0, 2, size=(10, 8)).long(), torch.randint(0, 2, size=(10, 8)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50, 5)).long(), torch.randint(0, 2, size=(50, 5)).long(), 16),
(torch.randint(0, 2, size=(50, 8)).long(), torch.randint(0, 2, size=(50, 8)).long(), 16),
# Binary accuracy on input of shape (N, H, W, ...)
(torch.randint(0, 2, size=(4, 1, 12, 10)).long(), torch.randint(0, 2, size=(4, 1, 12, 10)).long(), 1),
(torch.randint(0, 2, size=(15, 1, 20, 10)).long(), torch.randint(0, 2, size=(15, 1, 20, 10)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50, 1, 12, 10)).long(), torch.randint(0, 2, size=(50, 1, 12, 10)).long(), 16),
(torch.randint(0, 2, size=(50, 1, 20, 10)).long(), torch.randint(0, 2, size=(50, 1, 20, 10)).long(), 16),
][request.param]
@pytest.mark.parametrize("n_times", range(5))
def test_binary_input(n_times, test_data_binary):
acc = Accuracy()
y_pred, y, batch_size = test_data_binary
acc.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
acc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
acc.update((y_pred, y))
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
assert acc._type == "binary"
assert isinstance(acc.compute(), float)
assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute())
def test_multiclass_wrong_inputs():
acc = Accuracy()
with pytest.raises(ValueError):
# incompatible shapes
acc.update((torch.rand(10, 5, 4), torch.randint(0, 2, size=(10,)).long()))
with pytest.raises(ValueError):
# incompatible shapes
acc.update((torch.rand(10, 5, 6), torch.randint(0, 5, size=(10, 5)).long()))
with pytest.raises(ValueError):
# incompatible shapes
acc.update((torch.rand(10), torch.randint(0, 5, size=(10, 5, 6)).long()))
@pytest.fixture(params=range(11))
def test_data_multiclass(request):
return [
# Multiclass input data of shape (N, ) and (N, C)
(torch.rand(10, 4), torch.randint(0, 4, size=(10,)).long(), 1),
(torch.rand(10, 10, 1), torch.randint(0, 18, size=(10, 1)).long(), 1),
(torch.rand(10, 18), torch.randint(0, 18, size=(10,)).long(), 1),
(torch.rand(4, 10), torch.randint(0, 10, size=(4,)).long(), 1),
# 2-classes
(torch.rand(4, 2), torch.randint(0, 2, size=(4,)).long(), 1),
(torch.rand(100, 5), torch.randint(0, 5, size=(100,)).long(), 16),
# Multiclass input data of shape (N, L) and (N, C, L)
(torch.rand(10, 4, 5), torch.randint(0, 4, size=(10, 5)).long(), 1),
(torch.rand(4, 10, 5), torch.randint(0, 10, size=(4, 5)).long(), 1),
(torch.rand(100, 9, 7), torch.randint(0, 9, size=(100, 7)).long(), 16),
# Multiclass input data of shape (N, H, W, ...) and (N, C, H, W, ...)
(torch.rand(4, 5, 12, 10), torch.randint(0, 5, size=(4, 12, 10)).long(), 1),
(torch.rand(100, 3, 8, 8), torch.randint(0, 3, size=(100, 8, 8)).long(), 16),
][request.param]
@pytest.mark.parametrize("n_times", range(5))
def test_multiclass_input(n_times, test_data_multiclass):
acc = Accuracy()
y_pred, y, batch_size = test_data_multiclass
acc.reset()
if batch_size > 1:
# Batched Updates
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
acc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
acc.update((y_pred, y))
np_y_pred = y_pred.numpy().argmax(axis=1).ravel()
np_y = y.numpy().ravel()
assert acc._type == "multiclass"
assert isinstance(acc.compute(), float)
assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute())
def to_numpy_multilabel(y):
# reshapes input array to (N x ..., C)
y = y.transpose(1, 0).cpu().numpy()
num_classes = y.shape[0]
y = y.reshape((num_classes, -1)).transpose(1, 0)
return y
def test_multilabel_wrong_inputs():
acc = Accuracy(is_multilabel=True)
with pytest.raises(ValueError):
# incompatible shapes
acc.update((torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10,)).long()))
with pytest.raises(ValueError):
# incompatible y_pred
acc.update((torch.rand(10, 5), torch.randint(0, 2, size=(10, 5)).long()))
with pytest.raises(ValueError):
# incompatible y
acc.update((torch.randint(0, 5, size=(10, 5, 6)), torch.rand(10)))
with pytest.raises(ValueError):
# incompatible binary shapes
acc.update((torch.randint(0, 2, size=(10, 1)), torch.randint(0, 2, size=(10, 1)).long()))
@pytest.fixture(params=range(12))
def test_data_multilabel(request):
return [
# Multilabel input data of shape (N, C) and (N, C)
(torch.randint(0, 2, size=(10, 4)).long(), torch.randint(0, 2, size=(10, 4)).long(), 1),
(torch.randint(0, 2, size=(10, 7)).long(), torch.randint(0, 2, size=(10, 7)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50, 4)).long(), torch.randint(0, 2, size=(50, 4)).long(), 16),
(torch.randint(0, 2, size=(50, 7)).long(), torch.randint(0, 2, size=(50, 7)).long(), 16),
# Multilabel input data of shape (N, H, W)
(torch.randint(0, 2, size=(10, 5, 10)).long(), torch.randint(0, 2, size=(10, 5, 10)).long(), 1),
(torch.randint(0, 2, size=(10, 4, 10)).long(), torch.randint(0, 2, size=(10, 4, 10)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50, 5, 10)).long(), torch.randint(0, 2, size=(50, 5, 10)).long(), 16),
(torch.randint(0, 2, size=(50, 4, 10)).long(), torch.randint(0, 2, size=(50, 4, 10)).long(), 16),
# Multilabel input data of shape (N, C, H, W, ...) and (N, C, H, W, ...)
(torch.randint(0, 2, size=(4, 5, 12, 10)).long(), torch.randint(0, 2, size=(4, 5, 12, 10)).long(), 1),
(torch.randint(0, 2, size=(4, 10, 12, 8)).long(), torch.randint(0, 2, size=(4, 10, 12, 8)).long(), 1),
# updated batches
(torch.randint(0, 2, size=(50, 5, 12, 10)).long(), torch.randint(0, 2, size=(50, 5, 12, 10)).long(), 16),
(torch.randint(0, 2, size=(50, 10, 12, 8)).long(), torch.randint(0, 2, size=(50, 10, 12, 8)).long(), 16),
][request.param]
@pytest.mark.parametrize("n_times", range(5))
def test_multilabel_input(n_times, test_data_multilabel):
acc = Accuracy(is_multilabel=True)
y_pred, y, batch_size = test_data_multilabel
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
acc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
acc.update((y_pred, y))
np_y_pred = to_numpy_multilabel(y_pred)
np_y = to_numpy_multilabel(y)
assert acc._type == "multilabel"
assert isinstance(acc.compute(), float)
assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute())
def test_incorrect_type():
acc = Accuracy()
# Start as binary data
y_pred = torch.randint(0, 2, size=(4,))
y = torch.ones(4).long()
acc.update((y_pred, y))
# And add a multiclass data
y_pred = torch.rand(4, 4)
y = torch.ones(4).long()
with pytest.raises(RuntimeError):
acc.update((y_pred, y))
def _test_distrib_multilabel_input_NHW(device):
# Multilabel input data of shape (N, C, H, W, ...) and (N, C, H, W, ...)
rank = idist.get_rank()
def _test(metric_device):
metric_device = torch.device(metric_device)
acc = Accuracy(is_multilabel=True, device=metric_device)
torch.manual_seed(10 + rank)
y_pred = torch.randint(0, 2, size=(4, 5, 8, 10), device=device).long()
y = torch.randint(0, 2, size=(4, 5, 8, 10), device=device).long()
acc.update((y_pred, y))
assert (
acc._num_correct.device == metric_device
), f"{type(acc._num_correct.device)}:{acc._num_correct.device} vs {type(metric_device)}:{metric_device}"
n = acc._num_examples
assert n == y.numel() / y.size(dim=1)
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = to_numpy_multilabel(y_pred.cpu()) # (N, C, H, W, ...) -> (N * H * W ..., C)
np_y = to_numpy_multilabel(y.cpu()) # (N, C, H, W, ...) -> (N * H * W ..., C)
assert acc._type == "multilabel"
res = acc.compute()
assert n == acc._num_examples
assert isinstance(res, float)
assert accuracy_score(np_y, np_y_pred) == pytest.approx(res)
acc.reset()
torch.manual_seed(10 + rank)
y_pred = torch.randint(0, 2, size=(4, 7, 10, 8), device=device).long()
y = torch.randint(0, 2, size=(4, 7, 10, 8), device=device).long()
acc.update((y_pred, y))
assert (
acc._num_correct.device == metric_device
), f"{type(acc._num_correct.device)}:{acc._num_correct.device} vs {type(metric_device)}:{metric_device}"
n = acc._num_examples
assert n == y.numel() / y.size(dim=1)
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = to_numpy_multilabel(y_pred.cpu()) # (N, C, H, W, ...) -> (N * H * W ..., C)
np_y = to_numpy_multilabel(y.cpu()) # (N, C, H, W, ...) -> (N * H * W ..., C)
assert acc._type == "multilabel"
res = acc.compute()
assert n == acc._num_examples
assert isinstance(res, float)
assert accuracy_score(np_y, np_y_pred) == pytest.approx(res)
# check that result is not changed
res = acc.compute()
assert n == acc._num_examples
assert isinstance(res, float)
assert accuracy_score(np_y, np_y_pred) == pytest.approx(res)
# Batched Updates
acc.reset()
torch.manual_seed(10 + rank)
y_pred = torch.randint(0, 2, size=(80, 5, 8, 10), device=device).long()
y = torch.randint(0, 2, size=(80, 5, 8, 10), device=device).long()
batch_size = 16
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
acc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
assert (
acc._num_correct.device == metric_device
), f"{type(acc._num_correct.device)}:{acc._num_correct.device} vs {type(metric_device)}:{metric_device}"
n = acc._num_examples
assert n == y.numel() / y.size(dim=1)
# gather y_pred, y
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
np_y_pred = to_numpy_multilabel(y_pred.cpu()) # (N, C, L, ...) -> (N * L * ..., C)
np_y = to_numpy_multilabel(y.cpu()) # (N, C, L, ...) -> (N * L ..., C)
assert acc._type == "multilabel"
res = acc.compute()
assert n == acc._num_examples
assert isinstance(res, float)
assert accuracy_score(np_y, np_y_pred) == pytest.approx(res)
# check multiple random inputs as random exact occurencies are rare
for _ in range(3):
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_integration_multiclass(device):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
n_classes = 10
torch.manual_seed(12 + rank)
y_true = torch.randint(0, n_classes, size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(n_iters * batch_size, n_classes).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size, :],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
acc = Accuracy(device=metric_device)
acc.attach(engine, "acc")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_true = idist.all_gather(y_true)
y_preds = idist.all_gather(y_preds)
assert (
acc._num_correct.device == metric_device
), f"{type(acc._num_correct.device)}:{acc._num_correct.device} vs {type(metric_device)}:{metric_device}"
assert "acc" in engine.state.metrics
res = engine.state.metrics["acc"]
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
true_res = accuracy_score(y_true.cpu().numpy(), torch.argmax(y_preds, dim=1).cpu().numpy())
assert pytest.approx(res) == true_res
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for _ in range(2):
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
def _test_distrib_integration_multilabel(device):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
n_classes = 10
torch.manual_seed(12 + rank)
y_true = torch.randint(0, 2, size=(n_iters * batch_size, n_classes, 8, 10)).to(device)
y_preds = torch.randint(0, 2, size=(n_iters * batch_size, n_classes, 8, 10)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size, ...],
y_true[i * batch_size : (i + 1) * batch_size, ...],
)
engine = Engine(update)
acc = Accuracy(is_multilabel=True, device=metric_device)
acc.attach(engine, "acc")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_true = idist.all_gather(y_true)
y_preds = idist.all_gather(y_preds)
assert (
acc._num_correct.device == metric_device
), f"{type(acc._num_correct.device)}:{acc._num_correct.device} vs {type(metric_device)}:{metric_device}"
assert "acc" in engine.state.metrics
res = engine.state.metrics["acc"]
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
true_res = accuracy_score(to_numpy_multilabel(y_true), to_numpy_multilabel(y_preds))
assert pytest.approx(res) == true_res
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for _ in range(2):
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
def _test_distrib_accumulator_device(device):
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
acc = Accuracy(device=metric_device)
assert acc._device == metric_device
assert (
acc._num_correct.device == metric_device
), f"{type(acc._num_correct.device)}:{acc._num_correct.device} vs {type(metric_device)}:{metric_device}"
y_pred = torch.randint(0, 2, size=(10,), device=device, dtype=torch.long)
y = torch.randint(0, 2, size=(10,), device=device, dtype=torch.long)
acc.update((y_pred, y))
assert (
acc._num_correct.device == metric_device
), f"{type(acc._num_correct.device)}:{acc._num_correct.device} vs {type(metric_device)}:{metric_device}"
def _test_distrib_integration_list_of_tensors_or_numbers(device):
rank = idist.get_rank()
def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
batch_size = 16
n_classes = 10
torch.manual_seed(12 + rank)
y_true = torch.randint(0, n_classes, size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(n_iters * batch_size, n_classes).to(device)
def update(_, i):
return (
[v for v in y_preds[i * batch_size : (i + 1) * batch_size, ...]],
[v.item() for v in y_true[i * batch_size : (i + 1) * batch_size]],
)
engine = Engine(update)
acc = Accuracy(device=metric_device)
acc.attach(engine, "acc")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_true = idist.all_gather(y_true)
y_preds = idist.all_gather(y_preds)
assert (
acc._num_correct.device == metric_device
), f"{type(acc._num_correct.device)}:{acc._num_correct.device} vs {type(metric_device)}:{metric_device}"
assert "acc" in engine.state.metrics
res = engine.state.metrics["acc"]
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
true_res = accuracy_score(y_true.cpu().numpy(), torch.argmax(y_preds, dim=1).cpu().numpy())
assert pytest.approx(res) == true_res
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for _ in range(2):
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_multilabel_input_NHW(device)
_test_distrib_integration_multiclass(device)
_test_distrib_integration_multilabel(device)
_test_distrib_accumulator_device(device)
_test_distrib_integration_list_of_tensors_or_numbers(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_multilabel_input_NHW(device)
_test_distrib_integration_multiclass(device)
_test_distrib_integration_multilabel(device)
_test_distrib_accumulator_device(device)
_test_distrib_integration_list_of_tensors_or_numbers(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_multilabel_input_NHW, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration_multiclass, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration_multilabel, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_accumulator_device, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration_list_of_tensors_or_numbers, (device,), np=nproc, do_init=True)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_multilabel_input_NHW(device)
_test_distrib_integration_multiclass(device)
_test_distrib_integration_multilabel(device)
_test_distrib_accumulator_device(device)
_test_distrib_integration_list_of_tensors_or_numbers(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_multilabel_input_NHW(device)
_test_distrib_integration_multiclass(device)
_test_distrib_integration_multilabel(device)
_test_distrib_accumulator_device(device)
_test_distrib_integration_list_of_tensors_or_numbers(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_multilabel_input_NHW(device)
_test_distrib_integration_multiclass(device)
_test_distrib_integration_multilabel(device)
_test_distrib_accumulator_device(device)
_test_distrib_integration_list_of_tensors_or_numbers(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_multilabel_input_NHW(device)
_test_distrib_integration_multiclass(device)
_test_distrib_integration_multilabel(device)
_test_distrib_accumulator_device(device)
_test_distrib_integration_list_of_tensors_or_numbers(device)
|
import os
import sys
import time
import pytest
import torch
import ignite.distributed as idist
from ignite.engine import Engine, Events
from ignite.metrics import Frequency
if sys.platform.startswith("darwin"):
pytest.skip("Skip if on MacOS", allow_module_level=True)
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_nondistributed_average():
artificial_time = 1 # seconds
num_tokens = 100
average_upper_bound = num_tokens / artificial_time
average_lower_bound = average_upper_bound * 0.9
freq_metric = Frequency()
freq_metric.reset()
time.sleep(artificial_time)
freq_metric.update(num_tokens)
average = freq_metric.compute()
assert average_lower_bound < average < average_upper_bound
def _test_frequency_with_engine(workers=None, lower_bound_factor=0.8, upper_bound_factor=1.1, every=1):
if workers is None:
workers = idist.get_world_size()
artificial_time = 1.0 / workers # seconds
total_tokens = 400 // workers
batch_size = 128 // workers
estimated_wps = batch_size * workers / artificial_time
def update_fn(engine, batch):
time.sleep(artificial_time)
return {"ntokens": len(batch)}
engine = Engine(update_fn)
wps_metric = Frequency(output_transform=lambda x: x["ntokens"])
event = Events.ITERATION_COMPLETED(every=every)
wps_metric.attach(engine, "wps", event_name=event)
@engine.on(event)
def assert_wps(e):
wps = e.state.metrics["wps"]
# Skip iterations 2, 3, 4 if backend is Horovod on CUDA,
# wps is abnormally low for these iterations
# otherwise, other values of wps are OK
if idist.model_name() == "horovod-dist" and e.state.iteration in (2, 3, 4):
return
low_wps = estimated_wps * lower_bound_factor
high_wps = estimated_wps * upper_bound_factor
assert low_wps < wps <= high_wps, f"{e.state.iteration}: {low_wps} < {wps} <= {high_wps}"
data = [[i] * batch_size for i in range(0, total_tokens, batch_size)]
engine.run(data, max_epochs=2)
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Skip on Windows")
def test_frequency_with_engine():
_test_frequency_with_engine(workers=1)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_frequency_with_engine_distributed(distributed_context_single_node_gloo):
_test_frequency_with_engine(workers=idist.get_world_size())
def test_frequency_with_engine_with_every():
_test_frequency_with_engine(workers=1, every=1)
_test_frequency_with_engine(workers=1, every=10)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_frequency_with_engine_distributed_with_every(distributed_context_single_node_gloo):
_test_frequency_with_engine(workers=idist.get_world_size(), every=1)
_test_frequency_with_engine(workers=idist.get_world_size(), every=10)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_frequency_with_engine, (None, 0.8, 1), np=nproc, do_init=True)
gloo_hvd_executor(_test_frequency_with_engine, (None, 0.8, 10), np=nproc, do_init=True)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
_test_frequency_with_engine(workers=idist.get_world_size(), every=10)
def _test_distrib_xla_nprocs(index):
_test_frequency_with_engine(workers=idist.get_world_size(), every=10)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
from torch.nn import Linear
from torch.optim import SGD
import ignite.distributed as idist
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
from ignite.metrics.accumulation import Average, GeometricAverage, VariableAccumulation
torch.manual_seed(15)
def test_variable_accumulation_wrong_inputs():
with pytest.raises(TypeError, match=r"Argument op should be a callable"):
VariableAccumulation(1)
with pytest.raises(TypeError, match=r"Output should be a number or torch.Tensor,"):
mean_acc = VariableAccumulation(lambda a, x: a + x)
mean_acc.update((1, 2))
with pytest.raises(TypeError, match=r"Output should be a number or torch.Tensor,"):
mean_acc = VariableAccumulation(lambda a, x: a + x)
mean_acc.update("a")
def test_variable_accumulation_mean_variable():
mean_var = VariableAccumulation(lambda a, x: a + x)
y_true = torch.rand(100)
for y in y_true:
mean_var.update(y)
a, n = mean_var.compute()
assert a.item() == pytest.approx(y_true.sum().item())
assert n == len(y_true)
mean_var = VariableAccumulation(lambda a, x: a + x)
y_true = torch.rand(100, 10)
for y in y_true:
mean_var.update(y)
a, n = mean_var.compute()
assert a.numpy() == pytest.approx(y_true.sum(dim=0).numpy())
assert n == len(y_true)
mean_var = VariableAccumulation(lambda a, x: a + x.sum(dim=0))
# iterate by batch of 16 samples
y_true = torch.rand(8, 16, 10)
for y in y_true:
mean_var.update(y)
a, n = mean_var.compute()
assert a.numpy() == pytest.approx(y_true.reshape(-1, 10).sum(dim=0).numpy())
assert n == y_true.shape[0] * y_true.shape[1]
def test_average():
with pytest.raises(NotComputableError):
v = Average()
v.compute()
mean_var = Average()
y_true = torch.rand(100) + torch.randint(0, 10, size=(100,)).float()
for y in y_true:
mean_var.update(y.item())
m = mean_var.compute()
assert m.item() == pytest.approx(y_true.mean().item())
mean_var = Average()
y_true = torch.rand(100, 10) + torch.randint(0, 10, size=(100, 10)).float()
for y in y_true:
mean_var.update(y)
m = mean_var.compute()
assert m.numpy() == pytest.approx(y_true.mean(dim=0).numpy())
mean_var = Average()
y_true = torch.rand(8, 16, 10) + torch.randint(0, 10, size=(8, 16, 10)).float()
for y in y_true:
mean_var.update(y)
m = mean_var.compute()
assert m.numpy() == pytest.approx(y_true.reshape(-1, 10).mean(dim=0).numpy())
def _geom_mean(t):
np_t = t.numpy()
return np.exp(np.mean(np.log(np_t), axis=0))
def _mean(y_true):
return y_true.mean(dim=0).numpy()
def test_geom_average():
with pytest.raises(NotComputableError):
v = GeometricAverage()
v.compute()
mean_var = GeometricAverage()
y_true = torch.rand(100) + torch.randint(0, 10, size=(100,)).float()
for y in y_true:
mean_var.update(y.item())
m = mean_var.compute()
assert m == pytest.approx(_geom_mean(y_true))
mean_var = GeometricAverage()
y_true = torch.rand(100, 10) + torch.randint(0, 10, size=(100, 10)).float()
for y in y_true:
mean_var.update(y)
m = mean_var.compute()
np.testing.assert_almost_equal(m.numpy(), _geom_mean(y_true), decimal=5)
mean_var = GeometricAverage()
y_true = torch.rand(8, 16, 10) + torch.randint(0, 10, size=(8, 16, 10)).float()
for y in y_true:
mean_var.update(y)
m = mean_var.compute()
np.testing.assert_almost_equal(m.numpy(), _geom_mean(y_true.reshape(-1, 10)), decimal=5)
@pytest.mark.parametrize("metric_cls, true_result_fn", [(Average, _mean), (GeometricAverage, _geom_mean)])
@pytest.mark.parametrize("shape", [[100, 12], [100]])
def test_integration(metric_cls, true_result_fn, shape):
assert len(shape) > 0 and len(shape) < 3
custom_variable = 10.0 + 5.0 * torch.rand(shape)
def update_fn(engine, batch):
output = custom_variable[engine.state.iteration - 1]
output = output.item() if output.ndimension() < 1 else output
return 0, output
engine = Engine(update_fn)
custom_var_mean = metric_cls(output_transform=lambda output: output[1])
custom_var_mean.attach(engine, "agg_custom_var")
state = engine.run([0] * shape[0])
np.testing.assert_almost_equal(
np.array(state.metrics["agg_custom_var"]), true_result_fn(custom_variable), decimal=5
)
def test_compute_mean_std():
n = 8
b = 12
c = 3
w = h = 64
true_data = np.arange(0, n * b * h * w * c, dtype="float64").reshape(n * b, c, h, w) - (n * b * c * w * h * 0.75)
mean = true_data.transpose((0, 2, 3, 1)).reshape(-1, c).mean(axis=0)
std = true_data.transpose((0, 2, 3, 1)).reshape(-1, c).std(axis=0)
train_loader = torch.from_numpy(true_data).reshape(n, b, c, h, w)
def compute_mean_std(engine, batch):
_b, _c = batch.shape[:2]
data = batch.reshape(_b, _c, -1).to(dtype=torch.float64)
_mean = torch.mean(data, dim=-1)
_mean2 = torch.mean(data**2, dim=-1)
return {"mean": _mean, "mean^2": _mean2}
compute_engine = Engine(compute_mean_std)
img_mean = Average(output_transform=lambda output: output["mean"])
img_mean2 = Average(output_transform=lambda output: output["mean^2"])
img_mean.attach(compute_engine, "mean")
img_mean2.attach(compute_engine, "mean2")
state = compute_engine.run(train_loader)
state.metrics["std"] = torch.sqrt(state.metrics["mean2"] - state.metrics["mean"] ** 2)
np.testing.assert_almost_equal(state.metrics["mean"].numpy(), mean, decimal=7)
np.testing.assert_almost_equal(state.metrics["std"].numpy(), std, decimal=5)
def _test_distrib_variable_accumulation(device):
def _test(metric_device):
mean_var = VariableAccumulation(lambda a, x: a + x, device=metric_device)
y_true = torch.rand(100, device=device, dtype=torch.float64)
for y in y_true:
mean_var.update(y)
y_true = idist.all_reduce(y_true)
a, n = mean_var.compute()
assert a.item() == pytest.approx(y_true.sum().item())
assert n == len(y_true) * idist.get_world_size()
# check if call compute twice
a, n = mean_var.compute()
assert a.item() == pytest.approx(y_true.sum().item())
assert n == len(y_true) * idist.get_world_size()
mean_var = VariableAccumulation(lambda a, x: a + x, device=metric_device)
y_true = torch.rand(50, 10, device=device, dtype=torch.float64)
for y in y_true:
mean_var.update(y)
y_true = idist.all_reduce(y_true)
a, n = mean_var.compute()
assert n == len(y_true) * idist.get_world_size()
np.testing.assert_almost_equal(a.cpu().numpy(), y_true.sum(dim=0).cpu().numpy(), decimal=4)
a, n = mean_var.compute()
assert n == len(y_true) * idist.get_world_size()
np.testing.assert_almost_equal(a.cpu().numpy(), y_true.sum(dim=0).cpu().numpy(), decimal=4)
# check multiple random inputs as random exact occurencies are rare
for _ in range(3):
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_average(device):
def _test(metric_device):
with pytest.raises(NotComputableError):
v = Average(device=metric_device)
v.compute()
mean_var = Average(device=metric_device)
y_true = torch.rand(100, dtype=torch.float64) + torch.randint(0, 10, size=(100,)).double()
y_true = y_true.to(device)
for y in y_true:
mean_var.update(y)
m = mean_var.compute()
y_true = idist.all_reduce(y_true)
assert m.item() == pytest.approx(y_true.mean().item() / idist.get_world_size())
mean_var = Average(device=metric_device)
y_true = torch.rand(100, 10, dtype=torch.float64) + torch.randint(0, 10, size=(100, 10)).double()
y_true = y_true.to(device)
for y in y_true:
mean_var.update(y)
m = mean_var.compute()
y_true = idist.all_reduce(y_true)
np.testing.assert_almost_equal(
m.cpu().numpy(), y_true.mean(dim=0).cpu().numpy() / idist.get_world_size(), decimal=5
)
# check multiple random inputs as random exact occurencies are rare
for _ in range(3):
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_geom_average(device):
def _test(metric_device):
with pytest.raises(NotComputableError):
v = GeometricAverage(device=metric_device)
v.compute()
decimal = 5 if device.type != "xla" else 4
mean_var = GeometricAverage(device=metric_device)
y_true = torch.rand(100, dtype=torch.float64) + torch.randint(0, 10, size=(100,)).double()
y_true = y_true.to(device)
for y in y_true:
mean_var.update(y)
m = mean_var.compute()
log_y_true = torch.log(y_true)
log_y_true = idist.all_reduce(log_y_true)
np.testing.assert_almost_equal(
m, torch.exp(log_y_true.mean(dim=0) / idist.get_world_size()).item(), decimal=decimal
)
mean_var = GeometricAverage(device=metric_device)
y_true = torch.rand(100, 10, dtype=torch.float64) + torch.randint(0, 10, size=(100, 10)).double()
y_true = y_true.to(device)
for y in y_true:
mean_var.update(y)
m = mean_var.compute()
log_y_true = torch.log(y_true)
log_y_true = idist.all_reduce(log_y_true)
np.testing.assert_almost_equal(
m.cpu().numpy(), torch.exp(log_y_true.mean(dim=0) / idist.get_world_size()).cpu().numpy(), decimal=decimal
)
# check multiple random inputs as random exact occurencies are rare
for _ in range(3):
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _dist_mean(y_true):
y_true = idist.all_reduce(y_true) / idist.get_world_size()
if len(y_true.shape) > 2:
y_true = y_true.reshape(-1, y_true.shape[-1])
return y_true.mean(dim=0).cpu().numpy()
def _dist_geom_mean(y_true):
log_y_true = torch.log(y_true)
log_y_true = idist.all_reduce(log_y_true)
if len(log_y_true.shape) > 2:
log_y_true = log_y_true.reshape(-1, log_y_true.shape[-1])
np_t = log_y_true.cpu().numpy()
return np.exp(np.mean(np_t, axis=0) / idist.get_world_size())
def _test_distrib_integration(device):
def _test(metric_cls, shape, true_result_fn, metric_device, tol=1e-5):
size = 100
custom_variable = 10.0 + 5.0 * torch.rand(size, *shape, dtype=torch.float64)
custom_variable = custom_variable.to(device)
def update_fn(engine, batch):
return 0, custom_variable[engine.state.iteration - 1]
engine = Engine(update_fn)
custom_var_mean = metric_cls(output_transform=lambda output: output[1], device=metric_device)
custom_var_mean.attach(engine, "agg_custom_var")
state = engine.run([0] * size)
true_val = true_result_fn(custom_variable)
assert len(true_val) == shape[-1]
np.testing.assert_almost_equal(
state.metrics["agg_custom_var"].cpu().numpy(), true_val, decimal=int(np.log10(1.0 / tol))
)
size = 100
custom_variable = 10.0 + 5.0 * torch.rand(size, dtype=torch.float64)
custom_variable = custom_variable.to(device)
def update_fn(engine, batch):
return 0, custom_variable[engine.state.iteration - 1].item()
engine = Engine(update_fn)
custom_var_mean = metric_cls(output_transform=lambda output: output[1], device=metric_device)
custom_var_mean.attach(engine, "agg_custom_var")
state = engine.run([0] * size)
assert state.metrics["agg_custom_var"] == pytest.approx(true_result_fn(custom_variable), abs=tol)
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
_test(Average, (12,), _dist_mean, metric_device)
_test(Average, (4, 12), _dist_mean, metric_device)
_test(GeometricAverage, (12,), _dist_geom_mean, metric_device, tol=1e-4)
def _test_distrib_accumulator_device(device):
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
m = VariableAccumulation(lambda a, x: x, device=metric_device)
assert m._device == metric_device
assert (
m.accumulator.device == metric_device
), f"{type(m.accumulator.device)}:{m.accumulator.device} vs {type(metric_device)}:{metric_device}"
m.update(torch.tensor(1, device=device))
assert (
m.accumulator.device == metric_device
), f"{type(m.accumulator.device)}:{m.accumulator.device} vs {type(metric_device)}:{metric_device}"
def _test_apex_average(device, amp_mode, opt_level):
assert amp_mode == "apex"
assert device == "cuda"
model = Linear(1, 1)
if device:
model.to(device)
model.weight.data.zero_()
model.bias.data.zero_()
optimizer = SGD(model.parameters(), 0.1)
from apex import amp
model, optimizer = amp.initialize(model, optimizer, opt_level=opt_level)
mean_var = VariableAccumulation(lambda a, x: a + x)
y_true = torch.rand(100).float().to(device)
for y in y_true:
mean_var.update(y)
a, n = mean_var.compute()
assert a.item() == pytest.approx(y_true.sum().item())
assert n == len(y_true)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_variable_accumulation(device)
_test_distrib_average(device)
_test_distrib_geom_average(device)
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_variable_accumulation(device)
_test_distrib_average(device)
_test_distrib_geom_average(device)
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = idist.device()
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_variable_accumulation, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_average, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_geom_average, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_accumulator_device, (device,), np=nproc, do_init=True)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_variable_accumulation(device)
_test_distrib_average(device)
_test_distrib_geom_average(device)
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_variable_accumulation(device)
_test_distrib_average(device)
_test_distrib_geom_average(device)
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
# Enable this test when apex issue is fixed
# @pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
# @pytest.mark.skipif(not find_spec("apex"), reason="Skip if no APEX")
@pytest.mark.skip(reason="Temporarily disabled, as it fails because of an issue from apex side")
def test_apex_average_on_cuda():
device = "cuda"
_test_apex_average(device, amp_mode="apex", opt_level="O0")
_test_apex_average(device, amp_mode="apex", opt_level="O1")
_test_apex_average(device, amp_mode="apex", opt_level="O2")
_test_apex_average(device, amp_mode="apex", opt_level="O3")
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_variable_accumulation(device)
_test_distrib_average(device)
_test_distrib_geom_average(device)
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_variable_accumulation(device)
_test_distrib_average(device)
_test_distrib_geom_average(device)
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
|
import os
from unittest.mock import MagicMock
import pytest
import torch
from numpy.testing import assert_almost_equal
from torch import nn
from torch.nn.functional import nll_loss
import ignite.distributed as idist
from ignite.engine import State
from ignite.exceptions import NotComputableError
from ignite.metrics import Loss, Precision
class DummyLoss1(Loss):
def __init__(self, loss_fn, true_output, output_transform=lambda x: x):
super(DummyLoss1, self).__init__(loss_fn, output_transform=output_transform)
print(true_output)
self.true_output = true_output
def reset(self):
pass
def compute(self):
pass
def update(self, output):
assert output == self.true_output
def test_output_as_mapping_without_criterion_kwargs():
y_pred = torch.tensor([[2.0], [-2.0]])
y = torch.zeros(2)
criterion_kwargs = {}
loss_metric = DummyLoss1(nll_loss, true_output=(y_pred, y, criterion_kwargs))
state = State(output=({"y_pred": y_pred, "y": y, "criterion_kwargs": {}}))
engine = MagicMock(state=state)
loss_metric.iteration_completed(engine)
def test_output_as_mapping_with_criterion_kwargs():
y_pred = torch.tensor([[2.0], [-2.0]])
y = torch.zeros(2)
criterion_kwargs = {"reduction": "sum"}
loss_metric = DummyLoss1(nll_loss, true_output=(y_pred, y, criterion_kwargs))
state = State(output=({"y_pred": y_pred, "y": y, "criterion_kwargs": {"reduction": "sum"}}))
engine = MagicMock(state=state)
loss_metric.iteration_completed(engine)
def y_test_1(requires_grad=False, device=None):
return (
torch.tensor([[0.1, 0.4, 0.5], [0.1, 0.7, 0.2]], device=device, requires_grad=requires_grad).log(),
torch.tensor([2, 2], device=device).long(),
1.1512925625,
)
def y_test_2():
return (
torch.tensor([[0.1, 0.3, 0.6], [0.6, 0.2, 0.2], [0.2, 0.7, 0.1]]).log(),
torch.tensor([2, 0, 2]).long(),
1.1253643036,
)
def y_test_3():
return torch.tensor([[0.1, 0.3, 0.6], [0.6, 0.2, 0.2]]).log(), torch.tensor([2, 0]).long()
def test_zero_div():
loss = Loss(nll_loss)
with pytest.raises(NotComputableError, match=r"Loss must have at least one example before it can be computed"):
loss.compute()
@pytest.mark.parametrize("criterion", [nll_loss, nn.NLLLoss()])
def test_compute(criterion):
loss = Loss(criterion)
y_pred, y, expected_loss = y_test_1()
loss.update((y_pred, y))
assert_almost_equal(loss.compute(), expected_loss)
y_pred, y, expected_loss = y_test_2()
loss.update((y_pred, y))
assert_almost_equal(loss.compute(), expected_loss) # average
def test_non_averaging_loss():
loss = Loss(nn.NLLLoss(reduction="none"))
y_pred, y, _ = y_test_1()
with pytest.raises(ValueError):
loss.update((y_pred, y))
def test_gradient_based_loss():
# Tests https://github.com/pytorch/ignite/issues/1674
x = torch.tensor([[0.1, 0.4, 0.5], [0.1, 0.7, 0.2]], requires_grad=True)
y_pred = x.mm(torch.randn(size=(3, 1)))
def loss_fn(y_pred, x):
gradients = torch.autograd.grad(
outputs=y_pred, inputs=x, grad_outputs=torch.ones_like(y_pred), create_graph=True
)[0]
gradients = gradients.flatten(start_dim=1)
return gradients.norm(2, dim=1).mean()
loss = Loss(loss_fn)
loss.update((y_pred, x))
def test_kwargs_loss():
loss = Loss(nll_loss)
y_pred, y, _ = y_test_1()
kwargs = {"weight": torch.tensor([0.1, 0.1, 0.1])}
loss.update((y_pred, y, kwargs))
expected_value = nll_loss(y_pred, y, **kwargs)
assert_almost_equal(loss.compute(), expected_value)
def test_reset():
loss = Loss(nll_loss)
y_pred, y = y_test_3()
loss.update((y_pred, y))
loss.compute()
loss.reset()
with pytest.raises(NotComputableError):
loss.compute()
def _test_distrib_compute_on_criterion(device, y_test_1, y_test_2, tol=None):
def _test(metric_device, y_test_1, y_test_2):
criterion = nn.NLLLoss().to(device)
loss = Loss(criterion, device=metric_device)
y_pred, y, _ = y_test_1
loss.update((y_pred, y))
n = loss._num_examples
assert n == len(y)
res = loss.compute()
assert n == loss._num_examples
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
true_loss_value = criterion(y_pred, y)
assert_almost_equal(res, true_loss_value.item())
loss.reset()
y_pred, y, _ = y_test_2
loss.update((y_pred, y))
n = loss._num_examples
res = loss.compute()
assert n == loss._num_examples
y_pred = idist.all_gather(y_pred)
y = idist.all_gather(y)
true_loss_value = criterion(y_pred, y)
if tol is None:
assert_almost_equal(res, true_loss_value.item())
else:
assert pytest.approx(res, rel=tol) == true_loss_value.item()
_test("cpu", y_test_1, y_test_2)
if device.type != "xla":
_test(idist.device(), y_test_1, y_test_2)
def _test_distrib_accumulator_device(device, y_test_1):
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
loss = Loss(nll_loss, device=metric_device)
assert loss._device == metric_device
assert (
loss._sum.device == metric_device
), f"{type(loss._sum.device)}:{loss._sum.device} vs {type(metric_device)}:{metric_device}"
y_pred, y, _ = y_test_1
loss.update((y_pred, y))
assert (
loss._sum.device == metric_device
), f"{type(loss._sum.device)}:{loss._sum.device} vs {type(metric_device)}:{metric_device}"
def test_sum_detached():
loss = Loss(nll_loss)
y_pred, y, _ = y_test_1(requires_grad=True)
loss.update((y_pred, y))
assert not loss._sum.requires_grad
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_compute_on_criterion(device, y_test_1(), y_test_2())
_test_distrib_accumulator_device(device, y_test_1())
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_compute_on_criterion(device, y_test_1(), y_test_2())
_test_distrib_accumulator_device(device, y_test_1())
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_compute_on_criterion, (device, y_test_1(), y_test_2()), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_accumulator_device, (device, y_test_1()), np=nproc, do_init=True)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute_on_criterion(device, y_test_1(), y_test_2())
_test_distrib_accumulator_device(device, y_test_1())
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute_on_criterion(device, y_test_1(), y_test_2())
_test_distrib_accumulator_device(device, y_test_1())
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_compute_on_criterion(device, y_test_1(), y_test_2(), tol=1e-6)
_test_distrib_accumulator_device(device, y_test_1())
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute_on_criterion(device, y_test_1(), y_test_2())
_test_distrib_accumulator_device(device, y_test_1())
def test_override_required_output_keys():
# https://github.com/pytorch/ignite/issues/1415
from ignite.engine import create_supervised_evaluator
counter = [0]
class DummyLoss2(Loss):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def update(self, output):
y_pred, y, criterion_kwargs = output
assert y_pred.shape == (4, 3)
assert y.shape == (4,)
assert criterion_kwargs == c_kwargs
assert y.equal(data[counter[0]][1])
counter[0] += 1
def reset(self):
pass
def compute(self):
pass
model = nn.Linear(10, 3)
metrics = {"Precision": Precision(), "DummyLoss2": DummyLoss2(nll_loss)}
# global criterion kwargs
c_kwargs = {"reduction": "sum"}
evaluator = create_supervised_evaluator(
model,
metrics=metrics,
output_transform=lambda x, y, y_pred: {"x": x, "y": y, "y_pred": y_pred, "criterion_kwargs": c_kwargs},
)
data = [
(torch.rand(4, 10), torch.randint(0, 3, size=(4,))),
(torch.rand(4, 10), torch.randint(0, 3, size=(4,))),
(torch.rand(4, 10), torch.randint(0, 3, size=(4,))),
]
evaluator.run(data)
|
import os
import warnings
import pytest
import torch
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.metrics import recall_score
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import Recall
torch.manual_seed(12)
def test_no_update():
recall = Recall()
assert recall._updated is False
with pytest.raises(NotComputableError, match=r"Recall must have at least one example before it can be computed"):
recall.compute()
assert recall._updated is False
recall = Recall(is_multilabel=True)
assert recall._updated is False
with pytest.raises(NotComputableError, match=r"Recall must have at least one example before it can be computed"):
recall.compute()
assert recall._updated is False
def test_average_parameter():
re = Recall(average="samples")
with pytest.raises(
ValueError, match=r"Argument average='samples' is incompatible with binary and multiclass input data."
):
re.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10,)).long()))
assert re._updated is False
re = Recall(average="samples")
with pytest.raises(
ValueError, match=r"Argument average='samples' is incompatible with binary and multiclass input data."
):
re.update((torch.rand(10, 3), torch.randint(0, 3, size=(10,)).long()))
assert re._updated is False
re = Recall(average=True)
assert re._average == "macro"
def test_binary_wrong_inputs():
re = Recall()
assert re._updated is False
with pytest.raises(ValueError, match=r"For binary cases, y must be comprised of 0's and 1's"):
# y has not only 0 or 1 values
re.update((torch.randint(0, 2, size=(10,)), torch.arange(0, 10).long()))
assert re._updated is False
with pytest.raises(ValueError, match=r"For binary cases, y_pred must be comprised of 0's and 1's"):
# y_pred values are not thresholded to 0, 1 values
re.update((torch.rand(10, 1), torch.randint(0, 2, size=(10,)).long()))
assert re._updated is False
with pytest.raises(ValueError, match=r"y must have shape of"):
# incompatible shapes
re.update((torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10, 5)).long()))
assert re._updated is False
with pytest.raises(ValueError, match=r"y must have shape of"):
# incompatible shapes
re.update((torch.randint(0, 2, size=(10, 5, 6)), torch.randint(0, 2, size=(10,)).long()))
assert re._updated is False
with pytest.raises(ValueError, match=r"y must have shape of"):
# incompatible shapes
re.update((torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10, 5, 6)).long()))
assert re._updated is False
with pytest.warns(
RuntimeWarning,
match="`y` and `y_pred` should be of dtype long when entry type is binary and average!=False",
):
re = Recall(average=None)
re.update((torch.randint(0, 2, size=(10,)).float(), torch.randint(0, 2, size=(10,))))
with pytest.warns(
RuntimeWarning,
match="`y` and `y_pred` should be of dtype long when entry type is binary and average!=False",
):
re = Recall(average=None)
re.update((torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10,)).float()))
def ignite_average_to_scikit_average(average, data_type: str):
if average in [None, "micro", "samples", "weighted", "macro"]:
return average
if average is False:
if data_type == "binary":
return "binary"
else:
return None
elif average is True:
return "macro"
else:
raise ValueError(f"Wrong average parameter `{average}`")
@pytest.mark.parametrize("average", [None, False, "macro", "micro", "weighted"])
def test_binary_input(average):
re = Recall(average=average)
assert re._updated is False
def _test(y_pred, y, batch_size):
re.reset()
assert re._updated is False
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
re.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
re.update((y_pred, y))
np_y = y.numpy().ravel()
np_y_pred = y_pred.numpy().ravel()
assert re._type == "binary"
assert re._updated is True
assert isinstance(re.compute(), torch.Tensor if not average else float)
re_compute = re.compute().numpy() if not average else re.compute()
sk_average_parameter = ignite_average_to_scikit_average(average, "binary")
assert recall_score(np_y, np_y_pred, average=sk_average_parameter, labels=[0, 1]) == pytest.approx(re_compute)
def get_test_cases():
test_cases = [
# Binary accuracy on input of shape (N, 1) or (N, )
(torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10,)), 1),
(torch.randint(0, 2, size=(10, 1)), torch.randint(0, 2, size=(10, 1)), 1),
# updated batches
(torch.randint(0, 2, size=(50,)), torch.randint(0, 2, size=(50,)), 16),
(torch.randint(0, 2, size=(50, 1)), torch.randint(0, 2, size=(50, 1)), 16),
# Binary accuracy on input of shape (N, L)
(torch.randint(0, 2, size=(10, 5)), torch.randint(0, 2, size=(10, 5)), 1),
(torch.randint(0, 2, size=(10, 1, 5)), torch.randint(0, 2, size=(10, 1, 5)), 1),
# updated batches
(torch.randint(0, 2, size=(50, 5)), torch.randint(0, 2, size=(50, 5)), 16),
(torch.randint(0, 2, size=(50, 1, 5)), torch.randint(0, 2, size=(50, 1, 5)), 16),
# Binary accuracy on input of shape (N, H, W)
(torch.randint(0, 2, size=(10, 12, 10)), torch.randint(0, 2, size=(10, 12, 10)), 1),
(torch.randint(0, 2, size=(10, 1, 12, 10)), torch.randint(0, 2, size=(10, 1, 12, 10)), 1),
# updated batches
(torch.randint(0, 2, size=(50, 12, 10)), torch.randint(0, 2, size=(50, 12, 10)), 16),
(torch.randint(0, 2, size=(50, 1, 12, 10)), torch.randint(0, 2, size=(50, 1, 12, 10)), 16),
# Corner case with all zeros predictions
(torch.zeros(size=(10,), dtype=torch.long), torch.randint(0, 2, size=(10,)), 1),
(torch.zeros(size=(10, 1), dtype=torch.long), torch.randint(0, 2, size=(10, 1)), 1),
]
return test_cases
for _ in range(5):
# check multiple random inputs as random exact occurencies are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def test_multiclass_wrong_inputs():
re = Recall()
assert re._updated is False
with pytest.raises(ValueError):
# incompatible shapes
re.update((torch.rand(10, 5, 4), torch.randint(0, 2, size=(10,)).long()))
assert re._updated is False
with pytest.raises(ValueError):
# incompatible shapes
re.update((torch.rand(10, 5, 6), torch.randint(0, 5, size=(10, 5)).long()))
assert re._updated is False
with pytest.raises(ValueError):
# incompatible shapes
re.update((torch.rand(10), torch.randint(0, 5, size=(10, 5, 6)).long()))
assert re._updated is False
re = Recall(average=True)
assert re._updated is False
with pytest.raises(ValueError):
# incompatible shapes between two updates
re.update((torch.rand(10, 5), torch.randint(0, 5, size=(10,)).long()))
re.update((torch.rand(10, 6), torch.randint(0, 5, size=(10,)).long()))
assert re._updated is True
with pytest.raises(ValueError):
# incompatible shapes between two updates
re.update((torch.rand(10, 5, 12, 14), torch.randint(0, 5, size=(10, 12, 14)).long()))
re.update((torch.rand(10, 6, 12, 14), torch.randint(0, 5, size=(10, 12, 14)).long()))
assert re._updated is True
re = Recall(average=False)
assert re._updated is False
with pytest.raises(ValueError):
# incompatible shapes between two updates
re.update((torch.rand(10, 5), torch.randint(0, 5, size=(10,)).long()))
re.update((torch.rand(10, 6), torch.randint(0, 5, size=(10,)).long()))
assert re._updated is True
with pytest.raises(ValueError):
# incompatible shapes between two updates
re.update((torch.rand(10, 5, 12, 14), torch.randint(0, 5, size=(10, 12, 14)).long()))
re.update((torch.rand(10, 6, 12, 14), torch.randint(0, 5, size=(10, 12, 14)).long()))
assert re._updated is True
with pytest.warns(
RuntimeWarning,
match="`y` should be of dtype long when entry type is multiclass",
):
re = Recall()
re.update((torch.rand(10, 5), torch.randint(0, 5, size=(10,)).float()))
@pytest.mark.parametrize("average", [None, False, "macro", "micro", "weighted"])
def test_multiclass_input(average):
re = Recall(average=average)
assert re._updated is False
def _test(y_pred, y, batch_size):
re.reset()
assert re._updated is False
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
re.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
re.update((y_pred, y))
num_classes = y_pred.shape[1]
np_y_pred = y_pred.argmax(dim=1).numpy().ravel()
np_y = y.numpy().ravel()
assert re._type == "multiclass"
assert re._updated is True
assert isinstance(re.compute(), torch.Tensor if not average else float)
re_compute = re.compute().numpy() if not average else re.compute()
sk_average_parameter = ignite_average_to_scikit_average(average, "multiclass")
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UndefinedMetricWarning)
sk_compute = recall_score(np_y, np_y_pred, labels=range(0, num_classes), average=sk_average_parameter)
assert sk_compute == pytest.approx(re_compute)
def get_test_cases():
test_cases = [
# Multiclass input data of shape (N, ) and (N, C)
(torch.rand(10, 6), torch.randint(0, 6, size=(10,)), 1),
(torch.rand(10, 4), torch.randint(0, 4, size=(10,)), 1),
# updated batches
(torch.rand(50, 6), torch.randint(0, 6, size=(50,)), 16),
(torch.rand(50, 4), torch.randint(0, 4, size=(50,)), 16),
# Multiclass input data of shape (N, L) and (N, C, L)
(torch.rand(10, 5, 8), torch.randint(0, 5, size=(10, 8)), 1),
(torch.rand(10, 8, 12), torch.randint(0, 8, size=(10, 12)), 1),
# updated batches
(torch.rand(50, 5, 8), torch.randint(0, 5, size=(50, 8)), 16),
(torch.rand(50, 8, 12), torch.randint(0, 8, size=(50, 12)), 16),
# Multiclass input data of shape (N, H, W, ...) and (N, C, H, W, ...)
(torch.rand(10, 5, 18, 16), torch.randint(0, 5, size=(10, 18, 16)), 1),
(torch.rand(10, 7, 20, 12), torch.randint(0, 7, size=(10, 20, 12)), 1),
# updated batches
(torch.rand(50, 5, 18, 16), torch.randint(0, 5, size=(50, 18, 16)), 16),
(torch.rand(50, 7, 20, 12), torch.randint(0, 7, size=(50, 20, 12)), 16),
# Corner case with all zeros predictions
(torch.zeros(size=(10, 6)), torch.randint(0, 6, size=(10,)), 1),
(torch.zeros(size=(10, 4)), torch.randint(0, 4, size=(10,)), 1),
]
return test_cases
for _ in range(5):
# check multiple random inputs as random exact occurencies are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
def test_multilabel_wrong_inputs():
re = Recall(is_multilabel=True)
assert re._updated is False
with pytest.raises(ValueError):
# incompatible shapes
re.update((torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10,)).long()))
assert re._updated is False
with pytest.raises(ValueError):
# incompatible y_pred
re.update((torch.rand(10, 5), torch.randint(0, 2, size=(10, 5)).long()))
assert re._updated is False
with pytest.raises(ValueError):
# incompatible y
re.update((torch.randint(0, 5, size=(10, 5, 6)), torch.rand(10)))
assert re._updated is False
with pytest.raises(ValueError):
# incompatible shapes between two updates
re.update((torch.randint(0, 2, size=(20, 5)), torch.randint(0, 2, size=(20, 5)).long()))
re.update((torch.randint(0, 2, size=(20, 6)), torch.randint(0, 2, size=(20, 6)).long()))
assert re._updated is True
def to_numpy_multilabel(y):
# reshapes input array to (N x ..., C)
y = y.transpose(1, 0).cpu().numpy()
num_classes = y.shape[0]
y = y.reshape((num_classes, -1)).transpose(1, 0)
return y
@pytest.mark.parametrize("average", [None, False, "macro", "micro", "samples"])
def test_multilabel_input(average):
re = Recall(average=average, is_multilabel=True)
assert re._updated is False
def _test(y_pred, y, batch_size):
re.reset()
assert re._updated is False
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
re.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
re.update((y_pred, y))
np_y_pred = to_numpy_multilabel(y_pred)
np_y = to_numpy_multilabel(y)
assert re._type == "multilabel"
assert re._updated is True
re_compute = re.compute().numpy() if not average else re.compute()
sk_average_parameter = ignite_average_to_scikit_average(average, "multilabel")
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UndefinedMetricWarning)
assert recall_score(np_y, np_y_pred, average=sk_average_parameter) == pytest.approx(re_compute)
def get_test_cases():
test_cases = [
# Multilabel input data of shape (N, C)
(torch.randint(0, 2, size=(10, 5)), torch.randint(0, 2, size=(10, 5)), 1),
(torch.randint(0, 2, size=(10, 4)), torch.randint(0, 2, size=(10, 4)), 1),
# updated batches
(torch.randint(0, 2, size=(50, 5)), torch.randint(0, 2, size=(50, 5)), 16),
(torch.randint(0, 2, size=(50, 4)), torch.randint(0, 2, size=(50, 4)), 16),
# Multilabel input data of shape (N, H, W)
(torch.randint(0, 2, size=(10, 5, 10)), torch.randint(0, 2, size=(10, 5, 10)), 1),
(torch.randint(0, 2, size=(10, 4, 10)), torch.randint(0, 2, size=(10, 4, 10)), 1),
# updated batches
(torch.randint(0, 2, size=(50, 5, 10)), torch.randint(0, 2, size=(50, 5, 10)), 16),
(torch.randint(0, 2, size=(50, 4, 10)), torch.randint(0, 2, size=(50, 4, 10)), 16),
# Multilabel input data of shape (N, C, H, W, ...)
(torch.randint(0, 2, size=(10, 5, 18, 16)), torch.randint(0, 2, size=(10, 5, 18, 16)), 1),
(torch.randint(0, 2, size=(10, 4, 20, 23)), torch.randint(0, 2, size=(10, 4, 20, 23)), 1),
# updated batches
(torch.randint(0, 2, size=(50, 5, 18, 16)), torch.randint(0, 2, size=(50, 5, 18, 16)), 16),
(torch.randint(0, 2, size=(50, 4, 20, 23)), torch.randint(0, 2, size=(50, 4, 20, 23)), 16),
# Corner case with all zeros predictions
(torch.zeros(size=(10, 5)), torch.randint(0, 2, size=(10, 5)), 1),
(torch.zeros(size=(10, 4)), torch.randint(0, 2, size=(10, 4)), 1),
]
return test_cases
for _ in range(5):
# check multiple random inputs as random exact occurencies are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)
@pytest.mark.parametrize("average", [None, False, "macro", "micro", "weighted"])
def test_incorrect_type(average):
# Tests changing of type during training
re = Recall(average=average)
assert re._updated is False
y_pred = torch.softmax(torch.rand(4, 4), dim=1)
y = torch.ones(4).long()
re.update((y_pred, y))
assert re._updated is True
y_pred = torch.zeros(4)
y = torch.ones(4).long()
with pytest.raises(RuntimeError):
re.update((y_pred, y))
assert re._updated is True
@pytest.mark.parametrize("average", [None, False, "macro", "micro", "weighted"])
def test_incorrect_y_classes(average):
re = Recall(average=average)
assert re._updated is False
y_pred = torch.randint(0, 2, size=(10, 4)).float()
y = torch.randint(4, 5, size=(10,)).long()
with pytest.raises(ValueError):
re.update((y_pred, y))
assert re._updated is False
def _test_distrib_integration_multiclass(device):
from ignite.engine import Engine
def _test(average, n_epochs, metric_device):
n_iters = 60
batch_size = 16
n_classes = 7
y_true = torch.randint(0, n_classes, size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(n_iters * batch_size, n_classes).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size, :],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
re = Recall(average=average, device=metric_device)
re.attach(engine, "re")
assert re._updated is False
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "re" in engine.state.metrics
assert re._updated is True
res = engine.state.metrics["re"]
if isinstance(res, torch.Tensor):
# Fixes https://github.com/pytorch/ignite/issues/1635#issuecomment-863026919
assert res.device.type == "cpu"
res = res.cpu().numpy()
sk_average_parameter = ignite_average_to_scikit_average(average, "multiclass")
true_res = recall_score(
y_true.cpu().numpy(), torch.argmax(y_preds, dim=1).cpu().numpy(), average=sk_average_parameter
)
assert pytest.approx(res) == true_res
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
rank = idist.get_rank()
for i in range(2):
torch.manual_seed(12 + rank + i)
for metric_device in metric_devices:
_test(average=False, n_epochs=1, metric_device=metric_device)
_test(average=False, n_epochs=2, metric_device=metric_device)
_test(average="macro", n_epochs=1, metric_device=metric_device)
_test(average="macro", n_epochs=2, metric_device=metric_device)
_test(average="weighted", n_epochs=1, metric_device=metric_device)
_test(average="weighted", n_epochs=2, metric_device=metric_device)
_test(average="micro", n_epochs=1, metric_device=metric_device)
_test(average="micro", n_epochs=2, metric_device=metric_device)
def _test_distrib_integration_multilabel(device):
from ignite.engine import Engine
torch.manual_seed(12)
def _test(average, n_epochs, metric_device):
n_iters = 60
batch_size = 16
n_classes = 7
y_true = torch.randint(0, 2, size=(n_iters * batch_size, n_classes, 6, 8)).to(device)
y_preds = torch.randint(0, 2, size=(n_iters * batch_size, n_classes, 6, 8)).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size, ...],
y_true[i * batch_size : (i + 1) * batch_size, ...],
)
engine = Engine(update)
re = Recall(average=average, is_multilabel=True, device=metric_device)
re.attach(engine, "re")
assert re._updated is False
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "re" in engine.state.metrics
assert re._updated is True
res = engine.state.metrics["re"]
res2 = re.compute()
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
res2 = res2.cpu().numpy()
assert (res == res2).all()
else:
assert res == res2
np_y_preds = to_numpy_multilabel(y_preds)
np_y_true = to_numpy_multilabel(y_true)
assert re._type == "multilabel"
sk_average_parameter = ignite_average_to_scikit_average(average, "multilabel")
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UndefinedMetricWarning)
assert recall_score(np_y_true, np_y_preds, average=sk_average_parameter) == pytest.approx(res)
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
rank = idist.get_rank()
for i in range(2):
torch.manual_seed(12 + rank + i)
for metric_device in metric_devices:
_test(average=False, n_epochs=1, metric_device=metric_device)
_test(average=False, n_epochs=2, metric_device=metric_device)
_test(average="macro", n_epochs=1, metric_device=metric_device)
_test(average="macro", n_epochs=2, metric_device=metric_device)
_test(average="micro", n_epochs=1, metric_device=metric_device)
_test(average="micro", n_epochs=2, metric_device=metric_device)
_test(average="weighted", n_epochs=1, metric_device=metric_device)
_test(average="weighted", n_epochs=2, metric_device=metric_device)
_test(average="samples", n_epochs=1, metric_device=metric_device)
_test(average="samples", n_epochs=2, metric_device=metric_device)
def _test_distrib_accumulator_device(device):
# Binary accuracy on input of shape (N, 1) or (N, )
def _test(average, metric_device):
re = Recall(average=average, device=metric_device)
assert re._device == metric_device
assert re._updated is False
# Since the shape of the accumulated amount isn't known before the first update
# call, the internal variables aren't tensors on the right device yet.
y_reed = torch.randint(0, 2, size=(10,))
y = torch.randint(0, 2, size=(10,)).long()
re.update((y_reed, y))
assert re._updated is True
assert (
re._numerator.device == metric_device
), f"{type(re._numerator.device)}:{re._numerator.device} vs {type(metric_device)}:{metric_device}"
if average != "samples":
# For average='samples', `_denominator` is of type `int` so it has not `device` member.
assert (
re._denominator.device == metric_device
), f"{type(re._denominator.device)}:{re._denominator.device} vs {type(metric_device)}:{metric_device}"
if average == "weighted":
assert re._weight.device == metric_device, f"{type(re._weight.device)}:{re._weight.device} vs "
f"{type(metric_device)}:{metric_device}"
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
_test(False, metric_device=metric_device)
_test("macro", metric_device=metric_device)
_test("micro", metric_device=metric_device)
_test("weighted", metric_device=metric_device)
def _test_distrib_multilabel_accumulator_device(device):
# Multiclass input data of shape (N, ) and (N, C)
def _test(average, metric_device):
re = Recall(is_multilabel=True, average=average, device=metric_device)
assert re._updated is False
assert re._device == metric_device
y_reed = torch.randint(0, 2, size=(10, 4, 20, 23))
y = torch.randint(0, 2, size=(10, 4, 20, 23)).long()
re.update((y_reed, y))
assert re._updated is True
assert (
re._numerator.device == metric_device
), f"{type(re._numerator.device)}:{re._numerator.device} vs {type(metric_device)}:{metric_device}"
if average != "samples":
# For average='samples', `_denominator` is of type `int` so it has not `device` member.
assert (
re._denominator.device == metric_device
), f"{type(re._denominator.device)}:{re._denominator.device} vs {type(metric_device)}:{metric_device}"
if average == "weighted":
assert re._weight.device == metric_device, f"{type(re._weight.device)}:{re._weight.device} vs "
f"{type(metric_device)}:{metric_device}"
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
_test(False, metric_device=metric_device)
_test("macro", metric_device=metric_device)
_test("micro", metric_device=metric_device)
_test("weighted", metric_device=metric_device)
_test("samples", metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_integration_multiclass(device)
_test_distrib_integration_multilabel(device)
_test_distrib_accumulator_device(device)
_test_distrib_multilabel_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_integration_multiclass(device)
_test_distrib_integration_multilabel(device)
_test_distrib_accumulator_device(device)
_test_distrib_multilabel_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_integration_multiclass, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration_multilabel, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_accumulator_device, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_multilabel_accumulator_device, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_integration_multiclass(device)
_test_distrib_integration_multilabel(device)
_test_distrib_accumulator_device(device)
_test_distrib_multilabel_accumulator_device(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_integration_multiclass(device)
_test_distrib_integration_multilabel(device)
_test_distrib_accumulator_device(device)
_test_distrib_multilabel_accumulator_device(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_integration_multiclass(device)
_test_distrib_integration_multilabel(device)
_test_distrib_accumulator_device(device)
_test_distrib_multilabel_accumulator_device(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_integration_multiclass(device)
_test_distrib_integration_multilabel(device)
_test_distrib_accumulator_device(device)
_test_distrib_multilabel_accumulator_device(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import numpy as np
import pytest
import torch
from sklearn.metrics import fbeta_score
import ignite.distributed as idist
from ignite.engine import Engine
from ignite.metrics import Fbeta, Precision, Recall
torch.manual_seed(12)
def test_wrong_inputs():
with pytest.raises(ValueError, match=r"Beta should be a positive integer"):
Fbeta(0.0)
with pytest.raises(ValueError, match=r"Input precision metric should have average=False"):
p = Precision(average="micro")
Fbeta(1.0, precision=p)
with pytest.raises(ValueError, match=r"Input recall metric should have average=False"):
r = Recall(average="samples")
Fbeta(1.0, recall=r)
with pytest.raises(ValueError, match=r"If precision argument is provided, output_transform should be None"):
p = Precision(average=False)
Fbeta(1.0, precision=p, output_transform=lambda x: x)
with pytest.raises(ValueError, match=r"If recall argument is provided, output_transform should be None"):
r = Recall(average=False)
Fbeta(1.0, recall=r, output_transform=lambda x: x)
def _output_transform(output):
return output["y_pred"], output["y"]
@pytest.mark.parametrize(
"p, r, average, output_transform",
[
(None, None, False, None),
(None, None, True, None),
(None, None, False, _output_transform),
(None, None, True, _output_transform),
(Precision(average=False), Recall(average=False), False, None),
(Precision(average=False), Recall(average=False), True, None),
],
)
def test_integration(p, r, average, output_transform):
np.random.seed(1)
n_iters = 10
batch_size = 10
n_classes = 10
y_true = np.arange(0, n_iters * batch_size, dtype="int64") % n_classes
y_pred = 0.2 * np.random.rand(n_iters * batch_size, n_classes)
for i in range(n_iters * batch_size):
if np.random.rand() > 0.4:
y_pred[i, y_true[i]] = 1.0
else:
j = np.random.randint(0, n_classes)
y_pred[i, j] = 0.7
y_true_batch_values = iter(y_true.reshape(n_iters, batch_size))
y_pred_batch_values = iter(y_pred.reshape(n_iters, batch_size, n_classes))
def update_fn(engine, batch):
y_true_batch = next(y_true_batch_values)
y_pred_batch = next(y_pred_batch_values)
if output_transform is not None:
return {"y_pred": torch.from_numpy(y_pred_batch), "y": torch.from_numpy(y_true_batch)}
return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
evaluator = Engine(update_fn)
f2 = Fbeta(beta=2.0, average=average, precision=p, recall=r, output_transform=output_transform)
f2.attach(evaluator, "f2")
data = list(range(n_iters))
state = evaluator.run(data, max_epochs=1)
f2_true = fbeta_score(y_true, np.argmax(y_pred, axis=-1), average="macro" if average else None, beta=2.0)
np.testing.assert_allclose(np.array(f2_true), np.array(state.metrics["f2"]))
def _test_distrib_integration(device):
rank = idist.get_rank()
def _test(p, r, average, n_epochs, metric_device):
n_iters = 60
batch_size = 16
n_classes = 7
torch.manual_seed(12 + rank)
y_true = torch.randint(0, n_classes, size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(n_iters * batch_size, n_classes).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size, :],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
fbeta = Fbeta(beta=2.5, average=average, device=metric_device)
fbeta.attach(engine, "f2.5")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "f2.5" in engine.state.metrics
res = engine.state.metrics["f2.5"]
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
true_res = fbeta_score(
y_true.cpu().numpy(),
torch.argmax(y_preds, dim=1).cpu().numpy(),
beta=2.5,
average="macro" if average else None,
)
assert pytest.approx(res) == true_res
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
_test(None, None, average=True, n_epochs=1, metric_device=metric_device)
_test(None, None, average=True, n_epochs=2, metric_device=metric_device)
precision = Precision(average=False, device=metric_device)
recall = Recall(average=False, device=metric_device)
_test(precision, recall, average=False, n_epochs=1, metric_device=metric_device)
_test(precision, recall, average=False, n_epochs=2, metric_device=metric_device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_integration(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_integration(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_integration(device)
|
import os
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import MeanSquaredError
def test_zero_sample():
mse = MeanSquaredError()
with pytest.raises(
NotComputableError, match=r"MeanSquaredError must have at least one example before it can be computed"
):
mse.compute()
@pytest.fixture(params=[item for item in range(4)])
def test_case(request):
return [
(torch.randint(0, 10, size=(100, 1)), torch.randint(0, 10, size=(100, 1)), 1),
(torch.randint(-20, 20, size=(100, 5)), torch.randint(-20, 20, size=(100, 5)), 1),
# updated batches
(torch.randint(0, 10, size=(100, 1)), torch.randint(0, 10, size=(100, 1)), 16),
(torch.randint(-20, 20, size=(100, 5)), torch.randint(-20, 20, size=(100, 5)), 16),
][request.param]
@pytest.mark.parametrize("n_times", range(5))
def test_compute(n_times, test_case):
mse = MeanSquaredError()
y_pred, y, batch_size = test_case
mse.reset()
if batch_size > 1:
n_iters = y.shape[0] // batch_size + 1
for i in range(n_iters):
idx = i * batch_size
mse.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))
else:
mse.update((y_pred, y))
np_y = y.numpy()
np_y_pred = y_pred.numpy()
np_res = np.power((np_y - np_y_pred), 2.0).sum() / np_y.shape[0]
assert isinstance(mse.compute(), float)
assert mse.compute() == np_res
def _test_distrib_integration(device, tol=1e-6):
from ignite.engine import Engine
rank = idist.get_rank()
torch.manual_seed(12 + rank)
def _test(metric_device):
n_iters = 100
batch_size = 10
y_true = torch.arange(0, n_iters * batch_size, dtype=torch.float).to(device)
y_preds = torch.ones(n_iters * batch_size, dtype=torch.float).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
m = MeanSquaredError(device=metric_device)
m.attach(engine, "mse")
data = list(range(n_iters))
engine.run(data=data, max_epochs=1)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "mse" in engine.state.metrics
res = engine.state.metrics["mse"]
true_res = np.mean(np.power((y_true - y_preds).cpu().numpy(), 2.0))
assert pytest.approx(res, rel=tol) == true_res
_test("cpu")
if device.type != "xla":
_test(idist.device())
def _test_distrib_accumulator_device(device):
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
device = torch.device(device)
mse = MeanSquaredError(device=metric_device)
for dev in [mse._device, mse._sum_of_squared_errors.device]:
assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
y_pred = torch.tensor([[2.0], [-2.0]])
y = torch.zeros(2)
mse.update((y_pred, y))
for dev in [mse._device, mse._sum_of_squared_errors.device]:
assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
def test_accumulator_detached():
mse = MeanSquaredError()
y_pred = torch.tensor([[2.0], [-2.0]], requires_grad=True)
y = torch.zeros(2)
mse.update((y_pred, y))
assert not mse._sum_of_squared_errors.requires_grad
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_accumulator_device, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_integration(device, tol=1e-4)
_test_distrib_accumulator_device(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_integration(device, tol=1e-4)
_test_distrib_accumulator_device(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
import os
import pytest
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import TopKCategoricalAccuracy
def test_zero_div():
acc = TopKCategoricalAccuracy(2)
with pytest.raises(
NotComputableError, match=r"TopKCategoricalAccuracy must have at least one example before it can be computed"
):
acc.compute()
def test_compute():
acc = TopKCategoricalAccuracy(2)
y_pred = torch.FloatTensor([[0.2, 0.4, 0.6, 0.8], [0.8, 0.6, 0.4, 0.2]])
y = torch.ones(2).long()
acc.update((y_pred, y))
assert isinstance(acc.compute(), float)
assert acc.compute() == 0.5
acc.reset()
y_pred = torch.FloatTensor([[0.4, 0.8, 0.2, 0.6], [0.8, 0.6, 0.4, 0.2]])
y = torch.ones(2).long()
acc.update((y_pred, y))
assert isinstance(acc.compute(), float)
assert acc.compute() == 1.0
def top_k_accuracy(y_true, y_pred, k=5, normalize=True):
import numpy as np
# Taken from
# https://github.com/scikit-learn/scikit-learn/blob/4685cb5c50629aba4429f6701585f82fc3eee5f7/
# sklearn/metrics/classification.py#L187
if len(y_true.shape) == 2:
y_true = np.argmax(y_true, axis=1)
num_obs, num_labels = y_pred.shape
idx = num_labels - k - 1
counter = 0.0
argsorted = np.argsort(y_pred, axis=1)
for i in range(num_obs):
if y_true[i] in argsorted[i, idx + 1 :]:
counter += 1.0
if normalize:
return counter * 1.0 / num_obs
else:
return counter
def _test_distrib_integration(device):
from ignite.engine import Engine
def _test(n_epochs, metric_device):
n_iters = 100
batch_size = 16
n_classes = 10
y_true = torch.randint(0, n_classes, size=(n_iters * batch_size,)).to(device)
y_preds = torch.rand(n_iters * batch_size, n_classes).to(device)
def update(engine, i):
return (
y_preds[i * batch_size : (i + 1) * batch_size, :],
y_true[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
k = 5
acc = TopKCategoricalAccuracy(k=k, device=metric_device)
acc.attach(engine, "acc")
data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)
y_preds = idist.all_gather(y_preds)
y_true = idist.all_gather(y_true)
assert "acc" in engine.state.metrics
res = engine.state.metrics["acc"]
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()
true_res = top_k_accuracy(y_true.cpu().numpy(), y_preds.cpu().numpy(), k=k)
assert pytest.approx(res) == true_res
metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
rank = idist.get_rank()
for i in range(3):
torch.manual_seed(12 + rank + i)
for metric_device in metric_devices:
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)
def _test_distrib_accumulator_device(device):
metric_devices = [torch.device("cpu")]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
acc = TopKCategoricalAccuracy(2, device=metric_device)
assert acc._device == metric_device
assert (
acc._num_correct.device == metric_device
), f"{type(acc._num_correct.device)}:{acc._num_correct.device} vs {type(metric_device)}:{metric_device}"
y_pred = torch.tensor([[0.2, 0.4, 0.6, 0.8], [0.8, 0.6, 0.4, 0.2]])
y = torch.ones(2).long()
acc.update((y_pred, y))
assert (
acc._num_correct.device == metric_device
), f"{type(acc._num_correct.device)}:{acc._num_correct.device} vs {type(metric_device)}:{metric_device}"
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_accumulator_device, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_integration(device)
_test_distrib_accumulator_device(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
|
from typing import Callable, Optional, Union
from unittest.mock import patch
import pytest
import torch
import torchvision
from ignite.metrics.gan.utils import _BaseInceptionMetric, InceptionModel
class DummyInceptionMetric(_BaseInceptionMetric):
def __init__(
self,
num_features: Optional[int] = None,
feature_extractor: Optional[torch.nn.Module] = None,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
) -> None:
super(DummyInceptionMetric, self).__init__(
num_features=num_features,
feature_extractor=feature_extractor,
output_transform=output_transform,
device=device,
)
def reset(self):
pass
def compute(self):
pass
def update(self, output):
self._extract_features(output)
def test_dummy_metric():
with pytest.raises(ValueError, match=r"Argument num_features must be greater to zero, got:"):
DummyInceptionMetric(num_features=-1, feature_extractor=torch.nn.Identity()).update(torch.rand(2, 0))
with pytest.raises(ValueError, match=r"feature_extractor output must be a tensor of dim 2, got: 1"):
DummyInceptionMetric(num_features=1000, feature_extractor=torch.nn.Identity()).update(torch.rand(3))
with pytest.raises(ValueError, match=r"Batch size should be greater than one, got: 0"):
DummyInceptionMetric(num_features=1000, feature_extractor=torch.nn.Identity()).update(torch.rand(0, 0))
with pytest.raises(ValueError, match=r"num_features returned by feature_extractor should be 1000, got: 0"):
DummyInceptionMetric(num_features=1000, feature_extractor=torch.nn.Identity()).update(torch.rand(2, 0))
with pytest.raises(ValueError, match=r"Argument num_features must be provided, if feature_extractor is specified."):
DummyInceptionMetric(feature_extractor=torch.nn.Identity())
with pytest.raises(TypeError, match=r"Argument feature_extractor must be of type torch.nn.Module, got"):
DummyInceptionMetric(num_features=1000, feature_extractor=lambda x: x)
assert isinstance(DummyInceptionMetric(num_features=10)._feature_extractor, torch.nn.Identity)
def test_inception_extractor_wrong_inputs():
with pytest.raises(ValueError, match=r"Inputs should be a tensor of dim 4"):
InceptionModel(return_features=True)(torch.rand(2))
with pytest.raises(ValueError, match=r"Inputs should be a tensor with 3 channels"):
InceptionModel(return_features=True)(torch.rand(2, 2, 2, 0))
def test_inception_model_probability():
x = torch.rand(2, 3, 299, 299)
y = InceptionModel(return_features=False)(x)
assert pytest.approx(torch.sum(y[0]).item()) == 1.0
assert pytest.approx(torch.sum(y[1]).item()) == 1.0
assert torch.all(0 <= y)
@pytest.fixture()
def mock_no_torchvision():
with patch.dict("sys.modules", {"torchvision": None}):
yield torchvision
def test_no_torchvision(mock_no_torchvision):
with pytest.raises(ModuleNotFoundError, match=r"This module requires torchvision to be installed."):
InceptionModel(return_features=True)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_device_mismatch_cuda():
images = torch.rand(10, 3, 299, 299)
result = InceptionModel(return_features=False, device="cuda")(images)
assert result.is_cuda
assert result.shape == torch.Size([10, 1000])
result = InceptionModel(return_features=False)(images.cuda())
assert not result.is_cuda
assert result.shape == torch.Size([10, 1000])
images = torch.rand(10, 5)
result = DummyInceptionMetric(num_features=5, device="cuda")._extract_features(images)
assert result.is_cuda
assert result.shape == torch.Size([10, 5])
result = DummyInceptionMetric(num_features=5)._extract_features(images.cuda())
assert not result.is_cuda
assert result.shape == torch.Size([10, 5])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.