#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Ensemble posteriors. Used in conjunction with ensemble models.
"""
from __future__ import annotations
from typing import Optional
import torch
from botorch.posteriors.posterior import Posterior
from torch import Tensor
class EnsemblePosterior(Posterior):
r"""Ensemble posterior, that should be used for ensemble models that compute
eagerly a finite number of samples per X value as for example a deep ensemble
or a random forest."""
def __init__(self, values: Tensor) -> None:
r"""
Args:
values: Values of the samples produced by this posterior as
a `(b) x s x q x m` tensor where `m` is the output size of the
model and `s` is the ensemble size.
"""
if values.ndim < 3:
raise ValueError("Values has to be at least three-dimensional.")
self.values = values
@property
def ensemble_size(self) -> int:
r"""The size of the ensemble"""
return self.values.shape[-3]
@property
def weights(self) -> Tensor:
r"""The weights of the individual models in the ensemble.
Equally weighted by default."""
return torch.ones(self.ensemble_size) / self.ensemble_size
@property
def device(self) -> torch.device:
r"""The torch device of the posterior."""
return self.values.device
@property
def dtype(self) -> torch.dtype:
r"""The torch dtype of the posterior."""
return self.values.dtype
@property
def mean(self) -> Tensor:
r"""The mean of the posterior as a `(b) x n x m`-dim Tensor."""
return self.values.mean(dim=-3)
@property
def variance(self) -> Tensor:
r"""The variance of the posterior as a `(b) x n x m`-dim Tensor.
Computed as the sample variance across the ensemble outputs.
"""
if self.ensemble_size == 1:
return torch.zeros_like(self.values.squeeze(-3))
return self.values.var(dim=-3)
def _extended_shape(
self, sample_shape: torch.Size = torch.Size() # noqa: B008
) -> torch.Size:
r"""Returns the shape of the samples produced by the posterior with
the given `sample_shape`.
"""
return sample_shape + self.values.shape[:-3] + self.values.shape[-2:]
def rsample(
self,
sample_shape: Optional[torch.Size] = None,
) -> Tensor:
r"""Sample from the posterior (with gradients).
Based on the sample shape, base samples are generated and passed to
`rsample_from_base_samples`.
Args:
sample_shape: A `torch.Size` object specifying the sample shape. To
draw `n` samples, set to `torch.Size([n])`. To draw `b` batches
of `n` samples each, set to `torch.Size([b, n])`.
Returns:
Samples from the posterior, a tensor of shape
`self._extended_shape(sample_shape=sample_shape)`.
"""
if sample_shape is None:
sample_shape = torch.Size([1])
# get indices as base_samples
base_samples = (
torch.multinomial(
self.weights,
num_samples=sample_shape.numel(),
replacement=True,
)
.reshape(sample_shape)
.to(device=self.device)
)
return self.rsample_from_base_samples(
sample_shape=sample_shape, base_samples=base_samples
)
def rsample_from_base_samples(
self, sample_shape: torch.Size, base_samples: Tensor
) -> Tensor:
r"""Sample from the posterior (with gradients) using base samples.
This is intended to be used with a sampler that produces the corresponding base
samples, and enables acquisition optimization via Sample Average Approximation.
Args:
sample_shape: A `torch.Size` object specifying the sample shape. To
draw `n` samples, set to `torch.Size([n])`. To draw `b` batches
of `n` samples each, set to `torch.Size([b, n])`.
base_samples: A Tensor of indices as base samples of shape
`sample_shape`, typically obtained from `IndexSampler`.
This is used for deterministic optimization. The predictions of
the ensemble corresponding to the indices are then sampled.
Returns:
Samples from the posterior, a tensor of shape
`self._extended_shape(sample_shape=sample_shape)`.
"""
if base_samples.shape != sample_shape:
raise ValueError("Base samples do not match sample shape.")
# move sample axis to front
values = self.values.movedim(-3, 0)
# sample from the first dimension of values
return values[base_samples, ...]
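if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the module): an
    # ensemble of s=4 eagerly computed predictions for q=3 points with m=2
    # outputs, using made-up random values.
    values = torch.randn(4, 3, 2)  # s x q x m
    posterior = EnsemblePosterior(values=values)
    print(posterior.ensemble_size)  # 4
    print(posterior.mean.shape)     # torch.Size([3, 2])
    samples = posterior.rsample(sample_shape=torch.Size([5]))
    print(samples.shape)            # torch.Size([5, 3, 2])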
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Abstract base module for all botorch posteriors.
"""
from __future__ import annotations
import warnings
from abc import ABC, abstractmethod, abstractproperty
from typing import Optional, Tuple
import torch
from torch import Tensor
class Posterior(ABC):
r"""
Abstract base class for botorch posteriors.
:meta private:
"""
def rsample_from_base_samples(
self,
sample_shape: torch.Size,
base_samples: Tensor,
) -> Tensor:
r"""Sample from the posterior (with gradients) using base samples.
This is intended to be used with a sampler that produces the corresponding base
samples, and enables acquisition optimization via Sample Average Approximation.
Args:
sample_shape: A `torch.Size` object specifying the sample shape. To
draw `n` samples, set to `torch.Size([n])`. To draw `b` batches
of `n` samples each, set to `torch.Size([b, n])`.
base_samples: The base samples, obtained from the appropriate sampler.
This is a tensor of shape `sample_shape x base_sample_shape`.
Returns:
Samples from the posterior, a tensor of shape
`self._extended_shape(sample_shape=sample_shape)`.
"""
raise NotImplementedError(
f"{self.__class__.__name__} does not implement `rsample_from_base_samples`."
) # pragma: no cover
@abstractmethod
def rsample(
self,
sample_shape: Optional[torch.Size] = None,
) -> Tensor:
r"""Sample from the posterior (with gradients).
Args:
sample_shape: A `torch.Size` object specifying the sample shape. To
draw `n` samples, set to `torch.Size([n])`. To draw `b` batches
of `n` samples each, set to `torch.Size([b, n])`.
Returns:
Samples from the posterior, a tensor of shape
`self._extended_shape(sample_shape=sample_shape)`.
"""
pass # pragma: no cover
def sample(self, sample_shape: Optional[torch.Size] = None) -> Tensor:
r"""Sample from the posterior without gradients.
Args:
sample_shape: A `torch.Size` object specifying the sample shape. To
draw `n` samples, set to `torch.Size([n])`. To draw `b` batches
of `n` samples each, set to `torch.Size([b, n])`.
Returns:
Samples from the posterior, a tensor of shape
`self._extended_shape(sample_shape=sample_shape)`.
"""
with torch.no_grad():
return self.rsample(sample_shape=sample_shape)
@property
def event_shape(self) -> torch.Size:
r"""The event shape (i.e. the shape of a single sample)."""
warnings.warn(
"The `event_shape` attribute of `Posterior` is deprecated. It will default "
"to the `event_shape` of the underlying distribution in a future version. "
"Use `_extended_shape` instead.",
DeprecationWarning,
)
return self._extended_shape()
@abstractproperty
def device(self) -> torch.device:
r"""The torch device of the distribution."""
pass # pragma: no cover
@abstractproperty
def dtype(self) -> torch.dtype:
r"""The torch dtype of the distribution."""
pass # pragma: no cover
def quantile(self, value: Tensor) -> Tensor:
r"""Compute quantiles of the distribution.
For multi-variate distributions, this may return the quantiles of
the marginal distributions.
"""
raise NotImplementedError(
f"{self.__class__.__name__} does not implement a `quantile` method."
) # pragma: no cover
def density(self, value: Tensor) -> Tensor:
r"""The probability density (or mass) of the distribution.
For multi-variate distributions, this may return the density of
the marginal distributions.
"""
raise NotImplementedError(
f"{self.__class__.__name__} does not implement a `density` method."
) # pragma: no cover
def _extended_shape(
self, sample_shape: torch.Size = torch.Size() # noqa: B008
) -> torch.Size:
r"""Returns the shape of the samples produced by the posterior with
the given `sample_shape`.
"""
raise NotImplementedError(
f"{self.__class__.__name__} does not implement `_extended_shape`."
)
@property
def base_sample_shape(self) -> torch.Size:
r"""The base shape of the base samples expected in `rsample`.
Informs the sampler to produce base samples of shape
`sample_shape x base_sample_shape`.
"""
raise NotImplementedError(
f"{self.__class__.__name__} does not implement `base_sample_shape`."
)
@property
def batch_range(self) -> Tuple[int, int]:
r"""The t-batch range.
This is used in samplers to identify the t-batch component of the
`base_sample_shape`. The base samples are expanded over the t-batches to
provide consistency in the acquisition values, i.e., to ensure that a
candidate produces the same value regardless of its position in the t-batch.
"""
raise NotImplementedError(
f"{self.__class__.__name__} does not implement `batch_range`."
)
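# Minimal illustrative sketch (not a class that ships with botorch): a
# degenerate point-mass posterior showing which members a concrete subclass
# of `Posterior` must implement.
class _PointMassPosterior(Posterior):
    def __init__(self, values: Tensor) -> None:
        self.values = values

    def rsample(self, sample_shape: Optional[torch.Size] = None) -> Tensor:
        if sample_shape is None:
            sample_shape = torch.Size([1])
        # every "sample" is just the stored values, expanded to the sample shape
        return self.values.expand(sample_shape + self.values.shape)

    @property
    def device(self) -> torch.device:
        return self.values.device

    @property
    def dtype(self) -> torch.dtype:
        return self.values.dtype

    def _extended_shape(
        self, sample_shape: torch.Size = torch.Size()  # noqa: B008
    ) -> torch.Size:
        return sample_shape + self.values.shape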
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Tuple, Union
import torch
from botorch.exceptions.errors import BotorchTensorDimensionError
from botorch.posteriors.gpytorch import GPyTorchPosterior
from gpytorch.distributions import MultivariateNormal
from linear_operator.operators import LinearOperator, to_linear_operator
from torch import Tensor
class MultitaskGPPosterior(GPyTorchPosterior):
def __init__(
self,
distribution: MultivariateNormal,
joint_covariance_matrix: LinearOperator,
test_train_covar: LinearOperator,
train_diff: Tensor,
test_mean: Tensor,
train_train_covar: LinearOperator,
train_noise: Union[LinearOperator, Tensor],
test_noise: Optional[Union[LinearOperator, Tensor]] = None,
):
r"""
Posterior class for a Kronecker Multi-task GP model using an ICM kernel.
Extends the standard GPyTorch posterior class by overriding the rsample
method. In general, this posterior should ONLY be used for MTGP models
that have structured covariances. It should also only be used internally when
called from the `KroneckerMultiTaskGP.posterior(...)` method.
Args:
distribution: Posterior multivariate normal distribution.
joint_covariance_matrix: Joint test train covariance matrix over the entire
tensor.
test_train_covar: Covariance matrix of test x train points in the data space.
train_diff: Difference between train mean and train responses.
test_mean: The mean at the test points, added back to the drawn samples.
train_train_covar: Covariance matrix of train points in the data space.
train_noise: Training noise covariance.
test_noise: Test noise covariance. Only used if the posterior should
contain observation noise.
"""
super().__init__(distribution=distribution)
self._is_mt = True
self.joint_covariance_matrix = joint_covariance_matrix
self.test_train_covar = test_train_covar
self.train_diff = train_diff
self.test_mean = test_mean
self.train_train_covar = train_train_covar
self.train_noise = train_noise
self.test_noise = test_noise
self.observation_noise = self.test_noise is not None
self.num_train = self.train_diff.shape[-2]
# The following assumes test_train_covar is a SumLinearOperator. TODO: Improve
self.num_tasks = self.test_train_covar.linear_ops[-1].shape[-1]
@property
def base_sample_shape(self) -> torch.Size:
r"""The shape of a base sample used for constructing posterior samples.
Overwrites the standard `base_sample_shape` call to inform samplers that
`n + 2 n_train` samples need to be drawn rather than n samples.
"""
batch_shape = self.joint_covariance_matrix.shape[:-2]
sampling_shape = (
self.joint_covariance_matrix.shape[-2] + self.train_noise.shape[-2]
)
if self.observation_noise:
sampling_shape = sampling_shape + self.test_noise.shape[-2]
return batch_shape + torch.Size((sampling_shape,))
@property
def batch_range(self) -> Tuple[int, int]:
r"""The t-batch range.
This is used in samplers to identify the t-batch component of the
`base_sample_shape`. The base samples are expanded over the t-batches to
provide consistency in the acquisition values, i.e., to ensure that a
candidate produces the same value regardless of its position in the t-batch.
"""
return (0, -1)
def _prepare_base_samples(
self, sample_shape: torch.Size, base_samples: Optional[Tensor] = None
) -> Tuple[Tensor, Tensor, Optional[Tensor]]:
covariance_matrix = self.joint_covariance_matrix
joint_size = covariance_matrix.shape[-1]
batch_shape = covariance_matrix.batch_shape
# pre-allocate this as None
test_noise_base_samples = None
if base_samples is not None:
if base_samples.shape[: len(sample_shape)] != sample_shape:
raise RuntimeError(
"sample_shape disagrees with shape of base_samples."
f"provided base sample shape is {base_samples.shape} while"
f"the expected shape is {sample_shape}."
)
if base_samples.shape[-1] != 1:
base_samples = base_samples.unsqueeze(-1)
unsqueezed_dim = -2
appended_shape = joint_size + self.train_train_covar.shape[-1]
if self.observation_noise:
appended_shape = appended_shape + self.test_noise.shape[-1]
if appended_shape != base_samples.shape[unsqueezed_dim]:
# Get base_samples to the correct shape by expanding to the sample shape,
# then the batch shape of the model, and finally the shape of the test
# data points squeezed into a single dimension, as accessed from
# test_train_covar.
base_sample_shapes = (
sample_shape + batch_shape + self.test_train_covar.shape[-2:-1]
)
if base_samples.nelement() == base_sample_shapes.numel():
base_samples = base_samples.reshape(base_sample_shapes)
new_base_samples = torch.randn(
*sample_shape,
*batch_shape,
appended_shape - base_samples.shape[-1],
dtype=base_samples.dtype,
device=base_samples.device,
)
base_samples = torch.cat((base_samples, new_base_samples), dim=-1)
base_samples = base_samples.unsqueeze(-1)
else:
raise BotorchTensorDimensionError(
"The base samples are not compatible with base sample shape. "
f"Received base samples of shape {base_samples.shape}, "
f"expected {base_sample_shapes}."
)
if base_samples is None:
# TODO: Allow qMC sampling
base_samples = torch.randn(
*sample_shape,
*batch_shape,
joint_size,
1,
device=covariance_matrix.device,
dtype=covariance_matrix.dtype,
)
noise_base_samples = torch.randn(
*sample_shape,
*batch_shape,
self.train_train_covar.shape[-1],
1,
device=covariance_matrix.device,
dtype=covariance_matrix.dtype,
)
if self.observation_noise:
test_noise_base_samples = torch.randn(
*sample_shape,
*self.test_noise.shape[:-1],
1,
device=covariance_matrix.device,
dtype=covariance_matrix.dtype,
)
else:
# finally split up the base samples
noise_base_samples = base_samples[..., joint_size:, :]
base_samples = base_samples[..., :joint_size, :]
if self.observation_noise:
test_noise_base_samples = noise_base_samples[
..., -self.test_noise.shape[-1] :, :
]
noise_base_samples = noise_base_samples[
..., : -self.test_noise.shape[-1], :
]
return base_samples, noise_base_samples, test_noise_base_samples
def rsample_from_base_samples(
self,
sample_shape: torch.Size,
base_samples: Optional[Tensor],
train_diff: Optional[Tensor] = None,
) -> Tensor:
r"""Sample from the posterior (with gradients) using base samples.
Args:
sample_shape: A `torch.Size` object specifying the sample shape. To
draw `n` samples, set to `torch.Size([n])`. To draw `b` batches
of `n` samples each, set to `torch.Size([b, n])`.
base_samples: An (optional) Tensor of `N(0, I)` base samples of
appropriate dimension, typically obtained from a `Sampler`.
This is used for deterministic optimization.
train_diff: Difference between train mean and train responses to assume
during sampling.
Returns:
Samples from the posterior, a tensor of shape
`self._extended_shape(sample_shape=sample_shape)`.
"""
if train_diff is None:
train_diff = self.train_diff
(
base_samples,
noise_base_samples,
test_noise_base_samples,
) = self._prepare_base_samples(
sample_shape=sample_shape, base_samples=base_samples
)
joint_samples = self._draw_from_base_covar(
self.joint_covariance_matrix, base_samples
)
noise_samples = self._draw_from_base_covar(self.train_noise, noise_base_samples)
# pluck out the train + test samples and add the likelihood's noise to the
# train side. This should be fine for higher rank likelihoods.
n_obs = self.num_tasks * self.num_train
n_test = joint_samples.shape[-1] - n_obs
obs_samples, test_samples = torch.split(joint_samples, [n_obs, n_test], dim=-1)
updated_obs_samples = obs_samples + noise_samples
obs_minus_samples = (
train_diff.reshape(*train_diff.shape[:-2], -1) - updated_obs_samples
)
train_covar_plus_noise = self.train_train_covar + self.train_noise
obs_solve = train_covar_plus_noise.solve(obs_minus_samples.unsqueeze(-1))
# and multiply the test-observed matrix against the result of the solve
updated_samples = self.test_train_covar.matmul(obs_solve).squeeze(-1)
# finally, we add the conditioned samples to the prior samples
final_samples = test_samples + updated_samples
# add in likelihood noise if necessary
if self.observation_noise:
test_noise_samples = self._draw_from_base_covar(
self.test_noise, test_noise_base_samples
)
final_samples = final_samples + test_noise_samples
# and reshape
final_samples = final_samples.reshape(
*final_samples.shape[:-1], self.test_mean.shape[-2], self.num_tasks
)
final_samples = final_samples + self.test_mean
return final_samples
def rsample(
self,
sample_shape: Optional[torch.Size] = None,
) -> Tensor:
r"""Sample from the posterior (with gradients).
Args:
sample_shape: A `torch.Size` object specifying the sample shape. To
draw `n` samples, set to `torch.Size([n])`. To draw `b` batches
of `n` samples each, set to `torch.Size([b, n])`.
Returns:
Samples from the posterior, a tensor of shape
`self._extended_shape(sample_shape=sample_shape)`.
"""
if sample_shape is None:
sample_shape = torch.Size([1])
return self.rsample_from_base_samples(
sample_shape=sample_shape, base_samples=None
)
def _draw_from_base_covar(
self, covar: Union[Tensor, LinearOperator], base_samples: Tensor
) -> Tensor:
# Now reparameterize those base samples
if not isinstance(covar, LinearOperator):
covar = to_linear_operator(covar)
covar_root = covar.root_decomposition().root
# If necessary, adjust base_samples for rank of root decomposition
if covar_root.shape[-1] < base_samples.shape[-2]:
base_samples = base_samples[..., : covar_root.shape[-1], :]
elif covar_root.shape[-1] > base_samples.shape[-2]:
raise RuntimeError("Incompatible dimension of `base_samples`")
# The mean is added in the posterior forward computation, so it is omitted here.
res = covar_root.matmul(base_samples)
return res.squeeze(-1)
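if __name__ == "__main__":
    # Illustrative sketch (made-up data, plain dense tensors, zero prior mean;
    # not part of the library) of the Matheron-style pathwise update that
    # `rsample_from_base_samples` applies: a zero-mean joint prior sample is
    # split into train/test parts and corrected by the observed residuals.
    torch.manual_seed(0)
    n_train, n_test = 5, 3
    X = torch.randn(n_train + n_test, 1, dtype=torch.double)
    K = torch.exp(-torch.cdist(X, X) ** 2)  # joint prior covariance (RBF-like)
    noise = 0.1 * torch.eye(n_train, dtype=torch.double)
    y = torch.sin(X[:n_train, 0])
    chol = torch.linalg.cholesky(
        K + 1e-8 * torch.eye(n_train + n_test, dtype=torch.double)
    )
    f = chol @ torch.randn(n_train + n_test, dtype=torch.double)
    f_train, f_test = f[:n_train], f[n_train:]
    eps = torch.distributions.MultivariateNormal(
        torch.zeros(n_train, dtype=torch.double), noise
    ).sample()
    # posterior sample at the test points = prior sample + data-driven update
    update = K[n_train:, :n_train] @ torch.linalg.solve(
        K[:n_train, :n_train] + noise, y - f_train - eps
    )
    f_post = f_test + update
    print(f_post.shape)  # torch.Size([3])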
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Posterior module to be used with PyTorch distributions.
"""
from __future__ import annotations
from typing import Any, Dict, Optional
import torch
from botorch.posteriors.posterior import Posterior
from torch import Tensor
from torch.distributions.distribution import Distribution
class TorchPosterior(Posterior):
r"""A posterior based on a PyTorch Distribution.
NOTE: For any attribute that is not explicitly defined on the Posterior level, this
returns the corresponding attribute of the distribution. This allows easy access
to the distribution attributes, without having to expose them on the Posterior.
"""
def __init__(self, distribution: Distribution) -> None:
r"""A posterior based on a PyTorch Distribution.
Args:
distribution: A PyTorch Distribution object.
"""
self.distribution = distribution
# Get the device and dtype from distribution attributes.
for attr in vars(distribution).values():
if isinstance(attr, Tensor):
self._device = attr.device
self._dtype = attr.dtype
break
def rsample(
self,
sample_shape: Optional[torch.Size] = None,
) -> Tensor:
r"""Sample from the posterior (with gradients).
This is generally used with a sampler that produces the base samples.
Args:
sample_shape: A `torch.Size` object specifying the sample shape. To
draw `n` samples, set to `torch.Size([n])`. To draw `b` batches
of `n` samples each, set to `torch.Size([b, n])`.
Returns:
Samples from the posterior, a tensor of shape
`self._extended_shape(sample_shape=sample_shape)`.
"""
if sample_shape is None:
sample_shape = torch.Size()
return self.distribution.rsample(sample_shape=sample_shape)
@property
def device(self) -> torch.device:
r"""The torch device of the distribution."""
return self._device
@property
def dtype(self) -> torch.dtype:
r"""The torch dtype of the distribution."""
return self._dtype
def __getattr__(self, name: str) -> Any:
r"""A catch-all for attributes not defined on the posterior level.
Returns the attributes of the distribution instead.
"""
return getattr(self.distribution, name)
def __getstate__(self) -> Dict[str, Any]:
r"""A minimal utility to support pickle protocol.
Pickle uses `__get/setstate__` to serialize / deserialize the objects.
Since we define `__getattr__` above, it takes precedence over these
methods, and we end up in an infinite loop unless we also define
`__getstate__` and `__setstate__`.
"""
return self.__dict__
def __setstate__(self, d: Dict[str, Any]) -> None:
r"""A minimal utility to support pickle protocol."""
self.__dict__ = d
def quantile(self, value: Tensor) -> Tensor:
r"""Compute quantiles of the distribution.
For multi-variate distributions, this may return the quantiles of
the marginal distributions.
"""
if value.numel() > 1:
return torch.stack([self.quantile(v) for v in value], dim=0)
return self.icdf(value)
def density(self, value: Tensor) -> Tensor:
r"""The probability density (or mass if discrete) of the distribution.
For multi-variate distributions, this may return the density of
the marginal distributions.
"""
if value.numel() > 1:
return torch.stack([self.density(v) for v in value], dim=0)
return self.log_prob(value).exp()
def _extended_shape(
self, sample_shape: torch.Size = torch.Size() # noqa: B008
) -> torch.Size:
r"""Returns the shape of the samples produced by the distribution with
the given `sample_shape`.
"""
return self.distribution._extended_shape(sample_shape=sample_shape)
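if __name__ == "__main__":
    # Minimal usage sketch (illustrative shapes only): wrap a PyTorch Normal
    # distribution and access distribution attributes through the posterior.
    from torch.distributions import Normal

    post = TorchPosterior(Normal(loc=torch.zeros(3), scale=torch.ones(3)))
    print(post.device, post.dtype)
    print(post.rsample(sample_shape=torch.Size([4])).shape)  # torch.Size([4, 3])
    print(post.quantile(torch.tensor(0.5)))  # medians, via the Normal's icdf
    print(post.density(torch.tensor(0.0)))   # pdf at zero, via log_prob().exp()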
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Callable, Optional, Tuple
import torch
from botorch.posteriors.posterior import Posterior
from torch import Tensor
class TransformedPosterior(Posterior):
r"""A generic transformation of a posterior (implicitly represented)."""
def __init__(
self,
posterior: Posterior,
sample_transform: Callable[[Tensor], Tensor],
mean_transform: Optional[Callable[[Tensor, Tensor], Tensor]] = None,
variance_transform: Optional[Callable[[Tensor, Tensor], Tensor]] = None,
) -> None:
r"""An implicitly represented transformed posterior.
Args:
posterior: The posterior object to be transformed.
sample_transform: A callable applying a sample-level transform to a
`sample_shape x batch_shape x q x m`-dim tensor of samples from
the original posterior, returning a tensor of samples of the
same shape.
mean_transform: A callable taking the mean and variance (both of
shape `batch_shape x m x o`) of the original posterior and
returning the mean of the transformed posterior.
variance_transform: A callable taking the mean and variance (both of
shape `batch_shape x m x o`) of the original posterior and
returning the variance of the transformed posterior.
"""
self._posterior = posterior
self._sample_transform = sample_transform
self._mean_transform = mean_transform
self._variance_transform = variance_transform
@property
def base_sample_shape(self) -> torch.Size:
r"""The shape of a base sample used for constructing posterior samples."""
return self._posterior.base_sample_shape
@property
def batch_range(self) -> Tuple[int, int]:
r"""The t-batch range.
This is used in samplers to identify the t-batch component of the
`base_sample_shape`. The base samples are expanded over the t-batches to
provide consistency in the acquisition values, i.e., to ensure that a
candidate produces the same value regardless of its position in the t-batch.
"""
return self._posterior.batch_range
@property
def device(self) -> torch.device:
r"""The torch device of the posterior."""
return self._posterior.device
@property
def dtype(self) -> torch.dtype:
r"""The torch dtype of the posterior."""
return self._posterior.dtype
def _extended_shape(
self, sample_shape: torch.Size = torch.Size() # noqa: B008
) -> torch.Size:
r"""Returns the shape of the samples produced by the posterior with
the given `sample_shape`.
NOTE: This assumes that the `sample_transform` does not change the
shape of the samples.
"""
return self._posterior._extended_shape(sample_shape=sample_shape)
@property
def mean(self) -> Tensor:
r"""The mean of the posterior as a `batch_shape x n x m`-dim Tensor."""
if self._mean_transform is None:
raise NotImplementedError("No mean transform provided.")
try:
variance = self._posterior.variance
except (NotImplementedError, AttributeError):
variance = None
return self._mean_transform(self._posterior.mean, variance)
@property
def variance(self) -> Tensor:
r"""The variance of the posterior as a `batch_shape x n x m`-dim Tensor."""
if self._variance_transform is None:
raise NotImplementedError("No variance transform provided.")
return self._variance_transform(self._posterior.mean, self._posterior.variance)
def rsample_from_base_samples(
self,
sample_shape: torch.Size,
base_samples: Tensor,
) -> Tensor:
r"""Sample from the posterior (with gradients) using base samples.
This is intended to be used with a sampler that produces the corresponding base
samples, and enables acquisition optimization via Sample Average Approximation.
Args:
sample_shape: A `torch.Size` object specifying the sample shape. To
draw `n` samples, set to `torch.Size([n])`. To draw `b` batches
of `n` samples each, set to `torch.Size([b, n])`.
base_samples: The base samples, obtained from the appropriate sampler.
This is a tensor of shape `sample_shape x base_sample_shape`.
Returns:
Samples from the posterior, a tensor of shape
`self._extended_shape(sample_shape=sample_shape)`.
"""
samples = self._posterior.rsample_from_base_samples(
sample_shape=sample_shape, base_samples=base_samples
)
return self._sample_transform(samples)
def rsample(
self,
sample_shape: Optional[torch.Size] = None,
) -> Tensor:
r"""Sample from the posterior (with gradients).
Args:
sample_shape: A `torch.Size` object specifying the sample shape. To
draw `n` samples, set to `torch.Size([n])`. To draw `b` batches
of `n` samples each, set to `torch.Size([b, n])`.
Returns:
Samples from the posterior, a tensor of shape
`self._extended_shape(sample_shape=sample_shape)`.
"""
samples = self._posterior.rsample(sample_shape=sample_shape)
return self._sample_transform(samples)
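if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): exponentiate samples of a
    # normal posterior, i.e. a log-normal style transform. Assumes the
    # `TorchPosterior` wrapper from `botorch.posteriors.torch`; the mean and
    # variance transforms below are the usual log-normal moment formulas.
    from botorch.posteriors.torch import TorchPosterior
    from torch.distributions import Normal

    base = TorchPosterior(Normal(torch.zeros(3, 1), torch.ones(3, 1)))
    log_normal = TransformedPosterior(
        posterior=base,
        sample_transform=torch.exp,
        mean_transform=lambda mean, var: torch.exp(mean + var / 2),
        variance_transform=lambda mean, var: (torch.exp(var) - 1)
        * torch.exp(2 * mean + var),
    )
    print(log_normal.rsample(sample_shape=torch.Size([4])).shape)  # 4 x 3 x 1
    print(log_normal.mean.shape)  # torch.Size([3, 1])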
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Candidate generation utilities.
"""
from __future__ import annotations
import time
import warnings
from functools import partial
from typing import Any, Callable, Dict, List, NoReturn, Optional, Tuple, Type, Union
import numpy as np
import torch
from botorch.acquisition import AcquisitionFunction
from botorch.exceptions.warnings import OptimizationWarning
from botorch.generation.utils import _remove_fixed_features_from_optimization
from botorch.logging import _get_logger
from botorch.optim.parameter_constraints import (
_arrayify,
make_scipy_bounds,
make_scipy_linear_constraints,
make_scipy_nonlinear_inequality_constraints,
NLC_TOL,
)
from botorch.optim.stopping import ExpMAStoppingCriterion
from botorch.optim.utils import columnwise_clamp, fix_features
from botorch.optim.utils.timeout import minimize_with_timeout
from scipy.optimize import OptimizeResult
from torch import Tensor
from torch.optim import Optimizer
logger = _get_logger()
TGenCandidates = Callable[[Tensor, AcquisitionFunction, Any], Tuple[Tensor, Tensor]]
def gen_candidates_scipy(
initial_conditions: Tensor,
acquisition_function: AcquisitionFunction,
lower_bounds: Optional[Union[float, Tensor]] = None,
upper_bounds: Optional[Union[float, Tensor]] = None,
inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
nonlinear_inequality_constraints: Optional[List[Callable]] = None,
options: Optional[Dict[str, Any]] = None,
fixed_features: Optional[Dict[int, Optional[float]]] = None,
timeout_sec: Optional[float] = None,
) -> Tuple[Tensor, Tensor]:
r"""Generate a set of candidates using `scipy.optimize.minimize`.
Optimizes an acquisition function starting from a set of initial candidates
using `scipy.optimize.minimize` via a numpy converter.
Args:
initial_conditions: Starting points for optimization, with shape
(b) x q x d.
acquisition_function: Acquisition function to be used.
lower_bounds: Minimum values for each column of initial_conditions.
upper_bounds: Maximum values for each column of initial_conditions.
inequality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an inequality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) >= rhs`.
equality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an equality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) = rhs`.
nonlinear_inequality_constraints: A list of callables that represent
non-linear inequality constraints of the form `callable(x) >= 0`. Each
callable is expected to take a `(num_restarts) x q x d`-dim tensor as
an input and return a `(num_restarts) x q`-dim tensor with the
constraint values. The constraints will later be passed to SLSQP.
options: Options used to control the optimization including "method"
and "maxiter". Select method for `scipy.minimize` using the
"method" key. By default uses L-BFGS-B for box-constrained problems
and SLSQP if inequality or equality constraints are present. If
`with_grad=False`, then we use a two-point finite difference estimate
of the gradient.
fixed_features: This is a dictionary of feature indices to values, where
all generated candidates will have features fixed to these values.
If the dictionary value is None, then that feature will just be
fixed to the clamped value and not optimized. Assumes values to be
compatible with lower_bounds and upper_bounds!
timeout_sec: Timeout (in seconds) for `scipy.optimize.minimize` routine -
if provided, optimization will stop after this many seconds and return
the best solution found so far.
Returns:
2-element tuple containing
- The set of generated candidates.
- The acquisition value for each t-batch.
Example:
>>> qEI = qExpectedImprovement(model, best_f=0.2)
>>> bounds = torch.tensor([[0., 0.], [1., 2.]])
>>> Xinit = gen_batch_initial_conditions(
>>> qEI, bounds, q=3, num_restarts=25, raw_samples=500
>>> )
>>> batch_candidates, batch_acq_values = gen_candidates_scipy(
initial_conditions=Xinit,
acquisition_function=qEI,
lower_bounds=bounds[0],
upper_bounds=bounds[1],
)
"""
options = options or {}
options = {**options, "maxiter": options.get("maxiter", 2000)}
# if there are fixed features we may optimize over a domain of lower dimension
reduced_domain = False
if fixed_features:
# if there are no constraints, things are straightforward
if not (
inequality_constraints
or equality_constraints
or nonlinear_inequality_constraints
):
reduced_domain = True
# if there are we need to make sure features are fixed to specific values
else:
reduced_domain = None not in fixed_features.values()
if reduced_domain:
_no_fixed_features = _remove_fixed_features_from_optimization(
fixed_features=fixed_features,
acquisition_function=acquisition_function,
initial_conditions=initial_conditions,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
nonlinear_inequality_constraints=nonlinear_inequality_constraints,
)
# call the routine with no fixed_features
clamped_candidates, batch_acquisition = gen_candidates_scipy(
initial_conditions=_no_fixed_features.initial_conditions,
acquisition_function=_no_fixed_features.acquisition_function,
lower_bounds=_no_fixed_features.lower_bounds,
upper_bounds=_no_fixed_features.upper_bounds,
inequality_constraints=_no_fixed_features.inequality_constraints,
equality_constraints=_no_fixed_features.equality_constraints,
nonlinear_inequality_constraints=_no_fixed_features.nonlinear_inequality_constraints, # noqa: E501
options=options,
fixed_features=None,
timeout_sec=timeout_sec,
)
clamped_candidates = _no_fixed_features.acquisition_function._construct_X_full(
clamped_candidates
)
return clamped_candidates, batch_acquisition
clamped_candidates = columnwise_clamp(
X=initial_conditions, lower=lower_bounds, upper=upper_bounds
)
shapeX = clamped_candidates.shape
x0 = clamped_candidates.view(-1)
bounds = make_scipy_bounds(
X=initial_conditions, lower_bounds=lower_bounds, upper_bounds=upper_bounds
)
constraints = make_scipy_linear_constraints(
shapeX=shapeX,
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
)
with_grad = options.get("with_grad", True)
if with_grad:
def f_np_wrapper(x: np.ndarray, f: Callable):
"""Given a torch callable, compute value + grad given a numpy array."""
if np.isnan(x).any():
raise RuntimeError(
f"{np.isnan(x).sum()} elements of the {x.size} element array "
f"`x` are NaN."
)
X = (
torch.from_numpy(x)
.to(initial_conditions)
.view(shapeX)
.contiguous()
.requires_grad_(True)
)
X_fix = fix_features(X, fixed_features=fixed_features)
loss = f(X_fix).sum()
# compute gradient w.r.t. the inputs (does not accumulate in leaves)
gradf = _arrayify(torch.autograd.grad(loss, X)[0].contiguous().view(-1))
if np.isnan(gradf).any():
msg = (
f"{np.isnan(gradf).sum()} elements of the {x.size} element "
"gradient array `gradf` are NaN. "
"This often indicates numerical issues."
)
if initial_conditions.dtype != torch.double:
msg += " Consider using `dtype=torch.double`."
raise RuntimeError(msg)
fval = loss.item()
return fval, gradf
else:
def f_np_wrapper(x: np.ndarray, f: Callable):
X = torch.from_numpy(x).to(initial_conditions).view(shapeX).contiguous()
with torch.no_grad():
X_fix = fix_features(X=X, fixed_features=fixed_features)
loss = f(X_fix).sum()
fval = loss.item()
return fval
if nonlinear_inequality_constraints:
# Make sure `batch_limit` is 1 for now.
if not (len(shapeX) == 3 and shapeX[:2] == torch.Size([1, 1])):
raise ValueError(
"`batch_limit` must be 1 when non-linear inequality constraints "
"are given."
)
constraints += make_scipy_nonlinear_inequality_constraints(
nonlinear_inequality_constraints=nonlinear_inequality_constraints,
f_np_wrapper=f_np_wrapper,
x0=x0,
)
x0 = _arrayify(x0)
def f(x):
return -acquisition_function(x)
res = minimize_with_timeout(
fun=f_np_wrapper,
args=(f,),
x0=x0,
method=options.get("method", "SLSQP" if constraints else "L-BFGS-B"),
jac=with_grad,
bounds=bounds,
constraints=constraints,
callback=options.get("callback", None),
options={
k: v
for k, v in options.items()
if k not in ["method", "callback", "with_grad"]
},
timeout_sec=timeout_sec,
)
_process_scipy_result(res=res, options=options)
candidates = fix_features(
X=torch.from_numpy(res.x).to(initial_conditions).reshape(shapeX),
fixed_features=fixed_features,
)
# SLSQP sometimes fails in the line search or may just fail to find a feasible
# candidate in which case we just return the starting point. This happens rarely,
# so it shouldn't be an issue given enough restarts.
if nonlinear_inequality_constraints and any(
nlc(candidates.view(-1)) < NLC_TOL for nlc in nonlinear_inequality_constraints
):
candidates = torch.from_numpy(x0).to(candidates).reshape(shapeX)
warnings.warn(
"SLSQP failed to converge to a solution the satisfies the non-linear "
"constraints. Returning the feasible starting point."
)
clamped_candidates = columnwise_clamp(
X=candidates, lower=lower_bounds, upper=upper_bounds, raise_on_violation=True
)
with torch.no_grad():
batch_acquisition = acquisition_function(clamped_candidates)
return clamped_candidates, batch_acquisition
def gen_candidates_torch(
initial_conditions: Tensor,
acquisition_function: AcquisitionFunction,
lower_bounds: Optional[Union[float, Tensor]] = None,
upper_bounds: Optional[Union[float, Tensor]] = None,
optimizer: Type[Optimizer] = torch.optim.Adam,
options: Optional[Dict[str, Union[float, str]]] = None,
callback: Optional[Callable[[int, Tensor, Tensor], NoReturn]] = None,
fixed_features: Optional[Dict[int, Optional[float]]] = None,
timeout_sec: Optional[float] = None,
) -> Tuple[Tensor, Tensor]:
r"""Generate a set of candidates using a `torch.optim` optimizer.
Optimizes an acquisition function starting from a set of initial candidates
using an optimizer from `torch.optim`.
Args:
initial_conditions: Starting points for optimization.
acquisition_function: Acquisition function to be used.
lower_bounds: Minimum values for each column of initial_conditions.
upper_bounds: Maximum values for each column of initial_conditions.
optimizer (Optimizer): The pytorch optimizer to use to perform
candidate search.
options: Options used to control the optimization. Includes
maxiter: Maximum number of iterations
callback: A callback function accepting the current iteration, loss,
and gradients as arguments. This function is executed after computing
the loss and gradients, but before calling the optimizer.
fixed_features: This is a dictionary of feature indices to values, where
all generated candidates will have features fixed to these values.
If the dictionary value is None, then that feature will just be
fixed to the clamped value and not optimized. Assumes values to be
compatible with lower_bounds and upper_bounds!
timeout_sec: Timeout (in seconds) for optimization. If provided,
`gen_candidates_torch` will stop after this many seconds and return
the best solution found so far.
Returns:
2-element tuple containing
- The set of generated candidates.
- The acquisition value for each t-batch.
Example:
>>> qEI = qExpectedImprovement(model, best_f=0.2)
>>> bounds = torch.tensor([[0., 0.], [1., 2.]])
>>> Xinit = gen_batch_initial_conditions(
>>> qEI, bounds, q=3, num_restarts=25, raw_samples=500
>>> )
>>> batch_candidates, batch_acq_values = gen_candidates_torch(
initial_conditions=Xinit,
acquisition_function=qEI,
lower_bounds=bounds[0],
upper_bounds=bounds[1],
)
"""
start_time = time.monotonic()
options = options or {}
# if there are fixed features we may optimize over a domain of lower dimension
if fixed_features:
subproblem = _remove_fixed_features_from_optimization(
fixed_features=fixed_features,
acquisition_function=acquisition_function,
initial_conditions=initial_conditions,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
inequality_constraints=None,
equality_constraints=None,
nonlinear_inequality_constraints=None,
)
# call the routine with no fixed_features
elapsed = time.monotonic() - start_time
clamped_candidates, batch_acquisition = gen_candidates_torch(
initial_conditions=subproblem.initial_conditions,
acquisition_function=subproblem.acquisition_function,
lower_bounds=subproblem.lower_bounds,
upper_bounds=subproblem.upper_bounds,
optimizer=optimizer,
options=options,
callback=callback,
fixed_features=None,
timeout_sec=timeout_sec - elapsed if timeout_sec else None,
)
clamped_candidates = subproblem.acquisition_function._construct_X_full(
clamped_candidates
)
return clamped_candidates, batch_acquisition
_clamp = partial(columnwise_clamp, lower=lower_bounds, upper=upper_bounds)
clamped_candidates = _clamp(initial_conditions).requires_grad_(True)
_optimizer = optimizer(params=[clamped_candidates], lr=options.get("lr", 0.025))
i = 0
stop = False
stopping_criterion = ExpMAStoppingCriterion(**options)
while not stop:
i += 1
with torch.no_grad():
X = _clamp(clamped_candidates).requires_grad_(True)
loss = -acquisition_function(X).sum()
grad = torch.autograd.grad(loss, X)[0]
if callback:
callback(i, loss, grad)
def assign_grad():
_optimizer.zero_grad()
clamped_candidates.grad = grad
return loss
_optimizer.step(assign_grad)
stop = stopping_criterion.evaluate(fvals=loss.detach())
if timeout_sec is not None:
runtime = time.monotonic() - start_time
if runtime > timeout_sec:
stop = True
logger.info(f"Optimization timed out after {runtime} seconds.")
clamped_candidates = _clamp(clamped_candidates)
with torch.no_grad():
batch_acquisition = acquisition_function(clamped_candidates)
return clamped_candidates, batch_acquisition
def get_best_candidates(batch_candidates: Tensor, batch_values: Tensor) -> Tensor:
r"""Extract best (q-batch) candidate from batch of candidates
Args:
batch_candidates: A `b x q x d` tensor of `b` q-batch candidates, or a
`b x d` tensor of `b` single-point candidates.
batch_values: A tensor with `b` elements containing the value of the
respective candidate (higher is better).
Returns:
A tensor of size `q x d` (if q-batch mode) or `d` from batch_candidates
with the highest associated value.
Example:
>>> qEI = qExpectedImprovement(model, best_f=0.2)
>>> bounds = torch.tensor([[0., 0.], [1., 2.]])
>>> Xinit = gen_batch_initial_conditions(
>>> qEI, bounds, q=3, num_restarts=25, raw_samples=500
>>> )
>>> batch_candidates, batch_acq_values = gen_candidates_scipy(
initial_conditions=Xinit,
acquisition_function=qEI,
lower_bounds=bounds[0],
upper_bounds=bounds[1],
)
>>> best_candidates = get_best_candidates(batch_candidates, batch_acq_values)
"""
best = torch.argmax(batch_values.view(-1), dim=0)
return batch_candidates[best]
def _process_scipy_result(res: OptimizeResult, options: Dict[str, Any]) -> None:
r"""Process scipy optimization result to produce relevant logs and warnings."""
if "success" not in res.keys() or "status" not in res.keys():
with warnings.catch_warnings():
warnings.simplefilter("always", category=OptimizationWarning)
warnings.warn(
"Optimization failed within `scipy.optimize.minimize` with no "
"status returned to `res.`",
OptimizationWarning,
)
elif not res.success:
if (
"ITERATIONS REACHED LIMIT" in res.message
or "Iteration limit reached" in res.message
):
logger.info(
"`scipy.minimize` exited by reaching the iteration limit of "
f"`maxiter: {options.get('maxiter')}`."
)
elif "EVALUATIONS EXCEEDS LIMIT" in res.message:
logger.info(
"`scipy.minimize` exited by reaching the function evaluation limit of "
f"`maxfun: {options.get('maxfun')}`."
)
elif "Optimization timed out after" in res.message:
logger.info(res.message)
else:
with warnings.catch_warnings():
warnings.simplefilter("always", category=OptimizationWarning)
warnings.warn(
f"Optimization failed within `scipy.optimize.minimize` with status "
f"{res.status} and message {res.message}.",
OptimizationWarning,
)
def minimize(*args, **kwargs):
"""Deprecated, use `botorch.generation.gen.minimize_with_timeout`."""
# TODO: Reap this after the next stable Ax release.
warnings.warn(
"`botorch.generation.gen.minimize` is an alias for "
"`botorch.generation.gen.minimize_with_timeout` and will "
"be removed in a future release.",
DeprecationWarning,
)
return minimize_with_timeout(*args, **kwargs)
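if __name__ == "__main__":
    # Illustrative sketch (toy quadratic, no acquisition function or model) of
    # the value-and-gradient wrapper pattern used by `f_np_wrapper` in
    # `gen_candidates_scipy`: scipy optimizes a flat numpy array while torch
    # supplies the objective value and its gradient.
    from scipy.optimize import minimize as scipy_minimize

    target = torch.tensor([0.5, -1.5], dtype=torch.double)

    def toy_value_and_grad(x: np.ndarray):
        X = torch.from_numpy(x).requires_grad_(True)
        loss = ((X - target) ** 2).sum()
        (gradf,) = torch.autograd.grad(loss, X)
        return loss.item(), gradf.numpy()

    res = scipy_minimize(
        toy_value_and_grad, x0=np.zeros(2), jac=True, method="L-BFGS-B"
    )
    print(res.x)  # approximately [0.5, -1.5]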
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from botorch.generation.gen import (
gen_candidates_scipy,
gen_candidates_torch,
get_best_candidates,
)
from botorch.generation.sampling import BoltzmannSampling, MaxPosteriorSampling
__all__ = [
"gen_candidates_scipy",
"gen_candidates_torch",
"get_best_candidates",
"BoltzmannSampling",
"MaxPosteriorSampling",
]
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from dataclasses import dataclass
from typing import Callable, Dict, List, Optional, Tuple, Union
import torch
from botorch.acquisition import AcquisitionFunction, FixedFeatureAcquisitionFunction
from botorch.optim.parameter_constraints import (
_generate_unfixed_lin_constraints,
_generate_unfixed_nonlin_constraints,
)
from torch import Tensor
def _flip_sub_unique(x: Tensor, k: int) -> Tensor:
"""Get the first k unique elements of a single-dimensional tensor, traversing the
tensor from the back.
Args:
x: A single-dimensional tensor
k: the number of elements to return
Returns:
A tensor with min(k, |x|) elements.
Example:
>>> x = torch.tensor([1, 6, 4, 3, 6, 3])
>>> y = _flip_sub_unique(x, 3) # tensor([3, 6, 4])
>>> y = _flip_sub_unique(x, 4) # tensor([3, 6, 4, 1])
>>> y = _flip_sub_unique(x, 10) # tensor([3, 6, 4, 1])
NOTE: This should really be done in C++ to speed up the loop. Also, we would like
to make this work for arbitrary batch shapes; I'm sure this can be sped up.
"""
n = len(x)
i = 0
out = set()
idcs = torch.empty(k, dtype=torch.long)
for j, xi in enumerate(x.flip(0).tolist()):
if xi not in out:
out.add(xi)
idcs[i] = n - 1 - j
i += 1
if len(out) >= k:
break
return x[idcs[: len(out)]]
@dataclass(frozen=True, repr=False, eq=False)
class _NoFixedFeatures:
"""
Dataclass to store the objects after removing fixed features.
Objects here refer to the acquisition function, initial conditions,
bounds and parameter constraints.
"""
acquisition_function: FixedFeatureAcquisitionFunction
initial_conditions: Tensor
lower_bounds: Optional[Union[float, Tensor]]
upper_bounds: Optional[Union[float, Tensor]]
inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]]
equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]]
nonlinear_inequality_constraints: Optional[List[Callable[[Tensor], Tensor]]]
def _remove_fixed_features_from_optimization(
fixed_features: Dict[int, Optional[float]],
acquisition_function: AcquisitionFunction,
initial_conditions: Tensor,
lower_bounds: Optional[Union[float, Tensor]],
upper_bounds: Optional[Union[float, Tensor]],
inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]],
equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]],
nonlinear_inequality_constraints: Optional[List[Callable[[Tensor], Tensor]]],
) -> _NoFixedFeatures:
"""
Given a set of non-empty fixed features, this function effectively reduces the
dimensionality of the domain that the acquisition function is being optimized
over by removing the set of fixed features. Consequently, this function returns a
new `FixedFeatureAcquisitionFunction`, new constraints, and bounds defined over
unfixed features.
Args:
fixed_features: This is a dictionary of feature indices to values, where
all generated candidates will have features fixed to these values.
If the dictionary value is None, then that feature will just be
fixed to the clamped value and not optimized. Assumes values to be
compatible with lower_bounds and upper_bounds!
acquisition_function: Acquisition function over the original domain being
maximized.
initial_conditions: Starting points for optimization w.r.t. the complete domain.
lower_bounds: Minimum values for each column of initial_conditions.
upper_bounds: Maximum values for each column of initial_conditions.
inequality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an inequality constraint of the form
`sum_i (X[indices[i]] * coefficients[i]) >= rhs`.
equality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an equality constraint of the form
`sum_i (X[indices[i]] * coefficients[i]) = rhs`.
nonlinear_inequality_constraints: A list of callables that represent
non-linear inequality constraints of the form `callable(x) >= 0`. Each
callable is expected to take a `(num_restarts) x q x d`-dim tensor as
an input and return a `(num_restarts) x q`-dim tensor with the
constraint values.
Returns:
_NoFixedFeatures dataclass object.
"""
# sort the keys for consistency
sorted_keys = sorted(fixed_features)
sorted_values = []
for key in sorted_keys:
if fixed_features[key] is None:
val = initial_conditions[..., [key]]
else:
val = fixed_features[key]
sorted_values.append(val)
d = initial_conditions.shape[-1]
acquisition_function = FixedFeatureAcquisitionFunction(
acq_function=acquisition_function,
d=d,
columns=sorted_keys,
values=sorted_values,
)
# extract initial_conditions, bounds at unfixed indices
unfixed_indices = sorted(set(range(d)) - set(sorted_keys))
initial_conditions = initial_conditions[..., unfixed_indices]
if isinstance(lower_bounds, Tensor):
lower_bounds = lower_bounds[..., unfixed_indices]
if isinstance(upper_bounds, Tensor):
upper_bounds = upper_bounds[..., unfixed_indices]
inequality_constraints = _generate_unfixed_lin_constraints(
constraints=inequality_constraints,
fixed_features=fixed_features,
dimension=d,
eq=False,
)
equality_constraints = _generate_unfixed_lin_constraints(
constraints=equality_constraints,
fixed_features=fixed_features,
dimension=d,
eq=True,
)
nonlinear_inequality_constraints = _generate_unfixed_nonlin_constraints(
constraints=nonlinear_inequality_constraints,
fixed_features=fixed_features,
dimension=d,
)
return _NoFixedFeatures(
acquisition_function=acquisition_function,
initial_conditions=initial_conditions,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
nonlinear_inequality_constraints=nonlinear_inequality_constraints,
)
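if __name__ == "__main__":
    # Quick illustrative check of `_flip_sub_unique` (mirrors the docstring
    # example above with made-up values).
    x = torch.tensor([1, 6, 4, 3, 6, 3])
    print(_flip_sub_unique(x, 3))   # tensor([3, 6, 4])
    print(_flip_sub_unique(x, 10))  # tensor([3, 6, 4, 1])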
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Sampling-based generation strategies.
A SamplingStrategy returns samples from the input points (i.e. Tensors in feature
space), rather than the value for a set of tensors, as acquisition functions do.
The q-batch dimension has similar semantics as for acquisition functions in that the
points across the q-batch are considered jointly for sampling (whereas for
q-acquisition functions we evaluate the joint value of the q-batch).
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any, Optional, Union
import torch
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.objective import (
IdentityMCObjective,
MCAcquisitionObjective,
PosteriorTransform,
)
from botorch.generation.utils import _flip_sub_unique
from botorch.models.model import Model
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.models.multitask import MultiTaskGP
from botorch.utils.sampling import batched_multinomial
from botorch.utils.transforms import standardize
from torch import Tensor
from torch.nn import Module
class SamplingStrategy(Module, ABC):
r"""
Abstract base class for sampling-based generation strategies.
:meta private:
"""
@abstractmethod
def forward(self, X: Tensor, num_samples: int = 1, **kwargs: Any) -> Tensor:
r"""Sample according to the SamplingStrategy.
Args:
X: A `batch_shape x N x d`-dim Tensor from which to sample (in the `N`
dimension).
num_samples: The number of samples to draw.
kwargs: Additional implementation-specific kwargs.
Returns:
A `batch_shape x num_samples x d`-dim Tensor of samples from `X`, where
`X[..., i, :]` is the `i`-th sample.
"""
pass # pragma: no cover
class MaxPosteriorSampling(SamplingStrategy):
r"""Sample from a set of points according to their max posterior value.
Example:
>>> MPS = MaxPosteriorSampling(model) # model w/ feature dim d=3
>>> X = torch.rand(2, 100, 3)
>>> sampled_X = MPS(X, num_samples=5)
"""
def __init__(
self,
model: Model,
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
replacement: bool = True,
) -> None:
r"""Constructor for the SamplingStrategy base class.
Args:
model: A fitted model.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
posterior_transform: An optional PosteriorTransform.
replacement: If True, sample with replacement.
"""
super().__init__()
self.model = model
self.objective = IdentityMCObjective() if objective is None else objective
self.posterior_transform = posterior_transform
self.replacement = replacement
def forward(
self, X: Tensor, num_samples: int = 1, observation_noise: bool = False
) -> Tensor:
r"""Sample from the model posterior.
Args:
X: A `batch_shape x N x d`-dim Tensor from which to sample (in the `N`
dimension) according to the maximum posterior value under the objective.
num_samples: The number of samples to draw.
observation_noise: If True, sample with observation noise.
Returns:
A `batch_shape x num_samples x d`-dim Tensor of samples from `X`, where
`X[..., i, :]` is the `i`-th sample.
"""
posterior = self.model.posterior(
X,
observation_noise=observation_noise,
posterior_transform=self.posterior_transform,
)
# num_samples x batch_shape x N x m
samples = posterior.rsample(sample_shape=torch.Size([num_samples]))
return self.maximize_samples(X, samples, num_samples)
def maximize_samples(self, X: Tensor, samples: Tensor, num_samples: int = 1):
obj = self.objective(samples, X=X) # num_samples x batch_shape x N
if self.replacement:
# if we allow replacement then things are simple(r)
idcs = torch.argmax(obj, dim=-1)
else:
# if we need to deduplicate we have to do some tensor acrobatics
# first we get the indices associated w/ the num_samples top samples
_, idcs_full = torch.topk(obj, num_samples, dim=-1)
# generate some indices to smartly index into the lower triangle of
# idcs_full (broadcasting across batch dimensions)
ridx, cindx = torch.tril_indices(num_samples, num_samples)
# pick the unique indices in order - since we look at the lower triangle
# of the index matrix and we don't sort, this achieves deduplication
sub_idcs = idcs_full[ridx, ..., cindx]
if sub_idcs.ndim == 1:
idcs = _flip_sub_unique(sub_idcs, num_samples)
elif sub_idcs.ndim == 2:
# TODO: Find a better way to do this
n_b = sub_idcs.size(-1)
idcs = torch.stack(
[_flip_sub_unique(sub_idcs[:, i], num_samples) for i in range(n_b)],
dim=-1,
)
else:
# TODO: Find a general way to do this efficiently.
raise NotImplementedError(
"MaxPosteriorSampling without replacement for more than a single "
"batch dimension is not yet implemented."
)
# idcs is num_samples x batch_shape, to index into X we need to permute for it
# to have shape batch_shape x num_samples
if idcs.ndim > 1:
idcs = idcs.permute(*range(1, idcs.ndim), 0)
# in order to use gather, we need to repeat the index tensor d times
idcs = idcs.unsqueeze(-1).expand(*idcs.shape, X.size(-1))
# now if the model is batched batch_shape will not necessarily be the
# batch_shape of X, so we expand X to the proper shape
Xe = X.expand(*obj.shape[1:], X.size(-1))
# finally we can gather along the N dimension
return torch.gather(Xe, -2, idcs)
class BoltzmannSampling(SamplingStrategy):
r"""Sample from a set of points according to a tempered acquisition value.
Given an acquisition function `acq_func`, this sampling strategy draws
samples from a `batch_shape x N x d`-dim tensor `X` according to a multinomial
distribution over its indices given by
weight(X[..., i, :]) ~ exp(eta * standardize(acq_func(X[..., i, :])))
where `standardize(Y)` standardizes `Y` to zero mean and unit variance. As the
temperature parameter `eta -> 0`, this approaches uniform sampling, while as
`eta -> infty`, this approaches selecting the maximizer(s) of the acquisition
function `acq_func`.
Example:
>>> UCB = UpperConfidenceBound(model, beta=0.1)
>>> BMUCB = BoltzmannSampling(UCB, eta=0.5)
>>> X = torch.rand(2, 100, 3)
>>> sampled_X = BMUCB(X, num_samples=5)
"""
def __init__(
self, acq_func: AcquisitionFunction, eta: float = 1.0, replacement: bool = True
) -> None:
r"""Boltzmann Acquisition Value Sampling.
Args:
acq_func: The acquisition function; to be evaluated in batch at the
individual points of a q-batch (not jointly, as is the case for
acquisition functions). Can be analytic or Monte-Carlo.
eta: The temperature parameter in the softmax.
replacement: If True, sample with replacement.
"""
super().__init__()
self.acq_func = acq_func
self.eta = eta
self.replacement = replacement
def forward(self, X: Tensor, num_samples: int = 1) -> Tensor:
r"""Sample from a tempered value of the acquisition function value.
Args:
X: A `batch_shape x N x d`-dim Tensor from which to sample (in the `N`
dimension).
Note that if a batched model is used in the underlying acquisition
function, then its batch shape must be broadcastable to `batch_shape`.
num_samples: The number of samples to draw.
Returns:
A `batch_shape x num_samples x d`-dim Tensor of samples from `X`, where
`X[..., i, :]` is the `i`-th sample.
"""
# TODO: Can we get the model batch shape property from the model?
# we move the `N` dimension to the front for evaluating the acquisition function
# so that X_eval has shape `N x batch_shape x 1 x d`
X_eval = X.permute(-2, *range(X.ndim - 2), -1).unsqueeze(-2)
acqval = self.acq_func(X_eval) # N x batch_shape
# now move the `N` dimension back (this is the number of categories)
acqval = acqval.permute(*range(1, X.ndim - 1), 0) # batch_shape x N
weights = torch.exp(self.eta * standardize(acqval)) # batch_shape x N
idcs = batched_multinomial(
weights=weights, num_samples=num_samples, replacement=self.replacement
)
# now do some gathering acrobatics to select the right elements from X
return torch.gather(X, -2, idcs.unsqueeze(-1).expand(*idcs.shape, X.size(-1)))
class ConstrainedMaxPosteriorSampling(MaxPosteriorSampling):
r"""Sample from a set of points according to
their max posterior value,
which also likely meet a set of constraints
c1(x) <= 0, c2(x) <= 0, ..., cm(x) <= 0
c1, c2, ..., cm are black-box constraint functions
Each constraint function is modeled by a seperate
surrogate GP constraint model
We sample points for which the posterior value
for each constraint model <= 0,
as described in https://doi.org/10.48550/arxiv.2002.08526
Example:
>>> CMPS = ConstrainedMaxPosteriorSampling(model,
constraint_model=ModelListGP(cmodel1, cmodel2,
..., cmodelm) # models w/ feature dim d=3
>>> X = torch.rand(2, 100, 3)
>>> sampled_X = CMPS(X, num_samples=5)
"""
def __init__(
self,
model: Model,
constraint_model: Union[ModelListGP, MultiTaskGP],
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
replacement: bool = True,
minimize_constraints_only: bool = False,
) -> None:
r"""Constructor for the SamplingStrategy base class.
Args:
model: A fitted model.
objective: The MCAcquisitionObjective under
which the samples are evaluated.
Defaults to `IdentityMCObjective()`.
posterior_transform: An optional PosteriorTransform.
replacement: If True, sample with replacement.
            constraint_model: Either a ModelListGP where each submodel is a GP
                model for one constraint function, or a MultiTaskGP model where
                each task is one constraint function. All constraints are of the
                form c(x) <= 0. If the constraint model predicts that all
                candidates violate the constraints, we pick the candidates with
                the minimum total violation.
            minimize_constraints_only: False by default. If True, the candidates
                with the minimum posterior constraint values (minimum predicted
                c(x) summed over all constraints) are returned automatically,
                regardless of the predicted objective values.
"""
super().__init__(
model=model,
objective=objective,
posterior_transform=posterior_transform,
replacement=replacement,
)
self.constraint_model = constraint_model
self.minimize_constraints_only = minimize_constraints_only
def forward(
self, X: Tensor, num_samples: int = 1, observation_noise: bool = False
) -> Tensor:
r"""Sample from the model posterior.
Args:
X: A `batch_shape x N x d`-dim Tensor
from which to sample (in the `N`
dimension) according to the maximum
posterior value under the objective.
num_samples: The number of samples to draw.
observation_noise: If True, sample with observation noise.
Returns:
A `batch_shape x num_samples x d`-dim
Tensor of samples from `X`, where
`X[..., i, :]` is the `i`-th sample.
"""
posterior = self.model.posterior(X, observation_noise=observation_noise)
samples = posterior.rsample(sample_shape=torch.Size([num_samples]))
c_posterior = self.constraint_model.posterior(
X, observation_noise=observation_noise
)
constraint_samples = c_posterior.rsample(sample_shape=torch.Size([num_samples]))
valid_samples = constraint_samples <= 0
if valid_samples.shape[-1] > 1: # if more than one constraint
valid_samples = torch.all(valid_samples, dim=-1).unsqueeze(-1)
if (valid_samples.sum() == 0) or self.minimize_constraints_only:
            # if none of the samples meet the constraints (or if we only care
            # about minimizing constraint violation), pick the candidates with
            # the smallest total predicted violation
constraint_samples = constraint_samples.sum(dim=-1)
idcs = torch.argmin(constraint_samples, dim=-1)
if idcs.ndim > 1:
idcs = idcs.permute(*range(1, idcs.ndim), 0)
idcs = idcs.unsqueeze(-1).expand(*idcs.shape, X.size(-1))
Xe = X.expand(*constraint_samples.shape[1:], X.size(-1))
return torch.gather(Xe, -2, idcs)
        # replace all violators with -infinity so they will never be chosen
replacement_infs = -torch.inf * torch.ones(samples.shape).to(X.device).to(
X.dtype
)
samples = torch.where(valid_samples, samples, replacement_infs)
return self.maximize_samples(X, samples, num_samples)
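# A minimal end-to-end sketch (illustrative only, not part of this module): the
# unfitted SingleTaskGP surrogates, the random training data, and the candidate
# set below are all assumptions chosen to mirror the docstring example above.
if __name__ == "__main__":
    from botorch.models import ModelListGP, SingleTaskGP

    train_X = torch.rand(20, 3)
    train_Y = torch.rand(20, 1)  # objective observations
    train_C1 = torch.rand(20, 1) - 0.5  # constraint observations (c(x) <= 0 is feasible)
    train_C2 = torch.rand(20, 1) - 0.5
    model = SingleTaskGP(train_X, train_Y)
    constraint_model = ModelListGP(
        SingleTaskGP(train_X, train_C1), SingleTaskGP(train_X, train_C2)
    )
    CMPS = ConstrainedMaxPosteriorSampling(model, constraint_model=constraint_model)
    X_cand = torch.rand(100, 3)
    sampled_X = CMPS(X_cand, num_samples=5)  # shape: 5 x 3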
|
#!/usr/bin/env python
import confu
parser = confu.standard_parser("cpuinfo configuration script")
parser.add_argument("--log", dest="log_level",
choices=("none", "fatal", "error", "warning", "info", "debug"), default="error")
parser.add_argument("--mock", dest="mock", action="store_true")
def main(args):
options = parser.parse_args(args)
build = confu.Build.from_options(options)
macros = {
"CPUINFO_LOG_LEVEL": {"none": 0, "fatal": 1, "error": 2, "warning": 3, "info": 4, "debug": 5}[options.log_level],
"CLOG_LOG_TO_STDIO": int(not options.mock),
"CPUINFO_MOCK": int(options.mock),
}
if build.target.is_linux or build.target.is_android:
macros["_GNU_SOURCE"] = 1
build.export_cpath("include", ["cpuinfo.h"])
with build.options(source_dir="src", macros=macros, extra_include_dirs="src", deps=build.deps.clog):
sources = ["api.c", "init.c", "cache.c"]
if build.target.is_x86 or build.target.is_x86_64:
sources += [
"x86/init.c", "x86/info.c", "x86/isa.c", "x86/vendor.c",
"x86/uarch.c", "x86/name.c", "x86/topology.c",
"x86/cache/init.c", "x86/cache/descriptor.c", "x86/cache/deterministic.c",
]
if build.target.is_macos:
sources += ["x86/mach/init.c"]
elif build.target.is_linux or build.target.is_android:
sources += [
"x86/linux/init.c",
"x86/linux/cpuinfo.c",
]
if build.target.is_arm or build.target.is_arm64:
sources += ["arm/uarch.c", "arm/cache.c"]
if build.target.is_linux or build.target.is_android:
sources += [
"arm/linux/init.c",
"arm/linux/cpuinfo.c",
"arm/linux/clusters.c",
"arm/linux/midr.c",
"arm/linux/chipset.c",
"arm/linux/hwcap.c",
]
if build.target.is_arm:
sources.append("arm/linux/aarch32-isa.c")
elif build.target.is_arm64:
sources.append("arm/linux/aarch64-isa.c")
if build.target.is_android:
sources += [
"arm/android/properties.c",
]
if build.target.is_macos:
sources += ["mach/topology.c"]
if build.target.is_linux or build.target.is_android:
sources += [
"linux/cpulist.c",
"linux/smallfile.c",
"linux/multiline.c",
"linux/processors.c",
]
if options.mock:
sources += ["linux/mockfile.c"]
build.static_library("cpuinfo", map(build.cc, sources))
with build.options(source_dir="tools", deps=[build, build.deps.clog]):
build.executable("cpu-info", build.cc("cpu-info.c"))
build.executable("isa-info", build.cc("isa-info.c"))
build.executable("cache-info", build.cc("cache-info.c"))
if build.target.is_x86_64:
with build.options(source_dir="tools", include_dirs=["src", "include"]):
build.executable("cpuid-dump", build.cc("cpuid-dump.c"))
with build.options(source_dir="test", deps=[build, build.deps.clog, build.deps.googletest]):
build.smoketest("init-test", build.cxx("init.cc"))
if build.target.is_linux:
build.smoketest("get-current-test", build.cxx("get-current.cc"))
if build.target.is_x86_64:
build.smoketest("brand-string-test", build.cxx("name/brand-string.cc"))
if options.mock:
with build.options(source_dir="test", include_dirs="test", macros="CPUINFO_MOCK", deps=[build, build.deps.googletest]):
if build.target.is_arm64 and build.target.is_linux:
build.unittest("scaleway-test", build.cxx("scaleway.cc"))
if not options.mock:
with build.options(source_dir="bench", deps=[build, build.deps.clog, build.deps.googlebenchmark]):
build.benchmark("init-bench", build.cxx("init.cc"))
if not build.target.is_macos:
build.benchmark("get-current-bench", build.cxx("get-current.cc"))
return build
if __name__ == "__main__":
import sys
main(sys.argv[1:]).generate()
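# Typical usage (an assumption based on confu-generated builds, not part of this
# script): running `python configure.py [--log LEVEL] [--mock]` writes the ninja
# build files, after which the project is built with `ninja`.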
|
#!/usr/bin/env python
import os
import sys
import argparse
import shutil
parser = argparse.ArgumentParser(description='Android system files extractor')
parser.add_argument("-p", "--prefix", metavar="NAME", required=True,
help="Prefix for stored files, e.g. galaxy-s7-us")
SYSTEM_FILES = [
"/proc/cpuinfo",
"/sys/devices/system/cpu/kernel_max",
"/sys/devices/system/cpu/possible",
"/sys/devices/system/cpu/present",
]
CPU_FILES = [
"cpufreq/cpuinfo_max_freq",
"cpufreq/cpuinfo_min_freq",
"topology/physical_package_id",
"topology/core_siblings_list",
"topology/core_id",
"topology/thread_siblings_list",
]
CACHE_FILES = [
"allocation_policy",
"coherency_line_size",
"level",
"number_of_sets",
"shared_cpu_list",
"size",
"type",
"ways_of_associativity",
"write_policy",
]
def c_escape(string):
c_string = ""
for c in string:
if c == "\\":
c_string += "\\\\"
elif c == "\"":
c_string += "\\\""
elif c == "\t":
c_string += "\\t"
elif c == "\n":
c_string += "\\n"
elif c == "\r":
c_string += "\\r"
elif ord(c) == 0:
c_string += "\\0"
elif 32 <= ord(c) < 127:
c_string += c
else:
c_string += "x%02X" % ord(c)
return c_string
def dump_system_file(stream, path):
try:
with open(path, "rb") as device_file:
content = device_file.read()
stream.write("\t{\n")
stream.write("\t\t.path = \"%s\",\n" % path)
stream.write("\t\t.size = %d,\n" % len(content))
if len(content.splitlines()) > 1:
stream.write("\t\t.content =")
for line in content.splitlines(True):
stream.write("\n\t\t\t\"%s\"" % c_escape(line))
stream.write(",\n")
else:
stream.write("\t\t.content = \"%s\",\n" % c_escape(content))
stream.write("\t},\n")
return True
except IOError:
pass
def main(args):
options = parser.parse_args(args)
# with open(os.path.join("test", "dmesg", options.prefix + ".log"), "w") as dmesg_log:
# dmesg_log.write(device.Shell("dmesg"))
with open(os.path.join("test", options.prefix + ".h"), "w") as file_header:
file_header.write("struct cpuinfo_mock_file filesystem[] = {\n")
for path in SYSTEM_FILES:
dump_system_file(file_header, path)
for cpu in range(16):
for filename in CPU_FILES:
path = "/sys/devices/system/cpu/cpu%d/%s" % (cpu, filename)
dump_system_file(file_header, path)
for index in range(10):
for filename in CACHE_FILES:
path = "/sys/devices/system/cpu/cpu%d/cache/index%d/%s" % (cpu, index, filename)
dump_system_file(file_header, path)
file_header.write("\t{ NULL },\n")
file_header.write("};\n")
shutil.copy("/proc/cpuinfo",
os.path.join("test", "cpuinfo", options.prefix + ".log"))
if __name__ == "__main__":
main(sys.argv[1:])
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import sys
import re
parser = argparse.ArgumentParser(description='x86 CPUID dump parser')
parser.add_argument("input", metavar="INPUT", nargs=1,
help="Path to CPUID dump log")
def main(args):
options = parser.parse_args(args)
cpuid_dump = list()
for line in open(options.input[0]).read().splitlines():
match = re.match(r"CPUID ([\dA-F]{8}): ([\dA-F]{8})-([\dA-F]{8})-([\dA-F]{8})-([\dA-F]{8})", line)
if match is not None:
input_eax, eax, ebx, ecx, edx = tuple(int(match.group(i), 16) for i in [1, 2, 3, 4, 5])
line = line[match.end(0):].strip()
input_ecx = None
match = re.match(r"\[SL (\d{2})\]", line)
if match is not None:
input_ecx = int(match.group(1), 16)
cpuid_dump.append((input_eax, input_ecx, eax, ebx, ecx, edx))
print("struct cpuinfo_mock_cpuid cpuid_dump[] = {")
for input_eax, input_ecx, eax, ebx, ecx, edx in cpuid_dump:
print("\t{")
print("\t\t.input_eax = 0x%08X," % input_eax)
if input_ecx is not None:
print("\t\t.input_ecx = 0x%08X," % input_ecx)
print("\t\t.eax = 0x%08X," % eax)
print("\t\t.ebx = 0x%08X," % ebx)
print("\t\t.ecx = 0x%08X," % ecx)
print("\t\t.edx = 0x%08X," % edx)
print("\t},")
print("};")
print()
if __name__ == "__main__":
main(sys.argv[1:])
|
#!/usr/bin/env python
import os
import sys
import string
import argparse
import subprocess
import tempfile
root_dir = os.path.abspath(os.path.dirname(__file__))
parser = argparse.ArgumentParser(description='Android system files extractor')
parser.add_argument("-p", "--prefix", metavar="NAME", required=True,
help="Prefix for stored files, e.g. galaxy-s7-us")
# System files which need to be read with `adb shell cat filename`
# instead of `adb pull filename`
SHELL_PREFIX = [
"/sys/class/kgsl/kgsl-3d0/",
]
SYSTEM_FILES = [
"/proc/cpuinfo",
"/system/build.prop",
"/sys/class/kgsl/kgsl-3d0/bus_split",
"/sys/class/kgsl/kgsl-3d0/clock_mhz",
"/sys/class/kgsl/kgsl-3d0/deep_nap_timer",
"/sys/class/kgsl/kgsl-3d0/default_pwrlevel",
"/sys/class/kgsl/kgsl-3d0/dev",
"/sys/class/kgsl/kgsl-3d0/devfreq/available_frequencies",
"/sys/class/kgsl/kgsl-3d0/devfreq/available_governors",
"/sys/class/kgsl/kgsl-3d0/devfreq/cur_freq",
"/sys/class/kgsl/kgsl-3d0/devfreq/governor",
"/sys/class/kgsl/kgsl-3d0/devfreq/gpu_load",
"/sys/class/kgsl/kgsl-3d0/devfreq/max_freq",
"/sys/class/kgsl/kgsl-3d0/devfreq/min_freq",
"/sys/class/kgsl/kgsl-3d0/devfreq/polling_interval",
"/sys/class/kgsl/kgsl-3d0/devfreq/suspend_time",
"/sys/class/kgsl/kgsl-3d0/devfreq/target_freq",
"/sys/class/kgsl/kgsl-3d0/devfreq/trans_stat",
"/sys/class/kgsl/kgsl-3d0/device/op_cpu_table",
"/sys/class/kgsl/kgsl-3d0/freq_table_mhz",
"/sys/class/kgsl/kgsl-3d0/ft_fast_hang_detect",
"/sys/class/kgsl/kgsl-3d0/ft_hang_intr_status",
"/sys/class/kgsl/kgsl-3d0/ft_long_ib_detect",
"/sys/class/kgsl/kgsl-3d0/ft_pagefault_policy",
"/sys/class/kgsl/kgsl-3d0/ft_policy",
"/sys/class/kgsl/kgsl-3d0/gpu_available_frequencies",
"/sys/class/kgsl/kgsl-3d0/gpu_busy_percentage",
"/sys/class/kgsl/kgsl-3d0/gpu_clock_stats",
"/sys/class/kgsl/kgsl-3d0/gpu_llc_slice_enable",
"/sys/class/kgsl/kgsl-3d0/gpu_model",
"/sys/class/kgsl/kgsl-3d0/gpubusy",
"/sys/class/kgsl/kgsl-3d0/gpuclk",
"/sys/class/kgsl/kgsl-3d0/gpuhtw_llc_slice_enable",
"/sys/class/kgsl/kgsl-3d0/hwcg",
"/sys/class/kgsl/kgsl-3d0/idle_timer",
"/sys/class/kgsl/kgsl-3d0/lm",
"/sys/class/kgsl/kgsl-3d0/max_gpuclk",
"/sys/class/kgsl/kgsl-3d0/max_pwrlevel",
"/sys/class/kgsl/kgsl-3d0/min_clock_mhz",
"/sys/class/kgsl/kgsl-3d0/min_pwrlevel",
"/sys/class/kgsl/kgsl-3d0/num_pwrlevels",
"/sys/class/kgsl/kgsl-3d0/pmqos_active_latency",
"/sys/class/kgsl/kgsl-3d0/popp",
"/sys/class/kgsl/kgsl-3d0/preempt_count",
"/sys/class/kgsl/kgsl-3d0/preempt_level",
"/sys/class/kgsl/kgsl-3d0/preemption",
"/sys/class/kgsl/kgsl-3d0/pwrscale",
"/sys/class/kgsl/kgsl-3d0/reset_count",
"/sys/class/kgsl/kgsl-3d0/skipsaverestore",
"/sys/class/kgsl/kgsl-3d0/sptp_pc",
"/sys/class/kgsl/kgsl-3d0/thermal_pwrlevel",
"/sys/class/kgsl/kgsl-3d0/throttling",
"/sys/class/kgsl/kgsl-3d0/usesgmem",
"/sys/class/kgsl/kgsl-3d0/wake_nice",
"/sys/class/kgsl/kgsl-3d0/wake_timeout",
"/sys/devices/soc0/accessory_chip",
"/sys/devices/soc0/build_id",
"/sys/devices/soc0/chip_family",
"/sys/devices/soc0/chip_name",
"/sys/devices/soc0/family",
"/sys/devices/soc0/foundry_id",
"/sys/devices/soc0/hw_platform",
"/sys/devices/soc0/image_crm_version",
"/sys/devices/soc0/image_variant",
"/sys/devices/soc0/image_version",
"/sys/devices/soc0/images",
"/sys/devices/soc0/machine",
"/sys/devices/soc0/ncluster_array_offset",
"/sys/devices/soc0/ndefective_parts_array_offset",
"/sys/devices/soc0/nmodem_supported",
"/sys/devices/soc0/nproduct_id",
"/sys/devices/soc0/num_clusters",
"/sys/devices/soc0/num_defective_parts",
"/sys/devices/soc0/platform_subtype",
"/sys/devices/soc0/platform_subtype_id",
"/sys/devices/soc0/platform_version",
"/sys/devices/soc0/pmic_die_revision",
"/sys/devices/soc0/pmic_model",
"/sys/devices/soc0/raw_device_family",
"/sys/devices/soc0/raw_device_number",
"/sys/devices/soc0/raw_id",
"/sys/devices/soc0/raw_version",
"/sys/devices/soc0/revision",
"/sys/devices/soc0/select_image",
"/sys/devices/soc0/serial_number",
"/sys/devices/soc0/soc_id",
"/sys/devices/soc0/vendor",
"/sys/devices/system/b.L/big_threads",
"/sys/devices/system/b.L/boot_cluster",
"/sys/devices/system/b.L/core_status",
"/sys/devices/system/b.L/little_threads",
"/sys/devices/system/b.L/down_migrations",
"/sys/devices/system/b.L/up_migrations",
"/sys/devices/system/cpu/isolated",
"/sys/devices/system/cpu/kernel_max",
"/sys/devices/system/cpu/modalias",
"/sys/devices/system/cpu/offline",
"/sys/devices/system/cpu/online",
"/sys/devices/system/cpu/possible",
"/sys/devices/system/cpu/present",
"/sys/devices/system/cpu/sched_isolated",
"/sys/devices/system/cpu/clusterhotplug/cur_hstate",
"/sys/devices/system/cpu/clusterhotplug/down_freq",
"/sys/devices/system/cpu/clusterhotplug/down_tasks",
"/sys/devices/system/cpu/clusterhotplug/down_threshold",
"/sys/devices/system/cpu/clusterhotplug/sampling_rate",
"/sys/devices/system/cpu/clusterhotplug/time_in_state",
"/sys/devices/system/cpu/clusterhotplug/up_freq",
"/sys/devices/system/cpu/clusterhotplug/up_tasks",
"/sys/devices/system/cpu/clusterhotplug/up_threshold",
"/sys/devices/system/cpu/cpufreq/all_time_in_state",
"/sys/devices/system/cpu/cpufreq/current_in_state",
"/sys/devices/system/cpu/cpufreq/cpufreq_limit/big_cpu_num",
"/sys/devices/system/cpu/cpufreq/cpufreq_limit/big_max_freq",
"/sys/devices/system/cpu/cpufreq/cpufreq_limit/big_min_freq",
"/sys/devices/system/cpu/cpufreq/cpufreq_limit/hmp_boost_type",
"/sys/devices/system/cpu/cpufreq/cpufreq_limit/hmp_prev_boost_type",
"/sys/devices/system/cpu/cpufreq/cpufreq_limit/ltl_cpu_num",
"/sys/devices/system/cpu/cpufreq/cpufreq_limit/ltl_divider",
"/sys/devices/system/cpu/cpufreq/cpufreq_limit/ltl_max_freq",
"/sys/devices/system/cpu/cpufreq/cpufreq_limit/ltl_min_freq",
"/sys/devices/system/cpu/cpufreq/cpufreq_limit/ltl_min_lock",
"/sys/devices/system/cpu/cpufreq/cpufreq_limit/requests",
"/sys/devices/system/cpu/cpuidle/current_driver",
"/sys/devices/system/cpu/cpuidle/current_governor_ro",
"/sys/devices/system/cpu/cputopo/cpus_per_cluster",
"/sys/devices/system/cpu/cputopo/big_cpumask",
"/sys/devices/system/cpu/cputopo/glbinfo",
"/sys/devices/system/cpu/cputopo/is_big_little",
"/sys/devices/system/cpu/cputopo/is_multi_cluster",
"/sys/devices/system/cpu/cputopo/little_cpumask",
"/sys/devices/system/cpu/cputopo/nr_clusters",
"/sys/devices/system/sched/idle_prefer",
"/sys/devices/system/sched/sched_boost",
]
CPU_FILES = [
"core_ctl/active_cpus",
"core_ctl/busy_up_thres",
"core_ctl/busy_down_thres",
"core_ctl/enable",
"core_ctl/global_state",
"core_ctl/is_big_cluster",
"core_ctl/max_cpus",
"core_ctl/min_cpus",
"core_ctl/need_cpus",
"core_ctl/not_preferred",
"core_ctl/offline_delay_ms",
"core_ctl/task_thres",
"current_driver",
"current_governor_ro",
"cpuidle/driver/name",
"cpufreq/affected_cpus",
"cpufreq/cpuinfo_max_freq",
"cpufreq/cpuinfo_min_freq",
"cpufreq/cpuinfo_transition_latency",
"cpufreq/related_cpus",
"cpufreq/scaling_available_frequencies",
"cpufreq/scaling_available_governors",
"cpufreq/scaling_cur_freq",
"cpufreq/scaling_driver",
"cpufreq/scaling_governor",
"cpufreq/scaling_max_freq",
"cpufreq/scaling_min_freq",
"cpufreq/sched/down_throttle_nsec",
"cpufreq/sched/up_throttle_nsec",
"cpufreq/stats/time_in_state",
"cpufreq/stats/total_trans",
"cpufreq/stats/trans_table",
"isolate",
"regs/identification/midr_el1",
"regs/identification/revidr_el1",
"sched_load_boost",
"topology/core_id",
"topology/core_siblings",
"topology/core_siblings_list",
"topology/cpu_capacity",
"topology/max_cpu_capacity",
"topology/physical_package_id",
"topology/thread_siblings",
"topology/thread_siblings_list",
]
CACHE_FILES = [
"allocation_policy",
"coherency_line_size",
"level",
"number_of_sets",
"shared_cpu_list",
"shared_cpu_map",
"size",
"type",
"ways_of_associativity",
"write_policy",
]
def c_escape(string):
c_string = ""
for c in string:
if c == "\\":
c_string += "\\\\"
elif c == "\"":
c_string += "\\\""
elif c == "\t":
c_string += "\\t"
elif c == "\n":
c_string += "\\n"
elif c == "\r":
c_string += "\\r"
elif ord(c) == 0:
c_string += "\\0"
elif 32 <= ord(c) < 127:
c_string += c
else:
c_string += "x%02X" % ord(c)
return c_string
def adb_shell(commands):
env = os.environ.copy()
env["LC_ALL"] = "C"
adb = subprocess.Popen(["adb", "shell"] + commands, env=env, stdout=subprocess.PIPE)
stdout, _ = adb.communicate()
if adb.returncode == 0:
return stdout
def adb_push(local_path, device_path):
env = os.environ.copy()
env["LC_ALL"] = "C"
adb = subprocess.Popen(["adb", "push", local_path, device_path], env=env)
adb.communicate()
return adb.returncode == 0
def adb_pull(device_path, local_path):
if any(device_path.startswith(prefix) for prefix in SHELL_PREFIX):
content = adb_shell(["cat", device_path])
if content is not None:
if not content.rstrip().endswith("No such file or directory"):
with open(local_path, "wb") as local_file:
local_file.write(content)
return True
else:
env = os.environ.copy()
env["LC_ALL"] = "C"
adb = subprocess.Popen(["adb", "pull", device_path, local_path], env=env)
adb.communicate()
return adb.returncode == 0
def adb_getprop():
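    # Parses `adb shell getprop` output, which consists of lines of the form
    # "[key]: [value]"; e.g. the (hypothetical) line "[ro.product.cpu.abi]: [arm64-v8a]"
    # yields the pair ("ro.product.cpu.abi", "arm64-v8a").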
properties = adb_shell(["getprop"])
properties_list = list()
while properties:
assert properties.startswith("[")
properties = properties[1:]
key, properties = properties.split("]", 1)
properties = properties.strip()
assert properties.startswith(":")
properties = properties[1:].strip()
assert properties.startswith("[")
properties = properties[1:]
value, properties = properties.split("]", 1)
properties = properties.strip()
properties_list.append((key, value))
return properties_list
def add_mock_file(stream, path, content):
assert content is not None
stream.write("\t{\n")
stream.write("\t\t.path = \"%s\",\n" % path)
stream.write("\t\t.size = %d,\n" % len(content))
if len(content.splitlines()) > 1:
stream.write("\t\t.content =")
for line in content.splitlines(True):
stream.write("\n\t\t\t\"%s\"" % c_escape(line))
stream.write(",\n")
else:
stream.write("\t\t.content = \"%s\",\n" % c_escape(content))
stream.write("\t},\n")
def dump_device_file(stream, path, prefix_line=None):
temp_fd, temp_path = tempfile.mkstemp()
os.close(temp_fd)
try:
if adb_pull(path, temp_path):
with open(temp_path, "rb") as temp_file:
content = temp_file.read()
if prefix_line is not None:
stream.write(prefix_line)
add_mock_file(stream, path, content)
return content
finally:
if os.path.exists(temp_path):
os.remove(temp_path)
def main(args):
options = parser.parse_args(args)
dmesg_content = adb_shell(["dmesg"])
if dmesg_content is not None and dmesg_content.strip() == "klogctl: Operation not permitted":
dmesg_content = None
if dmesg_content is not None:
with open(os.path.join("test", "dmesg", options.prefix + ".log"), "w") as dmesg_dump:
dmesg_dump.write(dmesg_content)
build_prop_content = None
proc_cpuinfo_content = None
proc_cpuinfo_content32 = None
kernel_max = 0
with open(os.path.join("test", "mock", options.prefix + ".h"), "w") as file_header:
file_header.write("struct cpuinfo_mock_file filesystem[] = {\n")
android_props = adb_getprop()
abi = None
for key, value in android_props:
if key == "ro.product.cpu.abi":
abi = value
for path in SYSTEM_FILES:
arm64_prefix = None
if path == "/proc/cpuinfo" and abi == "arm64-v8a":
arm64_prefix = "#if CPUINFO_ARCH_ARM64\n"
content = dump_device_file(file_header, path, prefix_line=arm64_prefix)
if content is not None:
if path == "/proc/cpuinfo":
proc_cpuinfo_content = content
elif path == "/system/build.prop":
build_prop_content = content
elif path == "/sys/devices/system/cpu/kernel_max":
kernel_max = int(content.strip())
if arm64_prefix:
cpuinfo_dump_binary = os.path.join(root_dir, "..", "build", "android", "armeabi-v7a", "cpuinfo-dump")
assert os.path.isfile(cpuinfo_dump_binary)
adb_push(cpuinfo_dump_binary, "/data/local/tmp/cpuinfo-dump")
proc_cpuinfo_content32 = adb_shell(["/data/local/tmp/cpuinfo-dump"])
if proc_cpuinfo_content32:
proc_cpuinfo_content32 = "\n".join(proc_cpuinfo_content32.splitlines())
file_header.write("#elif CPUINFO_ARCH_ARM\n")
add_mock_file(file_header, "/proc/cpuinfo", proc_cpuinfo_content32)
file_header.write("#endif\n")
for cpu in range(kernel_max + 1):
for filename in CPU_FILES:
path = "/sys/devices/system/cpu/cpu%d/%s" % (cpu, filename)
dump_device_file(file_header, path)
for index in range(5):
for filename in CACHE_FILES:
path = "/sys/devices/system/cpu/cpu%d/cache/index%d/%s" % (cpu, index, filename)
dump_device_file(file_header, path)
file_header.write("\t{ NULL },\n")
file_header.write("};\n")
file_header.write("#ifdef __ANDROID__\n")
file_header.write("struct cpuinfo_mock_property properties[] = {\n")
for key, value in android_props:
file_header.write("\t{\n")
file_header.write("\t\t.key = \"%s\",\n" % c_escape(key))
file_header.write("\t\t.value = \"%s\",\n" % c_escape(value))
file_header.write("\t},\n")
file_header.write("\t{ NULL },\n")
file_header.write("};\n")
file_header.write("#endif /* __ANDROID__ */\n")
if proc_cpuinfo_content is not None:
with open(os.path.join("test", "cpuinfo", options.prefix + ".log"), "w") as proc_cpuinfo_dump:
proc_cpuinfo_dump.write(proc_cpuinfo_content)
if proc_cpuinfo_content32 is not None:
with open(os.path.join("test", "cpuinfo", options.prefix + ".armeabi.log"), "w") as proc_cpuinfo_dump32:
proc_cpuinfo_dump32.write(proc_cpuinfo_content32)
if build_prop_content is not None:
with open(os.path.join("test", "build.prop", options.prefix + ".log"), "w") as build_prop_dump:
build_prop_dump.write(build_prop_content)
if __name__ == "__main__":
main(sys.argv[1:])
|
#!/usr/bin/env python
import confu
parser = confu.standard_parser("clog configuration script")
def main(args):
options = parser.parse_args(args)
build = confu.Build.from_options(options)
build.export_cpath("include", ["clog.h"])
with build.options(source_dir="src", extra_include_dirs="src"):
build.static_library("clog", build.cc("clog.c"))
with build.options(source_dir="test", deps={
(build, build.deps.googletest): all,
"log": build.target.is_android}):
build.unittest("clog-test", build.cxx("clog.cc"))
return build
if __name__ == "__main__":
import sys
main(sys.argv[1:]).generate()
|
from typing import Dict, List, Optional, Tuple
import json
import math
from fairseq.data import Dictionary
import torch
import torchaudio
from torchaudio.pipelines import EMFORMER_RNNT_BASE_LIBRISPEECH
from torchaudio.models import Hypothesis
def get_hypo_tokens(hypo: Hypothesis) -> List[int]:
return hypo[0]
def get_hypo_score(hypo: Hypothesis) -> float:
return hypo[3]
def to_string(input: List[int], tgt_dict: List[str], bos_idx: int = 0, eos_idx: int = 2, separator: str = "",) -> str:
# torchscript dislikes sets
extra_symbols_to_ignore: Dict[int, int] = {}
extra_symbols_to_ignore[eos_idx] = 1
extra_symbols_to_ignore[bos_idx] = 1
# it also dislikes comprehensions with conditionals
filtered_idx: List[int] = []
for idx in input:
if idx not in extra_symbols_to_ignore:
filtered_idx.append(idx)
return separator.join([tgt_dict[idx] for idx in filtered_idx]).replace("\u2581", " ")
def post_process_hypos(
hypos: List[Hypothesis], tgt_dict: List[str],
) -> List[Tuple[str, List[float], List[int]]]:
post_process_remove_list = [
3, # unk
2, # eos
1, # pad
]
hypos_str: List[str] = []
for h in hypos:
filtered_tokens: List[int] = []
for token_index in get_hypo_tokens(h)[1:]:
if token_index not in post_process_remove_list:
filtered_tokens.append(token_index)
string = to_string(filtered_tokens, tgt_dict)
hypos_str.append(string)
hypos_ids = [get_hypo_tokens(h)[1:] for h in hypos]
hypos_score = [[math.exp(get_hypo_score(h))] for h in hypos]
nbest_batch = list(zip(hypos_str, hypos_score, hypos_ids))
return nbest_batch
def _piecewise_linear_log(x):
x[x > math.e] = torch.log(x[x > math.e])
x[x <= math.e] = x[x <= math.e] / math.e
return x
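# The function above applies, in place, the intended map f(x) = x / e for x <= e
# and f(x) = log(x) for x > e; the two branches agree at x = e (both give 1), so
# the function is continuous there. Note that the second mask is evaluated on the
# tensor after the first assignment has already modified it.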
class ModelWrapper(torch.nn.Module):
def __init__(self, tgt_dict: List[str]):
super().__init__()
self.transform = torchaudio.transforms.MelSpectrogram(sample_rate=16000, n_fft=400, n_mels=80, hop_length=160)
self.decoder = EMFORMER_RNNT_BASE_LIBRISPEECH.get_decoder()
self.tgt_dict = tgt_dict
with open("global_stats.json") as f:
blob = json.loads(f.read())
self.mean = torch.tensor(blob["mean"])
self.invstddev = torch.tensor(blob["invstddev"])
self.decibel = 2 * 20 * math.log10(32767)
self.gain = pow(10, 0.05 * self.decibel)
def forward(
self, input: torch.Tensor, prev_hypo: Optional[Hypothesis], prev_state: Optional[List[List[torch.Tensor]]]
) -> Tuple[str, Hypothesis, Optional[List[List[torch.Tensor]]]]:
spectrogram = self.transform(input).transpose(1, 0)
features = _piecewise_linear_log(spectrogram * self.gain).unsqueeze(0)[:, :-1]
features = (features - self.mean) * self.invstddev
length = torch.tensor([features.shape[1]])
hypotheses, state = self.decoder.infer(features, length, 10, state=prev_state, hypothesis=prev_hypo)
transcript = post_process_hypos(hypotheses[:1], self.tgt_dict)[0][0]
return transcript, hypotheses[0], state
tgt_dict = Dictionary.load("spm_bpe_4096_fairseq.dict")
wrapper = ModelWrapper(tgt_dict.symbols)
wrapper = torch.jit.script(wrapper)
wrapper.save("scripted_wrapper_tuple.pt")
|
import torch
import torchaudio
from torch.utils.mobile_optimizer import optimize_for_mobile
def get_demo_wrapper():
wrapper = torch.jit.load("scripted_wrapper_tuple.pt")
return wrapper
wrapper = get_demo_wrapper()
scripted_model = torch.jit.script(wrapper)
optimized_model = optimize_for_mobile(scripted_model)
optimized_model._save_for_lite_interpreter("streaming_asrv2.ptl")
print("Done _save_for_lite_interpreter")
|
import pyaudio
import queue
import numpy as np
import torch
import torchaudio
def get_demo_wrapper():
wrapper = torch.jit.load("scripted_wrapper_tuple.pt")
return wrapper
wrapper = get_demo_wrapper()
################################################################
data_queue = queue.Queue()
def callback(in_data, frame_count, time_info, status):
global data_queue
data_queue.put(in_data)
return in_data, pyaudio.paContinue
state = None
hypo = None
def transcribe(np_array, should_print=True):
global state, hypo
tensor = torch.tensor(np_array)
transcript, hypo, state = wrapper(tensor, hypo, state)
if should_print and transcript:
print(transcript, end="", flush=True)
previous_right_context = None
def process(should_print=True):
global previous_right_context
if previous_right_context is None:
previous_right_context = [
np.frombuffer(data_queue.get(), dtype=np.float32) for _ in range(1)
]
# Get 4 segments.
segments = [
np.frombuffer(data_queue.get(), dtype=np.float32) for _ in range(4)
]
current_input = previous_right_context + segments
with torch.no_grad():
transcribe(np.concatenate(current_input), should_print=should_print)
# Save right context.
previous_right_context = current_input[-1:]
# Emformer is configured with an input segment size of 4 and a right context size of 1.
# In terms of pre-time-reduction frames (reduction factor 4), that is an input segment
# of 16 frames and a right context of 4 frames going into the RNN-T.
# With a hop length of 160 samples, we then have 16 * 160 = 2560 samples in the input segment
# and 4 * 160 = 640 samples in the right context.
# Then, since the lowest common factor between 640 and 3600 is 640, we'll
# read from the stream in 640-sample increments.
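# Worked numbers from the comment above (illustrative constants only; the stream
# opened below hard-codes the matching 640-sample buffer size):
SEGMENT_SAMPLES = 4 * 4 * 160        # segment of 4 frames * time reduction 4 * hop 160 = 2560
RIGHT_CONTEXT_SAMPLES = 1 * 4 * 160  # 1 right-context frame * 4 * 160 = 640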
p = pyaudio.PyAudio()
CHANNELS = 1
RATE = 16000
stream = p.open(
format=pyaudio.paFloat32,
channels=CHANNELS,
rate=RATE,
input=True,
output=False,
frames_per_buffer=640,
stream_callback=callback,
)
stream.start_stream()
# We need to initialize the model by evaluating
# a few samples.
# If we skip this, evaluation latency will become
# prohibitively large.
print("Initializing model...")
for _ in range(10):
process(should_print=False)
print("Initialization complete.")
data_queue = queue.Queue()
previous_right_context = None
state = None
hypo = None  # reset the decoding hypothesis used by transcribe()
while stream.is_active():
process(should_print=True)
stream.stop_stream()
stream.close()
|
import torch
import torch.utils.cpp_extension
print(torch.version.__version__)
op_source = """
#include <opencv2/opencv.hpp>
#include <torch/script.h>
torch::Tensor warp_perspective(torch::Tensor image, torch::Tensor warp) {
cv::Mat image_mat(/*rows=*/image.size(0),
/*cols=*/image.size(1),
/*type=*/CV_32FC1,
/*data=*/image.data_ptr<float>());
cv::Mat warp_mat(/*rows=*/warp.size(0),
/*cols=*/warp.size(1),
/*type=*/CV_32FC1,
/*data=*/warp.data_ptr<float>());
cv::Mat output_mat;
cv::warpPerspective(image_mat, output_mat, warp_mat, /*dsize=*/{64, 64});
torch::Tensor output =
torch::from_blob(output_mat.ptr<float>(), /*sizes=*/{64, 64});
return output.clone();
}
static auto registry =
torch::RegisterOperators("my_ops::warp_perspective", &warp_perspective);
"""
torch.utils.cpp_extension.load_inline(
name="warp_perspective",
cpp_sources=op_source,
extra_ldflags=["-lopencv_core", "-lopencv_imgproc"],
is_python_module=False,
verbose=True,
)
print(torch.ops.my_ops.warp_perspective)
@torch.jit.script
def compute(x, y):
if bool(x[0][0] == 42):
z = 5
else:
z = 10
x = torch.ops.my_ops.warp_perspective(x, torch.eye(3))
return x.matmul(y) + z
compute.save("app/src/main/assets/compute.pt")
|
import torch
from torch.utils.mobile_optimizer import optimize_for_mobile
model = torch.hub.load('pytorch/vision:v0.11.0', 'deeplabv3_resnet50', pretrained=True)
model.eval()
scripted_module = torch.jit.script(model)
optimized_scripted_module = optimize_for_mobile(scripted_module)
# Export full jit version model (not compatible with lite interpreter)
scripted_module.save("deeplabv3_scripted.pt")
# Export lite interpreter version model (compatible with lite interpreter)
scripted_module._save_for_lite_interpreter("deeplabv3_scripted.ptl")
# using optimized lite interpreter model makes inference about 60% faster than the non-optimized lite interpreter model, which is about 6% faster than the non-optimized full jit model
optimized_scripted_module._save_for_lite_interpreter("deeplabv3_scripted_optimized.ptl")
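# Optional sanity check (an illustrative sketch, not part of the original export
# script): run the optimized TorchScript module on a random image to confirm it
# still returns the segmentation output dict with an "out" tensor of shape
# [1, 21, H, W] (21 classes for the default DeepLabV3 head).
with torch.no_grad():
    dummy = torch.rand(1, 3, 224, 224)
    out = optimized_scripted_module(dummy)["out"]
    print(out.shape)  # expected: torch.Size([1, 21, 224, 224])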
|
import torch
from torch import Tensor
from torch.utils.mobile_optimizer import optimize_for_mobile
import torchaudio
from torchaudio.models.wav2vec2.utils.import_huggingface import import_huggingface_model
from transformers import Wav2Vec2ForCTC
# The Wav2Vec2 model emits a sequence of logits (unnormalized probability distributions) over the characters
# The following class adds steps to decode the transcript (best path)
class SpeechRecognizer(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
self.labels = [
"<s>", "<pad>", "</s>", "<unk>", "|", "E", "T", "A", "O", "N", "I", "H", "S",
"R", "D", "L", "U", "M", "W", "C", "F", "G", "Y", "P", "B", "V", "K", "'", "X",
"J", "Q", "Z"]
def forward(self, waveforms: Tensor) -> str:
"""Given a single channel speech data, return transcription.
Args:
waveforms (Tensor): Speech tensor. Shape `[1, num_frames]`.
Returns:
str: The resulting transcript
"""
logits, _ = self.model(waveforms) # [batch, num_seq, num_label]
best_path = torch.argmax(logits[0], dim=-1) # [num_seq,]
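        # Greedy (best-path) CTC-style decoding: collapse consecutive repeats of
        # the same label, treat '<s>' as a reset/ignore token, and map the '|'
        # word separator to a space at the end.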
prev = ''
hypothesis = ''
for i in best_path:
char = self.labels[i]
if char == prev:
continue
if char == '<s>':
prev = ''
continue
hypothesis += char
prev = char
return hypothesis.replace('|', ' ')
# Load Wav2Vec2 pretrained model from Hugging Face Hub
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
# Convert the model to torchaudio format, which supports TorchScript.
model = import_huggingface_model(model)
# Remove weight normalization which is not supported by quantization.
model.encoder.transformer.pos_conv_embed.__prepare_scriptable__()
model = model.eval()
# Attach decoder
model = SpeechRecognizer(model)
# Apply quantization / script / optimize for mobile
quantized_model = torch.quantization.quantize_dynamic(
model, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
scripted_model = torch.jit.script(quantized_model)
optimized_model = optimize_for_mobile(scripted_model)
# Sanity check
waveform , _ = torchaudio.load('scent_of_a_woman_future.wav')
print('Result:', optimized_model(waveform))
optimized_model._save_for_lite_interpreter("wav2vec2.ptl")
|
import torch
from pytorchvideo.accelerator.deployment.mobile_cpu.utils.model_conversion import (
convert_to_deployable_form,
)
from pytorchvideo.models.accelerator.mobile_cpu.efficient_x3d import EfficientX3d
from torch.hub import load_state_dict_from_url
from torch.utils.mobile_optimizer import (
optimize_for_mobile,
)
model_efficient_x3d_xs = EfficientX3d(expansion='XS', head_act='identity')
checkpoint_path = 'https://dl.fbaipublicfiles.com/pytorchvideo/model_zoo/kinetics/efficient_x3d_xs_original_form.pyth'
checkpoint = load_state_dict_from_url(checkpoint_path)
model_efficient_x3d_xs.load_state_dict(checkpoint)
input_blob_size = (1, 3, 4, 160, 160)
input_tensor = torch.randn(input_blob_size)
model_efficient_x3d_xs_deploy = convert_to_deployable_form(model_efficient_x3d_xs, input_tensor)
traced_model = torch.jit.trace(model_efficient_x3d_xs_deploy, input_tensor, strict=False)
optimized_traced_model = optimize_for_mobile(traced_model)
optimized_traced_model.save("app/src/main/assets/video_classification.pt")
optimized_traced_model._save_for_lite_interpreter("app/src/main/assets/video_classification.ptl")
|
import torch
from transformers import DistilBertTokenizer, DistilBertForQuestionAnswering
from torch.utils.mobile_optimizer import optimize_for_mobile
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased-distilled-squad')
model = DistilBertForQuestionAnswering.from_pretrained('distilbert-base-uncased-distilled-squad')
model.eval()
question, text = "When will support for GPU be available?!", "There is a growing need to execute ML models on edge devices to reduce latency, preserve privacy and enable new interactive use cases. In the past, engineers used to train models separately. They would then go through a multi-step, error prone and often complex process to transform the models for execution on a mobile device. The mobile runtime was often significantly different from the operations available during training leading to inconsistent developer and eventually user experience. PyTorch Mobile removes these friction surfaces by allowing a seamless process to go from training to deployment by staying entirely within the PyTorch ecosystem. It provides an end-to-end workflow that simplifies the research to production environment for mobile devices. In addition, it paves the way for privacy-preserving features via Federated Learning techniques. PyTorch Mobile is in beta stage right now and in wide scale production use. It will soon be available as a stable release once the APIs are locked down. Key features of PyTorch Mobile: Available for iOS, Android and Linux; Provides APIs that cover common preprocessing and integration tasks needed for incorporating ML in mobile applications; Support for tracing and scripting via TorchScript IR; Support for XNNPACK floating point kernel libraries for Arm CPUs; Integration of QNNPACK for 8-bit quantized kernels. Includes support for per-channel quantization, dynamic quantization and more; Build level optimization and selective compilation depending on the operators needed for user applications, i.e., the final binary size of the app is determined by the actual operators the app needs; Support for hardware backends like GPU, DSP, NPU will be available soon."
# inputs['input_ids'] has length 360, the maximum number of tokens generated from the user question and text.
# On mobile, if the tokenized question and text are shorter than 360 tokens, the input needs to be padded to 360 for the traced model to work correctly.
inputs = tokenizer(question, text, return_tensors='pt')
model_dynamic_quantized = torch.quantization.quantize_dynamic(model, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
traced_model = torch.jit.trace(model_dynamic_quantized, inputs['input_ids'], strict=False)
optimized_traced_model = optimize_for_mobile(traced_model)
optimized_traced_model._save_for_lite_interpreter("qa360_quantized.ptl")
# 360 is the length of model input, i.e. the length of the tokenized ids of question+text
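# A hedged sketch (not part of the original script) of producing a fixed-length
# 360-token input, matching the padding note above; padding/truncation to a fixed
# max_length is a standard Hugging Face tokenizer option.
padded_inputs = tokenizer(question, text, return_tensors='pt',
                          padding='max_length', truncation=True, max_length=360)
print(padded_inputs['input_ids'].shape)  # expected: torch.Size([1, 360])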
|
# based on https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html
from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import string
import re
import random
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
SOS_token = 0
EOS_token = 1
class Lang:
def __init__(self, name):
self.name = name
self.word2index = {}
self.word2count = {}
self.index2word = {0: "SOS", 1: "EOS"}
self.n_words = 2 # Count SOS and EOS
def addSentence(self, sentence):
for word in sentence.split(' '):
self.addWord(word)
def addWord(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
# Turn a Unicode string to plain ASCII, thanks to
# https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
def normalizeString(s):
s = unicodeToAscii(s.lower().strip())
s = re.sub(r"([.!?])", r" \1", s)
s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
return s
def readLangs(lang1, lang2, reverse=False):
print("Reading lines...")
# Read the file and split into lines
lines = open('data/%s-%s.txt' % (lang1, lang2), encoding='utf-8').\
read().strip().split('\n')
# Split every line into pairs and normalize
pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines]
# Reverse pairs, make Lang instances
if reverse:
pairs = [list(reversed(p)) for p in pairs]
input_lang = Lang(lang2)
output_lang = Lang(lang1)
else:
input_lang = Lang(lang1)
output_lang = Lang(lang2)
return input_lang, output_lang, pairs
MAX_LENGTH = 50
def filterPair(p):
return len(p[0].split(' ')) < MAX_LENGTH and \
len(p[1].split(' ')) < MAX_LENGTH
def filterPairs(pairs):
return [pair for pair in pairs if filterPair(pair)]
def prepareData(lang1, lang2, reverse=False):
input_lang, output_lang, pairs = readLangs(lang1, lang2, reverse)
print("Read %s sentence pairs" % len(pairs))
pairs = filterPairs(pairs)
print("Trimmed to %s sentence pairs" % len(pairs))
print("Counting words...")
for pair in pairs:
input_lang.addSentence(pair[0])
output_lang.addSentence(pair[1])
print("Counted words:")
print(input_lang.name, input_lang.n_words)
print(output_lang.name, output_lang.n_words)
return input_lang, output_lang, pairs
input_lang, output_lang, pairs = prepareData('eng', 'fra', True)
print(random.choice(pairs))
class EncoderRNN(nn.Module):
def __init__(self, input_size, hidden_size):
super(EncoderRNN, self).__init__()
self.hidden_size = hidden_size
self.embedding = nn.Embedding(input_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size)
def forward(self, input, hidden):
embedded = self.embedding(input).view(1, 1, -1)
output = embedded
output, hidden = self.gru(output, hidden)
return output, hidden
def initHidden(self):
return torch.zeros(1, 1, self.hidden_size, device=device)
class AttnDecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):
super(AttnDecoderRNN, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.dropout_p = dropout_p
self.max_length = max_length
self.embedding = nn.Embedding(self.output_size, self.hidden_size)
self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
self.dropout = nn.Dropout(self.dropout_p)
self.gru = nn.GRU(self.hidden_size, self.hidden_size)
self.out = nn.Linear(self.hidden_size, self.output_size)
def forward(self, input, hidden, encoder_outputs):
embedded = self.embedding(input).view(1, 1, -1)
embedded = self.dropout(embedded)
attn_weights = F.softmax(
self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
attn_applied = torch.bmm(attn_weights.unsqueeze(0),
encoder_outputs.unsqueeze(0))
output = torch.cat((embedded[0], attn_applied[0]), 1)
output = self.attn_combine(output).unsqueeze(0)
output = F.relu(output)
output, hidden = self.gru(output, hidden)
output = F.log_softmax(self.out(output[0]), dim=1)
return output, hidden, attn_weights
def initHidden(self):
return torch.zeros(1, 1, self.hidden_size, device=device)
def indexesFromSentence(lang, sentence):
return [lang.word2index[word] for word in sentence.split(' ')]
def tensorFromSentence(lang, sentence):
indexes = indexesFromSentence(lang, sentence)
indexes.append(EOS_token)
return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)
def tensorsFromPair(pair):
input_tensor = tensorFromSentence(input_lang, pair[0])
target_tensor = tensorFromSentence(output_lang, pair[1])
return (input_tensor, target_tensor)
teacher_forcing_ratio = 0.5
def train(input_tensor, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):
encoder_hidden = encoder.initHidden()
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
input_length = input_tensor.size(0)
target_length = target_tensor.size(0)
encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)
loss = 0
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(
input_tensor[ei], encoder_hidden)
encoder_outputs[ei] = encoder_output[0, 0]
decoder_input = torch.tensor([[SOS_token]], device=device)
decoder_hidden = encoder_hidden
use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
if use_teacher_forcing:
# Teacher forcing: Feed the target as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_outputs)
loss += criterion(decoder_output, target_tensor[di])
decoder_input = target_tensor[di] # Teacher forcing
else:
# Without teacher forcing: use its own predictions as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_outputs)
topv, topi = decoder_output.topk(1)
decoder_input = topi.squeeze().detach() # detach from history as input
loss += criterion(decoder_output, target_tensor[di])
if decoder_input.item() == EOS_token:
break
loss.backward()
encoder_optimizer.step()
decoder_optimizer.step()
return loss.item() / target_length
import time
import math
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
def trainIters(encoder, decoder, n_iters, print_every=1000, plot_every=100, learning_rate=0.01):
start = time.time()
plot_losses = []
print_loss_total = 0 # Reset every print_every
plot_loss_total = 0 # Reset every plot_every
encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
training_pairs = [tensorsFromPair(random.choice(pairs))
for i in range(n_iters)]
criterion = nn.NLLLoss()
for iter in range(1, n_iters + 1):
training_pair = training_pairs[iter - 1]
input_tensor = training_pair[0]
target_tensor = training_pair[1]
loss = train(input_tensor, target_tensor, encoder,
decoder, encoder_optimizer, decoder_optimizer, criterion)
print_loss_total += loss
plot_loss_total += loss
if iter % print_every == 0:
print_loss_avg = print_loss_total / print_every
print_loss_total = 0
print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),
iter, iter / n_iters * 100, print_loss_avg))
if iter % 150000 == 0:
torch.save({
'encoder_state_dict': encoder.state_dict(),
'decoder_state_dict': decoder.state_dict(),
'encoder_optimizer_state_dict': encoder_optimizer.state_dict(),
'decoder_optimizer_state_dict': decoder_optimizer.state_dict(),
}, "seq2seq_mt_{}.pt".format(iter))
hidden_size = 256
encoder = EncoderRNN(input_lang.n_words, hidden_size).to(device)
decoder = AttnDecoderRNN(hidden_size, output_lang.n_words, dropout_p=0.1).to(device)
trainIters(encoder, decoder, 450100, print_every=5000)
encoder = EncoderRNN(input_lang.n_words, hidden_size)
decoder = AttnDecoderRNN(hidden_size, output_lang.n_words)
encoder_optimizer = optim.SGD(encoder.parameters(), lr=0.01)
decoder_optimizer = optim.SGD(decoder.parameters(), lr=0.01)
checkpoint = torch.load("seq2seq_mt_150000.pt", map_location=torch.device('cpu'))
encoder.load_state_dict(checkpoint['encoder_state_dict'])
decoder.load_state_dict(checkpoint['decoder_state_dict'])
encoder_optimizer.load_state_dict(checkpoint['encoder_optimizer_state_dict'])
decoder_optimizer.load_state_dict(checkpoint['decoder_optimizer_state_dict'])
encoder.eval()
decoder.eval()
encoder_input=torch.tensor([429])
encoder_hidden=torch.zeros(1,1,256)
decoder_input1=torch.tensor([[0]])
decoder_input2=torch.zeros(1,1,256)
decoder_input3=torch.zeros(50,256)
# dynamic quantization can be applied to the decoder for its nn.Linear parameters
quantized_decoder = torch.quantization.quantize_dynamic(decoder, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
traced_encoder = torch.jit.trace(encoder, (encoder_input, encoder_hidden))
traced_decoder = torch.jit.trace(quantized_decoder, (decoder_input1, decoder_input2, decoder_input3))
from torch.utils.mobile_optimizer import optimize_for_mobile
traced_encoder_optimized = optimize_for_mobile(traced_encoder)
traced_encoder_optimized._save_for_lite_interpreter("optimized_encoder_150k.ptl")
traced_decoder_optimized = optimize_for_mobile(traced_decoder)
traced_decoder_optimized._save_for_lite_interpreter("optimized_decoder_150k.ptl")
|
import torch
from torch.utils.mobile_optimizer import optimize_for_mobile
model = torch.hub.load('facebookresearch/deit:main', 'deit_base_patch16_224', pretrained=True)
quantized_model = torch.quantization.quantize_dynamic(model, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
ts_model = torch.jit.script(quantized_model)
optimized_torchscript_model = optimize_for_mobile(ts_model)
optimized_torchscript_model.save("fbdeit.pt")
|
import torch
import torch.nn.functional as F
from torch import nn
from einops import rearrange
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.GELU(),
nn.Linear(hidden_dim, dim)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, heads=8):
super().__init__()
self.heads = heads
self.scale = dim ** -0.5
self.to_qkv = nn.Linear(dim, dim * 3, bias=False)
self.to_out = nn.Linear(dim, dim)
def forward(self, x, mask = None):
b, n, _, h = *x.shape, self.heads
qkv = self.to_qkv(x)
q, k, v = rearrange(qkv, 'b n (qkv h d) -> qkv b h n d', qkv=3, h=h)
dots = torch.einsum('bhid,bhjd->bhij', q, k) * self.scale
if mask is not None:
mask = F.pad(mask.flatten(1), (1, 0), value = True)
assert mask.shape[-1] == dots.shape[-1], 'mask has incorrect dimensions'
mask = mask[:, None, :] * mask[:, :, None]
dots.masked_fill_(~mask, float('-inf'))
del mask
attn = dots.softmax(dim=-1)
out = torch.einsum('bhij,bhjd->bhid', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
return out
class Transformer(nn.Module):
def __init__(self, dim, depth, heads, mlp_dim):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Residual(PreNorm(dim, Attention(dim, heads = heads))),
Residual(PreNorm(dim, FeedForward(dim, mlp_dim)))
]))
def forward(self, x, mask=None):
for attn, ff in self.layers:
x = attn(x, mask=mask)
x = ff(x)
return x
class ViT(nn.Module):
def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, channels=3):
super().__init__()
assert image_size % patch_size == 0, 'image dimensions must be divisible by the patch size'
num_patches = (image_size // patch_size) ** 2
patch_dim = channels * patch_size ** 2
self.patch_size = patch_size
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))
self.patch_to_embedding = nn.Linear(patch_dim, dim)
self.cls_token = nn.Parameter(torch.randn(1, 1, dim))
self.transformer = Transformer(dim, depth, heads, mlp_dim)
self.to_cls_token = nn.Identity()
self.mlp_head = nn.Sequential(
nn.Linear(dim, mlp_dim),
nn.GELU(),
nn.Linear(mlp_dim, num_classes)
)
def forward(self, img, mask=None):
p = self.patch_size
x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
x = self.patch_to_embedding(x)
cls_tokens = self.cls_token.expand(img.shape[0], -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
x += self.pos_embedding
x = self.transformer(x, mask)
x = self.to_cls_token(x[:, 0])
return self.mlp_head(x)
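# A quick shape check (an illustrative sketch, not part of the original file):
# with a small MNIST-style configuration, a batch of two 1 x 28 x 28 images
# should map to a 2 x 10 tensor of class logits.
if __name__ == "__main__":
    model = ViT(image_size=28, patch_size=7, num_classes=10, channels=1,
                dim=64, depth=6, heads=8, mlp_dim=128)
    logits = model(torch.randn(2, 1, 28, 28))
    print(logits.shape)  # expected: torch.Size([2, 10])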
|
import torch
import torchvision
import time
from vit_pytorch import *
from torch.utils.mobile_optimizer import optimize_for_mobile
torch.manual_seed(42)
DOWNLOAD_PATH = 'data/mnist'
BATCH_SIZE_TRAIN = 100
BATCH_SIZE_TEST = 1000
# 0.1307 and 0.3081 are the mean and std computed on the MNIST training set
transform_mnist = torchvision.transforms.Compose([torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize((0.1307,), (0.3081,))])
train_set = torchvision.datasets.MNIST(DOWNLOAD_PATH, train=True, download=True,
transform=transform_mnist)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=BATCH_SIZE_TRAIN, shuffle=True)
test_set = torchvision.datasets.MNIST(DOWNLOAD_PATH, train=False, download=True,
transform=transform_mnist)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=BATCH_SIZE_TEST, shuffle=True)
def train_epoch(model, optimizer, data_loader, loss_history):
total_samples = len(data_loader.dataset)
model.train()
for i, (data, target) in enumerate(data_loader):
optimizer.zero_grad()
output = F.log_softmax(model(data), dim=1)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if i % 100 == 0:
print('[' + '{:5}'.format(i * len(data)) + '/' + '{:5}'.format(total_samples) +
' (' + '{:3.0f}'.format(100 * i / len(data_loader)) + '%)] Loss: ' +
'{:6.4f}'.format(loss.item()))
loss_history.append(loss.item())
def evaluate(model, data_loader, loss_history):
model.eval()
total_samples = len(data_loader.dataset)
correct_samples = 0
total_loss = 0
with torch.no_grad():
for data, target in data_loader:
output = F.log_softmax(model(data), dim=1)
loss = F.nll_loss(output, target, reduction='sum')
_, pred = torch.max(output, dim=1)
total_loss += loss.item()
correct_samples += pred.eq(target).sum()
avg_loss = total_loss / total_samples
loss_history.append(avg_loss)
print('\nAverage test loss: ' + '{:.4f}'.format(avg_loss) +
' Accuracy:' + '{:5}'.format(correct_samples) + '/' +
'{:5}'.format(total_samples) + ' (' +
'{:4.2f}'.format(100.0 * correct_samples / total_samples) + '%)\n')
N_EPOCHS = 25
start_time = time.time()
model = ViT(image_size=28, patch_size=7, num_classes=10, channels=1,
dim=64, depth=6, heads=8, mlp_dim=128)
optimizer = torch.optim.Adam(model.parameters(), lr=0.003)
train_loss_history, test_loss_history = [], []
for epoch in range(1, N_EPOCHS + 1):
print('Epoch:', epoch)
train_epoch(model, optimizer, train_loader, train_loss_history)
evaluate(model, test_loader, test_loss_history)
print('Execution time:', '{:5.2f}'.format(time.time() - start_time), 'seconds')
with torch.no_grad():
for data, target in test_loader:
output = F.log_softmax(model(data), dim=1)
loss = F.nll_loss(output, target, reduction='sum')
_, pred = torch.max(output, dim=1)
# the original trained model
torch.save(model, "vit4mnist.pt")
model = torch.load("vit4mnist.pt")
model.eval()
quantized_model = torch.quantization.quantize_dynamic(model, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
dummy_input = torch.zeros(1, 1, 28, 28)
ts_model = torch.jit.trace(quantized_model, dummy_input)
optimized_torchscript_model = optimize_for_mobile(ts_model)
# the quantized, scripted, and optimized model
optimized_torchscript_model._save_for_lite_interpreter("app/src/main/assets/vit4mnist.ptl")
|
#!/usr/bin/env python3
import contextlib
import copy
import os
import unittest
from PIL import Image
import torch
from d2go.export.api import convert_and_export_predictor
from d2go.export.d2_meta_arch import patch_d2_meta_arch
from d2go.runner import create_runner, GeneralizedRCNNRunner
from d2go.model_zoo import model_zoo
from mobile_cv.common.misc.file_utils import make_temp_directory
from d2go.tests.data_loader_helper import LocalImageGenerator, register_toy_dataset
patch_d2_meta_arch()
@contextlib.contextmanager
def create_fake_detection_data_loader(height, width, is_train):
with make_temp_directory("detectron2go_tmp_dataset") as dataset_dir:
runner = create_runner("d2go.runner.GeneralizedRCNNRunner")
cfg = runner.get_default_cfg()
cfg.DATASETS.TRAIN = ["default_dataset_train"]
cfg.DATASETS.TEST = ["default_dataset_test"]
with make_temp_directory("detectron2go_tmp_dataset") as dataset_dir:
image_dir = os.path.join(dataset_dir, "images")
os.makedirs(image_dir)
image_generator = LocalImageGenerator(image_dir, width=width, height=height)
if is_train:
with register_toy_dataset(
"default_dataset_train", image_generator, num_images=3
):
train_loader = runner.build_detection_train_loader(cfg)
yield train_loader
else:
with register_toy_dataset(
"default_dataset_test", image_generator, num_images=3
):
test_loader = runner.build_detection_test_loader(
cfg, dataset_name="default_dataset_test"
)
yield test_loader
def test_export_torchvision_format():
cfg_name = 'faster_rcnn_fbnetv3a_dsmask_C4.yaml'
pytorch_model = model_zoo.get(cfg_name, trained=True)
from typing import List, Dict
class Wrapper(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
coco_idx_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77,
78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90, 91]
self.coco_idx = torch.tensor(coco_idx_list)
def forward(self, inputs: List[torch.Tensor]):
x = inputs[0].unsqueeze(0) * 255
scale = 320.0 / min(x.shape[-2], x.shape[-1])
x = torch.nn.functional.interpolate(x, scale_factor=scale, mode="bilinear", align_corners=True, recompute_scale_factor=True)
out = self.model(x[0])
res : Dict[str, torch.Tensor] = {}
res["boxes"] = out[0] / scale
res["labels"] = torch.index_select(self.coco_idx, 0, out[1])
res["scores"] = out[2]
return inputs, [res]
size_divisibility = max(pytorch_model.backbone.size_divisibility, 10)
h, w = size_divisibility, size_divisibility * 2
with create_fake_detection_data_loader(h, w, is_train=False) as data_loader:
predictor_path = convert_and_export_predictor(
model_zoo.get_config(cfg_name),
copy.deepcopy(pytorch_model),
"torchscript_int8@tracing",
'./',
data_loader,
)
orig_model = torch.jit.load(os.path.join(predictor_path, "model.jit"))
wrapped_model = Wrapper(orig_model)
# optionally do a forward
wrapped_model([torch.rand(3, 600, 600)])
scripted_model = torch.jit.script(wrapped_model)
scripted_model.save("ObjectDetection/app/src/main/assets/d2go.pt")
if __name__ == '__main__':
test_export_torchvision_format()
|
import torch
import torchvision
from torch.utils.mobile_optimizer import optimize_for_mobile
model = torchvision.models.mobilenet_v3_small(pretrained=True)
model.eval()
example = torch.rand(1, 3, 224, 224)
traced_script_module = torch.jit.trace(model, example)
optimized_traced_model = optimize_for_mobile(traced_script_module)
optimized_traced_model._save_for_lite_interpreter("app/src/main/assets/model.pt")
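# Optional sanity check (a sketch, not part of the original recipe): confirm the optimized
# TorchScript module produces (nearly) the same output as the eager model on the same input.
with torch.no_grad():
    eager_out = model(example)
    mobile_out = optimized_traced_model(example)
print("max abs diff vs. eager model:", (eager_out - mobile_out).abs().max().item())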
|
'''
USAGE:
python create_csv.py
'''
import pandas as pd
import numpy as np
import os
import joblib
from sklearn.preprocessing import LabelBinarizer
from tqdm import tqdm
from imutils import paths
# get all the image paths
image_paths = list(paths.list_images('preprocessed_image'))
# create a DataFrame
data = pd.DataFrame()
labels = []
for i, image_path in tqdm(enumerate(image_paths), total=len(image_paths)):
label = image_path.split(os.path.sep)[-2]
# save the relative path for mapping image to target
data.loc[i, 'image_path'] = image_path
labels.append(label)
labels = np.array(labels)
# one hot encode the labels
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
print(f"The first one hot encoded labels: {labels[0]}")
print(f"Mapping the first one hot encoded label to its category: {lb.classes_[0]}")
print(f"Total instances: {len(labels)}")
for i in range(len(labels)):
index = np.argmax(labels[i])
data.loc[i, 'target'] = int(index)
# shuffle the dataset
data = data.sample(frac=1).reset_index(drop=True)
# save as CSV file
data.to_csv('data.csv', index=False)
# pickle the binarized labels
print('Saving the binarized labels as pickled file')
joblib.dump(lb, 'lb.pkl')
print(data.head(5))
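# Quick verification sketch (assumes data.csv and lb.pkl were just written above): reload
# both artifacts and map the first row's integer target back to its category name.
df_check = pd.read_csv('data.csv')
lb_check = joblib.load('lb.pkl')
first_target = int(df_check.loc[0, 'target'])
print(f"Row 0: {df_check.loc[0, 'image_path']} -> {lb_check.classes_[first_target]}")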
|
'''
USAGE:
python preprocess_image.py --num-images 1200
'''
import os
import cv2
import random
import numpy as np
import argparse
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--num-images', default=1200, type=int,
help='number of images to preprocess for each category')
args = vars(parser.parse_args())
print(f"Preprocessing {args['num_images']} from each category...")
# get all the directory paths
dir_paths = os.listdir('asl_alphabet_train/asl_alphabet_train')
dir_paths.sort()
root_path = 'asl_alphabet_train/asl_alphabet_train'
# get --num-images images from each category
for idx, dir_path in tqdm(enumerate(dir_paths), total=len(dir_paths)):
all_images = os.listdir(f"{root_path}/{dir_path}")
os.makedirs(f"preprocessed_image/{dir_path}", exist_ok=True)
for i in range(args['num_images']): # how many images to preprocess for each category
# generate a random id between 0 and 2999
rand_id = (random.randint(0, 2999))
image = cv2.imread(f"{root_path}/{dir_path}/{all_images[rand_id]}")
image = cv2.resize(image, (224, 224))
cv2.imwrite(f"preprocessed_image/{dir_path}/{dir_path}{i}.jpg", image)
print('DONE')
|
import torch.nn as nn
import torch.nn.functional as F
import joblib
# load the binarized labels
print('Loading label binarizer...')
lb = joblib.load('lb.pkl')
class CustomCNN(nn.Module):
def __init__(self):
super(CustomCNN, self).__init__()
self.conv1 = nn.Conv2d(3, 16, 5)
self.conv2 = nn.Conv2d(16, 32, 5)
self.conv3 = nn.Conv2d(32, 64, 3)
self.conv4 = nn.Conv2d(64, 128, 5)
self.fc1 = nn.Linear(128, 256)
self.fc2 = nn.Linear(256, len(lb.classes_))
self.pool = nn.MaxPool2d(2, 2)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = self.pool(F.relu(self.conv3(x)))
x = self.pool(F.relu(self.conv4(x)))
bs, _, _, _ = x.shape
x = F.adaptive_avg_pool2d(x, 1).reshape(bs, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
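# Shape sanity check (a sketch; assumes lb.pkl exists, since it is read at import time above).
if __name__ == '__main__':
    import torch
    dummy = torch.randn(1, 3, 224, 224)
    out = CustomCNN()(dummy)
    print(out.shape)  # expected: torch.Size([1, len(lb.classes_)])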
|
import torch
import joblib
import cnn_models
from torch.utils.mobile_optimizer import optimize_for_mobile
lb = joblib.load('lb.pkl')
model = cnn_models.CustomCNN()
model.load_state_dict(torch.load('asl.pth'))
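# Put the model in inference mode before scripting (a precaution; CustomCNN has no dropout
# or batch-norm layers, so it does not change the exported graph here).
model.eval()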
scripted_module = torch.jit.script(model)
optimized_scripted_module = optimize_for_mobile(scripted_module)
optimized_scripted_module._save_for_lite_interpreter("asl.ptl")
|
'''
USAGE:
python test.py --img A_test.jpg
'''
import torch
import joblib
import torch.nn as nn
import numpy as np
import cv2
import argparse
import torchvision.transforms as transforms
import torch.nn.functional as F
import time
import cnn_models
from PIL import Image
# construct the argument parser and parse the arguments
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--img', default='../app/src/main/assets/C1.jpg', type=str,
help='path for the image to test on')
args = vars(parser.parse_args())
aug = transforms.Compose([
transforms.Resize((224, 224)),
])
# load label binarizer
lb = joblib.load('lb.pkl')
model = cnn_models.CustomCNN()
model.load_state_dict(torch.load('asl.pth'))
print(model)
print('Model loaded')
image = Image.open(f"{args['img']}")
image = aug(image)
image = np.transpose(image, (2, 0, 1)).astype(np.float32)
image = torch.tensor(image, dtype=torch.float)
image = image.unsqueeze(0)
print(image.shape)
start = time.time()
outputs = model(image)
_, preds = torch.max(outputs.data, 1)
print('PREDS', preds)
print(f"Predicted output: {lb.classes_[preds]}")
end = time.time()
print(f"{(end-start):.3f} seconds")
|
'''
USAGE:
python train.py --epochs 10
'''
import pandas as pd
import joblib
import numpy as np
import torch
import random
from PIL import Image
import matplotlib.pyplot as plt
import argparse
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
import time
import cnn_models
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, DataLoader
# construct the argument parser and parse the arguments
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--epochs', default=10, type=int,
help='number of epochs to train the model for')
args = vars(parser.parse_args())
''' SEED Everything '''
def seed_everything(SEED=42):
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
torch.backends.cudnn.benchmark = True
SEED=42
seed_everything(SEED=SEED)
''' SEED Everything '''
# set computation device
device = ('cuda:0' if torch.cuda.is_available() else 'cpu')
print(f"Computation device: {device}")
# read the data.csv file and get the image paths and labels
df = pd.read_csv('data.csv')
X = df.image_path.values
y = df.target.values
(xtrain, xtest, ytrain, ytest) = (train_test_split(X, y,
test_size=0.15, random_state=42))
print(f"Training on {len(xtrain)} images")
print(f"Validationg on {len(xtest)} images")
# image dataset module
class ASLImageDataset(Dataset):
def __init__(self, path, labels):
self.X = path
self.y = labels
# apply augmentations
self.aug = transforms.Compose([
transforms.Resize((224, 224))
])
def __len__(self):
return (len(self.X))
def __getitem__(self, i):
image = Image.open(self.X[i])
image = self.aug(image)
image = np.transpose(image, (2, 0, 1)).astype(np.float32)
label = self.y[i]
return torch.tensor(image, dtype=torch.float), torch.tensor(label, dtype=torch.long)
train_data = ASLImageDataset(xtrain, ytrain)
test_data = ASLImageDataset(xtest, ytest)
# dataloaders
trainloader = DataLoader(train_data, batch_size=32, shuffle=True)
testloader = DataLoader(test_data, batch_size=32, shuffle=False)
# model = models.MobileNetV2(pretrained=True, requires_grad=False)
model = cnn_models.CustomCNN().to(device)
print(model)
# total parameters and trainable parameters
total_params = sum(p.numel() for p in model.parameters())
print(f"{total_params:,} total parameters.")
total_trainable_params = sum(
p.numel() for p in model.parameters() if p.requires_grad)
print(f"{total_trainable_params:,} training parameters.")
# optimizer
optimizer = optim.Adam(model.parameters(), lr=0.001)
# loss function
criterion = nn.CrossEntropyLoss()
# training function
def fit(model, dataloader):
print('Training')
model.train()
running_loss = 0.0
running_correct = 0
for i, data in tqdm(enumerate(dataloader), total=int(len(train_data)/dataloader.batch_size)):
data, target = data[0].to(device), data[1].to(device)
optimizer.zero_grad()
outputs = model(data)
loss = criterion(outputs, target)
running_loss += loss.item()
_, preds = torch.max(outputs.data, 1)
running_correct += (preds == target).sum().item()
loss.backward()
optimizer.step()
train_loss = running_loss/len(dataloader.dataset)
train_accuracy = 100. * running_correct/len(dataloader.dataset)
print(f"Train Loss: {train_loss:.4f}, Train Acc: {train_accuracy:.2f}")
return train_loss, train_accuracy
#validation function
def validate(model, dataloader):
print('Validating')
model.eval()
running_loss = 0.0
running_correct = 0
with torch.no_grad():
for i, data in tqdm(enumerate(dataloader), total=int(len(test_data)/dataloader.batch_size)):
data, target = data[0].to(device), data[1].to(device)
outputs = model(data)
loss = criterion(outputs, target)
running_loss += loss.item()
_, preds = torch.max(outputs.data, 1)
running_correct += (preds == target).sum().item()
val_loss = running_loss/len(dataloader.dataset)
val_accuracy = 100. * running_correct/len(dataloader.dataset)
print(f'Val Loss: {val_loss:.4f}, Val Acc: {val_accuracy:.2f}')
return val_loss, val_accuracy
train_loss , train_accuracy = [], []
val_loss , val_accuracy = [], []
start = time.time()
for epoch in range(args['epochs']):
print(f"Epoch {epoch+1} of {args['epochs']}")
train_epoch_loss, train_epoch_accuracy = fit(model, trainloader)
val_epoch_loss, val_epoch_accuracy = validate(model, testloader)
train_loss.append(train_epoch_loss)
train_accuracy.append(train_epoch_accuracy)
val_loss.append(val_epoch_loss)
val_accuracy.append(val_epoch_accuracy)
print("loss: {val_epoch_loss}, accuracy: {val_epoch_accuracy}")
end = time.time()
print('Saving model...')
torch.save(model.state_dict(), 'asl.pth')
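# Plotting sketch (an addition, not part of the original script; matplotlib is already
# imported above as plt): save the tracked loss/accuracy curves for later inspection.
plt.figure()
plt.plot(train_loss, label='train loss')
plt.plot(val_loss, label='val loss')
plt.legend()
plt.savefig('loss.png')
plt.figure()
plt.plot(train_accuracy, label='train acc')
plt.plot(val_accuracy, label='val acc')
plt.legend()
plt.savefig('accuracy.png')
print(f"Total training time: {(end - start) / 60:.2f} minutes")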
|
"""test_bench.py
Runs hub models in benchmark mode using pytest-benchmark. Run setup separately first.
Usage:
python install.py
pytest test_bench.py
See pytest-benchmark help (pytest test_bench.py -h) for additional options
e.g. --benchmark-autosave
--benchmark-compare
-k <filter expression>
...
"""
import os
import pytest
import time
import torch
from components._impl.workers import subprocess_worker
from torchbenchmark import _list_model_paths, ModelTask, get_metadata_from_yaml
from torchbenchmark.util.machine_config import get_machine_state
from torchbenchmark.util.metadata_utils import skip_by_metadata
def pytest_generate_tests(metafunc):
# This is where the list of models to test can be configured
# e.g. by using info in metafunc.config
devices = ['cpu', 'cuda']
if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
devices.append('mps')
if metafunc.config.option.cpu_only:
devices = ['cpu']
if metafunc.config.option.cuda_only:
devices = ['cuda']
if metafunc.config.option.mps_only:
devices = ['mps']
if metafunc.cls and metafunc.cls.__name__ == "TestBenchNetwork":
paths = _list_model_paths()
metafunc.parametrize(
'model_path', paths,
ids=[os.path.basename(path) for path in paths],
scope="class")
metafunc.parametrize('device', devices, scope='class')
@pytest.mark.benchmark(
warmup=True,
warmup_iterations=3,
disable_gc=False,
timer=time.perf_counter,
group='hub',
)
class TestBenchNetwork:
def test_train(self, model_path, device, compiler, benchmark):
try:
if skip_by_metadata(test="train", device=device, extra_args=[], \
metadata=get_metadata_from_yaml(model_path)):
raise NotImplementedError("Test skipped by its metadata.")
# TODO: skipping quantized tests for now due to BC-breaking changes for prepare
# api, enable after PyTorch 1.13 release
if "quantized" in model_path:
return
task = ModelTask(model_path)
if not task.model_details.exists:
return # Model is not supported.
task.make_model_instance(test="train", device=device)
benchmark(task.invoke)
benchmark.extra_info['machine_state'] = get_machine_state()
benchmark.extra_info['batch_size'] = task.get_model_attribute('batch_size')
benchmark.extra_info['precision'] = task.get_model_attribute("dargs", "precision")
benchmark.extra_info['test'] = 'train'
except NotImplementedError:
print(f'Test train on {device} is not implemented, skipping...')
def test_eval(self, model_path, device, compiler, benchmark, pytestconfig):
try:
if skip_by_metadata(test="eval", device=device, extra_args=[], \
metadata=get_metadata_from_yaml(model_path)):
raise NotImplementedError("Test skipped by its metadata.")
# TODO: skipping quantized tests for now due to BC-breaking changes for prepare
# api, enable after PyTorch 1.13 release
if "quantized" in model_path:
return
task = ModelTask(model_path)
if not task.model_details.exists:
return # Model is not supported.
task.make_model_instance(test="eval", device=device)
with task.no_grad(disable_nograd=pytestconfig.getoption("disable_nograd")):
benchmark(task.invoke)
benchmark.extra_info['machine_state'] = get_machine_state()
benchmark.extra_info['batch_size'] = task.get_model_attribute('batch_size')
benchmark.extra_info['precision'] = task.get_model_attribute("dargs", "precision")
benchmark.extra_info['test'] = 'eval'
except NotImplementedError:
print(f'Test eval on {device} is not implemented, skipping...')
@pytest.mark.benchmark(
warmup=True,
warmup_iterations=3,
disable_gc=False,
timer=time.perf_counter,
group='hub',
)
class TestWorker:
"""Benchmark SubprocessWorker to make sure we aren't skewing results."""
def test_worker_noop(self, benchmark):
worker = subprocess_worker.SubprocessWorker()
benchmark(lambda: worker.run("pass"))
def test_worker_store(self, benchmark):
worker = subprocess_worker.SubprocessWorker()
benchmark(lambda: worker.store("x", 1))
def test_worker_load(self, benchmark):
worker = subprocess_worker.SubprocessWorker()
worker.store("x", 1)
benchmark(lambda: worker.load("x"))
|
import os
import traceback
import argparse
import importlib
from pathlib import Path
from typing import Dict
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
def list_benchmarks() -> Dict[str, str]:
benchmarks = {}
import userbenchmark
bdir = Path(userbenchmark.__file__).parent.resolve()
fb_bdir = bdir.joinpath("fb")
if fb_bdir.exists():
for fb_bm in filter(lambda x: x.is_dir(), fb_bdir.iterdir()):
benchmarks[fb_bm.name] = f"fb.{fb_bm.name}"
for bm in filter(lambda x: x.is_dir() and not x.name == "fb", bdir.iterdir()):
benchmarks[bm.name] = bm.name
return benchmarks
def run():
available_benchmarks = list_benchmarks()
parser = argparse.ArgumentParser(description="Run a TorchBench user benchmark")
parser.add_argument("bm_name", choices=available_benchmarks.keys(), help='name of the user benchmark')
args, bm_args = parser.parse_known_args()
try:
benchmark = importlib.import_module(f"userbenchmark.{available_benchmarks[args.bm_name]}.run")
benchmark.run(bm_args)
except ImportError as e:
print(f"Failed to import user benchmark module {args.bm_name}, error: {str(e)}")
traceback.print_exc()
if __name__ == "__main__":
run()
|
"""
A lightweight runner that just sets up a model and runs one of its functions in a particular configuration.
Intended for debugging/exploration/profiling use cases, where the test/measurement harness is overhead.
DANGER: make sure to `python install.py` first or otherwise make sure the benchmark you are going to run
has been installed. This script intentionally does not automate or enforce setup steps.
Wall time is reported as a sanity check, but it is not a sound benchmark measurement.
"""
import argparse
import logging
import random
import string
import time
import traceback
from datetime import datetime
from functools import partial
import numpy as np
import torch
import torch.profiler as profiler
from torchbenchmark import (
load_canary_model_by_name,
load_model_by_name,
ModelNotFoundError,
)
from torchbenchmark.util.experiment.metrics import get_model_flops, get_peak_memory
if not hasattr(torch.version, "git_version"):
from pytorch.benchmark.fb.run_utils import trace_handler, usage_report_logger
else:
usage_report_logger = lambda: None
WARMUP_ROUNDS = 3
SUPPORT_DEVICE_LIST = ["cpu", "cuda"]
if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
SUPPORT_DEVICE_LIST.append("mps")
SUPPORT_PROFILE_LIST = [
"record_shapes",
"profile_memory",
"with_stack",
"with_flops",
"with_modules",
]
def run_one_step_with_cudastreams(func, streamcount):
print("Running Utilization Scaling Using Cuda Streams")
streamlist = []
for i in range(1, streamcount + 1, 1):
# create additional streams and prime with load
while len(streamlist) < i :
s = torch.cuda.Stream()
streamlist.append(s)
for s in streamlist:
with torch.cuda.stream(s):
func()
torch.cuda.synchronize() # Wait for the events to be recorded!
# now run benchmark using streams
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
for s in streamlist:
with torch.cuda.stream(s):
func()
end_event.record()
torch.cuda.synchronize()
print(f"Cuda StreamCount:{len(streamlist)}")
print('{:<20} {:>20}'.format("GPU Time:", "%.3f milliseconds" % start_event.elapsed_time(end_event)), sep='')
def printResultSummaryTime(result_summary, metrics_needed=[], model=None, flops_model_analyzer=None, model_flops=None, cpu_peak_mem=None, mem_device_id=None, gpu_peak_mem=None):
if args.device == "cuda":
gpu_time = np.median(list(map(lambda x: x[0], result_summary)))
cpu_walltime = np.median(list(map(lambda x: x[1], result_summary)))
if hasattr(model, "NUM_BATCHES"):
print('{:<20} {:>20}'.format("GPU Time per batch:", "%.3f milliseconds" %
(gpu_time / model.NUM_BATCHES), sep=''))
print('{:<20} {:>20}'.format("CPU Wall Time per batch:", "%.3f milliseconds" %
(cpu_walltime / model.NUM_BATCHES), sep=''))
else:
print('{:<20} {:>20}'.format("GPU Time:", "%.3f milliseconds" % gpu_time, sep=''))
print('{:<20} {:>20}'.format("CPU Total Wall Time:", "%.3f milliseconds" % cpu_walltime, sep=''))
else:
cpu_walltime = np.median(list(map(lambda x: x[0], result_summary)))
print('{:<20} {:>20}'.format("CPU Total Wall Time:", "%.3f milliseconds" % cpu_walltime, sep=''))
# if model_flops is not None, output the TFLOPs per sec
if 'flops' in metrics_needed:
if flops_model_analyzer.metrics_backend_mapping['flops'] == 'dcgm':
tflops_device_id, tflops = flops_model_analyzer.calculate_flops()
else:
flops = model.get_flops()
tflops = flops / (cpu_walltime / 1.0e3) / 1.0e12
print('{:<20} {:>20}'.format("GPU FLOPS:", "%.4f TFLOPs per second" % tflops, sep=''))
if model_flops is not None:
tflops = model_flops / (cpu_walltime / 1.0e3) / 1.0e12
print('{:<20} {:>20}'.format("Model Flops:", "%.4f TFLOPs per second" % tflops, sep=''))
if gpu_peak_mem is not None:
print('{:<20} {:>20}'.format("GPU %d Peak Memory:" % mem_device_id, "%.4f GB" % gpu_peak_mem, sep=''))
if cpu_peak_mem is not None:
print('{:<20} {:>20}'.format("CPU Peak Memory:", "%.4f GB" % cpu_peak_mem, sep=''))
def run_one_step(func, nwarmup=WARMUP_ROUNDS, num_iter=10, model=None, export_metrics_file=None, stress=0, metrics_needed=[], metrics_gpu_backend=None):
# Warm-up `nwarmup` rounds
for _i in range(nwarmup):
func()
result_summary = []
flops_model_analyzer = None
if 'flops' in metrics_needed:
from components.model_analyzer.TorchBenchAnalyzer import ModelAnalyzer
flops_model_analyzer = ModelAnalyzer(export_metrics_file, ['flops'], metrics_gpu_backend)
flops_model_analyzer.start_monitor()
if stress:
cur_time = time.time_ns()
start_time = cur_time
target_time = stress * 1e9 + start_time
num_iter = -1
last_time = start_time
_i = 0
last_it = 0
first_print_out = True
while (not stress and _i < num_iter) or (stress and cur_time < target_time) :
if args.device == "cuda":
torch.cuda.synchronize()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
            # Collect time_ns() instead of time(), since time() is not guaranteed to
            # provide better than 1-second precision
            # (see https://docs.python.org/3/library/time.html#time.time).
t0 = time.time_ns()
start_event.record()
func()
end_event.record()
torch.cuda.synchronize()
t1 = time.time_ns()
result_summary.append((start_event.elapsed_time(end_event), (t1 - t0) / 1_000_000))
elif args.device == "mps":
t0 = time.time_ns()
func()
t1 = time.time_ns()
wall_latency = t1 - t0
# TODO: modify this to add GPU time as well
result_summary.append([(t1 - t0) / 1_000_000])
else:
t0 = time.time_ns()
func()
t1 = time.time_ns()
result_summary.append([(t1 - t0) / 1_000_000])
if stress:
cur_time = time.time_ns()
# print out the status every 10s.
if (cur_time - last_time) >= 10 * 1e9:
if first_print_out:
print('|{:^20}|{:^20}|{:^20}|'.format("Iterations", "Time/Iteration(ms)", "Rest Time(s)"))
first_print_out = False
est = (target_time - cur_time) / 1e9
time_per_it = (cur_time - last_time) / (_i - last_it) / 1e6
print('|{:^20}|{:^20}|{:^20}|'.format("%d" % _i, "%.2f" % time_per_it , "%d" % int(est)))
last_time = cur_time
last_it = _i
_i += 1
if flops_model_analyzer is not None:
flops_model_analyzer.stop_monitor()
flops_model_analyzer.aggregate()
cpu_peak_mem = None
gpu_peak_mem = None
mem_device_id = None
model_flops = None
if 'cpu_peak_mem' in metrics_needed or 'gpu_peak_mem' in metrics_needed:
cpu_peak_mem, mem_device_id, gpu_peak_mem = get_peak_memory(func, model.device, export_metrics_file=export_metrics_file, metrics_needed=metrics_needed, metrics_gpu_backend=metrics_gpu_backend)
if 'model_flops' in metrics_needed:
model_flops = get_model_flops(model)
printResultSummaryTime(result_summary, metrics_needed, model, flops_model_analyzer, model_flops, cpu_peak_mem, mem_device_id, gpu_peak_mem)
def profile_one_step(func, nwarmup=WARMUP_ROUNDS):
activity_groups = []
result_summary = []
device_to_activity = {'cuda': profiler.ProfilerActivity.CUDA, 'cpu': profiler.ProfilerActivity.CPU}
if args.profile_devices:
activity_groups = [
device_to_activity[device] for device in args.profile_devices if (device in device_to_activity)
]
else:
if args.device == 'cuda':
activity_groups = [
profiler.ProfilerActivity.CUDA,
profiler.ProfilerActivity.CPU,
]
elif args.device == 'cpu':
activity_groups = [profiler.ProfilerActivity.CPU]
profile_opts = {}
for opt in SUPPORT_PROFILE_LIST:
profile_opts[opt] = True if args.profile_options is not None and opt in args.profile_options else False
if args.profile_eg:
from datetime import datetime
import os
from torch.profiler import ExecutionTraceObserver
start_time = datetime.now()
timestamp = int(datetime.timestamp(start_time))
eg_file = f"{args.model}_{timestamp}_eg.json"
eg = ExecutionTraceObserver()
if not os.path.exists(args.profile_eg_folder):
os.makedirs(args.profile_eg_folder)
eg.register_callback(f"{args.profile_eg_folder}/{eg_file}")
nwarmup = 0
eg.start()
with profiler.profile(
schedule=profiler.schedule(wait=0, warmup=nwarmup, active=1, repeat=1),
activities=activity_groups,
record_shapes=args.profile_detailed if args.profile_detailed else profile_opts["record_shapes"],
profile_memory=args.profile_detailed if args.profile_detailed else profile_opts["profile_memory"],
with_stack=args.profile_detailed if args.profile_detailed else profile_opts["with_stack"],
with_flops=args.profile_detailed if args.profile_detailed else profile_opts["with_flops"],
with_modules=args.profile_detailed if args.profile_detailed else profile_opts["with_modules"],
on_trace_ready= partial(trace_handler, f"torchbench_{args.model}") if (not hasattr(torch.version, "git_version") and args.profile_export_chrome_trace) else profiler.tensorboard_trace_handler(args.profile_folder),
) as prof:
if args.device == "cuda":
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
for i in range(nwarmup + 1):
t0 = time.time_ns()
start_event.record()
func()
torch.cuda.synchronize() # Need to sync here to match run_one_step()'s timed run.
end_event.record()
t1 = time.time_ns()
if i >= nwarmup:
result_summary.append((start_event.elapsed_time(end_event), (t1 - t0) / 1_000_000))
prof.step()
else:
for i in range(nwarmup + 1):
t0 = time.time_ns()
func()
t1 = time.time_ns()
if i >= nwarmup:
result_summary.append([(t1 - t0) / 1_000_000])
prof.step()
if args.profile_eg and eg:
eg.stop()
eg.unregister_callback()
print(f"Save Exeution Trace to : {args.profile_eg_folder}/{eg_file}")
print(prof.key_averages(group_by_input_shape=True).table(sort_by="cpu_time_total", row_limit=30))
print(f"Saved TensorBoard Profiler traces to {args.profile_folder}.")
printResultSummaryTime(result_summary)
def _validate_devices(devices: str):
devices_list = devices.split(",")
valid_devices = SUPPORT_DEVICE_LIST
for d in devices_list:
if d not in valid_devices:
raise ValueError(f'Invalid device {d} passed into --profile-devices. Expected devices: {valid_devices}.')
return devices_list
def _validate_profile_options(profile_options: str):
profile_options_list = profile_options.split(",")
for opt in profile_options_list:
if opt not in SUPPORT_PROFILE_LIST:
raise ValueError(f'Invalid profile option {opt} passed into --profile-options. Expected options: {SUPPORT_PROFILE_LIST}.')
return profile_options_list
if __name__ == "__main__":
parser = argparse.ArgumentParser(__doc__)
parser.add_argument(
"model",
help="Full or partial name of a model to run. If partial, picks the first match.",
)
parser.add_argument(
"-d",
"--device",
choices=SUPPORT_DEVICE_LIST,
default="cpu",
help="Which device to use.",
)
parser.add_argument(
"-t",
"--test",
choices=["eval", "train"],
default="eval",
help="Which test to run.",
)
parser.add_argument(
"--profile", action="store_true", help="Run the profiler around the function"
)
parser.add_argument(
"--profile-options",
type=_validate_profile_options,
help=f"Select which profile options to enable. Valid options: {SUPPORT_PROFILE_LIST}.",
)
parser.add_argument("--amp", action="store_true", help="enable torch.autocast()")
parser.add_argument(
"--profile-folder",
default="./logs",
help="Save profiling model traces to this directory.",
)
parser.add_argument(
"--profile-detailed",
action="store_true",
help=f"Enable all profile options, including {SUPPORT_PROFILE_LIST}. Overrides --profile-options.",
)
parser.add_argument(
"--profile-export-chrome-trace",
action="store_true",
help="Export Chrome tracing files. (internal only)",
)
parser.add_argument(
"--profile-devices",
type=_validate_devices,
help="Profile comma separated list of activities such as cpu,cuda.",
)
parser.add_argument(
"--profile-eg", action="store_true", help="Collect execution trace by PARAM"
)
parser.add_argument(
"--profile-eg-folder",
default="./eg_logs",
help="Save execution traces to this directory.",
)
parser.add_argument(
"--cudastreams",
action="store_true",
help="Utilization test using increasing number of cuda streams.",
)
parser.add_argument("--bs", type=int, help="Specify batch size to the test.")
parser.add_argument(
"--export-metrics",
action="store_true",
help="Export all specified metrics records to a csv file. The default csv file name is [model_name]_all_metrics.csv.",
)
parser.add_argument(
"--stress",
type=float,
default=0,
help="Specify execution time (seconds) to stress devices.",
)
parser.add_argument(
"--metrics",
type=str,
default="cpu_peak_mem,gpu_peak_mem",
help="Specify metrics [cpu_peak_mem,gpu_peak_mem,flops,model_flops]to be collected. You can also set `none` to disable all metrics. The metrics are separated by comma such as cpu_peak_mem,gpu_peak_mem.",
)
parser.add_argument(
"--metrics-gpu-backend",
choices=["dcgm", "default"],
default="default",
help="""Specify the backend [dcgm, default] to collect metrics. \nIn default mode, the latency(execution time) is collected by time.time_ns() and it is always enabled. Optionally,
\n - you can specify cpu peak memory usage by --metrics cpu_peak_mem, and it is collected by psutil.Process(). \n - you can specify gpu peak memory usage by --metrics gpu_peak_mem, and it is collected by nvml library.\n - you can specify flops by --metrics flops, and it is collected by fvcore.\nIn dcgm mode, the latency(execution time) is collected by time.time_ns() and it is always enabled. Optionally,\n - you can specify cpu peak memory usage by --metrics cpu_peak_mem, and it is collected by psutil.Process().\n - you can specify cpu and gpu peak memory usage by --metrics cpu_peak_mem,gpu_peak_mem, and they are collected by dcgm library.""",
)
parser.add_argument(
"--channels-last", action="store_true", help="enable torch.channels_last()"
)
args, extra_args = parser.parse_known_args()
if args.cudastreams and not args.device == "cuda":
print("cuda device required to use --cudastreams option!")
exit(-1)
# Log the tool usage
usage_report_logger()
found = False
Model = None
try:
Model = load_model_by_name(args.model)
except ModuleNotFoundError:
traceback.print_exc()
exit(-1)
except ModelNotFoundError:
print(f"Warning: The model {args.model} cannot be found at core set.")
if not Model:
try:
Model = load_canary_model_by_name(args.model)
except ModuleNotFoundError:
traceback.print_exc()
exit(-1)
except ModelNotFoundError:
print(
f"Error: The model {args.model} cannot be found at either core or canary model set."
)
exit(-1)
m = Model(
device=args.device,
test=args.test,
batch_size=args.bs,
extra_args=extra_args,
)
if m.dynamo:
mode = f"dynamo {m.opt_args.torchdynamo}"
elif m.opt_args.backend:
mode = f"{m.opt_args.backend}"
else:
mode = "eager"
print(
f"Running {args.test} method from {Model.name} on {args.device} in {mode} mode with input batch size {m.batch_size} and precision {m.dargs.precision}."
)
if "--accuracy" in extra_args:
print("{:<20} {:>20}".format("Accuracy: ", str(m.accuracy)), sep="")
exit(0)
if args.channels_last:
m.enable_channels_last()
test = m.invoke
if args.amp:
test = torch.autocast(m.device)(test)
metrics_needed = (
[_ for _ in args.metrics.split(",") if _.strip()] if args.metrics else []
)
if "none" in metrics_needed:
metrics_needed = []
# only enabled gpu_peak_mem for cuda device
if args.device != "cuda" and "gpu_peak_mem" in metrics_needed:
metrics_needed.remove("gpu_peak_mem")
metrics_needed = list(set(metrics_needed))
metrics_gpu_backend = args.metrics_gpu_backend
if metrics_needed:
if metrics_gpu_backend == "dcgm":
from components.model_analyzer.TorchBenchAnalyzer import check_dcgm
check_dcgm()
elif "gpu_peak_mem" in metrics_needed:
from components.model_analyzer.TorchBenchAnalyzer import check_nvml
check_nvml()
if "gpu_peak_mem" in metrics_needed or (
"flops" in metrics_needed and metrics_gpu_backend == "dcgm"
):
assert (
args.device == "cuda"
), "gpu_peak_mem and flops:dcgm are only available for cuda device."
if "flops" in metrics_needed and metrics_gpu_backend == "default":
assert hasattr(
m, "get_flops"
), f"The model {args.model} does not support calculating flops."
m.get_flops()
if args.export_metrics:
if not args.metrics:
print("You have to specifiy at least one metrics to export.")
exit(-1)
export_metrics_file = "%s_all_metrics.csv" % args.model
else:
export_metrics_file = None
if args.profile:
profile_one_step(test)
elif args.cudastreams:
run_one_step_with_cudastreams(test, 10)
else:
run_one_step(
test,
model=m,
export_metrics_file=export_metrics_file,
stress=args.stress,
metrics_needed=metrics_needed,
metrics_gpu_backend=args.metrics_gpu_backend,
)
# Print dynamo compilation metrics, if there are any.
try:
if m.pt2_compilation_time:
print(
"{:<20} {:>18}".format(
"PT2 Compilation time: ", "%.3f seconds" % m.pt2_compilation_time
),
sep="",
)
if m.pt2_graph_breaks:
print(
"{:<20} {:>18}".format(
"PT2 Graph Breaks: ", "%.3f" % m.pt2_graph_breaks
),
sep="",
)
except:
pass
|
import os
import pytest
import torch
from torchbenchmark.util.machine_config import get_machine_config, check_machine_configured
def pytest_addoption(parser):
parser.addoption("--fuser", help="Use one of the available fusers: te, old, nvfuser", default="te", choices=["te", "old", "nvfuser"])
parser.addoption("--ignore_machine_config",
action='store_true',
help="Disable checks/assertions for machine configuration for stable benchmarks")
parser.addoption("--disable_nograd", action='store_true',
help="Disable no_grad for eval() runs")
parser.addoption("--cpu_only", action='store_true',
help="Run benchmarks on cpu only and ignore machine configuration checks")
parser.addoption("--cuda_only", action='store_true',
help="Run benchmarks on cuda only and ignore machine configuration checks")
parser.addoption("--mps_only", action='store_true',
help="Run benchmarks on mps only and ignore machine configuration checks")
def set_fuser(fuser):
if fuser == "te":
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(True)
elif fuser == "old":
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(False)
elif fuser == "nvfuser":
os.environ['PYTORCH_NVFUSER_DISABLE_FALLBACK'] = '1'
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_can_fuse_on_cpu()
torch._C._jit_can_fuse_on_gpu()
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_nvfuser_guard_mode(True)
torch._C._jit_set_nvfuser_enabled(True)
else:
# pytest_addoption should always set the default fuser
assert(False)
def pytest_sessionstart(session):
try:
check_machine_configured()
except Exception as e:
if not session.config.getoption('ignore_machine_config'):
pytest.exit(f"{e}\nSee README.md for machine tuning script usage, or use --ignore_machine_config")
def pytest_configure(config):
set_fuser(config.getoption("fuser"))
def pytest_benchmark_update_machine_info(config, machine_info):
machine_info['pytorch_version'] = torch.__version__
machine_info['pytorch_git_version'] = torch.version.git_version
machine_info['cuda_version'] = torch.version.cuda
try:
import torchvision
machine_info['torchvision_version'] = torchvision.__version__
except ImportError:
machine_info['torchvision_version'] = '*not-installed*'
machine_info['github_run_id'] = os.environ.get("GITHUB_RUN_ID")
machine_info['torchbench_score_version'] = os.environ.get("TORCHBENCH_VER")
try:
# if running on unexpected machine/os, get_machine_config _may_ not work
machine_info['torchbench_machine_config'] = get_machine_config()
except Exception:
if not config.getoption('ignore_machine_config'):
raise
|
import time
import torch
import argparse
import json
from dataclasses import asdict
from torchbenchmark.e2e import E2EBenchmarkResult, load_e2e_model_by_name
from typing import Dict
SUPPORT_DEVICE_LIST = ["cpu", "cuda"]
def run(func) -> Dict[str, float]:
if torch.cuda.is_available():
torch.cuda.synchronize()
result = {}
    # Collect time_ns() instead of time(), since time() is not guaranteed to provide
    # better than 1-second precision (see https://docs.python.org/3/library/time.html#time.time).
t0 = time.time_ns()
func()
if torch.cuda.is_available():
torch.cuda.synchronize()
t2 = time.time_ns()
result["latency_ms"] = (t2 - t0) / 1_000_000.0
return result
def gen_result(m, run_result):
num_epochs = getattr(m, "num_epochs", 1)
r = E2EBenchmarkResult(device=m.device, device_num=m.device_num,
test=m.test, num_examples=m.num_examples,
num_epochs=num_epochs, batch_size=m.batch_size, result=dict())
r.result["latency"] = run_result["latency_ms"] / 1000.0
r.result["qps"] = r.num_examples / r.result["latency"] * r.num_epochs
# add accuracy result if available
if hasattr(m, "accuracy"):
r.result["accuracy"] = m.accuracy
return r
if __name__ == "__main__":
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("model", help="Full name of the end-to-end model.")
parser.add_argument("-t", "--test", choices=["eval", "train"], default="eval", help="Which test to run.")
parser.add_argument("--bs", type=int, help="Specify batch size.")
args, extra_args = parser.parse_known_args()
found = False
Model = load_e2e_model_by_name(args.model)
if not Model:
print(f"Unable to find model matching {args.model}.")
exit(-1)
m = Model(test=args.test, batch_size=args.bs, extra_args=extra_args)
test = getattr(m, args.test)
result = gen_result(m, run(test))
result_json = json.dumps(asdict(result))
print(result_json)
|
"""
A Benchmark Summary Metadata tool to extract and generate metadata from models at runtime.
"""
import argparse
from copy import deepcopy
import os
import yaml
from typing import Any, Dict, List, Tuple
import torch
from torchbenchmark import list_models, load_model_by_name, _list_model_paths, ModelTask, ModelDetails, str_to_bool
TIMEOUT = 300 # seconds
torchbench_dir = 'torchbenchmark'
model_dir = 'models'
_DEFAULT_METADATA_ = {
'train_benchmark': True,
'train_deterministic': False,
'eval_benchmark': True,
'eval_deterministic': False,
'eval_nograd': True,
# 'origin': None,
# 'train_dtype': 'float32',
# 'eval_dtype': 'float32',
}
def _parser_helper(input):
return None if input is None else str_to_bool(str(input))
def _process_model_details_to_metadata(train_detail: ModelDetails, eval_detail: ModelDetails) -> Dict[str, Any]:
metadata = {}
for k, v in _DEFAULT_METADATA_.items():
if hasattr(train_detail, k):
metadata[k] = getattr(train_detail, k)
elif train_detail and k in train_detail.metadata:
metadata[k] = train_detail.metadata[k]
elif eval_detail and k in eval_detail.metadata:
metadata[k] = eval_detail.metadata[k]
else:
metadata[k] = v
return metadata
def _extract_detail(path: str) -> Dict[str, Any]:
name = os.path.basename(path)
device = "cuda"
t_detail = None
e_detail = None
# Separate train and eval to isolated processes.
task_t = ModelTask(path, timeout=TIMEOUT)
try:
task_t.make_model_instance(device=device)
task_t.set_train()
task_t.train()
task_t.extract_details_train()
task_t.del_model_instance()
t_detail = deepcopy(task_t._details)
except NotImplementedError:
print(f'Model {name} train is not fully implemented. skipping...')
del task_t
task_e = ModelTask(path, timeout=TIMEOUT)
try:
task_e.make_model_instance(device=device)
task_e.set_eval()
task_e.eval()
task_e.extract_details_eval()
task_e.del_model_instance()
e_detail = deepcopy(task_e._details)
except NotImplementedError:
print(f'Model {name} eval is not fully implemented. skipping...')
del task_e
return _process_model_details_to_metadata(t_detail, e_detail)
def _extract_all_details(model_names: List[str]) -> List[Tuple[str, Dict[str, Any]]]:
details = []
for model_path in _list_model_paths():
model_name = os.path.basename(model_path)
if model_name not in model_names:
continue
ed = _extract_detail(model_path)
details.append((model_path, ed))
return details
def _print_extracted_details(extracted_details: List[Tuple[str, Dict[str, Any]]]):
for path, ex_detail in extracted_details:
name = os.path.basename(path)
print(f'Model: {name} , Details: {ex_detail}')
def _maybe_override_extracted_details(args, extracted_details: List[Tuple[str, Dict[str, Any]]]):
for _path, ex_detail in extracted_details:
if args.train_benchmark is not None:
ex_detail['train_benchmark'] = args.train_benchmark
elif args.train_deterministic is not None:
ex_detail['train_deterministic'] = args.train_deterministic
elif args.eval_benchmark is not None:
ex_detail['eval_benchmark'] = args.eval_benchmark
elif args.eval_deterministic is not None:
ex_detail['eval_deterministic'] = args.eval_deterministic
elif args.eval_nograd is not None:
ex_detail['eval_nograd'] = args.eval_nograd
def _write_metadata_yaml_files(extracted_details: List[Tuple[str, Dict[str, Any]]]):
for path, ex_detail in extracted_details:
metadata_path = path + "/metadata.yaml"
with open(metadata_path, 'w') as file:
yaml.dump(ex_detail, file)
print(f"Processed file: {metadata_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("--model", default=None,
help="Full name of a model to update. If absent, applies to all models.")
parser.add_argument("--extract-only", default=False, action="store_true",
help="Only extract model details.")
parser.add_argument("--train-benchmark", default=None, type=_parser_helper,
help="Whether to enable PyTorch benchmark mode during train.")
parser.add_argument("--train-deterministic", default=None, type=_parser_helper,
help="Whether to enable deterministic during train.")
parser.add_argument("--eval-benchmark", default=None, type=_parser_helper,
help="Whether to enable PyTorch benchmark mode during eval.")
parser.add_argument("--eval-deterministic", default=None, type=_parser_helper,
help="Whether to enable deterministic during eval.")
parser.add_argument("--eval-nograd", default=None, type=_parser_helper,
help="Whether to enable no_grad during eval.")
# parser.add_argument("--origin", default=None,
# help="Location of benchmark's origin. Such as torchaudio or torchvision.")
# parser.add_argument("--train-dtype", default=None,
# choices=['float32', 'float16', 'bfloat16', 'amp'], help="Which fp type to perform training.")
# parser.add_argument("--eval-dtype", default=None,
# choices=['float32', 'float16', 'bfloat16', 'amp'], help="Which fp type to perform eval.")
args = parser.parse_args()
# Only allow this script for cuda for now.
if not torch.cuda.is_available():
print("This tool is currently only supported when the system has a cuda device.")
exit(1)
# Find the matching model, or use all models.
models = []
model_names = []
if args.model is not None:
Model = load_model_by_name(args.model)
if not Model:
print(f"Unable to find model matching: {args.model}.")
exit(-1)
models.append(Model)
model_names.append(Model.name)
print(f"Generating metadata to select model: {model_names}.")
else:
models.extend(list_models(model_match=args.model))
model_names.extend([m.name for m in models])
print("Generating metadata to all models.")
# Extract all model details from models.
extracted_details = _extract_all_details(model_names)
print("Printing extracted metadata.")
_print_extracted_details(extracted_details)
# Stop here for extract-only.
if args.extract_only:
print("--extract-only is set. Stop here.")
exit(0)
# Apply details passed in by flags.
_maybe_override_extracted_details(args, extracted_details)
print("Printing metadata after applying any modifications.")
_print_extracted_details(extracted_details)
# TODO: Modify and update the model to apply metadata changes by the user.
# Generate metadata files for each matching models.
_write_metadata_yaml_files(extracted_details)
|
"""test.py
Setup and Run hub models.
Make sure to enable an https proxy if necessary, or the setup steps may hang.
"""
# This file shows how to use the benchmark suite from user end.
import gc
import functools
import os
import traceback
import unittest
from unittest.mock import patch
import yaml
import torch
from torchbenchmark import _list_model_paths, ModelTask, get_metadata_from_yaml
from torchbenchmark.util.metadata_utils import skip_by_metadata
# Some of the models have very heavyweight setup, so we have to set a very
# generous limit. That said, we don't want the entire test suite to hang if
# a single test encounters an extreme failure, so we give up after a test is
# unresponsive to 5 minutes by default. (Note: this does not require that the
# entire test case completes in 5 minutes. It requires that if the worker is
# unresponsive for 5 minutes the parent will presume it dead / incapacitated.)
TIMEOUT = int(os.getenv("TIMEOUT", 300)) # Seconds
class TestBenchmark(unittest.TestCase):
def setUp(self):
gc.collect()
def tearDown(self):
gc.collect()
def _create_example_model_instance(task: ModelTask, device: str):
skip = False
try:
task.make_model_instance(test="eval", device=device, extra_args=["--accuracy"])
except NotImplementedError:
try:
task.make_model_instance(test="train", device=device, extra_args=["--accuracy"])
except NotImplementedError:
skip = True
finally:
if skip:
raise NotImplementedError(f"Model is not implemented on the device {device}")
def _load_test(path, device):
def _skip_cuda_memory_check_p(metadata):
if device != "cuda":
return True
if "skip_cuda_memory_leak" in metadata and metadata["skip_cuda_memory_leak"]:
return True
return False
def example_fn(self):
task = ModelTask(path, timeout=TIMEOUT)
with task.watch_cuda_memory(skip=_skip_cuda_memory_check_p(metadata), assert_equal=self.assertEqual):
try:
_create_example_model_instance(task, device)
accuracy = task.get_model_attribute("accuracy")
                assert accuracy == "pass" or accuracy == "eager_1st_run_OOM", f"Expected accuracy pass, got {accuracy}"
task.del_model_instance()
except NotImplementedError:
self.skipTest(f'Method `get_module()` on {device} is not implemented, skipping...')
def train_fn(self):
metadata = get_metadata_from_yaml(path)
task = ModelTask(path, timeout=TIMEOUT)
allow_customize_batch_size = task.get_model_attribute("ALLOW_CUSTOMIZE_BSIZE", classattr=True)
# to speedup test, use batch size 1 if possible
batch_size = 1 if allow_customize_batch_size else None
with task.watch_cuda_memory(skip=_skip_cuda_memory_check_p(metadata), assert_equal=self.assertEqual):
try:
task.make_model_instance(test="train", device=device, batch_size=batch_size)
task.invoke()
task.check_details_train(device=device, md=metadata)
task.del_model_instance()
except NotImplementedError:
self.skipTest(f'Method train on {device} is not implemented, skipping...')
def eval_fn(self):
metadata = get_metadata_from_yaml(path)
task = ModelTask(path, timeout=TIMEOUT)
allow_customize_batch_size = task.get_model_attribute("ALLOW_CUSTOMIZE_BSIZE", classattr=True)
# to speedup test, use batch size 1 if possible
batch_size = 1 if allow_customize_batch_size else None
with task.watch_cuda_memory(skip=_skip_cuda_memory_check_p(metadata), assert_equal=self.assertEqual):
try:
task.make_model_instance(test="eval", device=device, batch_size=batch_size)
task.invoke()
task.check_details_eval(device=device, md=metadata)
task.check_eval_output()
task.del_model_instance()
except NotImplementedError:
self.skipTest(f'Method eval on {device} is not implemented, skipping...')
def check_device_fn(self):
task = ModelTask(path, timeout=TIMEOUT)
with task.watch_cuda_memory(skip=_skip_cuda_memory_check_p(metadata), assert_equal=self.assertEqual):
try:
task.make_model_instance(test="eval", device=device)
task.check_device()
task.del_model_instance()
except NotImplementedError:
self.skipTest(f'Method check_device on {device} is not implemented, skipping...')
name = os.path.basename(path)
metadata = get_metadata_from_yaml(path)
for fn, fn_name in zip([example_fn, train_fn, eval_fn, check_device_fn],
["example", "train", "eval", "check_device"]):
# set exclude list based on metadata
setattr(TestBenchmark, f'test_{name}_{fn_name}_{device}',
(unittest.skipIf(skip_by_metadata(test=fn_name, device=device, extra_args=[], metadata=metadata), \
"This test is skipped by its metadata")(fn)))
def _load_tests():
devices = ['cpu']
if torch.cuda.is_available():
devices.append('cuda')
if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
devices.append('mps')
if device := os.getenv('ACCELERATOR'):
devices.append(device)
for path in _list_model_paths():
# TODO: skipping quantized tests for now due to BC-breaking changes for prepare
# api, enable after PyTorch 1.13 release
if "quantized" in path:
continue
for device in devices:
_load_test(path, device)
_load_tests()
if __name__ == '__main__':
unittest.main()
|
import argparse
import subprocess
import os
import sys
from utils import TORCH_DEPS, proxy_suggestion, get_pkg_versions, _test_https
from userbenchmark import list_userbenchmarks
from pathlib import Path
REPO_ROOT = Path(__file__).parent
def pip_install_requirements(requirements_txt="requirements.txt"):
if not _test_https():
print(proxy_suggestion)
sys.exit(-1)
try:
subprocess.run([sys.executable, '-m', 'pip', 'install', '-q', '-r', requirements_txt],
check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
return (False, e.output)
except Exception as e:
return (False, e)
return True, None
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("models", nargs='*', default=[],
help="Specify one or more models to install. If not set, install all models.")
parser.add_argument("--test-mode", action="store_true", help="Run in test mode and check package versions")
parser.add_argument("--canary", action="store_true", help="Install canary model.")
parser.add_argument("--continue_on_fail", action="store_true")
parser.add_argument("--verbose", "-v", action="store_true")
parser.add_argument("--userbenchmark", choices=list_userbenchmarks(), help="Install requirements for optional components.")
args = parser.parse_args()
os.chdir(os.path.realpath(os.path.dirname(__file__)))
print(f"checking packages {', '.join(TORCH_DEPS)} are installed...", end="", flush=True)
try:
versions = get_pkg_versions(TORCH_DEPS)
except ModuleNotFoundError as e:
print("FAIL")
print(f"Error: Users must first manually install packages {TORCH_DEPS} before installing the benchmark.")
sys.exit(-1)
print("OK")
if args.userbenchmark:
# Install userbenchmark dependencies if exists
userbenchmark_dir = REPO_ROOT.joinpath("userbenchmark", args.userbenchmark)
if userbenchmark_dir.joinpath("install.py").is_file():
subprocess.check_call([sys.executable, "install.py"], cwd=userbenchmark_dir.absolute())
sys.exit(0)
success, errmsg = pip_install_requirements()
if not success:
print("Failed to install torchbenchmark requirements:")
print(errmsg)
if not args.continue_on_fail:
sys.exit(-1)
from torchbenchmark import setup
success &= setup(models=args.models, verbose=args.verbose, continue_on_fail=args.continue_on_fail, test_mode=args.test_mode, allow_canary=args.canary)
if not success:
if args.continue_on_fail:
print("Warning: some benchmarks were not installed due to failure")
else:
raise RuntimeError("Failed to complete setup")
new_versions = get_pkg_versions(TORCH_DEPS)
if versions != new_versions:
print(f"The torch packages are re-installed after installing the benchmark deps. \
Before: {versions}, after: {new_versions}")
sys.exit(-1)
|
"""
The regression detector of TorchBench Userbenchmark.
"""
import json
import argparse
import importlib
from dataclasses import asdict
import os
import yaml
from pathlib import Path
import time
from datetime import datetime
from typing import Any, List, Dict, Optional
from userbenchmark.utils import PLATFORMS, USERBENCHMARK_OUTPUT_PREFIX, REPO_PATH, \
TorchBenchABTestResult, get_date_from_metrics, \
get_ub_name, get_latest_files_in_s3_from_last_n_days, get_date_from_metrics_s3_key
from utils.s3_utils import S3Client, USERBENCHMARK_S3_BUCKET, USERBENCHMARK_S3_OBJECT
GITHUB_ISSUE_TEMPLATE = """
TorchBench CI has detected a performance signal or runtime regression.
Base PyTorch commit: {start}
Affected PyTorch commit: {end}
Affected Tests:
{test_details}
Tests that were no longer run on affected commit:
{control_only_tests}
Tests that were newly added on affected commit:
{treatment_only_tests}
Runtime regressions found?
{runtime_regressions_msg}
GitHub workflow that triggered this issue: {github_run_url}
cc {owner}
"""
DEFAULT_GH_ISSUE_OWNER = "@xuzhao9"
def get_default_output_path(bm_name: str) -> str:
    # By default, write the result to $REPO_DIR/.userbenchmark/<userbenchmark-name>/regression-<timestamp>.yaml
output_path = os.path.join(REPO_PATH, USERBENCHMARK_OUTPUT_PREFIX, bm_name)
fname = "regression-{}.yaml".format(datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S"))
return os.path.join(output_path, fname)
def generate_regression_result(control: Dict[str, Any], treatment: Dict[str, Any]) -> TorchBenchABTestResult:
def _call_userbenchmark_detector(detector, control: Dict[str, Any], treatment: Dict[str, Any]) -> TorchBenchABTestResult:
return detector(control, treatment)
assert control["name"] == treatment["name"], f'Expected the same userbenchmark name from metrics files, \
but getting {control["name"]} and {treatment["name"]}.'
bm_name = control["name"]
detector = importlib.import_module(f"userbenchmark.{bm_name}.regression_detector").run
# Process control and treatment to include only shared keys
filtered_control_metrics = {}
control_only_metrics = {}
filtered_treatment_metrics = {}
treatment_only_metrics = {}
for control_name, control_metric in control["metrics"].items():
if control_name in treatment["metrics"]:
filtered_control_metrics[control_name] = control_metric
else:
control_only_metrics[control_name] = control_metric
for treatment_name, treatment_metric in treatment["metrics"].items():
if treatment_name in control["metrics"]:
filtered_treatment_metrics[treatment_name] = treatment_metric
else:
treatment_only_metrics[treatment_name] = treatment_metric
control["metrics"] = filtered_control_metrics
treatment["metrics"] = filtered_treatment_metrics
assert filtered_control_metrics.keys() == filtered_treatment_metrics.keys()
# Local file comparison, return the regression detection result object
result = _call_userbenchmark_detector(detector, control, treatment)
result.control_only_metrics = control_only_metrics
result.treatment_only_metrics = treatment_only_metrics
return result
def process_regressions_into_yaml(regression_result: TorchBenchABTestResult, output_path: str, control_file: str, treatment_file: str) -> None:
if not len(regression_result.details) and \
not len(regression_result.control_only_metrics) and \
not len(regression_result.treatment_only_metrics):
print(f"No performance signal detected between file {control_file} and {treatment_file}.")
return
# create the output directory if doesn't exist
output_dir = Path(os.path.dirname(output_path))
output_dir.mkdir(parents=True, exist_ok=True)
output_yaml_str = yaml.safe_dump(asdict(regression_result), sort_keys=False)
print(output_yaml_str)
with open(output_path, "w") as ofptr:
ofptr.write(output_yaml_str)
print(f"Wrote above yaml to {output_path}.")
def process_regressions_into_gh_issue(regression_result: TorchBenchABTestResult, owner: str, output_path: str, errors_path: str) -> None:
regressions_dict = asdict(regression_result)
troubled_tests = ""
for test, stats in regressions_dict["details"].items():
delta = stats["delta"]
if delta != 0:
sign = "+" if delta > 0 else ""
troubled_tests += f"- {test}: {sign}{delta:.5%}\n"
control_only_tests = ""
for test, stat in regressions_dict["control_only_metrics"].items():
control_only_tests += f"- {test}: {stat}\n"
treatment_only_tests = ""
for test, stat in regressions_dict["treatment_only_metrics"].items():
treatment_only_tests += f"- {test}: {stat}\n"
control_commit = regressions_dict["control_env"]["pytorch_git_version"]
treatment_commit = regressions_dict["treatment_env"]["pytorch_git_version"]
runtime_regressions_msg = "No runtime errors were found in the " + \
"new benchmarks run--you are all good there!"
errors_log_exists = Path(errors_path).exists()
if errors_log_exists:
runtime_regressions_msg = "An errors log was found. Please investigate runtime " + \
"errors by looking into the logs of the workflow linked."
if troubled_tests == "" and control_only_tests == "" and treatment_only_tests == "" and not errors_log_exists:
print(f"No regressions found between {control_commit} and {treatment_commit}.")
return
if "GITHUB_ENV" in os.environ:
fname = os.environ["GITHUB_ENV"]
content = f"TORCHBENCH_REGRESSION_DETECTED='{treatment_commit}'\n"
with open(fname, 'a') as fo:
fo.write(content)
github_run_id = os.environ.get("GITHUB_RUN_ID", None)
github_run_url = "No URL found, please look for the failing action in " + \
"https://github.com/pytorch/benchmark/actions"
if github_run_id is not None:
github_run_url = f"https://github.com/pytorch/benchmark/actions/runs/{github_run_id}"
issue_config: Dict[str, str] = {
"start": control_commit,
"end": treatment_commit,
"test_details": troubled_tests,
"control_only_tests": control_only_tests,
"treatment_only_tests": treatment_only_tests,
"runtime_regressions_msg": runtime_regressions_msg,
"github_run_url": github_run_url,
"owner": owner
}
issue_body = GITHUB_ISSUE_TEMPLATE.format(**issue_config)
print(issue_body)
with open(output_path, "w") as f:
f.write(issue_body)
def get_best_start_date(latest_metrics_jsons: List[str], end_date: datetime) -> Optional[datetime]:
"""Get the date closest to `end_date` from `latest_metrics_jsons`"""
for metrics_json in latest_metrics_jsons:
start_datetime = get_date_from_metrics_s3_key(metrics_json)
if start_datetime < end_date:
return start_datetime
return None
def get_metrics_by_date(latest_metrics_jsons: List[str], pick_date: datetime):
pick_metrics_json_key: Optional[str] = None
for metrics_json_key in latest_metrics_jsons:
metric_datetime = get_date_from_metrics_s3_key(metrics_json_key)
        # Use the latest metric file on the same day
if metric_datetime.date() == pick_date.date():
pick_metrics_json_key = metrics_json_key
break
assert pick_metrics_json_key, f"Selected date {pick_date} is not found in the latest_metrics_jsons: {latest_metrics_jsons}"
s3 = S3Client(USERBENCHMARK_S3_BUCKET, USERBENCHMARK_S3_OBJECT)
metrics_json = s3.get_file_as_json(pick_metrics_json_key)
return (metrics_json, pick_metrics_json_key)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Local metrics file comparison
parser.add_argument("--control", default=None, help="The control group metrics file for comparison. "
"If unprovided, will attempt to download and compare the previous JSON from S3 "
"within the past week. The platform flag must be specified in this case.")
parser.add_argument("--treatment", default=None, help="The treatment metrics file for comparison.")
# S3 metrics file comparison
parser.add_argument("--name", help="Name of the userbenchmark to detect regression.")
parser.add_argument("--platform", choices=PLATFORMS, default=None, help="The name of platform of the regression.")
parser.add_argument("--start-date", default=None, help="The start date to detect regression.")
parser.add_argument("--end-date", default=None, help="The latest date to detect regression.")
# download from S3
parser.add_argument("--download-from-s3", action='store_true', help="Only download the existing regression yaml file from S3." \
"The regression yaml file can be used for bisection.")
# output file path
parser.add_argument("--output", default=None, help="Output path to print the regression detection file.")
# GitHub issue details
parser.add_argument("--owner", nargs="*", default=[DEFAULT_GH_ISSUE_OWNER], help="Owner(s) to cc on regression issues, e.g., @janeyx99.")
parser.add_argument("--gh-issue-path", default="gh-issue.md", help="Output path to print the issue body")
parser.add_argument("--errors-path", default="errors.txt",
help="Path to errors log generated by the benchmarks run. " +
"Its existence ONLY is used to detect whether runtime regressions occurred.")
args = parser.parse_args()
owner = " ".join(args.owner) if args.owner else DEFAULT_GH_ISSUE_OWNER
# User provided both control and treatment files
if args.control and args.treatment:
with open(args.control, "r") as cfptr:
control = json.load(cfptr)
with open(args.treatment, "r") as tfptr:
treatment = json.load(tfptr)
output_path = args.output if args.output else get_default_output_path(control["name"])
regression_result = generate_regression_result(control, treatment)
process_regressions_into_yaml(regression_result, output_path, args.control, args.treatment)
process_regressions_into_gh_issue(regression_result, owner, args.gh_issue_path, args.errors_path)
exit(0)
# Query S3 to get control and treatment json files
if not args.platform:
raise ValueError("A platform must be specified with the --platform flag to retrieve the "
"previous metrics JSONs as control from S3.")
    # User only provided the treatment file; download the control metrics from S3
control, treatment = None, None
if not args.control and args.treatment:
json_path = Path(args.treatment)
assert json_path.exists(), f"Specified result json path {args.treatment} does not exist."
end_date: datetime = datetime.strptime(get_date_from_metrics(json_path.stem), "%Y-%m-%d")
userbenchmark_name: str = get_ub_name(args.treatment)
with open(json_path, "r") as cfptr:
treatment = json.load(cfptr)
else:
assert args.name, f"To detect regression with S3, you must specify a userbenchmark name."
userbenchmark_name = args.name
end_date = datetime.strptime(args.end_date, "%Y-%m-%d")
# Only download the existing regression YAML file from S3
if args.download_from_s3:
assert args.output, f"You must specify a regression output file path for S3 download."
regression_yaml_cond = lambda x: x.endswith('.yaml') and 'regression' in x
available_regression_yamls = get_latest_files_in_s3_from_last_n_days(userbenchmark_name, args.platform, end_date, regression_yaml_cond, ndays=1)
if not len(available_regression_yamls):
raise RuntimeError(f"No regression yaml found on S3 for end date {end_date}, userbenchmark {userbenchmark_name}, and platform {args.platform}")
latest_regression_yaml = available_regression_yamls[0]
s3 = S3Client(USERBENCHMARK_S3_BUCKET, USERBENCHMARK_S3_OBJECT)
regression_yaml = s3.get_file_as_yaml(latest_regression_yaml)
with open(args.output, "w") as rf:
yaml.safe_dump(regression_yaml, rf)
print(f"Downloaded the regression yaml file to path {args.output}")
exit(0)
metrics_json_cond = lambda x: x.endswith('.json') and 'metrics' in x
available_metrics_jsons = get_latest_files_in_s3_from_last_n_days(userbenchmark_name, args.platform, end_date, metrics_json_cond, ndays=7)
# Download control from S3
if len(available_metrics_jsons) == 0:
raise RuntimeError(f"No previous JSONS in a week found to compare towards the end date {end_date}. No regression info has been generated.")
print(f"Found metrics json files on S3: {available_metrics_jsons}")
    start_date = datetime.strptime(args.start_date, "%Y-%m-%d") if args.start_date else get_best_start_date(available_metrics_jsons, end_date)
if not start_date:
raise RuntimeError(f"No start date in previous JSONS found to compare towards the end date {end_date}. User specified start date: {args.start_date}. " +
f"Available JSON dates: {available_metrics_jsons.keys()}. No regression info has been generated.")
print(f"[TorchBench Regression Detector] Detecting regression of {userbenchmark_name} on platform {args.platform}, start date: {start_date}, end date: {end_date}.")
(control, control_file) = get_metrics_by_date(available_metrics_jsons, start_date) if not control else (control, args.control)
(treatment, treatment_file) = get_metrics_by_date(available_metrics_jsons, end_date) if not treatment else (treatment, args.treatment)
regression_result = generate_regression_result(control, treatment)
output_path = args.output if args.output else get_default_output_path(control["name"])
process_regressions_into_yaml(regression_result, output_path, control_file, treatment_file)
process_regressions_into_gh_issue(regression_result, owner, args.gh_issue_path, args.errors_path)
|
"""bisection.py
Runs bisection to determine PRs that trigger performance signals.
It assumes that the pytorch, torchbench, torchvision, and torchaudio repositories provided are all clean with the latest code.
By default, the torchaudio and torchvision packages will be pinned to the latest commit on the same date as the pytorch commit.
Usage:
python bisection.py --work-dir <WORK_DIR> \
--torch-repos-path <PYTORCH_REPOS_PATH> \
--torchbench-repo-path <TORCHBENCH_SRC_DIR> \
--config <BISECT_CONFIG> --output <OUTPUT_FILE_PATH>
"""
import argparse
import os
import sys
import json
import time
import shutil
import yaml
from pathlib import Path
import subprocess
from datetime import datetime
from dataclasses import asdict
from typing import Optional, List, Dict, Tuple, Any, Callable
from userbenchmark.utils import (
TorchBenchABTestResult,
parse_abtest_result_from_regression_file_for_bisect
)
from regression_detector import generate_regression_result
from utils import gitutils
from utils.build_utils import (
setup_bisection_build_env,
build_repo,
cleanup_torch_packages,
TorchRepo,
)
from utils.cuda_utils import prepare_cuda_env, DEFAULT_CUDA_VERSION
TORCHBENCH_BISECTION_TARGETS = {
"pytorch": {
"name": "pytorch",
"url": "https://github.com/pytorch/pytorch.git",
"build_command": [sys.executable, "setup.py", "install"],
},
"torchdata": {
"name": "data",
"url": "https://github.com/pytorch/data.git",
"build_command": [sys.executable, "setup.py", "install"],
},
"torchvision": {
"name": "vision",
"url": "https://github.com/pytorch/vision.git",
"build_command": [sys.executable, "setup.py", "install"],
},
"torchaudio": {
"name": "audio",
"url": "https://github.com/pytorch/audio.git",
"build_command": [sys.executable, "setup.py", "clean", "develop"],
},
"torchbench": {
"name": "benchmark",
"url": "https://github.com/pytorch/benchmark.git",
"build_command": [sys.executable, "install.py"],
},
}
SKIP_INSTALL_TORCHBENCH = False
def exist_dir_path(string):
if os.path.isdir(string):
return string
else:
raise NotADirectoryError(string)
def exist_file_path(string):
if not os.path.exists(string):
raise FileNotFoundError(string)
elif os.path.isdir(string):
        raise IsADirectoryError(string)
else:
return string
def get_latest_non_empty_file(directory: str, cond: Callable) -> Optional[str]:
if os.path.isdir(directory):
filelist = [ os.path.join(directory, f) for f in os.listdir(directory) ]
non_empty_filelist = [ f for f in filelist if os.path.getsize(f) and cond(f) ]
if len(non_empty_filelist):
return max(non_empty_filelist, key=os.path.getctime)
return None
def get_updated_clean_torch_repos(pytorch_repos_path: str,
torchbench_repo_path: Optional[str]=None,
skip_update_repos: Optional[List[str]]=None) -> Dict[str, TorchRepo]:
all_repos = {}
def _gen_torch_repo(repo_name: str, repo_path: str):
assert repo_path.exists() and repo_path.is_dir(), f"{str(repo_path)} is not an existing directory."
main_branch = "main" if not "main_branch" in TORCHBENCH_BISECTION_TARGETS[repo_name] else \
TORCHBENCH_BISECTION_TARGETS[repo_name]["main_branch"]
if not skip_update_repos or not repo_name in skip_update_repos:
gitutils.cleanup_local_changes(repo_path.absolute())
assert gitutils.update_git_repo(repo_path.absolute(), main_branch)
assert gitutils.clean_git_repo(repo_path.absolute())
cur_commit = gitutils.get_current_commit(repo_path.absolute())
return TorchRepo(name=repo_name,
origin_url=TORCHBENCH_BISECTION_TARGETS[repo_name]["url"],
main_branch=main_branch,
src_path=repo_path,
cur_commit=cur_commit,
build_command=TORCHBENCH_BISECTION_TARGETS[repo_name]["build_command"])
for repo_name in TORCHBENCH_BISECTION_TARGETS.keys():
repo_subdir_name = TORCHBENCH_BISECTION_TARGETS[repo_name]["name"]
repo_path = Path(pytorch_repos_path).joinpath(repo_subdir_name) if not (torchbench_repo_path and repo_name == "torchbench") \
else Path(torchbench_repo_path)
all_repos[repo_name] = _gen_torch_repo(repo_name, repo_path)
return all_repos
class Commit:
sha: str
ctime: str
digest: Optional[Dict[str, Any]]
def __init__(self, sha, ctime):
self.sha = sha
self.ctime = ctime
self.digest = None
def __str__(self):
return self.sha
class BisectionTargetRepo:
repo: TorchRepo
start: str
end: str
non_target_repos: List[TorchRepo]
# generated in prep()
bisection_env: os._Environ
commits: List[Commit]
# Map from commit SHA to its index in commits
commit_dict: Dict[str, int]
def __init__(self, repo: TorchRepo, start: str, end: str, non_target_repos: List[TorchRepo]):
self.repo = repo
self.start = start
self.end = end
self.non_target_repos = non_target_repos
self.commits = []
self.commit_dict = dict()
# Checkout the last commit of non-target repos on date
def _checkout_non_target_repos(self, cdate: datetime):
for repo in self.non_target_repos:
gitutils.checkout_git_branch(repo.src_path.absolute(), repo.main_branch)
dep_commit = gitutils.get_git_commit_on_date(repo.src_path.absolute(), cdate)
assert dep_commit, f"Failed to find the commit on {cdate} of {repo.name}"
print(f"Checking out {repo.name} commit {dep_commit} ...", end="", flush=True)
assert gitutils.checkout_git_commit(repo.src_path.absolute(), dep_commit), \
f"Failed to checkout commit {dep_commit} of {repo.name}"
print("done.")
def prep(self) -> bool:
base_build_env = prepare_cuda_env(cuda_version=DEFAULT_CUDA_VERSION)
self.bisection_env = setup_bisection_build_env(base_build_env)
commits = gitutils.get_git_commits(self.repo.src_path, self.start, self.end)
if not commits or len(commits) < 2:
print(f"Failed to retrieve commits from {self.start} to {self.end} in {self.repo.src_path}.")
return False
for count, commit in enumerate(commits):
ctime = gitutils.get_git_commit_date(self.repo.src_path, commit)
self.commits.append(Commit(sha=commit, ctime=ctime))
self.commit_dict[commit] = count
return True
def get_mid_commit(self, left: Commit, right: Commit) -> Optional[Commit]:
left_index = self.commit_dict[left.sha]
right_index = self.commit_dict[right.sha]
if right_index == left_index + 1:
return None
else:
return self.commits[int((left_index + right_index) / 2)]
def build(self, commit: Commit):
# checkout target repo commit
print(f"====================== [TORCHBENCH] Checking out target repo {self.repo.name} commit {commit.sha} " \
"=======================", flush=True)
assert gitutils.checkout_git_commit(self.repo.src_path.absolute(), commit.sha)
# checkout non-target repos commit
ctime = datetime.strptime(commit.ctime.split(" ")[0], "%Y-%m-%d")
self._checkout_non_target_repos(ctime)
# build target repo
build_repo(self.repo, self.bisection_env)
# build non target repos
for repo in self.non_target_repos:
build_repo(repo, self.bisection_env)
class TorchBenchRepo:
repo: TorchRepo
target_repo: BisectionTargetRepo
workdir: Path
bisection_env: os._Environ
timelimit: int # timeout limit in minutes
first_time: bool
def __init__(self,
repo: TorchRepo,
target_repo: BisectionTargetRepo,
workdir: Path):
self.repo = repo
self.target_repo = target_repo
self.workdir = workdir
self.first_time = True
def prep(self, bisection_env: os._Environ) -> bool:
self.bisection_env = bisection_env
return True
def _install_benchmark(self):
"Install and build TorchBench dependencies"
command = [sys.executable, "install.py"]
subprocess.check_call(command, cwd=self.repo.src_path.absolute(), env=self.bisection_env)
def _run_benchmark_for_commit(self, commit: Commit, bisect_config: TorchBenchABTestResult) -> str:
# Return the result json file path
output_dir = os.path.join(self.workdir.absolute(), commit.sha)
# If the directory already exists, clear its contents
if os.path.exists(output_dir):
assert os.path.isdir(output_dir), "Must specify output directory: {output_dir}"
shutil.rmtree(output_dir)
os.mkdir(output_dir)
# If the first time to run benchmark, install the dependencies first
if self.first_time and not SKIP_INSTALL_TORCHBENCH:
self._install_benchmark()
self.first_time = False
bm_name = bisect_config.name
output_file = "metrics-{}.json".format(datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S"))
output_file_path = os.path.join(output_dir, output_file)
print(f"===================== [TORCHBENCH] Running TorchBench for commit: {commit.sha} START =====================", flush=True)
command = [sys.executable, "run_benchmark.py", bm_name, "--run-bisect", bisect_config.bisection_config_file_path, "--output", output_file_path]
subprocess.check_call(command, cwd=self.repo.src_path, env=self.bisection_env)
print(f"===================== [TORCHBENCH] Running TorchBench for commit: {commit.sha} END. OUTPUT: {output_file_path} =====================", flush=True)
return output_file_path
def _gen_digest(self, result_json: str) -> Dict[str, float]:
out = {}
if not os.path.getsize(result_json):
print(f"Empty json file {result_json}. Return empty digest.")
return out
with open(result_json, "r") as df:
data = json.load(df)
return data
def get_digest_for_commit(self, commit: Commit, abtest_result: Dict[str, Any], debug: bool) -> Dict[str, float]:
# digest is cached before
if commit.digest:
return commit.digest
# if in debug mode, load from the benchmark file if it exists
if debug:
result_dir = os.path.join(self.workdir, commit.sha)
result_json = get_latest_non_empty_file(result_dir, lambda x: x.endswith(".json"))
if result_json:
commit.digest = self._gen_digest(result_json)
return commit.digest
# Build all torch packages
self.target_repo.build(commit)
# Run benchmark, return the output json file
result_json = self._run_benchmark_for_commit(commit, abtest_result)
commit.digest = self._gen_digest(result_json)
print(f"================== [TORCHBENCH] Cleaning up packages for commit {commit.sha} ==================", flush=True)
cleanup_torch_packages()
return commit.digest
class TorchBenchBisection:
workdir: Path
torch_repos: Dict[str, TorchRepo]
target_repo: BisectionTargetRepo
torchbench: TorchBenchRepo
bisect_config: TorchBenchABTestResult
output_json: str
debug: bool
# left commit, right commit, TorchBenchABTestResult to test
bisectq: List[Tuple[Commit, Commit, TorchBenchABTestResult]]
result: List[Tuple[Commit, Commit]]
def __init__(self,
workdir: str,
torch_repos: List[TorchRepo],
target_repo: TorchRepo,
start: str,
end: str,
bisect_config: TorchBenchABTestResult,
output_json: str,
debug: bool = False):
self.workdir = Path(workdir)
self.torch_repos = torch_repos
non_target_repos = list(filter(lambda x: not x.name == target_repo.name and not x.name == "torchbench", torch_repos.values()))
self.target_repo = BisectionTargetRepo(repo=target_repo, start=start, end=end, non_target_repos=non_target_repos)
self.torchbench = TorchBenchRepo(repo=torch_repos["torchbench"],
target_repo=self.target_repo,
workdir=self.workdir)
self.bisect_config = bisect_config
self.bisectq = list()
self.result = list()
self.output_json = output_json
self.debug = debug
def prep(self) -> bool:
cleanup_torch_packages()
if not self.target_repo.prep():
return False
if not self.torchbench.prep(self.target_repo.bisection_env):
return False
left_commit = self.target_repo.commits[0]
right_commit = self.target_repo.commits[-1]
self.bisectq.append((left_commit, right_commit, self.bisect_config))
return True
    # Left: older commit, right: newer commit
    # Return: the TorchBenchABTestResult comparing left (control) and right (treatment)
def regression_detection(self, left: Commit, right: Commit) -> TorchBenchABTestResult:
# If uncalculated, commit.digest will be None
assert left.digest, "Commit {left.sha} must have a digest"
assert right.digest, "Commit {right.sha} must have a digest"
regression_result = generate_regression_result(left.digest, right.digest)
regression_file = f"regression-{left.sha}-{right.sha}.yaml"
regression_file_full_path = os.path.join(self.workdir.absolute(), regression_file)
with open(regression_file_full_path, "w") as rf:
rf.write(yaml.safe_dump(asdict(regression_result)))
regression_result.bisection_config_file_path = regression_file_full_path
return regression_result
def run(self):
while len(self.bisectq):
(left, right, abtest_result) = self.bisectq.pop(0)
self.torchbench.get_digest_for_commit(left, abtest_result, self.debug)
self.torchbench.get_digest_for_commit(right, abtest_result, self.debug)
updated_abtest_result = self.regression_detection(left, right)
if len(updated_abtest_result.details) or \
len(updated_abtest_result.control_only_metrics) or \
len(updated_abtest_result.treatment_only_metrics):
mid = self.target_repo.get_mid_commit(left, right)
                if mid is None:
self.result.append((left, right))
else:
self.bisectq.append((left, mid, updated_abtest_result))
self.bisectq.append((mid, right, updated_abtest_result))
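    # When a signal is detected between left and right, both halves are
    # re-enqueued and ranges without a signal are dropped, so the queue
    # converges to adjacent commit pairs; those pairs are recorded in
    # self.result as the candidate culprit boundaries.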
def output(self):
json_obj = dict()
json_obj["target_repo"] = self.target_repo.repo.name
json_obj["start"] = self.target_repo.start
json_obj["end"] = self.target_repo.end
json_obj["result"] = []
for res in self.result:
r = dict()
r["commit1"] = res[0].sha
r["commit1_time"] = res[0].ctime
r["commit1_digest"] = res[0].digest
r["commit2"] = res[1].sha
r["commit2_time"] = res[1].ctime
r["commit2_digest"] = res[1].digest
json_obj["result"].append(r)
with open(self.output_json, 'w') as outfile:
json.dump(json_obj, outfile, indent=2)
print(f"Bisection successful. Result saved to {self.output_json}:")
print(json_obj)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--work-dir",
required=True,
help="bisection working directory for logs and results",
type=exist_dir_path)
parser.add_argument("--torch-repos-path",
required=True,
help="the directory of pytorch/* source code repositories",
type=exist_dir_path)
parser.add_argument("--torchbench-repo-path",
default=None,
help="the directory of torchbench source code git repository, if None, use `args.torch_repo_path/benchmark`.",
type=exist_dir_path)
parser.add_argument("--config",
required=True,
help="the regression dict output of regression_detector.py in YAML",
type=exist_file_path)
parser.add_argument("--skip-install-torchbench", action="store_true", help="Skip installing torchbench")
parser.add_argument("--output",
required=True,
help="the output json file")
parser.add_argument("--skip-update", type=str, default="torchbench", help="Repositories to skip update.")
# by default, debug mode is disabled
parser.add_argument("--debug",
help="run in debug mode, if the result json exists, use it directly",
action='store_true')
args = parser.parse_args()
bisect_config = parse_abtest_result_from_regression_file_for_bisect(args.config)
# sanity checks
assert bisect_config.name, "Invalid bisection config, must specify userbenchmark name."
assert bisect_config.control_env["git_commit_hash"], "Invalid bisection config, must specify control group commit hash."
assert bisect_config.treatment_env["git_commit_hash"], "Invalid bisection config, must specify treatment group commit hash."
assert bisect_config.bisection in TORCHBENCH_BISECTION_TARGETS.keys(), f"Invalid bisection config, " \
f"get bisection target repo {bisect_config.bisection}, " \
f"available target repos: {TORCHBENCH_BISECTION_TARGETS.keys()}"
assert bisect_config.bisection_mode == "bisect", "Abtest mode is not supported yet."
assert len(bisect_config.details), "The bisection target metrics must not be empty."
if args.skip_update:
skip_update_repos = list(map(lambda x: x.strip(), args.skip_update.split(",")))
for repo in skip_update_repos:
assert repo in list(TORCHBENCH_BISECTION_TARGETS.keys()), f"User specified skip update repo {repo} not in list: {TORCHBENCH_BISECTION_TARGETS.keys()}"
else:
skip_update_repos = None
if args.skip_install_torchbench:
SKIP_INSTALL_TORCHBENCH = True
# load, update, and clean the repo directories
torch_repos: Dict[str, TorchRepo] = get_updated_clean_torch_repos(args.torch_repos_path, args.torchbench_repo_path, skip_update_repos)
target_repo = torch_repos[bisect_config.bisection]
start_hash = gitutils.get_torch_main_commit(target_repo.src_path.absolute(), bisect_config.control_env["git_commit_hash"])
end_hash = gitutils.get_torch_main_commit(target_repo.src_path.absolute(), bisect_config.treatment_env["git_commit_hash"])
bisection = TorchBenchBisection(workdir=args.work_dir,
torch_repos=torch_repos,
target_repo=torch_repos[bisect_config.bisection],
start=start_hash,
end=end_hash,
bisect_config=bisect_config,
output_json=args.output,
debug=args.debug)
assert bisection.prep(), "The working condition of bisection is not satisfied."
print("Preparation steps ok. Commit to bisect: " + " ".join([str(x) for x in bisection.target_repo.commits]))
bisection.run()
bisection.output()
|
from enum import Enum
# Enum classes, one per model domain, listing the respective tasks
# that are available in that domain.
class COMPUTER_VISION(Enum):
SEGMENTATION = "segmentation"
CLASSIFICATION = "classification"
DETECTION = "detection"
GENERATION = "generation"
PATTERN_RECOGNITION = "pattern recognition"
VIDEO_INTERPOLATION = "video interpolation"
OTHER_COMPUTER_VISION = "other computer vision"
class NLP(Enum):
TRANSLATION = "translation"
LANGUAGE_MODELING = "language modeling"
GENERATION = "generation"
OTHER_NLP = "other nlp"
class SPEECH(Enum):
SYNTHESIS = "synthesis"
RECOGNITION = "recognition"
class RECOMMENDATION(Enum):
RECOMMENDATION = "recommendation"
class REINFORCEMENT_LEARNING(Enum):
OTHER_RL = "other rl"
class OTHER(Enum):
OTHER_TASKS = "other tasks"
class GNN(Enum):
CLASSIFICATION = "classification"
|
import contextlib
import dataclasses
import gc
import importlib
import io
import os
import pathlib
import subprocess
import sys
import tempfile
import threading
from pathlib import Path
from typing import Any, Callable, Dict, List, NoReturn, Optional, Tuple
from urllib import request
import torch
from components._impl.tasks import base as base_task
from components._impl.workers import subprocess_worker
class ModelNotFoundError(RuntimeError):
pass
REPO_PATH = Path(os.path.abspath(__file__)).parent.parent
DATA_PATH = os.path.join(REPO_PATH, "torchbenchmark", "data", ".data")
class add_path():
def __init__(self, path):
self.path = path
def __enter__(self):
sys.path.insert(0, self.path)
def __exit__(self, exc_type, exc_value, traceback):
try:
sys.path.remove(self.path)
except ValueError:
pass
with add_path(str(REPO_PATH)):
from utils import TORCH_DEPS, get_pkg_versions, proxy_suggestion
this_dir = pathlib.Path(__file__).parent.absolute()
model_dir = 'models'
internal_model_dir = "fb"
canary_model_dir = "canary_models"
install_file = 'install.py'
def _test_https(test_url: str = 'https://github.com', timeout: float = 0.5) -> bool:
try:
request.urlopen(test_url, timeout=timeout)
except OSError:
return False
return True
def _install_deps(model_path: str, verbose: bool = True) -> Tuple[bool, Any, Any]:
from .util.env_check import get_pkg_versions
run_args = [
[sys.executable, install_file],
]
run_env = os.environ.copy()
run_env["PYTHONPATH"] = this_dir.parent
run_kwargs = {
'cwd': model_path,
'check': True,
'env': run_env,
}
output_buffer = None
_, stdout_fpath = tempfile.mkstemp()
try:
output_buffer = io.FileIO(stdout_fpath, mode="w")
if os.path.exists(os.path.join(model_path, install_file)):
if not verbose:
run_kwargs['stderr'] = subprocess.STDOUT
run_kwargs['stdout'] = output_buffer
versions = get_pkg_versions(TORCH_DEPS)
subprocess.run(*run_args, **run_kwargs) # type: ignore
new_versions = get_pkg_versions(TORCH_DEPS)
if versions != new_versions:
errmsg = f"The torch packages are re-installed after installing the benchmark deps. \
Before: {versions}, after: {new_versions}"
return (False, errmsg, None)
else:
return (True, f"No install.py is found in {model_path}. Skip.", None)
except subprocess.CalledProcessError as e:
return (False, e.output, io.FileIO(stdout_fpath, mode="r").read().decode())
except Exception as e:
return (False, e, io.FileIO(stdout_fpath, mode="r").read().decode())
finally:
del output_buffer
os.remove(stdout_fpath)
return (True, None, None)
def dir_contains_file(dir, file_name) -> bool:
names = map(lambda x: x.name, filter(lambda x: x.is_file(), dir.iterdir()))
return file_name in names
def _list_model_paths() -> List[str]:
p = pathlib.Path(__file__).parent.joinpath(model_dir)
    # Only load the model directories that contain an "__init__.py" file
models = sorted(str(child.absolute()) for child in p.iterdir() if child.is_dir() and \
(not child.name == internal_model_dir) and dir_contains_file(child, "__init__.py"))
p = p.joinpath(internal_model_dir)
if p.exists():
m = sorted(str(child.absolute()) for child in p.iterdir() if child.is_dir() and dir_contains_file(child, "__init__.py"))
models.extend(m)
return models
def _list_canary_model_paths() -> List[str]:
p = pathlib.Path(__file__).parent.joinpath(canary_model_dir)
    # Only load the model directories that contain an "__init__.py" file
models = sorted(str(child.absolute()) for child in p.iterdir() if child.is_dir() and \
(not child.name == internal_model_dir) and dir_contains_file(child, "__init__.py"))
return models
def _is_internal_model(model_name: str) -> bool:
p = pathlib.Path(__file__).parent.joinpath(model_dir).joinpath(internal_model_dir).joinpath(model_name)
if p.exists() and p.joinpath("__init__.py").exists():
return True
return False
def _is_canary_model(model_name: str) -> bool:
p = pathlib.Path(__file__).parent.joinpath(canary_model_dir).joinpath(model_name)
if p.exists() and p.joinpath("__init__.py").exists():
return True
return False
def setup(models: List[str] = [], verbose: bool = True, continue_on_fail: bool = False, test_mode: bool = False, allow_canary: bool = False) -> bool:
if not _test_https():
print(proxy_suggestion)
sys.exit(-1)
failures = {}
models = list(map(lambda p: p.lower(), models))
    model_paths = filter(lambda p: not models or os.path.basename(p).lower() in models, _list_model_paths())
if allow_canary:
canary_model_paths = filter(lambda p: os.path.basename(p).lower() in models, _list_canary_model_paths())
model_paths = list(model_paths)
model_paths.extend(canary_model_paths)
for model_path in model_paths:
print(f"running setup for {model_path}...", end="", flush=True)
if test_mode:
versions = get_pkg_versions(TORCH_DEPS)
success, errmsg, stdout_stderr = _install_deps(model_path, verbose=verbose)
if test_mode:
new_versions = get_pkg_versions(TORCH_DEPS, reload=True)
if versions != new_versions:
print(f"The torch packages are re-installed after installing the benchmark model {model_path}. \
Before: {versions}, after: {new_versions}")
sys.exit(-1)
if success and errmsg and "No install.py is found" in errmsg:
print("SKIP - No install.py is found")
elif success:
print("OK")
else:
print("FAIL")
try:
errmsg = errmsg.decode()
except Exception:
pass
# If the install was very chatty, we don't want to overwhelm.
# This will not affect verbose mode, which does not catch stdout
# and stderr.
log_lines = (stdout_stderr or "").splitlines(keepends=False)
if len(log_lines) > 40:
log_lines = log_lines[:20] + ["..."] + log_lines[-20:]
stdout_stderr = "\n".join(log_lines)
if stdout_stderr:
errmsg = f"{stdout_stderr}\n\n{errmsg or ''}"
failures[model_path] = errmsg
if not continue_on_fail:
break
for model_path in failures:
print(f"Error for {model_path}:")
print("---------------------------------------------------------------------------")
print(failures[model_path])
print("---------------------------------------------------------------------------")
print()
return len(failures) == 0
@dataclasses.dataclass(frozen=True)
class ModelDetails:
"""Static description of what a particular TorchBench model supports.
When parameterizing tests, we only want to generate sensible ones.
(e.g. Those where a model can be imported and supports the feature to be
tested or benchmarked.) This requires us to import the model; however many
of the models are EXTREMELY stateful, and even importing them consumes
significant system resources. As a result, we only want one (or a few)
alive at any given time.
Note that affinity cannot be solved by simply calling `torch.set_num_threads`
in the child process; this will cause PyTorch to use all of the cores but
at a much lower efficiency.
This class describes what a particular model does and does not support, so
that we can release the underlying subprocess but retain any pertinent
metadata.
"""
path: str
exists: bool
_diagnostic_msg: str
metadata: Dict[str, Any]
@property
def name(self) -> str:
return os.path.basename(self.path)
class Worker(subprocess_worker.SubprocessWorker):
"""Run subprocess using taskset if CPU affinity is set.
When GOMP_CPU_AFFINITY is set, importing `torch` in the main process has
the very surprising effect of changing the threading behavior in the
subprocess. (See https://github.com/pytorch/pytorch/issues/49971 for
details.) This is a problem, because it means that the worker is not
hermetic and also tends to force the subprocess torch to run in single
threaded mode which drastically skews results.
This can be ameliorated by calling the subprocess using `taskset`, which
allows the subprocess PyTorch to properly bind threads.
"""
@property
def args(self) -> List[str]:
affinity = os.environ.get("GOMP_CPU_AFFINITY", "")
return (
["taskset", "--cpu-list", affinity] if affinity else []
) + super().args
class ModelTask(base_task.TaskBase):
# The worker may (and often does) consume significant system resources.
# In order to ensure that runs do not interfere with each other, we only
# allow a single ModelTask to exist at a time.
_lock = threading.Lock()
def __init__(
self,
model_path: str,
timeout: Optional[float] = None,
extra_env: Optional[Dict[str, str]] = None,
) -> None:
gc.collect() # Make sure previous task has a chance to release the lock
assert self._lock.acquire(blocking=False), "Failed to acquire lock."
self._model_path = model_path
if _is_internal_model(model_path):
model_path = f"{internal_model_dir}.{model_path}"
self._worker = Worker(timeout=timeout, extra_env=extra_env)
self.worker.run("import torch")
self._details: ModelDetails = ModelDetails(
**self._maybe_import_model(
package=__name__,
model_path=model_path,
)
)
def __del__(self) -> None:
self._lock.release()
@property
def worker(self) -> subprocess_worker.SubprocessWorker:
return self._worker
@property
    def model_details(self) -> ModelDetails:
return self._details
# =========================================================================
# == Import Model in the child process ====================================
# =========================================================================
@base_task.run_in_worker(scoped=True)
@staticmethod
def _maybe_import_model(package: str, model_path: str) -> Dict[str, Any]:
import importlib
import os
import traceback
model_name = os.path.basename(model_path)
diagnostic_msg = ""
try:
module = importlib.import_module(f'.models.{model_name}', package=package)
if accelerator_backend := os.getenv("ACCELERATOR_BACKEND"):
setattr(module, accelerator_backend, importlib.import_module(accelerator_backend))
Model = getattr(module, 'Model', None)
if Model is None:
diagnostic_msg = f"Warning: {module} does not define attribute Model, skip it"
elif not hasattr(Model, 'name'):
Model.name = model_name
except ModuleNotFoundError as e:
traceback.print_exc()
exit(-1)
# Populate global namespace so subsequent calls to worker.run can access `Model`
globals()["Model"] = Model
# This will be used to populate a `ModelDetails` instance in the parent.
return {
"path": model_path,
"exists": Model is not None,
"_diagnostic_msg": diagnostic_msg,
"metadata": {}
}
# =========================================================================
# == Instantiate a concrete `model` instance ==============================
# =========================================================================
@base_task.run_in_worker(scoped=True)
@staticmethod
def make_model_instance(test: str, device: str, batch_size: Optional[int]=None, extra_args: List[str]=[]) -> None:
Model = globals()["Model"]
model = Model(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
import gc
gc.collect()
if device == 'cuda':
torch.cuda.empty_cache()
maybe_sync = torch.cuda.synchronize
else:
maybe_sync = lambda: None
globals().update({
"model": model,
"maybe_sync": maybe_sync,
})
# =========================================================================
# == Replace the `invoke()` function in `model` instance ==================
# =========================================================================
@base_task.run_in_worker(scoped=True)
@staticmethod
def replace_invoke(module_name: str, func_name: str) -> None:
import importlib
# import function from pkg
model = globals()["model"]
        try:
            module = importlib.import_module(module_name)
            inject_func = getattr(module, func_name, None)
            if inject_func is None:
                # Skip injection if the module does not expose the requested function.
                print(f"Warning: {module} does not define attribute {func_name}, skip it")
                return
        except ModuleNotFoundError as e:
            print(f"Warning: Could not find dependent module {e.name} for Model {model.name}, skip it")
            return
        # Bind the imported function as a bound method replacing `model.invoke`.
        model.invoke = inject_func.__get__(model)
# =========================================================================
# == Get Model attribute in the child process =============================
# =========================================================================
@base_task.run_in_worker(scoped=True)
@staticmethod
    def get_model_attribute(attr: str, field: Optional[str] = None, classattr: bool = False) -> Any:
if classattr:
model = globals()["Model"]
else:
model = globals()["model"]
if hasattr(model, attr):
if field:
model_attr = getattr(model, attr)
return getattr(model_attr, field)
else:
return getattr(model, attr)
else:
return None
def gc_collect(self) -> None:
self.worker.run("""
import gc
gc.collect()
""")
def del_model_instance(self):
self.worker.run("""
del model
del maybe_sync
""")
self.gc_collect()
# =========================================================================
# == Forward calls to `model` from parent to worker =======================
# =========================================================================
def set_train(self) -> None:
self.worker.run("model.set_train()")
def invoke(self) -> None:
self.worker.run("""
model.invoke()
maybe_sync()
""")
def set_eval(self) -> None:
self.worker.run("model.set_eval()")
def extract_details_train(self) -> None:
self._details.metadata["train_benchmark"] = self.worker.load_stmt("torch.backends.cudnn.benchmark")
self._details.metadata["train_deterministic"] = self.worker.load_stmt("torch.backends.cudnn.deterministic")
def check_details_train(self, device, md) -> None:
self.extract_details_train()
if device == 'cuda':
assert md["train_benchmark"] == self._details.metadata["train_benchmark"], \
"torch.backends.cudnn.benchmark does not match expect metadata during training."
assert md["train_deterministic"] == self._details.metadata["train_deterministic"], \
"torch.backends.cudnn.deterministic does not match expect metadata during training."
def extract_details_eval(self) -> None:
self._details.metadata["eval_benchmark"] = self.worker.load_stmt("torch.backends.cudnn.benchmark")
self._details.metadata["eval_deterministic"] = self.worker.load_stmt("torch.backends.cudnn.deterministic")
# FIXME: Models will use context "with torch.no_grad():", so the lifetime of no_grad will end after the eval().
# FIXME: Must incorporate this "torch.is_grad_enabled()" inside of actual eval() func.
# self._details.metadata["eval_nograd"] = not self.worker.load_stmt("torch.is_grad_enabled()")
self._details.metadata["eval_nograd"] = True
def check_details_eval(self, device, md) -> None:
self.extract_details_eval()
if device == 'cuda':
assert md["eval_benchmark"] == self._details.metadata["eval_benchmark"], \
"torch.backends.cudnn.benchmark does not match expect metadata during eval."
assert md["eval_deterministic"] == self._details.metadata["eval_deterministic"], \
"torch.backends.cudnn.deterministic does not match expect metadata during eval."
assert md["eval_nograd"] == self._details.metadata["eval_nograd"], \
"torch.is_grad_enabled does not match expect metadata during eval."
@base_task.run_in_worker(scoped=True)
@staticmethod
def check_eval_output() -> None:
instance = globals()["model"]
assert instance.test == "eval", "We only support checking output of an eval test. Please submit a bug report."
instance.invoke()
@base_task.run_in_worker(scoped=True)
@staticmethod
def check_device() -> None:
instance = globals()["model"]
# Check this BenchmarkModel has a device attribute.
current_device = getattr(instance, 'device', None)
if current_device is None:
raise RuntimeError('Missing device in BenchmarkModel.')
model, inputs = instance.get_module()
model_name = getattr(model, 'name', None)
# Check the model tensors are assigned to the expected device.
for t in model.parameters():
model_device = t.device.type
if model_device != current_device:
raise RuntimeError(f'Model {model_name} was not set to the'
f' expected device {current_device},'
f' found device {model_device}.')
# Check the inputs are assigned to the expected device.
def check_inputs(inputs):
if isinstance(inputs, torch.Tensor):
if inputs.dim() and current_device == "cuda":
# Zero dim Tensors (Scalars) can be captured by CUDA
# kernels and need not match device.
return
inputs_device = inputs.device.type
if inputs_device != current_device:
raise RuntimeError(f'Model {model_name} inputs were'
f' not set to the expected device'
f' {current_device}, found device'
f' {inputs_device}.')
elif isinstance(inputs, tuple):
# Some inputs are nested inside tuples, such as tacotron2
for i in inputs:
check_inputs(i)
elif isinstance(inputs, dict):
# Huggingface models take inputs as kwargs
for i in inputs.values():
check_inputs(i)
check_inputs(inputs)
# =========================================================================
# == Control `torch` state (in the subprocess) ============================
# =========================================================================
@contextlib.contextmanager
def no_grad(self, disable_nograd: bool) -> None:
# TODO: deduplicate with `torchbenchmark.util.model.no_grad`
initial_value = self.worker.load_stmt("torch.is_grad_enabled()")
eval_in_nograd = (
not disable_nograd and
self.worker.load_stmt("model.eval_in_nograd()"))
try:
self.worker.run(f"torch.set_grad_enabled({not eval_in_nograd})")
yield
finally:
self.worker.run(f"torch.set_grad_enabled({initial_value})")
@contextlib.contextmanager
def watch_cuda_memory(
self,
skip: bool,
assert_equal: Callable[[int, int], NoReturn],
):
# This context manager is used in testing to ensure we're not leaking
# memory; these tests are generally parameterized by device, so in some
# cases we want this (and the outer check) to simply be a no-op.
if skip or os.getenv('PYTORCH_TEST_SKIP_CUDA_MEM_LEAK_CHECK', '0') == '1':
yield
return
if hasattr(torch._C, '_cuda_clearCublasWorkspaces'):
self.worker.load_stmt("torch._C._cuda_clearCublasWorkspaces()")
self.gc_collect()
memory_before = self.worker.load_stmt("torch.cuda.memory_allocated()")
yield
if hasattr(torch._C, '_cuda_clearCublasWorkspaces'):
self.worker.load_stmt("torch._C._cuda_clearCublasWorkspaces()")
self.gc_collect()
assert_equal(
memory_before,
self.worker.load_stmt("torch.cuda.memory_allocated()"),
)
self.worker.run("torch.cuda.empty_cache()")
def list_models_details(workers: int = 1) -> List[ModelDetails]:
return [
ModelTask(model_path).model_details
for model_path in _list_model_paths()
]
def list_models(model_match=None):
models = []
for model_path in _list_model_paths():
model_name = os.path.basename(model_path)
model_pkg = model_name if not _is_internal_model(model_name) else f"{internal_model_dir}.{model_name}"
try:
module = importlib.import_module(f'.models.{model_pkg}', package=__name__)
except ModuleNotFoundError as e:
print(f"Warning: Could not find dependent module {e.name} for Model {model_name}, skip it")
continue
Model = getattr(module, 'Model', None)
if Model is None:
print(f"Warning: {module} does not define attribute Model, skip it")
continue
if not hasattr(Model, 'name'):
Model.name = model_name
# If given model_match, only return full or partial name matches in models.
if model_match is None:
models.append(Model)
else:
if model_match.lower() in Model.name.lower():
models.append(Model)
return models
def load_model_by_name(model):
models = filter(lambda x: model.lower() == x.lower(),
map(lambda y: os.path.basename(y), _list_model_paths()))
models = list(models)
if not models:
raise ModelNotFoundError(f"{model} is not found in the core model list.")
assert len(models) == 1, f"Found more than one models {models} with the exact name: {model}"
model_name = models[0]
model_pkg = model_name if not _is_internal_model(model_name) else f"{internal_model_dir}.{model_name}"
module = importlib.import_module(f'.models.{model_pkg}', package=__name__)
Model = getattr(module, 'Model', None)
if Model is None:
print(f"Warning: {module} does not define attribute Model, skip it")
return None
if not hasattr(Model, 'name'):
Model.name = model_name
return Model
def load_canary_model_by_name(model: str):
if not _is_canary_model(model):
raise ModelNotFoundError(f"{model} is not found in the canary model list.")
module = importlib.import_module(f'.canary_models.{model}', package=__name__)
Model = getattr(module, 'Model', None)
if Model is None:
print(f"Warning: {module} does not define attribute Model, skip it")
return None
if not hasattr(Model, 'name'):
Model.name = model
return Model
def get_metadata_from_yaml(path):
import yaml
    metadata_path = os.path.join(path, "metadata.yaml")
md = None
if os.path.exists(metadata_path):
with open(metadata_path, 'r') as f:
md = yaml.load(f, Loader=yaml.FullLoader)
return md
def str_to_bool(input: Any) -> bool:
if not input:
return False
return str(input).lower() in ("1", "yes", "y", "true", "t", "on")
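# Examples of the truthiness parsing above: str_to_bool("Yes") and
# str_to_bool("1") return True; str_to_bool(0), str_to_bool(None) and
# str_to_bool("off") return False.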
|
import os
import pathlib
import importlib
from dataclasses import dataclass
from typing import List, Dict, Any
E2E_MODEL_DIR = 'e2e_models'
def _list_model_paths() -> List[str]:
p = pathlib.Path(__file__).parent.joinpath(E2E_MODEL_DIR)
return sorted(str(child.absolute()) for child in p.iterdir() if child.is_dir())
@dataclass
class E2EBenchmarkResult:
device: str
device_num: int
test: str
num_examples: int
num_epochs: int
batch_size: int
result: Dict[str, Any]
def load_e2e_model_by_name(model):
models = filter(lambda x: model.lower() == x.lower(),
map(lambda y: os.path.basename(y), _list_model_paths()))
models = list(models)
if not models:
return None
assert len(models) == 1, f"Found more than one models {models} with the exact name: {model}"
model_name = models[0]
try:
module = importlib.import_module(f'torchbenchmark.e2e_models.{model_name}', package=__name__)
except ModuleNotFoundError as e:
print(f"Warning: Could not find dependent module {e.name} for Model {model_name}, skip it: {e}")
return None
Model = getattr(module, 'Model', None)
if Model is None:
print(f"Warning: {module} does not define attribute Model, skip it")
return None
if not hasattr(Model, 'name'):
Model.name = model_name
return Model
|
from torchbenchmark.util.framework.gnn.model_factory import GNNModel
from torchbenchmark.tasks import GNN
class Model(GNNModel):
task = GNN.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 64
DEFAULT_EVAL_BSIZE = 64
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(model_name="gat", test=test, device=device,
batch_size=batch_size, extra_args=extra_args)
if device == 'cuda':
# TODO - Add CUDA support
raise NotImplementedError("GAT doesn't support CUDA")
|
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt', '-f', 'https://data.pyg.org/whl/torch-2.0.0+cpu.html'])
if __name__ == '__main__':
pip_install_requirements()
|
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark.util.framework.diffusers.model_factory import DiffuserModel
class Model(DiffuserModel):
task = COMPUTER_VISION.GENERATION
DEFAULT_TRAIN_BSIZE = 4
DEFAULT_EVAL_BSIZE = 1
# Default eval precision on CUDA device is fp16
DEFAULT_EVAL_CUDA_PRECISION = "fp16"
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(name="timbrooks/instruct-pix2pix",
test=test, device=device,
batch_size=batch_size, extra_args=extra_args)
|
from torchbenchmark.util.framework.diffusers import install_diffusers
from diffusers import StableDiffusionInstructPix2PixPipeline
import torch
MODEL_NAME = "timbrooks/instruct-pix2pix"
def load_model_checkpoint():
StableDiffusionInstructPix2PixPipeline.from_pretrained(MODEL_NAME, torch_dtype=torch.float16, safety_checker=None)
if __name__ == '__main__':
install_diffusers()
|
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel, HuggingFaceAuthMixin
class Model(HuggingFaceModel, HuggingFaceAuthMixin):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 1
DEFAULT_EVAL_BSIZE = 1
DEEPCOPY = False
def __init__(self, test, device, batch_size=None, extra_args=[]):
HuggingFaceAuthMixin.__init__(self)
super().__init__(name="llama_v2_13b", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
def train(self):
return NotImplementedError("FSDP should implement a training loop") |
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
if __name__ == '__main__':
model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
cache_model(model_name) |
from .. import lit_llama as lit_llama
from ..lit_llama import LIT_LLAMA_PATH
import importlib.util
import os.path
import torch
import torch.nn as nn
import sys
from lit_llama import Tokenizer
def import_from_file_path(module_name, file_path):
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
sys.modules[module_name] = module
return module
lit_llama_generate = import_from_file_path("lit_llama_generate", os.path.join(LIT_LLAMA_PATH, 'generate.py'))
class GenerationWrapper(nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, idx, max_new_tokens):
return lit_llama_generate.generate(self.model, idx, max_new_tokens)
class Model(lit_llama.Model):
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
self.model = GenerationWrapper(self.model)
tokenizer = Tokenizer(os.path.join(LIT_LLAMA_PATH, "checkpoints/lit-llama/tokenizer.model"))
# max_new_tokens matches lit-llama/generate.py
self.example_inputs = (tokenizer.encode("The meaning of life is", bos=True, eos=False, device=device), 50)
def train(self):
return NotImplementedError("cannot train on autoregressive generation")
def eval(self):
self.model.eval()
with torch.no_grad():
y = self.model(*self.example_inputs)
return (y,)
|
from torchbenchmark.util.framework.lit_llama import install_lit_llama
if __name__ == '__main__':
install_lit_llama()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the GNU General Public License version 3.
from PIL import Image
import numpy as np
import cv2
import torch
import os
from ...util.model import BenchmarkModel
from torchmultimodal.transforms.clip_transform import CLIPTextTransform, CLIPImageTransform
from torchmultimodal.models.clip.model import clip_vit_b32
from torchmultimodal.modules.losses.contrastive_loss_with_temperature import (
ContrastiveLossWithTemperature,
)
import math
class Model(BenchmarkModel):
DEFAULT_EVAL_BSIZE = 32
DEFAULT_TRAIN_BSIZE = 32
def __init__(self, test, device, batch_size=1, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
self.data_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), '.data')
self.image_name = "pizza.jpg"
self.image = Image.open(os.path.join(self.data_folder, self.image_name))
self.text = ["pizza", "dog"] * 16
self.img_transform = CLIPImageTransform(is_train=False)
self.text_transform = CLIPTextTransform()
self.images = [self.image for _ in range(self.batch_size)]
self.texts = [self.text for _ in range(self.batch_size)]
self.image_tensor = self.img_transform(self.images).to(self.device)
self.text_tensor = self.text_transform(self.text).to(self.device)
self.model = clip_vit_b32()
self.model.to(self.device)
        # Create loss function and optimizer
self.loss_fn = ContrastiveLossWithTemperature()
self.optimizer = torch.optim.AdamW(
list(self.model.parameters()) + list(self.loss_fn.parameters()),
lr=5.0e-4,
weight_decay=1.0e-4,
eps=1.0e-6,
)
def get_module(self):
return self.model, (self.image_tensor, self.text_tensor)
def train(self):
self.model.train()
total_loss = 0
self.optimizer.zero_grad()
# Forward pass
image_embedding, text_embedding = self.model(self.image_tensor, self.text_tensor)
# Backward pass
loss = self.loss_fn(image_embedding, text_embedding)
loss.backward()
self.optimizer.step()
total_loss += loss.item()
# Return the average loss
return total_loss / len(self.text)
def eval(self):
self.model.eval()
with torch.no_grad():
image_embedding, text_embedding = self.model(self.image_tensor, self.text_tensor)
score = image_embedding @ text_embedding.t()
return self.text[torch.argmax(score)]
|
import os
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
def download_data(data_folder):
# CC-0 image from wikipedia page on pizza so legal to use
subprocess.check_call(['wget', '-O', os.path.join(data_folder, 'pizza.jpg'), 'https://upload.wikimedia.org/wikipedia/commons/thumb/9/91/Pizza-3007395.jpg/2880px-Pizza-3007395.jpg'])
if __name__ == '__main__':
pip_install_requirements()
# Create .data folder in the script's directory
data_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), '.data')
os.makedirs(data_folder, exist_ok=True)
download_data(data_folder)
|
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import NLP
import torch
import os
from torchbenchmark import add_path, REPO_PATH
import sys
import lightning as L
LIT_LLAMA_PATH = os.path.join(REPO_PATH, "submodules", "lit-llama")
with add_path(LIT_LLAMA_PATH):
from lit_llama.utils import EmptyInitOnDevice, lazy_load, llama_model_lookup
from lit_llama import LLaMA, Tokenizer
class Model(BenchmarkModel):
task = NLP.LANGUAGE_MODELING
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
checkpoint_path = os.path.join(LIT_LLAMA_PATH, "checkpoints/lit-llama/7B/lit-llama.pth")
if not os.path.exists(checkpoint_path):
raise NotImplementedError("checkpoint doesn't exist")
with lazy_load(checkpoint_path) as checkpoint:
name = llama_model_lookup(checkpoint)
with EmptyInitOnDevice(device=device):
model = LLaMA.from_name(name)
model.load_state_dict(checkpoint)
self.model = model
self.seq_len = 32
self.max_seq_len = 64
self.example_inputs = (
torch.ones([self.batch_size, self.seq_len], dtype=torch.int32, device=self.device),
self.max_seq_len,
torch.arange(self.seq_len, dtype=torch.int64, device=self.device) # positions
)
def get_module(self):
return self.model, self.example_inputs
def train(self):
return NotImplementedError("you will OOM trying to train directly")
def eval(self):
self.model.eval()
with torch.no_grad():
logits = self.model(*self.example_inputs)
return (logits,)
|
from torchbenchmark.util.framework.lit_llama import install_lit_llama
if __name__ == '__main__':
install_lit_llama()
|
import dataclasses
from typing import List
def cfg_to_str(cfg: dataclasses.dataclass) -> List[str]:
def rewrite_option(opt: str) -> str:
new_opt = opt.replace("_", "-")
return f"--{new_opt}"
out = []
for fld in dataclasses.fields(cfg):
new_option = rewrite_option(fld.name)
val = getattr(cfg, fld.name)
if isinstance(val, bool):
if val:
out.append(new_option)
else:
out.append(new_option)
out.append(str(getattr(cfg, fld.name)))
return out
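# Example of the flag rendering above (illustrative): for a config with fields
# mini_batch_size=1024 and num_indices_per_lookup_fixed=1, cfg_to_str returns
# ["--mini-batch-size", "1024", "--num-indices-per-lookup-fixed", "1"];
# boolean fields are emitted as bare flags, and only when they are True.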
# dummy config location:
# https://github.com/facebookresearch/FAMBench/blob/a0f12ca4fe8973f4cc65d18b51ce3aa94ceec0ac/benchmarks/run_dlrm_ootb_train.sh#L54
# config: A.1dev-embed32-fp32
@dataclasses.dataclass
class FAMBenchTrainConfig:
mini_batch_size: int = 1024
test_mini_batch_size: int = 1024
test_num_workers: int = 0
data_generation: str = "random"
arch_mlp_bot:str = "2000-1500-1500-1500-192"
arch_mlp_top:str = "4000-4000-4000-4000-4000-4000-4000-4000-4000-1"
arch_sparse_feature_size:int = 192
arch_embedding_size:str = "965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965"
num_indices_per_lookup:int = 55
num_indices_per_lookup_fixed:int = 1
numpy_rand_seed:int = 727
weighted_pooling: str = "learned"
# torchbench: run 2 batches only (original 15)
num_batches:int = 2
# torchbench: these items in the original config are disabled
# because they are handled by the framework
# num_batches:int = 15
# warmup_step = 5
# use_gpu: bool = True
# precache_ml_data: bool = True
# dummy config location:
# https://github.com/facebookresearch/FAMBench/blob/a0f12ca4fe8973f4cc65d18b51ce3aa94ceec0ac/benchmarks/run_dlrm_ootb_infer.sh#L54
# config: A.1dev-embed4-fp16
@dataclasses.dataclass
class FAMBenchEvalConfig:
mini_batch_size:int = 1024
test_mini_batch_size:int = 1024
test_num_workers:int = 0
data_generation:str = "random"
arch_mlp_bot:str = "1414-1750-1750-1750-1750-1750-1750-1750-1750-96"
arch_mlp_top:str = "1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1"
arch_sparse_feature_size:int = 96
arch_embedding_size:str = "555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693"
num_indices_per_lookup:int = 8
num_indices_per_lookup_fixed:int = 1
numpy_rand_seed:int = 727
weighted_pooling: str = "fixed"
# original number of batches: 15
num_batches:int = 15
# torchbench: these items in the original config are disabled
# because they are either handled by the framework
# or require extra dependencies that we don't support yet (such as fbgemm and torch2trt_for_mlp)
# disable warmup
# warmup_step: int = 5
# do not support quantize, torch2trt_for_mlp or fbgemm
# quantize_emb_with_bit: int = 4
# use_fbgemm_gpu: bool = True
# use_gpu: bool = True
# inference_only: bool = True
# precache_ml_data: bool = True
# use_torch2trt_for_mlp: bool = True
# quantize_mlp_with_bit: int = 16
|
import sys
from torch.optim.lr_scheduler import _LRScheduler
class LRPolicyScheduler(_LRScheduler):
def __init__(self, optimizer, num_warmup_steps, decay_start_step, num_decay_steps):
self.num_warmup_steps = num_warmup_steps
self.decay_start_step = decay_start_step
self.decay_end_step = decay_start_step + num_decay_steps
self.num_decay_steps = num_decay_steps
if self.decay_start_step < self.num_warmup_steps:
sys.exit("Learning rate warmup must finish before the decay starts")
super(LRPolicyScheduler, self).__init__(optimizer)
def get_lr(self):
step_count = self._step_count
if step_count < self.num_warmup_steps:
# warmup
scale = 1.0 - (self.num_warmup_steps - step_count) / self.num_warmup_steps
lr = [base_lr * scale for base_lr in self.base_lrs]
self.last_lr = lr
elif self.decay_start_step <= step_count and step_count < self.decay_end_step:
# decay
decayed_steps = step_count - self.decay_start_step
scale = ((self.num_decay_steps - decayed_steps) / self.num_decay_steps) ** 2
min_lr = 0.0000001
lr = [max(min_lr, base_lr * scale) for base_lr in self.base_lrs]
self.last_lr = lr
else:
if self.num_decay_steps > 0:
# freeze at last, either because we're after decay
# or because we're between warmup and decay
lr = self.last_lr
else:
# do not adjust
lr = self.base_lrs
return lr
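# --- Illustrative usage sketch (added for clarity; not part of the benchmark) ---
# Stepping the scheduler with a throw-away SGD optimizer shows the three phases:
# linear warmup, quadratic decay down to a 1e-7 floor, then a frozen learning rate.
if __name__ == "__main__":
    import torch

    param = torch.nn.Parameter(torch.zeros(1))
    opt = torch.optim.SGD([param], lr=0.1)
    sched = LRPolicyScheduler(opt, num_warmup_steps=2, decay_start_step=4, num_decay_steps=3)
    for step in range(8):
        opt.step()
        sched.step()
        print(step, [group["lr"] for group in opt.param_groups])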
|
"""
Simplified DLRM model from FAMBench.
It doesn't support multi-GPU or fbgemm_gpu.
"""
import torch
import sys
import os
import numpy as np
import torch.nn as nn
from torchbenchmark import REPO_PATH
from typing import Tuple, List
from torchbenchmark.util.model import BenchmarkModel
from torchbenchmark.tasks import RECOMMENDATION
# Import FAMBench model path
class add_path():
def __init__(self, path):
self.path = path
def __enter__(self):
sys.path.insert(0, self.path)
def __exit__(self, exc_type, exc_value, traceback):
try:
sys.path.remove(self.path)
except ValueError:
pass
DLRM_PATH = os.path.join(REPO_PATH, "submodules", "FAMBench", "benchmarks", "dlrm", "ootb")
with add_path(DLRM_PATH):
import optim.rwsadagrad as RowWiseSparseAdagrad
from .dlrmnet import DLRM_Net
from .data import prep_data
from .config import FAMBenchTrainConfig, FAMBenchEvalConfig, cfg_to_str
from .args import parse_fambench_args, validate_fambench_args
from .lrscheduler import LRPolicyScheduler
from .utils import unpack_batch, loss_fn_wrap, dlrm_wrap, prefetch
class Model(BenchmarkModel):
task = RECOMMENDATION.RECOMMENDATION
FAMBENCH_MODEL = True
# config
DEFAULT_EVAL_ARGS = FAMBenchEvalConfig()
DEFAULT_TRAIN_ARGS = FAMBenchTrainConfig()
DEFAULT_EVAL_BSIZE = DEFAULT_EVAL_ARGS.mini_batch_size
DEFAULT_TRAIN_BSIZE = DEFAULT_TRAIN_ARGS.mini_batch_size
DEEPCOPY: bool = False
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test, device, batch_size, extra_args)
if test == "train":
self.fambench_args = parse_fambench_args(cfg_to_str(self.DEFAULT_TRAIN_ARGS))
self.fambench_args.inference_only = False
elif test == "eval":
self.fambench_args = parse_fambench_args(cfg_to_str(self.DEFAULT_EVAL_ARGS))
self.fambench_args.inference_only = True
if device == "cuda":
self.fambench_args.use_gpu = True
self.fambench_args.ndevices = 1
args = self.fambench_args
validate_fambench_args(args)
self.prep(args)
ln_bot, ln_emb, ln_top, m_spa, train_ld, test_ld = prep_data(args)
dlrm = DLRM_Net(
args,
m_spa,
ln_emb,
ln_bot,
ln_top,
args.arch_project_size,
arch_interaction_op=args.arch_interaction_op,
arch_interaction_itself=args.arch_interaction_itself,
sigmoid_bot=-1,
sigmoid_top=ln_top.size - 2,
sync_dense_params=args.sync_dense_params,
loss_threshold=args.loss_threshold,
ndevices=args.ndevices,
qr_flag=args.qr_flag,
qr_operation=args.qr_operation,
qr_collisions=args.qr_collisions,
qr_threshold=args.qr_threshold,
md_flag=args.md_flag,
md_threshold=args.md_threshold,
weighted_pooling=args.weighted_pooling,
loss_function=args.loss_function,
learning_rate=args.learning_rate,
use_gpu=args.use_gpu,
use_fbgemm_gpu=args.use_fbgemm_gpu,
fbgemm_gpu_codegen_pref=args.fbgemm_gpu_codegen_pref,
inference_only=args.inference_only,
quantize_mlp_with_bit=args.quantize_mlp_with_bit,
quantize_emb_with_bit=args.quantize_emb_with_bit,
use_torch2trt_for_mlp=args.use_torch2trt_for_mlp,)
# In dlrm.quantize_embedding called below, the torch quantize calls run
# on cpu tensors only. They cannot quantize tensors stored on the gpu.
# So quantization occurs on cpu tensors before transferring them to gpu if
# use_gpu is enabled.
if args.quantize_emb_with_bit != 32:
dlrm.quantize_embedding(args.quantize_emb_with_bit)
if not args.inference_only:
assert args.quantize_mlp_with_bit == 32, (
"Dynamic quantization for mlp requires "
+ "--inference-only because training is not supported"
)
else:
# Currently only INT8 and FP16 quantized types are supported for quantized MLP inference.
# By default we don't do the quantization: quantize_{mlp,emb}_with_bit == 32 (FP32)
assert args.quantize_mlp_with_bit in [
8,
16,
32,
], "only support 8/16/32-bit but got {}".format(args.quantize_mlp_with_bit)
if not args.use_torch2trt_for_mlp:
if args.quantize_mlp_with_bit == 16 and args.use_gpu:
dlrm.top_l = dlrm.top_l.half()
dlrm.bot_l = dlrm.bot_l.half()
elif args.quantize_mlp_with_bit in [8, 16]:
assert not args.use_gpu, (
"Cannot run PyTorch's built-in dynamic quantization for mlp "
+ "with --use-gpu enabled, because DynamicQuantizedLinear's "
+ "forward function calls 'quantized::linear_dynamic', which does not "
+ "support the 'CUDA' backend. To convert to and run quantized mlp layers "
+ "on the gpu, install torch2trt and enable --use-torch2trt-for-mlp. "
+ "Alternatively, disable --use-gpu to use PyTorch's built-in "
+ "cpu quantization ops for the mlp layers. "
)
if args.quantize_mlp_with_bit == 8:
quantize_dtype = torch.qint8
else:
quantize_dtype = torch.float16
dlrm.top_l = torch.quantization.quantize_dynamic(
dlrm.top_l, {torch.nn.Linear}, quantize_dtype
)
dlrm.bot_l = torch.quantization.quantize_dynamic(
dlrm.bot_l, {torch.nn.Linear}, quantize_dtype
)
# Prep work for embedding tables and model transfer:
# Handling single-cpu and single-gpu modes
# NOTE: This also handles dist-backend modes (CLI args --dist-backend=nccl,
# --dist-backend=ccl, and --dist-backend=mpi) because in these modes each
# process runs in single-gpu mode. For example, if 8 processes are launched
# running dlrm_s_pytorch.py with --dist-backend=nccl --use-gpu, each process
# will run in single-gpu mode, resulting in 8 gpus total running distributed
# training or distributed inference if --inference-only is enabled.
if dlrm.ndevices_available <= 1:
if args.use_fbgemm_gpu:
from .fbgemm_embedding import fbgemm_gpu_emb_bag_wrapper
dlrm.fbgemm_emb_l = nn.ModuleList(
[
fbgemm_gpu_emb_bag_wrapper(
device,
dlrm.emb_l if dlrm.emb_l else dlrm.emb_l_q,
dlrm.m_spa,
dlrm.quantize_bits,
dlrm.learning_rate,
dlrm.fbgemm_gpu_codegen_pref,
dlrm.requires_grad,
)
]
)
if args.use_gpu:
dlrm = dlrm.to(device)
if dlrm.weighted_pooling == "fixed":
for k, w in enumerate(dlrm.v_W_l):
dlrm.v_W_l[k] = w.to(device)
else:
# Handling multi-GPU mode
dlrm.bot_l = dlrm.bot_l.to(device)
dlrm.top_l = dlrm.top_l.to(device)
dlrm.prepare_parallel_model(args.ndevices)
assert not args.use_torch2trt_for_mlp, "torch2trt is not supported."
if not args.inference_only:
# specify the optimizer algorithm
opts = {
"sgd": torch.optim.SGD,
"rwsadagrad": RowWiseSparseAdagrad.RWSAdagrad,
"adagrad": torch.optim.Adagrad,
}
# removed distributed code here
parameters = (
dlrm.parameters()
)
self.optimizer = opts[args.optimizer](parameters, lr=args.learning_rate)
self.lr_scheduler = LRPolicyScheduler(
self.optimizer,
args.lr_num_warmup_steps,
args.lr_decay_start_step,
args.lr_num_decay_steps,
)
self.model = dlrm.to(self.device)
# torchbench: prefetch the input to device
if test == "train":
self.ld = prefetch(train_ld, self.device)
elif test == "eval":
self.ld = prefetch(test_ld, self.device)
# Guarantee GPU setup has completed before training or inference starts.
if args.use_gpu:
torch.cuda.synchronize()
def prep(self, args):
np.random.seed(args.numpy_rand_seed)
np.set_printoptions(precision=args.print_precision)
torch.set_printoptions(args.print_precision)
torch.manual_seed(args.numpy_rand_seed)
if args.test_mini_batch_size < 0:
# if the parameter is not set, use the training batch size
args.test_mini_batch_size = args.mini_batch_size
if args.test_num_workers < 0:
# if the parameter is not set, use the same parameter for training
args.test_num_workers = args.num_workers
if args.use_gpu:
torch.cuda.manual_seed_all(args.numpy_rand_seed)
torch.backends.cudnn.deterministic = True
# we only support 1 device
args.ndevices = 1
def get_module(self) -> Tuple[torch.nn.Module, List[torch.Tensor]]:
for inputBatch in self.ld:
X, lS_o, lS_i, T, W, CBPP = unpack_batch(inputBatch, self.device)
if self.model.quantize_mlp_input_with_half_call:
X = X.half()
return (self.model, (X, lS_o, lS_i))
def train(self):
args = self.fambench_args
for j, inputBatch in enumerate(self.ld):
X, lS_o, lS_i, T, W, CBPP = unpack_batch(inputBatch, self.device)
mbs = T.shape[0] # = args.mini_batch_size except maybe for last
# forward pass
Z = dlrm_wrap(
self.model,
X,
lS_o,
lS_i,
args.use_gpu,
self.device,
ndevices=args.ndevices,
)
# loss
E = loss_fn_wrap(self.model, self.fambench_args, Z, T, args.use_gpu, self.device)
# compute loss and accuracy
L = E.detach().cpu().numpy() # numpy array
self.optimizer.zero_grad()
E.backward()
self.optimizer.step()
self.lr_scheduler.step()
def eval(self) -> Tuple[torch.Tensor]:
result = []
args = self.fambench_args
for i, testBatch in enumerate(self.ld):
X_test, lS_o_test, lS_i_test, T_test, W_test, CBPP_test = unpack_batch(
testBatch, self.device
)
# forward pass
Z_test = dlrm_wrap(
self.model,
X_test,
lS_o_test,
lS_i_test,
args.use_gpu,
self.device,
ndevices=args.ndevices,
)
result = (Z_test, T_test)
return result
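# --- Structural sketch (added for clarity; toy stand-ins, not FAMBench code) ---
# Each iteration of Model.train() above reduces to the usual pattern:
# forward (dlrm_wrap) -> loss (loss_fn_wrap) -> zero_grad -> backward ->
# optimizer step -> LR scheduler step. A minimal equivalent with a toy model:
if __name__ == "__main__":
    toy_model = torch.nn.Linear(4, 1)
    toy_opt = torch.optim.SGD(toy_model.parameters(), lr=0.1)
    toy_sched = torch.optim.lr_scheduler.StepLR(toy_opt, step_size=10)
    X, T = torch.randn(8, 4), torch.rand(8, 1)
    Z = toy_model(X)                                        # forward pass
    E = torch.nn.functional.mse_loss(torch.sigmoid(Z), T)   # loss
    toy_opt.zero_grad()
    E.backward()
    toy_opt.step()
    toy_sched.step()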
|
import torch.nn as nn
import torch
import sys
import numpy as np
import itertools
from torch._ops import ops
from torch.nn.parameter import Parameter
from torch.nn.parallel.replicate import replicate
from torch.nn.parallel.parallel_apply import parallel_apply
from torch.nn.parallel.scatter_gather import gather, scatter
# fambench imports
# projection
import project
# quotient-remainder trick
from tricks.qr_embedding_bag import QREmbeddingBag
# mixed-dimension trick
from tricks.md_embedding_bag import PrEmbeddingBag
class DLRM_Net(nn.Module):
def create_mlp(self, ln, sigmoid_layer):
# build MLP layer by layer
layers = nn.ModuleList()
layers.training = self.requires_grad
for i in range(0, ln.size - 1):
n = ln[i]
m = ln[i + 1]
# construct fully connected operator
LL = nn.Linear(int(n), int(m), bias=True)
# initialize the weights
# with torch.no_grad():
# custom Xavier input, output or two-sided fill
mean = 0.0 # std_dev = np.sqrt(variance)
std_dev = np.sqrt(2 / (m + n)) # np.sqrt(1 / m) # np.sqrt(1 / n)
W = np.random.normal(mean, std_dev, size=(m, n)).astype(np.float32)
std_dev = np.sqrt(1 / m) # np.sqrt(2 / (m + 1))
bt = np.random.normal(mean, std_dev, size=m).astype(np.float32)
# approach 1
LL.weight.data = torch.tensor(W)
LL.weight.requires_grad = self.requires_grad
LL.bias.data = torch.tensor(bt)
LL.bias.requires_grad = self.requires_grad
# approach 2
# LL.weight.data.copy_(torch.tensor(W))
# LL.bias.data.copy_(torch.tensor(bt))
# approach 3
# LL.weight = Parameter(torch.tensor(W),requires_grad=True)
# LL.bias = Parameter(torch.tensor(bt),requires_grad=True)
layers.append(LL)
# construct sigmoid or relu operator
if i == sigmoid_layer:
layers.append(nn.Sigmoid())
else:
layers.append(nn.ReLU())
# approach 1: use ModuleList
# return layers
# approach 2: use Sequential container to wrap all layers
return torch.nn.Sequential(*layers)
def create_emb(self, m, ln, weighted_pooling=None):
# create_emb parameter description
#
# ln parameter:
# ln is a list of all the tables' row counts. E.g. [10,5,16] would mean
# table 0 has 10 rows, table 1 has 5 rows, and table 2 has 16 rows.
#
# m parameter (when m is a single value):
# m is the length of all embedding vectors. All embedding vectors in all
# embedding tables are created to be the same length. E.g. if ln were [3,2,5]
# and m were 4, table 0 would be dimension 3 x 4, table 1 would be 2 x 4,
# and table 2 would be 5 x 4.
#
# m parameter (when m is a list):
# m is a list of all the tables' column counts. E.g. if m were [4,5,6] and
# ln were [3,2,5], table 0 would be dimension 3 x 4, table 1 would be 2 x 5,
# and table 2 would be 5 x 6.
#
# Key to remember:
# embedding table i has shape: ln[i] rows, m columns, when m is a single value.
# embedding table i has shape: ln[i] rows, m[i] columns, when m is a list.
emb_l = nn.ModuleList()
v_W_l = []
for i in range(0, ln.size):
# torchbench: comment out distributed code
# if ext_dist.my_size > 1:
# if i not in self.local_emb_indices:
# continue
n = ln[i]
# construct embedding operator
if self.qr_flag and n > self.qr_threshold:
EE = QREmbeddingBag(
n,
m,
self.qr_collisions,
operation=self.qr_operation,
mode="sum",
sparse=True,
)
elif self.md_flag and n > self.md_threshold:
base = max(m)
_m = m[i] if n > self.md_threshold else base
EE = PrEmbeddingBag(n, _m, base)
# use np initialization as below for consistency...
W = np.random.uniform(
low=-np.sqrt(1 / n), high=np.sqrt(1 / n), size=(n, _m)
).astype(np.float32)
EE.embs.weight.data = torch.tensor(W, requires_grad=self.requires_grad)
else:
EE = nn.EmbeddingBag(n, m, mode="sum", sparse=True)
# initialize embeddings
# nn.init.uniform_(EE.weight, a=-np.sqrt(1 / n), b=np.sqrt(1 / n))
W = np.random.uniform(
low=-np.sqrt(1 / n), high=np.sqrt(1 / n), size=(n, m)
).astype(np.float32)
# approach 1
EE.weight.data = torch.tensor(W, requires_grad=self.requires_grad)
# approach 2
# EE.weight.data.copy_(torch.tensor(W))
# approach 3
# EE.weight = Parameter(torch.tensor(W),requires_grad=True)
if weighted_pooling is None:
v_W_l.append(None)
else:
v_W_l.append(torch.ones(n, dtype=torch.float32))
emb_l.append(EE)
return emb_l, v_W_l
def __init__(
self,
args,
m_spa=None,
ln_emb=None,
ln_bot=None,
ln_top=None,
proj_size=0,
arch_interaction_op=None,
arch_interaction_itself=False,
sigmoid_bot=-1,
sigmoid_top=-1,
sync_dense_params=True,
loss_threshold=0.0,
ndevices=-1,
qr_flag=False,
qr_operation="mult",
qr_collisions=0,
qr_threshold=200,
md_flag=False,
md_threshold=200,
weighted_pooling=None,
loss_function="bce",
learning_rate=0.1,
use_gpu=False,
use_fbgemm_gpu=False,
fbgemm_gpu_codegen_pref="Split",
inference_only=False,
quantize_mlp_with_bit=False,
quantize_emb_with_bit=False,
use_torch2trt_for_mlp=False,
):
super(DLRM_Net, self).__init__()
if (
(m_spa is not None)
and (ln_emb is not None)
and (ln_bot is not None)
and (ln_top is not None)
and (arch_interaction_op is not None)
):
# save arguments
self.ntables = len(ln_emb)
self.m_spa = m_spa
self.proj_size = proj_size
self.use_gpu = use_gpu
self.use_fbgemm_gpu = use_fbgemm_gpu
self.fbgemm_gpu_codegen_pref = fbgemm_gpu_codegen_pref
self.requires_grad = not inference_only
self.ndevices_available = ndevices
self.ndevices_in_use = ndevices
self.output_d = 0
self.add_new_weights_to_params = False
self.arch_interaction_op = arch_interaction_op
self.arch_interaction_itself = arch_interaction_itself
self.sync_dense_params = sync_dense_params and not inference_only
self.loss_threshold = loss_threshold
self.loss_function = loss_function
self.learning_rate = learning_rate
if weighted_pooling is not None and weighted_pooling != "fixed":
self.weighted_pooling = "learned"
else:
self.weighted_pooling = weighted_pooling
# create variables for QR embedding if applicable
self.qr_flag = qr_flag
if self.qr_flag:
self.qr_collisions = qr_collisions
self.qr_operation = qr_operation
self.qr_threshold = qr_threshold
# create variables for MD embedding if applicable
self.md_flag = md_flag
if self.md_flag:
self.md_threshold = md_threshold
# torchbench: comment distributed
# If running distributed, get local slice of embedding tables
# if ext_dist.my_size > 1:
# n_emb = len(ln_emb)
# if n_emb < ext_dist.my_size:
# sys.exit(
# "only (%d) sparse features for (%d) devices, table partitions will fail"
# % (n_emb, ext_dist.my_size)
# )
# self.n_global_emb = n_emb
# self.n_local_emb, self.n_emb_per_rank = ext_dist.get_split_lengths(
# n_emb
# )
# self.local_emb_slice = ext_dist.get_my_slice(n_emb)
# self.local_emb_indices = list(range(n_emb))[self.local_emb_slice]
# create operators
self.emb_l, self.v_W_l = self.create_emb(m_spa, ln_emb, weighted_pooling)
if self.weighted_pooling == "learned":
self.v_W_l = nn.ParameterList(list(map(Parameter, self.v_W_l)))
self.bot_l = self.create_mlp(ln_bot, sigmoid_bot)
self.top_l = self.create_mlp(ln_top, sigmoid_top)
if proj_size > 0:
self.proj_l = project.create_proj(len(ln_emb) + 1, proj_size)
# mlp quantization
self.quantize_mlp_with_bit = quantize_mlp_with_bit
self.use_torch2trt_for_mlp = use_torch2trt_for_mlp
self.quantize_mlp_input_with_half_call = use_gpu and not args.use_torch2trt_for_mlp and args.quantize_mlp_with_bit == 16
# embedding quantization
self.quantize_emb = False
self.emb_l_q = []
self.quantize_bits = 32
# fbgemm_gpu
self.fbgemm_emb_l = []
self.v_W_l_l = [self.v_W_l] if self.weighted_pooling else [None]
self.interact_features_l = []
# specify the loss function
if self.loss_function == "mse":
self.loss_fn = torch.nn.MSELoss(reduction="mean")
elif self.loss_function == "bce":
self.loss_fn = torch.nn.BCELoss(reduction="mean")
elif self.loss_function == "wbce":
self.loss_ws = torch.tensor(
np.fromstring(args.loss_weights, dtype=float, sep="-")
)
self.loss_fn = torch.nn.BCELoss(reduction="none")
else:
sys.exit(
"ERROR: --loss-function=" + self.loss_function + " is not supported"
)
def prepare_parallel_model(self, ndevices):
device_ids = range(ndevices)
# replicate mlp (data parallelism)
self.bot_l_replicas = replicate(self.bot_l, device_ids)
self.top_l_replicas = replicate(self.top_l, device_ids)
# distribute embeddings (model parallelism)
if self.weighted_pooling is not None:
for k, w in enumerate(self.v_W_l):
self.v_W_l[k] = Parameter(
w.to(torch.device("cuda:" + str(k % ndevices)))
)
if not self.use_fbgemm_gpu:
for k, w in enumerate(self.emb_l):
self.emb_l[k] = w.to(torch.device("cuda:" + str(k % ndevices)))
else:
from .fbgemm_embedding import fbgemm_gpu_emb_bag_wrapper
self.fbgemm_emb_l, self.v_W_l_l = zip(
*[
(
fbgemm_gpu_emb_bag_wrapper(
torch.device("cuda:" + str(k)),
self.emb_l[k::ndevices]
if self.emb_l
else self.emb_l_q[k::ndevices],
self.m_spa[k::ndevices]
if isinstance(self.m_spa, list)
else self.m_spa,
self.quantize_bits,
self.learning_rate,
self.fbgemm_gpu_codegen_pref,
self.requires_grad,
),
self.v_W_l[k::ndevices] if self.weighted_pooling else None,
)
for k in range(ndevices)
]
)
self.add_new_weights_to_params = True
self.interact_features_l = [self.nn_module_wrapper() for _ in range(ndevices)]
# nn_module_wrapper is used to call functions concurrently across multiple GPUs via parallel_apply,
# which requires an nn.Module subclass.
class nn_module_wrapper(nn.Module):
def __init__(self):
super(DLRM_Net.nn_module_wrapper, self).__init__()
def forward(self, E, x, ly):
return E(x, ly)
def apply_mlp(self, x, layers):
# approach 1: use ModuleList
# for layer in layers:
# x = layer(x)
# return x
# approach 2: use Sequential container to wrap all layers
return layers(x)
def apply_emb(self, lS_o, lS_i):
# WARNING: notice that we are processing the batch at once. We implicitly
# assume that the data is laid out such that:
# 1. each embedding is indexed with a group of sparse indices,
# corresponding to a single lookup
# 2. for each embedding the lookups are further organized into a batch
# 3. for a list of embedding tables there is a list of batched lookups
if self.use_fbgemm_gpu:
# Deinterleave and reshape to 2d, so items are grouped by device
# per row. Then parallel apply.
ndevices = len(self.fbgemm_emb_l)
lS_o_l = [lS_o[k::ndevices] for k in range(ndevices)]
lS_i_l = [lS_i[k::ndevices] for k in range(ndevices)]
ly = parallel_apply(
self.fbgemm_emb_l, list(zip(lS_o_l, lS_i_l, self.v_W_l_l))
)
# Interleave and flatten to match non-fbgemm_gpu ly format.
ly = [ly[i % ndevices][i // ndevices] for i in range(self.ntables)]
else:
ly = []
for k, sparse_index_group_batch in enumerate(lS_i):
sparse_offset_group_batch = lS_o[k]
# embedding lookup
# We are using EmbeddingBag, which implicitly uses sum operator.
# The embeddings are represented as tall matrices, with sum
# happening vertically across 0 axis, resulting in a row vector
# E = emb_l[k]
if self.v_W_l[k] is not None:
per_sample_weights = self.v_W_l[k].gather(
0, sparse_index_group_batch
)
else:
per_sample_weights = None
if self.quantize_emb:
if self.quantize_bits == 4:
E = ops.quantized.embedding_bag_4bit_rowwise_offsets
elif self.quantize_bits == 8:
E = ops.quantized.embedding_bag_byte_rowwise_offsets
QV = E(
self.emb_l_q[k],
sparse_index_group_batch,
sparse_offset_group_batch,
per_sample_weights=per_sample_weights,
)
ly.append(QV)
else:
E = self.emb_l[k]
V = E(
sparse_index_group_batch,
sparse_offset_group_batch,
per_sample_weights=per_sample_weights,
)
ly.append(V)
# print(ly)
return ly
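# Worked example of the (lS_o, lS_i) layout consumed above (added for clarity):
# for table k with a batch of two samples looking up rows [3, 7] and [1],
#   lS_i[k] = tensor([3, 7, 1])   # all indices for the batch, flattened
#   lS_o[k] = tensor([0, 2])      # per-sample start offsets into lS_i[k]
# so EmbeddingBag sums rows 3 and 7 for sample 0 and row 1 for sample 1,
# producing one pooled embedding row per sample.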
# using quantizing functions from caffe2/aten/src/ATen/native/quantized/cpu
def quantize_embedding(self, bits):
n = len(self.emb_l)
self.emb_l_q = [None] * n
for k in range(n):
if bits == 4:
self.emb_l_q[k] = ops.quantized.embedding_bag_4bit_prepack(
self.emb_l[k].weight
)
elif bits == 8:
self.emb_l_q[k] = ops.quantized.embedding_bag_byte_prepack(
self.emb_l[k].weight
)
elif bits == 16:
self.emb_l_q[k] = self.emb_l[k].half().weight
else:
return
self.emb_l = None
self.quantize_emb = True
self.quantize_bits = bits
def interact_features(self, x, ly):
if self.arch_interaction_op == "dot":
# concatenate dense and sparse features
(batch_size, d) = x.shape
T = torch.cat([x] + ly, dim=1).view((batch_size, -1, d))
# perform a dot product
if self.proj_size > 0:
R = project.project(T, x, self.proj_l)
else:
Z = torch.bmm(T, torch.transpose(T, 1, 2))
# append dense feature with the interactions (into a row vector)
# approach 1: all
# Zflat = Z.view((batch_size, -1))
# approach 2: unique
_, ni, nj = Z.shape
# approach 1: tril_indices
# offset = 0 if self.arch_interaction_itself else -1
# li, lj = torch.tril_indices(ni, nj, offset=offset)
# approach 2: custom
offset = 1 if self.arch_interaction_itself else 0
li = torch.tensor([i for i in range(ni) for j in range(i + offset)])
lj = torch.tensor([j for i in range(nj) for j in range(i + offset)])
Zflat = Z[:, li, lj]
# concatenate dense features and interactions
R = torch.cat([x] + [Zflat], dim=1)
elif self.arch_interaction_op == "cat":
# concatenation features (into a row vector)
R = torch.cat([x] + ly, dim=1)
else:
sys.exit(
"ERROR: --arch-interaction-op="
+ self.arch_interaction_op
+ " is not supported"
)
return R
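# Shape walk-through for the "dot" branch above (added for clarity):
#   x: (B, d) dense output of the bottom MLP; ly: list of num_sparse (B, d) embeddings
#   T = cat -> view: (B, num_fea, d) with num_fea = 1 + num_sparse
#   Z = bmm(T, T^T): (B, num_fea, num_fea) pairwise dot products
#   Zflat keeps the strictly-lower triangle (or lower incl. diagonal when
#       arch_interaction_itself is True): (B, num_fea*(num_fea-1)/2) by default
#   R = cat([x, Zflat]): (B, d + num_fea*(num_fea-1)/2), the top MLP input.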
def forward(self, dense_x, lS_o, lS_i):
# torchbench: only enable sequential forward
return self.sequential_forward(dense_x, lS_o, lS_i)
# if ext_dist.my_size > 1:
# # multi-node multi-device run
# return self.distributed_forward(dense_x, lS_o, lS_i)
# elif self.ndevices_available <= 1:
# # single device run
# return self.sequential_forward(dense_x, lS_o, lS_i)
# else:
# # single-node multi-device run
# return self.parallel_forward(dense_x, lS_o, lS_i)
# torchbench: disable distributed forward
# def distributed_forward(self, dense_x, lS_o, lS_i):
# batch_size = dense_x.size()[0]
# # WARNING: # of ranks must be <= batch size in distributed_forward call
# if batch_size < ext_dist.my_size:
# sys.exit(
# "ERROR: batch_size (%d) must be larger than number of ranks (%d)"
# % (batch_size, ext_dist.my_size)
# )
# if batch_size % ext_dist.my_size != 0:
# sys.exit(
# "ERROR: batch_size %d can not split across %d ranks evenly"
# % (batch_size, ext_dist.my_size)
# )
# dense_x = dense_x[ext_dist.get_my_slice(batch_size)]
# lS_o = lS_o[self.local_emb_slice]
# lS_i = lS_i[self.local_emb_slice]
# if (self.ntables != len(lS_o)) or (self.ntables != len(lS_i)):
# sys.exit(
# "ERROR: corrupted model input detected in distributed_forward call"
# )
# # embeddings
# with record_function("DLRM embedding forward"):
# ly = self.apply_emb(lS_o, lS_i)
# # WARNING: Note that at this point we have the result of the embedding lookup
# # for the entire batch on each rank. We would like to obtain partial results
# # corresponding to all embedding lookups, but part of the batch on each rank.
# # Therefore, matching the distribution of output of bottom mlp, so that both
# # could be used for subsequent interactions on each device.
# if self.ntables != len(ly):
# sys.exit("ERROR: corrupted intermediate result in distributed_forward call")
# a2a_req = ext_dist.alltoall(ly, self.n_emb_per_rank)
# with record_function("DLRM bottom mlp forward"):
# x = self.apply_mlp(dense_x, self.bot_l)
# ly = a2a_req.wait()
# ly = list(ly)
# # interactions
# with record_function("DLRM interaction forward"):
# z = self.interact_features(x, ly)
# # top mlp
# with record_function("DLRM top mlp forward"):
# # quantize top mlp's input to fp16 if PyTorch's built-in fp16 quantization is used.
# if self.quantize_mlp_input_with_half_call:
# z = z.half()
# p = self.apply_mlp(z, self.top_l)
# # clamp output if needed
# if 0.0 < self.loss_threshold and self.loss_threshold < 1.0:
# z = torch.clamp(p, min=self.loss_threshold, max=(1.0 - self.loss_threshold))
# else:
# z = p
# return z
def sequential_forward(self, dense_x, lS_o, lS_i):
# process dense features (using bottom mlp), resulting in a row vector
x = self.apply_mlp(dense_x, self.bot_l)
# debug prints
# print("intermediate")
# print(x.detach().cpu().numpy())
# process sparse features (using embeddings), resulting in a list of row vectors
ly = self.apply_emb(lS_o, lS_i)
# for y in ly:
# print(y.detach().cpu().numpy())
# interact features (dense and sparse)
z = self.interact_features(x, ly)
# print(z.detach().cpu().numpy())
# quantize top mlp's input to fp16 if PyTorch's built-in fp16 quantization is used.
if self.quantize_mlp_input_with_half_call:
z = z.half()
# obtain probability of a click (using top mlp)
p = self.apply_mlp(z, self.top_l)
# clamp output if needed
if 0.0 < self.loss_threshold and self.loss_threshold < 1.0:
z = torch.clamp(p, min=self.loss_threshold, max=(1.0 - self.loss_threshold))
else:
z = p
return z
def parallel_forward(self, dense_x, lS_o, lS_i):
### prepare model (overwrite) ###
# WARNING: # of devices must be >= batch size in parallel_forward call
batch_size = dense_x.size()[0]
ndevices = min(self.ndevices_available, batch_size, self.ntables)
device_ids = range(ndevices)
# WARNING: must redistribute the model if the mini-batch size changes (this is common
# for the last mini-batch, when the dataset size is not evenly divisible by the batch size)
if self.ndevices_in_use != ndevices:
self.ndevices_in_use = ndevices
self.prepare_parallel_model(ndevices)
elif self.sync_dense_params:
# When training, replicate the new/updated mlp weights each iteration.
# For inference-only, this code should never run.
self.bot_l_replicas = replicate(self.bot_l, device_ids)
self.top_l_replicas = replicate(self.top_l, device_ids)
### prepare input (overwrite) ###
# scatter dense features (data parallelism)
# print(dense_x.device)
dense_x = scatter(dense_x, device_ids, dim=0)
# distribute sparse features (model parallelism)
if (self.ntables != len(lS_o)) or (self.ntables != len(lS_i)):
sys.exit("ERROR: corrupted model input detected in parallel_forward call")
lS_o = [
lS_o[k].to(torch.device("cuda:" + str(k % ndevices)))
for k in range(self.ntables)
]
lS_i = [
lS_i[k].to(torch.device("cuda:" + str(k % ndevices)))
for k in range(self.ntables)
]
### compute results in parallel ###
# bottom mlp
# WARNING: Note that the self.bot_l is a list of bottom mlp modules
# that have been replicated across devices, while dense_x is a tuple of dense
# inputs that has been scattered across devices on the first (batch) dimension.
# The output is a list of tensors scattered across devices according to the
# distribution of dense_x.
x = parallel_apply(self.bot_l_replicas, dense_x, None, device_ids)
# debug prints
# print(x)
# embeddings
ly = self.apply_emb(lS_o, lS_i)
# debug prints
# print(ly)
# butterfly shuffle (implemented inefficiently for now)
# WARNING: Note that at this point we have the result of the embedding lookup
# for the entire batch on each device. We would like to obtain partial results
# corresponding to all embedding lookups, but part of the batch on each device.
# Therefore, matching the distribution of output of bottom mlp, so that both
# could be used for subsequent interactions on each device.
if self.ntables != len(ly):
sys.exit("ERROR: corrupted intermediate result in parallel_forward call")
t_list = [scatter(ly[k], device_ids, dim=0) for k in range(self.ntables)]
# adjust the list to be ordered per device
ly = list(map(lambda y: list(y), zip(*t_list)))
# debug prints
# print(ly)
# interactions
z = parallel_apply(self.interact_features_l, list(zip(itertools.repeat(self.interact_features),x,ly)))
# debug prints
# print(z)
if self.quantize_mlp_input_with_half_call:
z = [tens.half() for tens in z]
# top mlp
# WARNING: Note that the self.top_l is a list of top mlp modules that
# have been replicated across devices, while z is a list of interaction results
# that by construction are scattered across devices on the first (batch) dim.
# The output is a list of tensors scattered across devices according to the
# distribution of z.
p = parallel_apply(self.top_l_replicas, z, None, device_ids)
### gather the distributed results ###
p0 = gather(p, self.output_d, dim=0)
# clamp output if needed
if 0.0 < self.loss_threshold and self.loss_threshold < 1.0:
z0 = torch.clamp(
p0, min=self.loss_threshold, max=(1.0 - self.loss_threshold)
)
else:
z0 = p0
return z0
def print_weights(self):
if self.use_fbgemm_gpu and len(self.fbgemm_emb_l):
ntables_l = [
len(e.fbgemm_gpu_emb_bag.embedding_specs) for e in self.fbgemm_emb_l
]
for j in range(ntables_l[0] + 1):
for k, e in enumerate(self.fbgemm_emb_l):
if j < ntables_l[k]:
print(
e.fbgemm_gpu_emb_bag.split_embedding_weights()[j]
.detach()
.cpu()
.numpy()
)
elif self.quantize_bits != 32:
for e in self.emb_l_q:
print(e.data.detach().cpu().numpy())
else: # if self.emb_l:
for param in self.emb_l.parameters():
print(param.detach().cpu().numpy())
if isinstance(self.v_W_l, nn.ParameterList):
for param in self.v_W_l.parameters():
print(param.detach().cpu().numpy())
for param in self.bot_l.parameters():
print(param.detach().cpu().numpy())
for param in self.top_l.parameters():
print(param.detach().cpu().numpy()) |
import torch
# The following function is a wrapper to avoid checking this multiple times in the
# loop below.
def unpack_batch(b, device):
# Experiment with unweighted samples
return b[0], b[1], b[2], b[3], torch.ones(b[3].size()).to(device), None
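# The returned tuple is (X dense features, lS_o per-table offsets, lS_i per-table
# indices, T targets, W per-sample weights -- all ones here, i.e. unweighted --
# and CBPP, which is unused and always None in this torchbench path).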
def dlrm_wrap(dlrm, X, lS_o, lS_i, use_gpu, device, ndevices=1):
if dlrm.quantize_mlp_input_with_half_call:
X = X.half()
if use_gpu:
# lS_i can be either a list of tensors or a stacked tensor.
# Handle each case below:
if ndevices == 1:
lS_i = (
[S_i.to(device) for S_i in lS_i]
if isinstance(lS_i, list)
else lS_i.to(device)
)
lS_o = (
[S_o.to(device) for S_o in lS_o]
if isinstance(lS_o, list)
else lS_o.to(device)
)
return dlrm(X.to(device), lS_o, lS_i)
def loss_fn_wrap(dlrm, args, Z, T, use_gpu, device):
if args.loss_function == "mse" or args.loss_function == "bce":
return dlrm.loss_fn(Z, T.to(device))
elif args.loss_function == "wbce":
loss_ws_ = dlrm.loss_ws[T.data.view(-1).long()].view_as(T).to(device)
loss_fn_ = dlrm.loss_fn(Z, T.to(device))
loss_sc_ = loss_ws_ * loss_fn_
return loss_sc_.mean()
def prefetch(dl, device):
out = []
for inputBatch in dl:
X, lS_o, lS_i, T = inputBatch
lS_i = (
[S_i.to(device) for S_i in lS_i]
if isinstance(lS_i, list)
else lS_i.to(device)
)
lS_o = (
[S_o.to(device) for S_o in lS_o]
if isinstance(lS_o, list)
else lS_o.to(device)
)
out.append(tuple([X.to(device), lS_o, lS_i, T]))
return out |
# Currently, this file is not used, because torchbench doesn't support fbgemm embedding yet;
# Note that FAMBench does support it.
import torch.nn as nn
import torch
import os
import sys
import numpy as np
from torchbenchmark import REPO_PATH
# This file assumes fbgemm_gpu is installed
import fbgemm_gpu
from fbgemm_gpu import split_table_batched_embeddings_ops
from fbgemm_gpu.split_table_batched_embeddings_ops import (
CacheAlgorithm,
PoolingMode,
OptimType,
SparseType,
SplitTableBatchedEmbeddingBagsCodegen,
IntNBitTableBatchedEmbeddingBagsCodegen,
)
# mixed-dimension trick
from tricks.md_embedding_bag import PrEmbeddingBag
# quantize_fbgemm_gpu_embedding_bag is partially lifted from
# fbgemm_gpu/test/split_embedding_inference_converter.py, def _quantize_split_embs.
# Converts SplitTableBatchedEmbeddingBagsCodegen to IntNBitTableBatchedEmbeddingBagsCodegen
def quantize_fbgemm_gpu_embedding_bag(model, quantize_type, device):
embedding_specs = []
if device.type == "cpu":
emb_location = split_table_batched_embeddings_ops.EmbeddingLocation.HOST
else:
emb_location = split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE
for (E, D, _, _) in model.embedding_specs:
weights_ty = quantize_type
if D % weights_ty.align_size() != 0:
assert D % 4 == 0
weights_ty = (
SparseType.FP16
) # fall back to FP16 if dimension couldn't be aligned with the required size
embedding_specs.append(("", E, D, weights_ty, emb_location))
q_model = (
split_table_batched_embeddings_ops.IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=embedding_specs,
pooling_mode=model.pooling_mode,
device=device,
)
)
q_model.initialize_weights()
for t, (_, _, _, weight_ty, _) in enumerate(embedding_specs):
if weight_ty == SparseType.FP16:
original_weight = model.split_embedding_weights()[t]
q_weight = original_weight.half()
weights = torch.tensor(q_weight.cpu().numpy().view(np.uint8))
q_model.split_embedding_weights()[t][0].data.copy_(weights)
elif weight_ty == SparseType.INT8:
original_weight = model.split_embedding_weights()[t]
q_weight = torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(
original_weight
)
weights = q_weight[:, :-8]
scale_shift = torch.tensor(
q_weight[:, -8:]
.contiguous()
.cpu()
.numpy()
.view(np.float32)
.astype(np.float16)
.view(np.uint8)
)
q_model.split_embedding_weights()[t][0].data.copy_(weights)
q_model.split_embedding_weights()[t][1].data.copy_(scale_shift)
elif weight_ty == SparseType.INT4 or weight_ty == SparseType.INT2:
original_weight = model.split_embedding_weights()[t]
q_weight = torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
original_weight,
bit_rate=quantize_type.bit_rate(),
)
weights = q_weight[:, :-4]
scale_shift = torch.tensor(
q_weight[:, -4:].contiguous().cpu().numpy().view(np.uint8)
)
q_model.split_embedding_weights()[t][0].data.copy_(weights)
q_model.split_embedding_weights()[t][1].data.copy_(scale_shift)
return q_model
def create_fbgemm_gpu_emb_bag(
device,
emb_l,
m_spa,
quantize_bits,
learning_rate,
codegen_preference=None,
requires_grad=True,
):
if isinstance(emb_l[0], PrEmbeddingBag):
emb_l = [e.embs for e in emb_l]
if isinstance(emb_l[0], nn.EmbeddingBag):
emb_l = [e.weight for e in emb_l]
Es = [e.shape[0] for e in emb_l]
if isinstance(m_spa, list):
Ds = m_spa
else:
Ds = [m_spa for _ in emb_l]
if device.type == "cpu":
emb_location = split_table_batched_embeddings_ops.EmbeddingLocation.HOST
compute_device = split_table_batched_embeddings_ops.ComputeDevice.CPU
else:
emb_location = split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE
compute_device = split_table_batched_embeddings_ops.ComputeDevice.CUDA
pooling_mode = PoolingMode.SUM
cache_algorithm = CacheAlgorithm.LRU
sparse_type_dict = {
4: SparseType.INT4,
8: SparseType.INT8,
16: SparseType.FP16,
32: SparseType.FP32,
}
codegen_type_dict = {
4: "IntN",
8: "Split" if codegen_preference != "IntN" else "IntN",
16: "Split" if codegen_preference != "IntN" else "IntN",
32: "Split",
}
codegen_type = codegen_type_dict[quantize_bits]
quantize_type = sparse_type_dict[quantize_bits]
if codegen_type == "IntN":
# Create non-quantized model and then call quantize_fbgemm_gpu_embedding_bag
fbgemm_gpu_emb_bag = SplitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
E, # num of rows in the table
D, # num of columns in the table
split_table_batched_embeddings_ops.EmbeddingLocation.HOST,
split_table_batched_embeddings_ops.ComputeDevice.CPU,
)
for (E, D) in zip(Es, Ds)
],
weights_precision=SparseType.FP32,
optimizer=OptimType.EXACT_SGD,
learning_rate=learning_rate,
cache_algorithm=cache_algorithm,
pooling_mode=pooling_mode,
).to(device)
if quantize_type == quantize_type.FP16:
weights = fbgemm_gpu_emb_bag.split_embedding_weights()
for i, emb in enumerate(weights):
emb.data.copy_(emb_l[i])
elif quantize_type == quantize_type.INT8:
# copy quantized values upsampled/recasted to FP32
for i in range(len(Es)):
fbgemm_gpu_emb_bag.split_embedding_weights()[i].data.copy_(
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(emb_l[i])
)
elif quantize_type == quantize_type.INT4:
# copy quantized values upsampled/recasted to FP32
for i in range(len(Es)):
fbgemm_gpu_emb_bag.split_embedding_weights()[i].data.copy_(
torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToFloat(
emb_l[i],
bit_rate=quantize_type.bit_rate(),
)
)
fbgemm_gpu_emb_bag = quantize_fbgemm_gpu_embedding_bag(
fbgemm_gpu_emb_bag, quantize_type, device
)
else:
fbgemm_gpu_emb_bag = SplitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
E, # num of rows in the table
D, # num of columns in the table
emb_location,
compute_device,
)
for (E, D) in zip(Es, Ds)
],
weights_precision=quantize_type,
optimizer=OptimType.EXACT_SGD,
learning_rate=learning_rate,
cache_algorithm=cache_algorithm,
pooling_mode=pooling_mode,
).to(device)
weights = fbgemm_gpu_emb_bag.split_embedding_weights()
for i, emb in enumerate(weights):
emb.data.copy_(emb_l[i])
if not requires_grad:
# a bare torch.no_grad() call is a no-op (it only returns a context manager);
# disabling autograd globally is what actually takes effect here
torch.set_grad_enabled(False)
return fbgemm_gpu_emb_bag
# The purpose of this wrapper is to encapsulate the format conversions to/from fbgemm_gpu
# so parallel_apply() executes the format-in -> fbgemm_gpu op -> format-out instructions
# for each respective GPU in parallel.
class fbgemm_gpu_emb_bag_wrapper(nn.Module):
def __init__(
self,
device,
emb_l,
m_spa,
quantize_bits,
learning_rate,
codegen_preference,
requires_grad,
):
super(fbgemm_gpu_emb_bag_wrapper, self).__init__()
self.fbgemm_gpu_emb_bag = create_fbgemm_gpu_emb_bag(
device,
emb_l,
m_spa,
quantize_bits,
learning_rate,
codegen_preference,
requires_grad,
)
self.device = device
self.m_spa = m_spa
# create cumsum array for mixed dimension support
if isinstance(m_spa, list):
self.m_spa_cumsum = np.cumsum([0] + m_spa)
if not requires_grad:
# a bare torch.no_grad() call is a no-op (it only returns a context manager);
# disabling autograd globally is what actually takes effect here
torch.set_grad_enabled(False)
def forward(self, lS_o, lS_i, v_W_l=None):
# convert offsets to fbgemm format
lengths_list = list(map(len, lS_i))
indices_lengths_cumsum = np.cumsum([0] + lengths_list)
if isinstance(lS_o, list):
lS_o = torch.stack(lS_o)
lS_o = lS_o.to(self.device)
lS_o += torch.from_numpy(indices_lengths_cumsum[:-1, np.newaxis]).to(
self.device
)
numel = torch.tensor([indices_lengths_cumsum[-1]], dtype=torch.long).to(
self.device
)
lS_o = torch.cat((lS_o.flatten(), numel))
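# Worked example of the offset conversion above (added for clarity): with two
# tables whose index lists have lengths [3, 2], indices_lengths_cumsum = [0, 3, 5];
# per-table offsets [[0, 2], [0, 1]] become [[0, 2], [3, 4]] after the shift, and
# flattening plus appending numel yields the single fbgemm-style offsets tensor
# [0, 2, 3, 4, 5] over the concatenated indices.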
# create per_sample_weights
if v_W_l:
per_sample_weights = torch.cat(
[a.gather(0, b) for a, b in zip(v_W_l, lS_i)]
)
else:
per_sample_weights = None
# convert indices to fbgemm_gpu format
if isinstance(lS_i, torch.Tensor):
lS_i = [lS_i]
lS_i = torch.cat(lS_i, dim=0).to(self.device)
if isinstance(self.fbgemm_gpu_emb_bag, IntNBitTableBatchedEmbeddingBagsCodegen):
lS_o = lS_o.int()
lS_i = lS_i.int()
# gpu embedding bag op
ly = self.fbgemm_gpu_emb_bag(lS_i, lS_o, per_sample_weights)
# convert the results to the next layer's input format.
if isinstance(self.m_spa, list):
# handle mixed dimensions case.
ly = [
ly[:, s:e]
for (s, e) in zip(self.m_spa_cumsum[:-1], self.m_spa_cumsum[1:])
]
else:
# handle case in which all tables share the same column dimension.
cols = self.m_spa
ntables = len(self.fbgemm_gpu_emb_bag.embedding_specs)
ly = ly.reshape(-1, ntables, cols).swapaxes(0, 1)
ly = list(ly)
return ly |
# Original source:
# https://github.com/facebookresearch/FAMBench/blob/a0f12ca4fe8973f4cc65d18b51ce3aa94ceec0ac/benchmarks/dlrm/ootb/dlrm_s_pytorch.py
import sys
import torch
import argparse
def dash_separated_ints(value):
vals = value.split("-")
for val in vals:
try:
int(val)
except ValueError:
raise argparse.ArgumentTypeError(
"%s is not a valid dash separated list of ints" % value
)
return value
def dash_separated_floats(value):
vals = value.split("-")
for val in vals:
try:
float(val)
except ValueError:
raise argparse.ArgumentTypeError(
"%s is not a valid dash separated list of floats" % value
)
return value
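# Example (added for clarity): dash_separated_ints("965-965-965") returns the string
# unchanged, while dash_separated_ints("965-abc") raises argparse.ArgumentTypeError;
# dash_separated_floats behaves the same way for float tokens such as "1.0-1.0".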
def validate_fambench_args(args):
if args.weighted_pooling is not None:
if args.qr_flag:
sys.exit("ERROR: quotient remainder with weighted pooling is not supported")
if args.md_flag:
sys.exit("ERROR: mixed dimensions with weighted pooling is not supported")
if args.quantize_emb_with_bit in [4, 8]:
if args.qr_flag:
sys.exit(
"ERROR: 4 and 8-bit quantization with quotient remainder is not supported"
)
if args.md_flag:
sys.exit(
"ERROR: 4 and 8-bit quantization with mixed dimensions is not supported"
)
if args.quantize_emb_with_bit in [4, 8, 16] and (
not args.use_fbgemm_gpu
):
try:
import fbgemm_gpu
except ImportError:
sys.exit("Failed to import fbgemm_gpu module.\n")
extra_info = ""
if not args.use_fbgemm_gpu:
extra_info += "--use-fbgemm-gpu not set. "
if not args.inference_only:
sys.exit(
"ERROR: Training quantized embeddings requires fbgemm_gpu. "
+ extra_info
)
elif args.use_gpu:
sys.exit(
"ERROR: Quantized embeddings on GPU requires fbgemm_gpu. " + extra_info
)
elif args.quantize_emb_with_bit == 16:
sys.exit(
"ERROR: 16-bit quantized embeddings requires fbgemm_gpu. " + extra_info
)
assert args.quantize_emb_with_bit in [
4,
8,
16,
32,
], "only support 4/8/16/32-bit but got {}".format(args.quantize_emb_with_bit)
if args.use_gpu:
assert torch.cuda.is_available(), "No cuda device is available."
# validations by torchbench (distributed is not supported)
# we don't support fbgemm_gpu
assert not args.use_fbgemm_gpu, "fbgemm_gpu is not supported."
# we don't support torch2trt for mlp
assert not args.use_torch2trt_for_mlp, "torch2trt for mlp is not supported."
# we only support random dataset for now
assert args.data_generation == "random", f"only random data generator is supported right now, but get {args.data_generation}."
def parse_fambench_args(args):
### parse arguments ###
parser = argparse.ArgumentParser(
description="Train Deep Learning Recommendation Model (DLRM)"
)
# model related parameters
parser.add_argument("--arch-sparse-feature-size", type=int, default=2)
parser.add_argument(
"--arch-embedding-size", type=dash_separated_ints, default="4-3-2"
)
parser.add_argument("--arch-project-size", type=int, default=0)
# j will be replaced with the table number
parser.add_argument("--arch-mlp-bot", type=dash_separated_ints, default="4-3-2")
parser.add_argument("--arch-mlp-top", type=dash_separated_ints, default="4-2-1")
parser.add_argument(
"--arch-interaction-op", type=str, choices=["dot", "cat"], default="dot"
)
parser.add_argument("--arch-interaction-itself", action="store_true", default=False)
parser.add_argument(
"--weighted-pooling", type=str, choices=["fixed", "learned", None], default=None
)
# embedding table options
parser.add_argument("--md-flag", action="store_true", default=False)
parser.add_argument("--md-threshold", type=int, default=200)
parser.add_argument("--md-temperature", type=float, default=0.3)
parser.add_argument("--md-round-dims", action="store_true", default=False)
parser.add_argument("--qr-flag", action="store_true", default=False)
parser.add_argument("--qr-threshold", type=int, default=200)
parser.add_argument("--qr-operation", type=str, default="mult")
parser.add_argument("--qr-collisions", type=int, default=4)
# activations and loss
parser.add_argument("--activation-function", type=str, default="relu")
parser.add_argument("--loss-function", type=str, default="mse") # or bce or wbce
parser.add_argument(
"--loss-weights", type=dash_separated_floats, default="1.0-1.0"
) # for wbce
parser.add_argument("--loss-threshold", type=float, default=0.0) # 1.0e-7
parser.add_argument("--round-targets", type=bool, default=False)
# data
parser.add_argument("--data-size", type=int, default=1)
parser.add_argument("--num-batches", type=int, default=0)
parser.add_argument(
"--data-generation", type=str, default="random"
) # synthetic or dataset
parser.add_argument(
"--rand-data-dist", type=str, default="uniform"
) # uniform or gaussian
parser.add_argument("--rand-data-min", type=float, default=0)
parser.add_argument("--rand-data-max", type=float, default=1)
parser.add_argument("--rand-data-mu", type=float, default=-1)
parser.add_argument("--rand-data-sigma", type=float, default=1)
parser.add_argument("--data-trace-file", type=str, default="./input/dist_emb_j.log")
parser.add_argument("--data-set", type=str, default="kaggle") # or terabyte
parser.add_argument("--raw-data-file", type=str, default="")
parser.add_argument("--processed-data-file", type=str, default="")
parser.add_argument("--data-randomize", type=str, default="total") # or day or none
parser.add_argument("--data-trace-enable-padding", type=bool, default=False)
parser.add_argument("--max-ind-range", type=int, default=-1)
parser.add_argument("--data-sub-sample-rate", type=float, default=0.0) # in [0, 1]
parser.add_argument("--num-indices-per-lookup", type=int, default=10)
parser.add_argument("--num-indices-per-lookup-fixed", type=bool, default=False)
parser.add_argument("--num-workers", type=int, default=0)
parser.add_argument("--memory-map", action="store_true", default=False)
# training
parser.add_argument("--mini-batch-size", type=int, default=1)
parser.add_argument("--nepochs", type=int, default=1)
parser.add_argument("--learning-rate", type=float, default=0.01)
parser.add_argument("--print-precision", type=int, default=5)
parser.add_argument("--numpy-rand-seed", type=int, default=123)
parser.add_argument("--sync-dense-params", type=bool, default=True)
parser.add_argument("--optimizer", type=str, default="sgd")
parser.add_argument(
"--dataset-multiprocessing",
action="store_true",
default=False,
help="The Kaggle dataset can be multiprocessed in an environment \
with more than 7 CPU cores and more than 20 GB of memory. \n \
The Terabyte dataset can be multiprocessed in an environment \
with more than 24 CPU cores and at least 1 TB of memory.",
)
# inference
parser.add_argument("--inference-only", action="store_true", default=False)
# quantize
parser.add_argument("--quantize-mlp-with-bit", type=int, default=32)
parser.add_argument("--quantize-emb-with-bit", type=int, default=32)
# onnx
parser.add_argument("--save-onnx", action="store_true", default=False)
# gpu
parser.add_argument("--use-gpu", action="store_true", default=False)
parser.add_argument("--use-fbgemm-gpu", action="store_true", default=False)
parser.add_argument(
"--fbgemm-gpu-codegen-pref",
type=str,
choices=["Split", "IntN"],
default="Split",
)
# torch2trt
parser.add_argument("--use-torch2trt-for-mlp", action="store_true", default=False)
# distributed
parser.add_argument("--local_rank", type=int, default=-1)
parser.add_argument("--dist-backend", type=str, default="")
# debugging and profiling
parser.add_argument("--print-freq", type=int, default=1)
parser.add_argument("--test-freq", type=int, default=-1)
parser.add_argument("--test-mini-batch-size", type=int, default=-1)
parser.add_argument("--test-num-workers", type=int, default=-1)
parser.add_argument("--print-time", action="store_true", default=False)
parser.add_argument("--print-wall-time", action="store_true", default=False)
parser.add_argument("--print-accumulated-time", action="store_true", default=False)
parser.add_argument("--debug-mode", action="store_true", default=False)
parser.add_argument("--enable-profiling", action="store_true", default=False)
parser.add_argument("--plot-compute-graph", action="store_true", default=False)
parser.add_argument("--tensor-board-filename", type=str, default="run_kaggle_pt")
# store/load model
parser.add_argument("--save-model", type=str, default="")
parser.add_argument("--load-model", type=str, default="")
# mlperf logging (disables other output and stops early)
parser.add_argument("--mlperf-logging", action="store_true", default=False)
# stop at target accuracy Kaggle 0.789, Terabyte (sub-sampled=0.875) 0.8107
parser.add_argument("--mlperf-acc-threshold", type=float, default=0.0)
# stop at target AUC Terabyte (no subsampling) 0.8025
parser.add_argument("--mlperf-auc-threshold", type=float, default=0.0)
parser.add_argument("--mlperf-bin-loader", action="store_true", default=False)
parser.add_argument("--mlperf-bin-shuffle", action="store_true", default=False)
# mlperf gradient accumulation iterations
parser.add_argument("--mlperf-grad-accum-iter", type=int, default=1)
# LR policy
parser.add_argument("--lr-num-warmup-steps", type=int, default=0)
parser.add_argument("--lr-decay-start-step", type=int, default=0)
parser.add_argument("--lr-num-decay-steps", type=int, default=0)
parser.add_argument("--precache-ml-data", type=int, nargs='?', default=None, const=sys.maxsize)
parser.add_argument("--warmup-steps", type=int, default=0)
# FB5 Logging
parser.add_argument("--fb5logger", type=str, default=None)
parser.add_argument("--fb5config", type=str, default="tiny")
args = parser.parse_args(args)
return args |
import os
import sys
import torch
import subprocess
from torchbenchmark import REPO_PATH
def update_fambench_submodule():
"Update FAMBench submodule of the benchmark repo"
update_command = ["git", "submodule", "update",
"--init", "--recursive", os.path.join("submodules","FAMBench")]
subprocess.check_call(update_command, cwd=REPO_PATH)
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == "__main__":
update_fambench_submodule()
pip_install_requirements()
|
import torch
import sys
import numpy as np
# data generation
import dlrm_data_pytorch as dp
def prep_data(args):
ln_bot = np.fromstring(args.arch_mlp_bot, dtype=int, sep="-")
if args.data_generation == "dataset":
train_data, train_ld, test_data, test_ld = dp.make_criteo_data_and_loaders(args)
table_feature_map = {idx: idx for idx in range(len(train_data.counts))}
nbatches = args.num_batches if args.num_batches > 0 else len(train_ld)
nbatches_test = len(test_ld)
ln_emb = train_data.counts
# enforce maximum limit on number of vectors per embedding
if args.max_ind_range > 0:
ln_emb = np.array(
list(
map(
lambda x: x if x < args.max_ind_range else args.max_ind_range,
ln_emb,
)
)
)
else:
ln_emb = np.array(ln_emb)
m_den = train_data.m_den
ln_bot[0] = m_den
else:
# input and target at random
ln_emb = np.fromstring(args.arch_embedding_size, dtype=int, sep="-")
m_den = ln_bot[0]
train_data, train_ld, test_data, test_ld = dp.make_random_data_and_loader(
args, ln_emb, m_den, cache_size=args.precache_ml_data
)
nbatches = args.num_batches if args.num_batches > 0 else len(train_ld)
nbatches_test = len(test_ld)
nbatches_in_use = nbatches_test if args.inference_only else nbatches
assert nbatches_in_use > args.warmup_steps, (f"Change --warmup-steps={args.warmup_steps} to be lower than {nbatches_in_use}.")
args.ln_emb = ln_emb.tolist()
### parse command line arguments ###
m_spa = args.arch_sparse_feature_size
ln_emb = np.asarray(ln_emb)
num_fea = ln_emb.size + 1 # num sparse + num dense features
if args.use_fbgemm_gpu:
assert m_spa % 4 == 0, (
f"{m_spa} % 4 is not 0, but fbgemm_gpu requires the embedding dim "
+ "(--arch-sparse-feature-size number) to be evenly divisible by 4."
)
m_den_out = ln_bot[ln_bot.size - 1]
if args.arch_interaction_op == "dot":
# approach 1: all
# num_int = num_fea * num_fea + m_den_out
# approach 2: unique
if args.arch_project_size > 0:
num_int = num_fea * args.arch_project_size + m_den_out
else:
if args.arch_interaction_itself:
num_int = (num_fea * (num_fea + 1)) // 2 + m_den_out
else:
num_int = (num_fea * (num_fea - 1)) // 2 + m_den_out
elif args.arch_interaction_op == "cat":
num_int = num_fea * m_den_out
else:
sys.exit(
"ERROR: --arch-interaction-op="
+ args.arch_interaction_op
+ " is not supported"
)
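# Example of the num_int arithmetic above (added for clarity): with 3 sparse
# features plus 1 dense feature (num_fea = 4), no projection, no self-interaction,
# and m_den_out = 8, num_int = 4*3/2 + 8 = 14, which must equal ln_top[0] below.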
arch_mlp_top_adjusted = str(num_int) + "-" + args.arch_mlp_top
ln_top = np.fromstring(arch_mlp_top_adjusted, dtype=int, sep="-")
# sanity check: feature sizes and mlp dimensions must match
if m_den != ln_bot[0]:
sys.exit(
"ERROR: arch-dense-feature-size "
+ str(m_den)
+ " does not match first dim of bottom mlp "
+ str(ln_bot[0])
)
if args.qr_flag:
if args.qr_operation == "concat" and 2 * m_spa != m_den_out:
sys.exit(
"ERROR: 2 arch-sparse-feature-size "
+ str(2 * m_spa)
+ " does not match last dim of bottom mlp "
+ str(m_den_out)
+ " (note that the last dim of bottom mlp must be 2x the embedding dim)"
)
if args.qr_operation != "concat" and m_spa != m_den_out:
sys.exit(
"ERROR: arch-sparse-feature-size "
+ str(m_spa)
+ " does not match last dim of bottom mlp "
+ str(m_den_out)
)
else:
if m_spa != m_den_out:
sys.exit(
"ERROR: arch-sparse-feature-size "
+ str(m_spa)
+ " does not match last dim of bottom mlp "
+ str(m_den_out)
)
if num_int != ln_top[0]:
sys.exit(
"ERROR: # of feature interactions "
+ str(num_int)
+ " does not match first dimension of top mlp "
+ str(ln_top[0])
)
# assign mixed dimensions if applicable
if args.md_flag:
m_spa = md_solver(
torch.tensor(ln_emb),
args.md_temperature, # alpha
d0=m_spa,
round_dim=args.md_round_dims,
).tolist()
if args.use_fbgemm_gpu:
for m in m_spa:
assert m % 4 == 0, (
"Found an incompatible embedding dim in m_spa. "
+ f"{m} % 4 is not 0, but fbgemm_gpu requires the "
+ "embedding dim to be evenly divisible by 4."
)
return ln_bot, ln_emb, ln_top, m_spa, train_ld, test_ld |
import torch
# OSS import
try:
# pyre-ignore[21]
# @manual=//ai_codesign/benchmarks/dlrm/torchrec_dlrm/data:dlrm_dataloader
from .data.dlrm_dataloader import get_dataloader
except ImportError:
pass
import itertools
import os
from pyre_extensions import none_throws
from torch import distributed as dist
from torchbenchmark.tasks import RECOMMENDATION
from torchrec import EmbeddingBagCollection
from torchrec.datasets.criteo import DEFAULT_CAT_NAMES, DEFAULT_INT_NAMES
from torchrec.distributed import TrainPipelineSparseDist
from torchrec.distributed.shard import shard_modules
from torchrec.models.dlrm import DLRM, DLRM_DCN, DLRM_Projection, DLRMTrain
from torchrec.modules.embedding_configs import EmbeddingBagConfig
from torchrec.optim.apply_optimizer_in_backward import apply_optimizer_in_backward
from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizerWrapper
from torchrec.optim.optimizers import in_backward_optimizer_filter
from ...util.model import BenchmarkModel
from .args import InteractionType, parse_args
class Model(BenchmarkModel):
task = RECOMMENDATION.RECOMMENDATION
DEFAULT_TRAIN_BSIZE = 1024
DEFAULT_EVAL_BSIZE = 1024
CANNOT_SET_CUSTOM_OPTIMIZER = True
# Deepcopy will OOM in correctness testing
DEEPCOPY = False
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
args = parse_args(self.extra_args)
backend = "nccl" if self.device == "cuda" else "gloo"
device = torch.device(self.device)
os.environ["RANK"] = "0"
os.environ["WORLD_SIZE"] = "1"
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "29500"
if not dist.is_initialized():
dist.init_process_group(backend=backend)
# initialize example data
if self.test == "train":
args.batch_size = self.batch_size
loader = get_dataloader(args, backend, "train")
if self.test == "eval":
args.test_batch_size = self.batch_size
loader = get_dataloader(args, backend, "test")
self.iterator = itertools.cycle(iter(loader))
self.example_inputs = next(self.iterator).to(device)
# parse the args
args.dense_arch_layer_sizes = [int(x) for x in args.dense_arch_layer_sizes.split(',') if x.strip().isdigit()]
args.over_arch_layer_sizes = [int(x) for x in args.over_arch_layer_sizes.split(',') if x.strip().isdigit()]
args.interaction_branch1_layer_sizes = [int(x) for x in args.interaction_branch1_layer_sizes.split(',') if x.strip().isdigit()]
args.interaction_branch2_layer_sizes = [int(x) for x in args.interaction_branch2_layer_sizes.split(',') if x.strip().isdigit()]
assert args.in_memory_binary_criteo_path is None and args.synthetic_multi_hot_criteo_path is None, \
    "Torchbench only supports random data inputs."
eb_configs = [
EmbeddingBagConfig(
name=f"t_{feature_name}",
embedding_dim=args.embedding_dim,
num_embeddings=none_throws(args.num_embeddings_per_feature)[feature_idx]
if args.num_embeddings is None
else args.num_embeddings,
feature_names=[feature_name],
)
for feature_idx, feature_name in enumerate(DEFAULT_CAT_NAMES)
]
dlrm_model = DLRM_DCN(
embedding_bag_collection=EmbeddingBagCollection(
tables=eb_configs, device=device
),
dense_in_features=len(DEFAULT_INT_NAMES),
dense_arch_layer_sizes=args.dense_arch_layer_sizes,
over_arch_layer_sizes=args.over_arch_layer_sizes,
dcn_num_layers=args.dcn_num_layers,
dcn_low_rank_dim=args.dcn_low_rank_dim,
dense_device=device,
)
train_model = DLRMTrain(dlrm_model)
# This will apply the Adagrad optimizer in the backward pass for the embeddings (sparse_arch). This means that
# the optimizer update will be applied in the backward pass, in this case through a fused op.
# TorchRec will use the FBGEMM implementation of EXACT_ADAGRAD. For GPU devices, a fused CUDA kernel is invoked. For CPU, FBGEMM_GPU invokes CPU kernels
# https://github.com/pytorch/FBGEMM/blob/2cb8b0dff3e67f9a009c4299defbd6b99cc12b8f/fbgemm_gpu/fbgemm_gpu/split_table_batched_embeddings_ops.py#L676-L678
apply_optimizer_in_backward(
torch.optim.Adagrad,
train_model.model.sparse_arch.parameters(),
{"lr": args.learning_rate},
)
if args.shard_model:
self.model = shard_modules(
module=train_model,
device=device
).to(device)
else:
self.model = train_model.to(device)
dense_optimizer = KeyedOptimizerWrapper(
dict(in_backward_optimizer_filter(self.model.named_parameters())),
lambda params: torch.optim.Adagrad(params, lr=args.learning_rate),
)
# fused optimizer will already be called
opt = CombinedOptimizer([dense_optimizer])
if args.multi_hot_sizes is not None:
raise RuntimeError("Multi-hot is not supported in TorchBench.")
if self.test == "train":
self.opt = opt
self.train_pipeline = TrainPipelineSparseDist(
self.model,
opt,
device,
)
self.model.train()
elif self.test == "eval":
self.model.eval()
def get_module(self):
return self.model, (self.example_inputs, )
def train(self):
self.train_pipeline.progress(self.iterator)
def eval(self):
with torch.no_grad():
_loss, logits = self.model(self.example_inputs)
return logits
|
import argparse
from enum import Enum
from typing import List
class InteractionType(Enum):
ORIGINAL = "original"
DCN = "dcn"
PROJECTION = "projection"
def __str__(self):
return self.value
def parse_args(argv: List[str]) -> argparse.Namespace:
parser = argparse.ArgumentParser(description="torchrec dlrm example trainer")
parser.add_argument(
"--epochs",
type=int,
default=1,
help="number of epochs to train",
)
parser.add_argument(
"--batch_size",
type=int,
default=1024,
help="batch size to use for training",
)
parser.add_argument(
"--drop_last_training_batch",
dest="drop_last_training_batch",
action="store_true",
help="Drop the last non-full training batch",
)
parser.add_argument(
"--test_batch_size",
type=int,
default=None,
help="batch size to use for validation and testing",
)
parser.add_argument(
"--limit_train_batches",
type=int,
default=None,
help="number of train batches",
)
parser.add_argument(
"--limit_val_batches",
type=int,
default=None,
help="number of validation batches",
)
parser.add_argument(
"--limit_test_batches",
type=int,
default=None,
help="number of test batches",
)
parser.add_argument(
"--dataset_name",
type=str,
default="criteo_1t",
help="dataset for experiment, current support criteo_1tb, criteo_kaggle",
)
parser.add_argument(
"--num_embeddings",
type=int,
default=100_000,
help="max_ind_size. The number of embeddings in each embedding table. Defaults"
" to 100_000 if num_embeddings_per_feature is not supplied.",
)
parser.add_argument(
"--num_embeddings_per_feature",
type=str,
default=None,
help="Comma separated max_ind_size per sparse feature. The number of embeddings"
" in each embedding table. 26 values are expected for the Criteo dataset.",
)
parser.add_argument(
"--dense_arch_layer_sizes",
type=str,
default="512,256,64",
help="Comma separated layer sizes for dense arch.",
)
parser.add_argument(
"--over_arch_layer_sizes",
type=str,
default="512,512,256,1",
help="Comma separated layer sizes for over arch.",
)
parser.add_argument(
"--embedding_dim",
type=int,
default=64,
help="Size of each embedding.",
)
parser.add_argument(
"--interaction_branch1_layer_sizes",
type=str,
default="2048,2048",
help="Comma separated layer sizes for interaction branch1 (only on dlrm with projection).",
)
parser.add_argument(
"--interaction_branch2_layer_sizes",
type=str,
default="2048,2048",
help="Comma separated layer sizes for interaction branch2 (only on dlrm with projection).",
)
parser.add_argument(
"--dcn_num_layers",
type=int,
default=3,
help="Number of DCN layers in interaction layer (only on dlrm with DCN).",
)
parser.add_argument(
"--dcn_low_rank_dim",
type=int,
default=512,
help="Low rank dimension for DCN in interaction layer (only on dlrm with DCN).",
)
parser.add_argument(
"--undersampling_rate",
type=float,
help="Desired proportion of zero-labeled samples to retain (i.e. undersampling zero-labeled rows)."
" Ex. 0.3 indicates only 30pct of the rows with label 0 will be kept."
" All rows with label 1 will be kept. Value should be between 0 and 1."
" When not supplied, no undersampling occurs.",
)
parser.add_argument(
"--seed",
type=int,
help="Random seed for reproducibility.",
)
parser.add_argument(
"--pin_memory",
dest="pin_memory",
action="store_true",
help="Use pinned memory when loading data.",
)
parser.add_argument(
"--mmap_mode",
dest="mmap_mode",
action="store_true",
help="--mmap_mode mmaps the dataset."
" That is, the dataset is kept on disk but is accessed as if it were in memory."
" --mmap_mode is intended mostly for faster debugging. Use --mmap_mode to bypass"
" preloading the dataset when preloading takes too long or when there is "
" insufficient memory available to load the full dataset.",
)
parser.add_argument(
"--in_memory_binary_criteo_path",
type=str,
default=None,
help="Directory path containing the Criteo dataset npy files.",
)
parser.add_argument(
"--synthetic_multi_hot_criteo_path",
type=str,
default=None,
help="Directory path containing the MLPerf v2 synthetic multi-hot dataset npz files.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=15.0,
help="Learning rate.",
)
parser.add_argument(
"--shuffle_batches",
dest="shuffle_batches",
action="store_true",
help="Shuffle each batch during training.",
)
parser.add_argument(
"--shuffle_training_set",
dest="shuffle_training_set",
action="store_true",
help="Shuffle the training set in memory. This will override mmap_mode",
)
parser.add_argument(
"--validation_freq_within_epoch",
type=int,
default=None,
help="Frequency at which validation will be run within an epoch.",
)
parser.set_defaults(
pin_memory=None,
mmap_mode=None,
drop_last=None,
shuffle_batches=None,
shuffle_training_set=None,
)
parser.add_argument(
"--collect_multi_hot_freqs_stats",
dest="collect_multi_hot_freqs_stats",
action="store_true",
help="Flag to determine whether to collect stats on freq of embedding access.",
)
parser.add_argument(
"--multi_hot_sizes",
type=str,
default=None,
help="Comma separated multihot size per sparse feature. 26 values are expected for the Criteo dataset.",
)
parser.add_argument(
"--multi_hot_distribution_type",
type=str,
choices=["uniform", "pareto"],
default=None,
help="Multi-hot distribution options.",
)
parser.add_argument("--lr_warmup_steps", type=int, default=0)
parser.add_argument("--lr_decay_start", type=int, default=0)
parser.add_argument("--lr_decay_steps", type=int, default=0)
parser.add_argument(
"--print_lr",
action="store_true",
help="Print learning rate every iteration.",
)
parser.add_argument(
"--allow_tf32",
action="store_true",
help="Enable TensorFloat-32 mode for matrix multiplications on A100 (or newer) GPUs.",
)
parser.add_argument(
"--print_sharding_plan",
action="store_true",
help="Print the sharding plan used for each embedding table.",
)
parser.add_argument(
"--shard_model",
action="store_true",
help="Shard the model and run it distributed.",
)
return parser.parse_args(argv)
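# Example usage (illustrative; based on the argument definitions above):
#   args = parse_args(["--batch_size", "512", "--embedding_dim", "128", "--shard_model"])
#   assert args.batch_size == 512 and args.embedding_dim == 128 and args.shard_model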
|
import subprocess
import sys
import os
from pathlib import Path
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
from typing import List
from torch import distributed as dist
from torch.utils.data import DataLoader
from torchrec.datasets.criteo import (
CAT_FEATURE_COUNT,
DAYS,
DEFAULT_CAT_NAMES,
DEFAULT_INT_NAMES,
InMemoryBinaryCriteoIterDataPipe,
)
from torchrec.datasets.random import RandomRecDataset
# OSS import
try:
# pyre-ignore[21]
# @manual=//ai_codesign/benchmarks/dlrm/torchrec_dlrm/data:multi_hot_criteo
from data.multi_hot_criteo import MultiHotCriteoIterDataPipe
except ImportError:
pass
# internal import
try:
from .multi_hot_criteo import MultiHotCriteoIterDataPipe # noqa F811
except ImportError:
pass
STAGES = ["train", "val", "test"]
def _get_random_dataloader(
args: argparse.Namespace,
stage: str,
) -> DataLoader:
attr = f"limit_{stage}_batches"
num_batches = getattr(args, attr)
if stage in ["val", "test"] and args.test_batch_size is not None:
batch_size = args.test_batch_size
else:
batch_size = args.batch_size
return DataLoader(
RandomRecDataset(
keys=DEFAULT_CAT_NAMES,
batch_size=batch_size,
hash_size=args.num_embeddings,
hash_sizes=args.num_embeddings_per_feature
if hasattr(args, "num_embeddings_per_feature")
else None,
manual_seed=getattr(args, "seed", None),
ids_per_feature=1,
num_dense=len(DEFAULT_INT_NAMES),
num_batches=num_batches,
),
batch_size=None,
batch_sampler=None,
pin_memory=args.pin_memory,
num_workers=0,
)
def _get_in_memory_dataloader(
args: argparse.Namespace,
stage: str,
) -> DataLoader:
if args.in_memory_binary_criteo_path is not None:
dir_path = args.in_memory_binary_criteo_path
sparse_part = "sparse.npy"
datapipe = InMemoryBinaryCriteoIterDataPipe
else:
dir_path = args.synthetic_multi_hot_criteo_path
sparse_part = "sparse_multi_hot.npz"
datapipe = MultiHotCriteoIterDataPipe
if stage == "train":
stage_files: List[List[str]] = [
[os.path.join(dir_path, f"day_{i}_dense.npy") for i in range(DAYS - 1)],
[os.path.join(dir_path, f"day_{i}_{sparse_part}") for i in range(DAYS - 1)],
[os.path.join(dir_path, f"day_{i}_labels.npy") for i in range(DAYS - 1)],
]
elif stage in ["val", "test"]:
stage_files: List[List[str]] = [
[os.path.join(dir_path, f"day_{DAYS-1}_dense.npy")],
[os.path.join(dir_path, f"day_{DAYS-1}_{sparse_part}")],
[os.path.join(dir_path, f"day_{DAYS-1}_labels.npy")],
]
if stage in ["val", "test"] and args.test_batch_size is not None:
batch_size = args.test_batch_size
else:
batch_size = args.batch_size
dataloader = DataLoader(
datapipe(
stage,
*stage_files, # pyre-ignore[6]
batch_size=batch_size,
rank=dist.get_rank(),
world_size=dist.get_world_size(),
drop_last=args.drop_last_training_batch if stage == "train" else False,
shuffle_batches=args.shuffle_batches,
shuffle_training_set=args.shuffle_training_set,
shuffle_training_set_random_seed=args.seed,
mmap_mode=args.mmap_mode,
hashes=args.num_embeddings_per_feature
if args.num_embeddings is None
else ([args.num_embeddings] * CAT_FEATURE_COUNT),
),
batch_size=None,
pin_memory=args.pin_memory,
collate_fn=lambda x: x,
)
return dataloader
def get_dataloader(args: argparse.Namespace, backend: str, stage: str) -> DataLoader:
"""
Gets desired dataloader from dlrm_main command line options. Currently, this
function is able to return either a DataLoader wrapped around a RandomRecDataset or
a Dataloader wrapped around an InMemoryBinaryCriteoIterDataPipe.
Args:
args (argparse.Namespace): Command line options supplied to dlrm_main.py's main
function.
backend (str): "nccl" or "gloo".
stage (str): "train", "val", or "test".
Returns:
dataloader (DataLoader): PyTorch dataloader for the specified options.
"""
stage = stage.lower()
if stage not in STAGES:
raise ValueError(f"Supplied stage was {stage}. Must be one of {STAGES}.")
args.pin_memory = (
(backend == "nccl") if not hasattr(args, "pin_memory") else args.pin_memory
)
if (
args.in_memory_binary_criteo_path is None
and args.synthetic_multi_hot_criteo_path is None
):
return _get_random_dataloader(args, stage)
else:
return _get_in_memory_dataloader(args, stage)
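# Example usage (illustrative): with neither --in_memory_binary_criteo_path nor
# --synthetic_multi_hot_criteo_path set, a RandomRecDataset-backed DataLoader is returned.
#   loader = get_dataloader(args, backend="gloo", stage="train")
#   batch = next(iter(loader))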
|
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceGenerationModel
class Model(HuggingFaceGenerationModel):
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(name="hf_GPT2_generate", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
|
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import NLP
import torch
from ..lit_llama import LIT_LLAMA_PATH
import importlib.util
import os.path
import torch.nn as nn
import sys
from lit_llama.lora import mark_only_lora_as_trainable, lora, lora_state_dict
from torchbenchmark import REPO_PATH
LIT_LLAMA_PATH = os.path.join(REPO_PATH, "submodules", "lit-llama")
sys.path.insert(0, LIT_LLAMA_PATH)
from lit_llama.utils import EmptyInitOnDevice, lazy_load, llama_model_lookup
from lit_llama import LLaMA, Tokenizer
class Model(BenchmarkModel):
task = NLP.LANGUAGE_MODELING
DEFAULT_EVAL_BSIZE = 1
DEFAULT_TRAIN_BSIZE = 4 # micro_batch_size in lora.py
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
# From finetune/lora.py hyperparameters
lora_r = 8
lora_alpha = 16
lora_dropout = 0.05
checkpoint_path = os.path.join(LIT_LLAMA_PATH, "checkpoints/lit-llama/7B/lit-llama.pth")
if not os.path.exists(checkpoint_path):
raise NotImplementedError("checkpoint doesn't exist")
with lazy_load(checkpoint_path) as checkpoint, lora(r=lora_r, alpha=lora_alpha, dropout=lora_dropout, enabled=True):
name = llama_model_lookup(checkpoint)
with EmptyInitOnDevice(device=device):
model = LLaMA.from_name(name)
# LoRA weights won't be in base checkpoint
model.load_state_dict(checkpoint, strict=False)
mark_only_lora_as_trainable(model)
self.model = model
self.seq_len = 32
self.max_seq_len = 64
self.example_inputs = (
torch.ones([self.batch_size, self.seq_len], dtype=torch.int32, device=self.device),
self.max_seq_len,
)
def get_module(self):
return self.model, self.example_inputs
def train(self):
logits = self.model(*self.example_inputs)
logits.sum().backward()
# Note: no optimizer step is taken here; only forward and backward are exercised.
def eval(self):
self.model.eval()
with torch.no_grad():
logits = self.model(*self.example_inputs)
return (logits,)
|
from torchbenchmark.util.framework.lit_llama import install_lit_llama
if __name__ == '__main__':
install_lit_llama()
|
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel, HuggingFaceAuthMixin
class Model(HuggingFaceModel, HuggingFaceAuthMixin):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 1
DEFAULT_EVAL_BSIZE = 1
DEEPCOPY = False
def __init__(self, test, device, batch_size=None, extra_args=[]):
HuggingFaceAuthMixin.__init__(self)
super().__init__(name="llama_v2_70b", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
def train(self):
raise NotImplementedError("FSDP should implement a training loop")
|
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
if __name__ == '__main__':
model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
cache_model(model_name) |
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
class Model(HuggingFaceModel):
task = NLP.LANGUAGE_MODELING
# https://huggingface.co/mosaicml/mpt-7b
DEFAULT_TRAIN_BSIZE = 4
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(name="hf_MPT_7b_instruct", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
def eval(self):
super().eval() |
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
if __name__ == '__main__':
patch_transformers()
model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
cache_model(model_name, trust_remote_code=True) |
from torchbenchmark.util.framework.gnn.model_factory import GNNModel
from torchbenchmark.tasks import GNN
class Model(GNNModel):
task = GNN.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 64
DEFAULT_EVAL_BSIZE = 64
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(model_name="gcn", test=test, device=device,
batch_size=batch_size, extra_args=extra_args)
if device == 'cuda':
# TODO - Add CUDA support
raise NotImplementedError("GCN doesn't support CUDA")
|
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt', '-f', 'https://data.pyg.org/whl/torch-2.0.0+cpu.html'])
if __name__ == '__main__':
pip_install_requirements()
|
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel, HuggingFaceAuthMixin
class Model(HuggingFaceModel, HuggingFaceAuthMixin):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 1
DEFAULT_EVAL_BSIZE = 1
DEEPCOPY = False
def __init__(self, test, device, batch_size=None, extra_args=[]):
HuggingFaceAuthMixin.__init__(self)
super().__init__(name="llama_v2_7b", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
def train(self):
raise NotImplementedError("FSDP should implement a training loop")
|
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
if __name__ == '__main__':
model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
cache_model(model_name) |
from torchbenchmark.util.framework.gnn.model_factory import GNNModel
from torchbenchmark.tasks import GNN
class Model(GNNModel):
task = GNN.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 64
DEFAULT_EVAL_BSIZE = 64
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(model_name="sage", test=test, device=device,
batch_size=batch_size, extra_args=extra_args)
if device == 'cuda':
# TODO - Add CUDA support
raise NotImplementedError("Sage doesn't support CUDA")
|
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt', '-f', 'https://data.pyg.org/whl/torch-2.0.0+cpu.html'])
if __name__ == '__main__':
pip_install_requirements()
|
import torch
from typing import ContextManager, List, Optional
from contextlib import contextmanager, ExitStack
class PostInitProcessor(type):
def __call__(cls, *args, **kwargs):
obj = type.__call__(cls, *args, **kwargs)
obj.__post__init__()
return obj
@contextmanager
def nested(*contexts):
"""
Chain and apply a list of contexts
"""
with ExitStack() as stack:
for ctx in contexts:
stack.enter_context(ctx())
yield contexts
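# Example usage (illustrative): pass zero-argument callables that each return a
# context manager; they are entered together and exited in reverse order.
#   with nested(torch.no_grad, lambda: torch.autocast(device_type="cpu")):
#       model(inputs)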
class E2EBenchmarkModel(metaclass=PostInitProcessor):
"""
A base class for adding models for all e2e models.
"""
def __init__(self, test: str, batch_size: Optional[int]=None, extra_args: List[str]=[]):
self.test = test
assert self.test == "train" or self.test == "eval", f"Test must be 'train' or 'eval', but got {self.test}. Please submit a bug report."
self.batch_size = batch_size
if not self.batch_size:
self.batch_size = self.DEFAULT_TRAIN_BSIZE if test == "train" else self.DEFAULT_EVAL_BSIZE
# If the model doesn't implement test or eval test
# its DEFAULT_TRAIN_BSIZE or DEFAULT_EVAL_BSIZE will still be None
if not self.batch_size:
raise NotImplementedError(f"Test {test} is not implemented.")
self.extra_args = extra_args
if "--torchdynamo" in self.extra_args:
self.dynamo = True
from torchbenchmark.util.backends.torchdynamo import parse_torchdynamo_args
self.opt_args, self.extra_args = parse_torchdynamo_args(self.extra_args)
else:
self.dynamo = False
# Run the post processing for model acceleration
def __post__init__(self):
# sanity checks of the options
assert self.test == "train" or self.test == "eval", f"Test must be 'train' or 'eval', but provided {self.test}."
# initialize run contexts
self.run_contexts = []
if self.dynamo:
from torchbenchmark.util.backends.torchdynamo import apply_torchdynamo_args
apply_torchdynamo_args(self, self.opt_args, precision=self.tb_args.fp16)
def add_context(self, context_fn):
ctx = context_fn()
assert isinstance(ctx, ContextManager), f"Expected to add a ContextManager, but got {type(ctx)}. Please report a bug."
self.run_contexts.append(context_fn)
def get_optimizer(self):
raise NotImplementedError("Every E2EModel should implement a way to access the optimizer used.")
def set_optimizer(self, optimizer) -> None:
raise NotImplementedError("Every E2EModel should implement a way to swap out the optimizer(s).")
def next_batch(self):
raise NotImplementedError("Every E2EModel should implement a way to retrieve the next batch.")
def run_forward(self, input):
raise NotImplementedError("Every E2EModel should implement a modular forward step.")
def run_backward(self, loss):
raise NotImplementedError("Every E2EModel should implement a modular backward step.")
def run_optimizer_step(self):
raise NotImplementedError("Every E2EModel should implement a modular optimizer step.")
|
import argparse
import enum
from typing import List, Optional, Tuple
from torchbenchmark.util.backends import list_backends, BACKENDS
from torchbenchmark.util.env_check import is_staged_train_test
TEST_STAGE = enum.Enum('TEST_STAGE', ['FORWARD', 'BACKWARD', 'OPTIMIZER', 'ALL'])
AVAILABLE_PRECISIONS = ["fp32", "tf32", "fp16", "amp", "fx_int8", "bf16","amp_fp16", "amp_bf16"]
QUANT_ENGINES = ["x86", "fbgemm", "qnnpack", "onednn"]
def add_bool_arg(parser: argparse.ArgumentParser, name: str, default_value: bool=True):
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--' + name, dest=name, action='store_true')
group.add_argument('--no-' + name, dest=name, action='store_false')
parser.set_defaults(**{name: default_value})
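# Example usage (illustrative, with a hypothetical flag name):
#   add_bool_arg(parser, "use_amp", default_value=False)
# registers mutually exclusive --use_amp / --no-use_amp flags defaulting to False.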
def check_precision(model: 'torchbenchmark.util.model.BenchmarkModel', precision: str) -> bool:
if precision == "fp16":
return model.device == 'cuda' and hasattr(model, "enable_fp16_half")
if precision == "tf32":
return model.device == "cuda"
if precision == "amp":
return True
if precision == "fx_int8":
return model.device == 'cpu' and hasattr(model, "enable_fx_int8")
if precision == "bf16":
return model.device == 'cpu' and hasattr(model, "enable_bf16")
if precision == "amp_fp16":
if model.test == 'eval' and model.device == 'cuda':
return True
if model.test == 'train' and model.device == 'cuda':
return hasattr(model, 'enable_amp') or is_staged_train_test(model)
if precision == "amp_bf16":
return model.device == 'cpu'
assert precision == "fp32", f"Expected precision to be one of {AVAILABLE_PRECISIONS}, but got {precision}"
return True
def check_memory_layout(model: 'torchbenchmark.util.model.BenchmarkModel', channels_last: bool) -> bool:
if channels_last:
return hasattr(model, 'enable_channels_last')
return True
def check_distributed_trainer(model: 'torchbenchmark.util.model.BenchmarkModel', distributed_trainer: Optional[str]) -> bool:
if not model.test == "train" and distributed_trainer:
return False
return True
def get_precision_default(model: 'torchbenchmark.util.model.BenchmarkModel') -> str:
if hasattr(model, "DEFAULT_EVAL_CUDA_PRECISION") and model.test == 'eval' and model.device == 'cuda':
return model.DEFAULT_EVAL_CUDA_PRECISION
if hasattr(model, "DEFAULT_TRAIN_CUDA_PRECISION") and model.test == 'train' and model.device == 'cuda':
return model.DEFAULT_TRAIN_CUDA_PRECISION
return "fp32"
def parse_decoration_args(model: 'torchbenchmark.util.model.BenchmarkModel', extra_args: List[str]) -> Tuple[argparse.Namespace, List[str]]:
parser = argparse.ArgumentParser()
parser.add_argument(
"--distributed",
choices=["ddp", "ddp_no_static_graph", "fsdp"],
default=None,
help="Enable distributed trainer",
)
parser.add_argument(
"--distributed_wrap_fn",
type=str,
default=None,
help="Path to function that will apply distributed wrapping fn(model, dargs.distributed)",
)
parser.add_argument("--precision", choices=AVAILABLE_PRECISIONS, default=get_precision_default(model), help=f"choose precisions from {AVAILABLE_PRECISIONS}")
parser.add_argument("--channels-last", action='store_true', help="enable channels-last memory layout")
parser.add_argument("--accuracy", action="store_true", help="Check accuracy of the model only instead of running the performance test.")
parser.add_argument("--use_cosine_similarity", action='store_true', help="use cosine similarity for correctness check")
parser.add_argument("--quant-engine", choices=QUANT_ENGINES, default='x86', help=f"choose quantization engine for fx_int8 precision from {QUANT_ENGINES}")
dargs, opt_args = parser.parse_known_args(extra_args)
if not check_precision(model, dargs.precision):
raise NotImplementedError(f"precision value: {dargs.precision}, "
"fp16 is only supported if the model implements the `enable_fp16_half()` callback function."
"amp is only supported if cuda+eval, or if `enable_amp` implemented,"
"or if model uses staged train interfaces (forward, backward, optimizer).")
if not check_memory_layout(model, dargs.channels_last):
raise NotImplementedError(f"Specified channels_last: {dargs.channels_last} ,"
f" but the model doesn't implement the enable_channels_last() interface.")
if not check_distributed_trainer(model, dargs.distributed):
raise NotImplementedError(f"We only support distributed trainer {dargs.distributed} for train tests, "
f"but get test: {model.test}")
return (dargs, opt_args)
def apply_decoration_args(model: 'torchbenchmark.util.model.BenchmarkModel', dargs: argparse.Namespace):
if dargs.channels_last:
model.enable_channels_last()
if dargs.precision == "fp16":
model.enable_fp16_half()
elif dargs.precision == "tf32":
import torch
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
elif dargs.precision == "amp":
# model handles amp itself if it has 'enable_amp' callback function (e.g. pytorch_unet)
if hasattr(model, "enable_amp"):
model.enable_amp()
elif dargs.precision == "fx_int8":
assert model.device == "cpu" and model.test == "eval", f"fx_int8 only work for eval mode on cpu device."
model.enable_fx_int8(dargs.quant_engine)
elif dargs.precision == "bf16":
assert model.device == "cpu", f"bf16 only work on cpu device."
model.enable_bf16()
elif dargs.precision == "amp_fp16":
assert model.device == "cuda", f"{model.device} has no fp16 autocast."
if model.test == "eval":
import torch
model.add_context(lambda: torch.cuda.amp.autocast(dtype=torch.float16))
elif model.test == "train":
# the model must implement staged train test
assert is_staged_train_test(model), "Expected the model to implement the staged train test (forward, backward, optimizer)."
import torch
model.add_context(lambda: torch.cuda.amp.autocast(dtype=torch.float16), stage=TEST_STAGE.FORWARD)
elif dargs.precision == "amp_bf16":
import torch
model.add_context(lambda: torch.cpu.amp.autocast(dtype=torch.bfloat16))
elif not dargs.precision == "fp32":
assert False, f"Get an invalid precision option: {dargs.precision}. Please report a bug."
# Dispatch arguments based on model type
def parse_opt_args(model: 'torchbenchmark.util.model.BenchmarkModel', opt_args: List[str]) -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument("--backend", choices=list_backends(), help="enable backends")
parser.add_argument("--rank", help="rank of current process")
parser.add_argument("--world_size", help="world size of multiprocess")
args, extra_args = parser.parse_known_args(opt_args)
if args.backend:
backend = BACKENDS[args.backend]
model._enable_backend, extra_args = backend(model, backend_args=extra_args)
if args.rank:
model._rank = int(args.rank)
if args.world_size:
model._world_size = int(args.world_size)
return args, extra_args
def apply_opt_args(model: 'torchbenchmark.util.model.BenchmarkModel', args: argparse.Namespace):
if args.backend:
model._enable_backend()
|
import argparse
import re
import torch
from enum import Enum
class OpType(Enum):
POINTWISE = 1
NORMS = 2
REDUCTIONS = 3
VIEWS_EXPANDS = 4
REMOVE = 5
IGNORE = 6
op_types = {
"aten::rsqrt": OpType.POINTWISE,
"aten::abs": OpType.POINTWISE,
"aten::eq": OpType.POINTWISE,
"aten::gelu": OpType.POINTWISE,
"aten::remainder": OpType.POINTWISE,
"aten::_softmax": OpType.POINTWISE,
"aten::clamp": OpType.POINTWISE,
"aten::gt": OpType.POINTWISE,
"aten::mul": OpType.POINTWISE,
"aten::add": OpType.POINTWISE,
"aten::sum": OpType.REDUCTIONS,
"aten::ne": OpType.POINTWISE,
"aten::silu": OpType.POINTWISE,
"aten::pow": OpType.POINTWISE,
"aten::ge": OpType.POINTWISE,
"aten::native_batch_norm": OpType.NORMS,
"aten::sub": OpType.POINTWISE,
"aten::mean": OpType.REDUCTIONS,
"aten::sqrt": OpType.POINTWISE,
"aten::reciprocal": OpType.POINTWISE,
"aten::reshape": OpType.VIEWS_EXPANDS,
"aten::relu": OpType.POINTWISE,
"prim::Constant": OpType.REMOVE,
"prim::TupleConstruct": OpType.IGNORE,
"aten::div": OpType.POINTWISE,
"aten::tanh": OpType.POINTWISE,
"aten::neg": OpType.POINTWISE,
"aten::log": OpType.POINTWISE,
"aten::unsqueeze": OpType.VIEWS_EXPANDS,
"aten::native_layer_norm": OpType.NORMS,
"aten::exp": OpType.POINTWISE,
"aten::sigmoid": OpType.POINTWISE,
}
def type_to_placeholder(op_type: OpType) -> str:
mapping = {
OpType.POINTWISE: "aten::pointwise_placeholder",
OpType.NORMS: "aten::norm_placeholder",
OpType.REDUCTIONS: "aten::reduction_placeholder",
OpType.VIEWS_EXPANDS: "aten::view_expand_placeholder",
OpType.IGNORE: "aten::ignore_placeholder",
OpType.REMOVE: "aten::remove_placeholder",
}
return mapping[op_type]
# get the op type. op_name is expected to be the qualified name.
def get_type(op_name: str) -> OpType:
if op_name in op_types:
return op_types[op_name]
for optype in OpType:
if type_to_placeholder(optype) == op_name:
return optype
raise NotImplementedError(f"No OpType known for op '{op_name}'")
def simplify_tensor_type(jit_type):
if isinstance(jit_type, torch._C.TensorType):
return torch._C.TensorType.get()
return jit_type
def remove_inputs(graph):
inputs_size = 0
for n in graph.inputs():
inputs_size += 1
for use in n.uses():
use.user.removeInput(use.offset)
for i in reversed(range(inputs_size)):
graph.eraseInput(i)
return graph
# Remove vertices like x or y below, where x or y are pointwise.
# (pointwise) --> (x) --> (...)
# (...) --> (y) --> (pointwise)
# if remove_all is true, then it doesn't care if pointwise ops precede/succeed x or y.
def remove_duplicate_pointwise(graph, remove_all=False):
to_remove = []
old_str = str(graph)
def bypass_node(n):
to_remove.append(n)
n.output().replaceAllUsesWith(n.input())
for n in graph.nodes():
if get_type(n.kind()) != OpType.POINTWISE:
continue
if n.inputsSize() != 1 or n.outputsSize() != 1:
continue
if get_type(n.input().node().kind()) == OpType.POINTWISE or remove_all:
bypass_node(n)
continue
uses = [r.user for r in n.output().uses() if r.user.kind() != "prim::Return"]
if len(uses) >= 1 and (all(get_type(r.kind()) == OpType.POINTWISE for r in uses) or remove_all):
bypass_node(n)
continue
for n in reversed(to_remove):
n.destroy()
return graph
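# Illustrative effect: in a chain (pointwise) -> relu -> (pointwise), the middle
# single-input/single-output pointwise node (relu) is bypassed and destroyed, so only
# one representative pointwise node per chain survives in the compressed graph.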
def compress_graph(graph):
old_nodes = []
erased_nodes = set()
for n in graph.nodes():
simple_type = get_type(n.kind())
if simple_type == OpType.IGNORE:
continue
old_nodes.append(n)
if simple_type == OpType.REMOVE:
erased_nodes.add(n)
continue
new_node = graph.create(type_to_placeholder(simple_type), n.outputsSize())
new_node.insertBefore(n)
for inp in n.inputs():
if inp.node() not in erased_nodes:
new_node.addInput(inp)
for old_out, new_out in zip(n.outputs(), new_node.outputs()):
new_out.setType(simplify_tensor_type(old_out.type()))
old_out.replaceAllUsesWith(new_out)
for n in reversed(old_nodes):
n.destroy()
graph = remove_inputs(graph)
graph = remove_duplicate_pointwise(graph)
return torch._C._jit_pass_canonicalize(graph, False)
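# Example usage (illustrative): two graphs fall into the same class when their
# compressed (category-hashed) forms print identically.
#   g1 = torch._C.parse_ir(ir_string_1)
#   g2 = torch._C.parse_ir(ir_string_2)
#   same_class = str(compress_graph(g1)) == str(compress_graph(g2))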
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="""
Collection of helper functions for eliminating duplicate subgraphs
Usage:
~~~
import classify_graphs
# some ir string called "ir"
graph = torch._C.parse_ir(ir)
# "hashes" the graph based on categories of ops (pointwise, reductions, views/expands, norms)
compressed_graph = classify_graphs.compress_graph(graph)
# do something with the compressed graph
~~~
Alternatively, call it and it will return one graph per hashed category
Usage:
python3 log_extract.py log.txt --output > log_result.py
python3 classify_graphs.py log_result.py > filtered_logs.py
""", formatter_class = argparse.RawDescriptionHelpFormatter)
parser.add_argument("filename", type=str, help="output from log_extract.py --help")
args = parser.parse_args()
with open(args.filename) as f:
arr = eval(f.read())
# see 73984
for i in range(len(arr)):
if len(re.findall(r'value=annotate\(List\[int', arr[i])) >= 1:
arr[i] = arr[0]
classified = {}
for ir in arr:
graph = torch._C.parse_ir(ir)
graph = compress_graph(graph)
graph_class = str(graph)
if graph_class not in classified:
classified[graph_class] = []
classified[graph_class].append(ir)
final_selection = []
for cl, graphs in classified.items():
# choose the longest graph of this type
s = sorted(graphs, key=lambda x: -len(str(x)))
final_selection.append(str(s[0]))
print('[' + ', '.join(f'"""{x}"""' for x in final_selection) + ']')
|
"""Utilities for tuning the machine for better benchmark stability.
Written for Amazon Linux with Intel CPUs and Nvidia GPUs, although many utilities will overlap.
"""
import argparse
import cpuinfo
import distro
import enum
import os
import platform
import psutil
import subprocess
import re
import sys
import typing
from pathlib import Path
def read_sys_file(sysfile: Path):
with open(sysfile, 'r') as f:
return f.read()
def write_sys_file(sysfile: Path, content: str):
print(f"Write {content} to {sysfile}")
with open(sysfile, 'w') as f:
f.write(content)
def check_intel_no_turbo_state(turbo_file='/sys/devices/system/cpu/intel_pstate/no_turbo'):
return int(read_sys_file(turbo_file))
def set_intel_no_turbo_state(state: int, turbo_file='/sys/devices/system/cpu/intel_pstate/no_turbo'):
assert state in [0, 1]
write_sys_file(turbo_file, str(state))
def parse_lscpu_cpu_core_list():
coreinfo = subprocess.check_output("lscpu --all --parse=CPU,CORE,ONLINE", shell=True).strip().decode().split('\n')
matched_cpus = 0
cpu_core = []
for line in coreinfo[2:]:
if line[0] == '#':
continue
cpu, core, online = line.split(',')
cpu = int(cpu)
online = online == "Y"
core = int(core) if online else None
if cpu == core:
matched_cpus += 1
cpu_core.append((cpu, core, online))
assert matched_cpus > 0, "Failed to parse lscpu output"
return cpu_core
def hyper_threading_enabled():
for cpu, core, online in parse_lscpu_cpu_core_list():
if cpu != core and online:
return True
return False
def set_hyper_threading(enabled=False):
for cpu, core, online in parse_lscpu_cpu_core_list():
if cpu != core:
if not online and not enabled:
continue
if online and enabled:
continue
virtual_cpu_online_file = f"/sys/devices/system/cpu/cpu{cpu}/online"
value = "1" if enabled else "0"
write_sys_file(virtual_cpu_online_file, value)
def get_intel_max_cstate():
kernel_args = read_sys_file('/proc/cmdline').split()
for arg in kernel_args:
if arg.find('intel_idle.max_cstate') == 0:
return int(arg.split('=')[1])
return None
def get_isolated_cpus():
"""
Returns a list of cpus marked as isolated from the kernel scheduler for regular tasks.
Only tasks scheduled via taskset command can use these cpus, e.g. benchmarking workload.
"""
kernel_args = read_sys_file('/proc/cmdline').split()
isolcpus = set()
for arg in kernel_args:
if arg.find('isolcpus') == 0:
arg = arg.split('=')[1]
chunks = arg.split(',')
for chunk in chunks:
if '-' in chunk:
start, end = chunk.split('-')
for cpu in range(int(start), int(end) + 1):
isolcpus.add(cpu)
else:
isolcpus.add(int(chunk))
return list(isolcpus)
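# Example (illustrative): a kernel command line containing "isolcpus=4-7,12" yields
# [4, 5, 6, 7, 12] (order not guaranteed, since the cpus are collected in a set).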
def get_process_cpu_affinity():
p = psutil.Process()
return p.cpu_affinity()
def nvidia_smi_query(query: str, device_ids: typing.List[int] = None):
if device_ids:
device_ids = [str(id) for id in device_ids]
device_ids = ",".join(device_ids)
id_selector = f"-i {device_ids}" if device_ids else ""
values = subprocess.check_output(f'nvidia-smi --query-gpu="{query}" {id_selector} --format=csv,noheader,nounits',
shell=True).strip().decode().split("\n")
return values
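# Example (illustrative): nvidia_smi_query("clocks.applications.graphics", [0]) shells out to
#   nvidia-smi --query-gpu="clocks.applications.graphics" -i 0 --format=csv,noheader,nounits
# and returns one output line per selected GPU.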
def has_nvidia_smi():
try:
subprocess.check_output('nvidia-smi', shell=True)
return True
except:
return False
def get_nvidia_gpu_clocks(device_ids: typing.List[int] = None):
clocks = nvidia_smi_query("clocks.applications.graphics", device_ids)
for clock in range(len(clocks)):
clocks[clock] = 0 if clocks[clock] == '[N/A]' else clocks[clock]
return [int(clock) for clock in clocks]
def get_nvidia_gpu_temps(device_ids: typing.List[int] = None):
temps = {}
raw_temps = nvidia_smi_query("temperature.gpu,temperature.memory", device_ids)
temps['gpu'] = [temp.split(',')[0] for temp in raw_temps]
temps['memory'] = [temp.split(',')[1] for temp in raw_temps]
return temps
def set_nvidia_graphics_clock(device_id=0, clock=900):
if has_nvidia_smi():
return subprocess.check_call(['nvidia-smi', '-i', str(device_id), '-ac', f'5001,{clock}'])
return False
def get_nvidia_throttle_reasons(device_ids: typing.List[int] = None):
""" See 'nvidia-smi --help-query-gpu for explanation of throttle reasons
"""
queries = ['gpu_idle', 'applications_clocks_setting', 'sw_power_cap',
'hw_slowdown', 'hw_thermal_slowdown',
'hw_power_brake_slowdown', 'sw_thermal_slowdown', 'sync_boost']
query_str = ','.join(["clocks_throttle_reasons." + q for q in queries])
raw = nvidia_smi_query(query_str, device_ids)
throttle_reasons = []
for line in raw:
gpu_reasons = [q for q, v in zip(queries, line.split(',')) if 'Active' == v]
throttle_reasons.append(gpu_reasons)
return throttle_reasons
MACHINE = enum.Enum('MACHINE', ['AMAZON_LINUX', 'UBUNTU', 'UNKNOWN'])
def get_machine_type():
# It's tricky to write platform setup code that works on different OS/configs.
# Initially, the intent is just to identify a known environment and to fall back to a
# no-op for any other environment. Expand functionality over time as needed.
if platform.system() == 'Linux':
if distro.name() == "Amazon Linux":
return MACHINE.AMAZON_LINUX
if platform.system() == 'Linux':
if distro.name() == 'Ubuntu':
return MACHINE.UBUNTU
return MACHINE.UNKNOWN
def get_cpu_temp():
temps = {}
if not MACHINE.UNKNOWN == get_machine_type():
thermal_path = Path('/sys/class/thermal/')
for zone in filter(lambda x: "thermal_zone" in x, os.listdir(thermal_path)):
temps[zone] = int(read_sys_file(thermal_path / zone / "temp")) / 1000.
return temps
def is_using_isolated_cpus():
isolated_cpus = get_isolated_cpus()
using_cpus = get_process_cpu_affinity()
omp_using_cpus = get_omp_affinity()
lscpu = parse_lscpu_cpu_core_list()
assert len(lscpu) > 0, "unable to parse current CPUs"
for cpu, core, active in lscpu:
# check that all used cpus are isolated ones (more critical)
if (cpu in using_cpus or cpu in omp_using_cpus) and cpu not in isolated_cpus:
return False
# check all isolated cpus are used (less critical)
elif active and cpu in isolated_cpus:
if cpu not in using_cpus:
# currently after importing torch, process cpu affinity mask changes from e.g. 4-47 to 4.
# since we can't assert that all intended cores are being used, we can at least assert that
# the first core in the range of isolated cores is used.
# see https://github.com/pytorch/pytorch/issues/49971
# return False
pass
if cpu not in omp_using_cpus:
return False
return True
def get_omp_affinity():
if 'GOMP_CPU_AFFINITY' not in os.environ:
return []
raw = os.environ['GOMP_CPU_AFFINITY']
affinity = []
def parse_block(block):
if '-' in block:
start, end = block.split('-')
return list(range(int(start), int(end) + 1))
return [int(block)]
if ' ' in raw:
for block in raw.split(' '):
affinity.extend(parse_block(block))
else:
affinity.extend(parse_block(raw))
return affinity
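# Example (illustrative): GOMP_CPU_AFFINITY="0-2 8" parses to [0, 1, 2, 8].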
def get_pstate_frequency():
CPU_FREQ_BASE_DIR = '/sys/devices/system/cpu'
CPU_FREQ_FILES = ["scaling_min_freq", "scaling_max_freq", "scaling_cur_freq"]
cpu_dirs = ["cpu" + str(cpu[0]) for cpu in parse_lscpu_cpu_core_list() if cpu[2]]
output = dict()
for cpu_dir in cpu_dirs:
full_path = os.path.join(CPU_FREQ_BASE_DIR, cpu_dir, "cpufreq")
freq_paths = [os.path.join(full_path, x) for x in CPU_FREQ_FILES]
all_exist = True
for path in freq_paths:
all_exist = all_exist and os.path.exists(path)
if all_exist:
output[cpu_dir] = dict()
for i, path in enumerate(freq_paths):
output[cpu_dir][CPU_FREQ_FILES[i]] = int(read_sys_file(path)) / 1000
return output
def set_pstate_frequency(min_freq = 2500, max_freq = 2500):
CPU_FREQ_BASE_DIR = '/sys/devices/system/cpu'
CPU_FREQ_FILES = ["scaling_min_freq", "scaling_max_freq", "scaling_cur_freq"]
cpu_dirs = ["cpu" + str(cpu[0]) for cpu in parse_lscpu_cpu_core_list() if cpu[2]]
for cpu_dir in cpu_dirs:
full_path = os.path.join(CPU_FREQ_BASE_DIR, cpu_dir, "cpufreq")
freq_paths = [os.path.join(full_path, x) for x in CPU_FREQ_FILES]
all_exist = True
for path in freq_paths:
all_exist = all_exist and os.path.exists(path)
if all_exist:
write_sys_file(freq_paths[0], str(min_freq * 1000))
write_sys_file(freq_paths[1], str(max_freq * 1000))
def check_pstate_frequency_pin(pin_freq = 2500):
FREQ_THRESHOLD = 15 # Allow 15 MHz difference maximum
all_freq = get_pstate_frequency()
for cpuid in all_freq:
for attr in all_freq[cpuid]:
freq = all_freq[cpuid][attr]
difference = abs(freq - pin_freq)
if difference > FREQ_THRESHOLD:
print(f"Specify frequency {pin_freq} Mhz, find setting {cpuid} {attr}: {freq}.")
return False
return True
def get_machine_config():
config = {}
machine_type = get_machine_type()
config['machine_type'] = machine_type
config['cpu_brand'] = cpuinfo.get_cpu_info()['brand_raw']
if not MACHINE.UNKNOWN == machine_type:
config['linux_distribution'] = distro.linux_distribution()
config['intel_turbo_disabled'] = check_intel_no_turbo_state()
config['intel_hyper_threading_enabled'] = hyper_threading_enabled()
config['intel_max_cstate'] = get_intel_max_cstate()
config['isolated_cpus'] = get_isolated_cpus()
config['process_cpu_affinity'] = get_process_cpu_affinity()
config['is_using_isolated_cpus'] = is_using_isolated_cpus()
config['cpu_pstate_frequency'] = get_pstate_frequency()
return config
def check_machine_configured(check_process_affinity=True):
check_environment()
if not MACHINE.UNKNOWN == get_machine_type():
assert 1 == check_intel_no_turbo_state(), "Turbo Boost is not disabled"
assert False == hyper_threading_enabled(), "HyperThreading is not disabled"
assert 1 == get_intel_max_cstate(), "Intel max C-State isn't set to 1, which avoids power-saving modes."
assert len(get_isolated_cpus()) > 0, "No cpus are isolated for benchmarking with isolcpus"
assert 900 == get_nvidia_gpu_clocks()[0], "Nvidia gpu clock isn't limited, to increase consistency by reducing throttling"
assert is_using_isolated_cpus(), "taskset or GOMP_CPU_AFFINITY not specified or not matching kernel isolated cpus"
assert check_pstate_frequency_pin(), "Must pin CPU frequency to a fixed number in MHz"
else:
raise RuntimeError(f"Unsupported machine type {get_machine_type()}")
def get_machine_state():
state = {}
machine_type = get_machine_type()
state['machine_type'] = machine_type
if not MACHINE.UNKNOWN == machine_type:
state['cpu_temps'] = get_cpu_temp()
if has_nvidia_smi():
state['nvidia_gpu_temps'] = get_nvidia_gpu_temps()
state['nvidia_gpu_clocks'] = get_nvidia_gpu_clocks()
state['nvidia_gpu_throttle_reasons'] = get_nvidia_throttle_reasons()
state['process_cpu_affinity'] = get_process_cpu_affinity()
return state
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--enable_ht", action="store_true", help="Enable HyperThreading")
parser.add_argument("--configure", action="store_true", help="Apply benchmark tuning to this machine")
parser.add_argument("--no_verify", action="store_true", help="Skip verifying machine is configured for benchmarking")
args = parser.parse_args()
machine_type = get_machine_type()
if MACHINE.UNKNOWN == machine_type:
raise RuntimeError(f"Unsupported machine type {machine_type}")
if args.enable_ht:
set_hyper_threading(True)
if args.configure:
set_intel_no_turbo_state(1)
set_hyper_threading(False)
set_nvidia_graphics_clock()
set_pstate_frequency()
if not args.no_verify:
assert 1 == check_intel_no_turbo_state(), "Turbo Boost is not disabled"
assert False == hyper_threading_enabled(), "HyperThreading is not disabled"
assert 1 == get_intel_max_cstate(), "Intel max C-State isn't set to 1, which avoids power-saving modes."
assert len(get_isolated_cpus()) > 0, "No cpus are isolated for benchmarking with isolcpus"
assert 900 == get_nvidia_gpu_clocks()[0], "Nvidia gpu clock isn't limited, to increase consistency by reducing throttling"
assert check_pstate_frequency_pin(), "CPU frequency is not correctly pinned, which is required to minimize noise."
# doesn't make too much sense to ask the user to run this configure script with the isolated cpu cores
# that check is more important to be done at runtime of benchmark, and is checked by conftest.py
#assert is_using_isolated_cpus(), "Not using isolated CPUs for this process"
def check_environment():
checks = [
# VAR_NAME, blacklist
("DEBUG", None),
("MKLDNN_VERBOSE", None),
("PYTORCH_JIT_LOG_LEVEL", None)
]
for check in checks:
if check[0] in os.environ and (check[1] == None or os.environ[check[0]] in check[1]):
raise RuntimeError(f"{check[0]} is set")
|
import importlib
import os
import torch
from contextlib import contextmanager, ExitStack
import warnings
import inspect
import yaml
from pathlib import Path
from typing import ContextManager, Optional, List, Tuple, Generator
from torch.utils._pytree import tree_map
from torchbenchmark import REPO_PATH
from torchbenchmark.util.extra_args import parse_opt_args, apply_opt_args, \
parse_decoration_args, apply_decoration_args, is_staged_train_test, \
TEST_STAGE
from torchbenchmark.util.env_check import set_random_seed, is_hf_model, \
save_deterministic_dict, load_deterministic_dict, check_accuracy
from torchbenchmark.util.fx_int8 import get_sub_module, prepare_sub_module, convert_sub_module
SPECIAL_DEVICE_MAPPING = {
"AMD Instinct MI210": "NVIDIA A100-SXM4-40GB"
}
class PostInitProcessor(type):
def __call__(cls, *args, **kwargs):
obj = type.__call__(cls, *args, **kwargs)
obj.__post__init__()
return obj
@contextmanager
def no_grad(val):
"""Some meta-learning models (e.g. maml) may need to train a target(another) model
in inference runs
"""
old_state = torch.is_grad_enabled()
try:
torch.set_grad_enabled(not val)
yield
finally:
torch.set_grad_enabled(old_state)
@contextmanager
def nested(*contexts):
"""
Chain and apply a list of contexts
"""
with ExitStack() as stack:
for ctx in contexts:
stack.enter_context(ctx())
yield contexts
# enable JIT profiling executor
@contextmanager
def enable_profiling_executor():
try:
graph_executor = torch._C._get_graph_executor_optimize(True)
profiling_executor = torch._C._jit_set_profiling_executor(True)
profiling_mode = torch._C._jit_set_profiling_mode(True)
yield
finally:
torch._C._jit_set_profiling_mode(profiling_mode)
torch._C._jit_set_profiling_executor(profiling_executor)
torch._C._get_graph_executor_optimize(graph_executor)
class BenchmarkModel(metaclass=PostInitProcessor):
DEFAULT_TRAIN_BSIZE: Optional[int] = None
DEFAULT_EVAL_BSIZE: Optional[int] = None
# by default, deepcopy the model when checking accuracy
# because some models are stateful (such as moco)
DEEPCOPY: bool = True
# by default, turn on deterministic mode when checking accuracy
DISABLE_DETERMINISM: bool = False
test: str
device: str
batch_size: int
extra_args: List[str]
run_contexts: List[ContextManager]
"""
A base class for adding models to torch benchmark.
See [Adding Models](#../models/ADDING_MODELS.md)
"""
def __init__(self, test: str, device: str, batch_size: Optional[int]=None, extra_args: List[str]=[]):
self.metadata = self.load_metadata()
self.test = test
assert self.test == "train" or self.test == "eval", \
f"Test must be 'train' or 'eval', but get {self.test}. Please submit a bug report."
self.device = device
self.extra_args = extra_args
self.opt = None
# contexts to run in the test function
if self.test == "train":
# In train test, there are run contexts that should only be applied for forward/backward/optimizer stage
# For example, amp only applies for the forward stage
self.forward_contexts = []
self.backward_contexts = []
self.optimizer_contexts = []
self.run_contexts = [
enable_profiling_executor # force JIT profiling executor to be enabled by default
]
set_random_seed()
# sanity checks of the options
assert self.test == "train" or self.test == "eval", f"Test must be 'train' or 'eval', but provided {self.test}."
# parse the args
self.dargs, opt_args = parse_decoration_args(self, self.extra_args)
if self.dargs.accuracy and not self.DISABLE_DETERMINISM:
self.deterministic_dict = save_deterministic_dict(self.name)
# if the args contain "--torchdynamo", parse torchdynamo args
if "--torchdynamo" in opt_args:
self.dynamo = True
from torchbenchmark.util.backends.torchdynamo import parse_torchdynamo_args
self.opt_args, self.extra_args = parse_torchdynamo_args(opt_args)
else:
self.dynamo = False
self.opt_args, self.extra_args = parse_opt_args(self, opt_args)
self.determine_batch_size(batch_size)
# Run the post processing for model acceleration
def __post__init__(self):
# All arguments should be parsed at this point.
assert not self.extra_args, f"Expected no unknown args at this point, found {self.extra_args}"
if self.dargs.accuracy:
self.accuracy = check_accuracy(self)
if not self.DISABLE_DETERMINISM:
load_deterministic_dict(self.deterministic_dict)
return
# apply decoration args
apply_decoration_args(self, self.dargs)
# apply optimization args
if self.dynamo:
from torchbenchmark.util.backends.torchdynamo import apply_torchdynamo_args
apply_torchdynamo_args(self, self.opt_args, self.dargs.precision)
else:
apply_opt_args(self, self.opt_args)
# setup distributed trainer
if self.dargs.distributed:
if self.dargs.distributed_wrap_fn:
pos = self.dargs.distributed_wrap_fn.rfind(".")
module = importlib.import_module(self.dargs.distributed_wrap_fn[:pos])
apply_trainer = getattr(module, self.dargs.distributed_wrap_fn[(pos+1):])
else:
from torchbenchmark.util.distributed.core_model.apply_trainer import apply_trainer
if is_hf_model(self):
# DDP requires to use unwrapped model for huggingface
module, _inputs = self.get_module(wrap_model=False)
else:
module, _inputs = self.get_module()
self.set_module(apply_trainer(module, self.dargs.distributed))
# Need to clean up the cache because we run deep copy within correceness check
if self.device == "cuda":
torch.cuda.empty_cache()
def determine_batch_size(self, batch_size=None):
# batch size priority for eval tests: not ALLOW_CUSTOMIZE_BSIZE > user specified > device specified > default
# batch size priority for train tests: not ALLOW_CUSTOMIZE_BSIZE > user specified > default
self.batch_size = batch_size
if not batch_size:
self.batch_size = self.DEFAULT_TRAIN_BSIZE if self.test == "train" else self.DEFAULT_EVAL_BSIZE
if self.device == "cuda":
current_device_name = torch.cuda.get_device_name()
assert current_device_name, f"torch.cuda.get_device_name() returns None when device is set to cuda, please double check."
if current_device_name in SPECIAL_DEVICE_MAPPING:
current_device_name = SPECIAL_DEVICE_MAPPING[current_device_name]
else:
current_device_name = str(self.device)
# use the device suggestion on CUDA inference tests, key should be either eval_batch_size or train_batch_size
device_batch_size_key = f"{self.test}_batch_size"
if self.metadata and "devices" in self.metadata and current_device_name in self.metadata["devices"] \
and device_batch_size_key in self.metadata["devices"][current_device_name]:
self.batch_size = self.metadata["devices"][current_device_name][device_batch_size_key]
# If the model doesn't implement test or eval test
# its DEFAULT_TRAIN_BSIZE or DEFAULT_EVAL_BSIZE will still be None
if not self.batch_size:
raise NotImplementedError(f"Test {self.test} is not implemented.")
else:
self.batch_size = batch_size
# Check if specified batch size is supported by the model
if hasattr(self, "ALLOW_CUSTOMIZE_BSIZE") and (not getattr(self, "ALLOW_CUSTOMIZE_BSIZE")):
if self.test == "train" and (not self.batch_size == self.DEFAULT_TRAIN_BSIZE):
raise NotImplementedError("Model doesn't support customizing batch size.")
elif self.test == "eval" and (not self.batch_size == self.DEFAULT_EVAL_BSIZE):
raise NotImplementedError("Model doesn't support customizing batch size.")
elif self.dargs.accuracy:
self.batch_size = 4 if self.batch_size > 4 else self.batch_size
def load_metadata(self):
relative_path = self.__class__.__module__.split(".")
self.name = relative_path[-1]
metadata_loc = Path(REPO_PATH).joinpath(*relative_path).joinpath("metadata.yaml")
if not metadata_loc.exists():
return None
with open(metadata_loc, "r") as mf:
metadata = yaml.safe_load(mf)
return metadata
def add_context(self, context_fn, stage=TEST_STAGE.ALL):
ctx = context_fn()
assert isinstance(ctx, ContextManager), f"Expected to add a ContextManager, but got {type(ctx)}. Please report a bug."
if stage == TEST_STAGE.ALL:
self.run_contexts.append(context_fn)
elif stage == TEST_STAGE.FORWARD:
self.forward_contexts.append(context_fn)
elif stage == TEST_STAGE.BACKWARD:
self.backward_contexts.append(context_fn)
elif stage == TEST_STAGE.OPTIMIZER:
self.optimizer_contexts.append(context_fn)
# Common interface for all models extending BenchmarkModel to access the optimizer.
# Some models have an opt attribute, others have an optimizer attribute; this
# implementation handles both. This function should not error! Simply return None
# if there's no optimizer in sight.
def get_optimizer(self):
if hasattr(self, "optimizer"):
return self.optimizer
if hasattr(self, "opt"):
return self.opt
warnings.warn("The optimizer for this model is not stored in self.opt nor self.optimizer. "
"Currently returning None! Please override this implementation with your own "
"if there is an optimizer this should be returning instead.")
return None
# Takes in an optimizer and sets that to be the optimizer used from now on.
# There are special models like dcgan that would update multiple optimizers at once,
# so optimizer here is not always strictly a, say, torch.optim.Optimizer.
def set_optimizer(self, optimizer) -> None:
if hasattr(self, "optimizer"):
self.optimizer = optimizer
return
if hasattr(self, "opt"):
self.opt = optimizer
return
raise NotImplementedError("The optimizer for this model is not stored in self.opt nor self.optimizer. "
"Please override this implementation with your own.")
# Default implementation for replacing the model
def set_module(self, new_model):
if hasattr(self, 'model') and isinstance(self.model, torch.nn.Module):
self.model = new_model
else:
raise NotImplementedError("The instance variable 'model' does not exist or is not type 'torch.nn.Module', implement your own `set_module()` function.")
def gen_inputs(self, num_batches: int=1) -> Tuple[Generator, Optional[int]]:
"""Generate a tuple of (iterator of model input, the size of the iterator).
If size is None, the input is randomly generated and has infinite size."""
raise NotImplementedError("Default input generation function is not implemented. "
"Please submit an issue if you need input iterator implementation for the model.")
def invoke_staged_train_test(self) -> None:
optimizer = self.get_optimizer()
if optimizer is not None:
optimizer.zero_grad()
with nested(*self.forward_contexts):
losses = self.forward()
with nested(*self.backward_contexts):
self.backward(losses)
if optimizer is not None:
with nested(*self.optimizer_contexts):
self.optimizer_step()
return None
def invoke(self) -> Optional[Tuple[torch.Tensor]]:
out = None
if self.test == "train" and is_staged_train_test(self):
self.invoke_staged_train_test()
return out
with nested(*self.run_contexts):
if self.test == "train":
self.train()
elif self.test == "eval":
out = self.eval()
return out
def eval_in_nograd(self):
return True
def enable_channels_last(self):
model_name = self.name
try:
model, _ = self.get_module()
model = model.to(memory_format=torch.channels_last)
except RuntimeError:
warnings.warn(UserWarning(f"{model_name} doesn't support `channels_last` yet!"))
return
self.set_module(model)
def inputs_convert(example_inputs):
if isinstance(example_inputs, torch.Tensor) and example_inputs.dim()==4:
return example_inputs.to(memory_format=torch.channels_last)
elif isinstance(example_inputs, (tuple, list, dict)):
return tree_map(lambda x: inputs_convert(x), example_inputs)
else:
warnings.warn(UserWarning(f"{model_name} example inputs doesn't convert to `channels_last`!"))
return example_inputs
if hasattr(self, 'example_inputs'):
self.example_inputs = inputs_convert(self.example_inputs)
else:
warnings.warn(UserWarning(f"{model_name} example inputs doesn't convert to `channels_last`!"))
def enable_fx_int8(self, quant_engine:str='x86'):
torch.backends.quantized.engine = quant_engine
try:
model, _ = self.get_module()
# Get sub modules
model, sub_module_list = get_sub_module(model, dict(model.named_modules()), '')
if not len(sub_module_list):
warnings.warn(UserWarning(f"{self.name} doesn't have submodule can ben quantized!"))
model = prepare_sub_module(sub_module_list, model, '', quant_engine)
self.set_module(model)
# Calibration
self.eval()
model, _ = self.get_module()
model = convert_sub_module(sub_module_list, model, '')
self.set_module(model)
except Exception as e:
print(e)
raise RuntimeError(f"{self.name} doesn't support `fx_int8` yet!")
def enable_bf16(self):
model_name = self.name
try:
model, _ = self.get_module()
model = model.to(torch.bfloat16)
except RuntimeError:
warnings.warn(UserWarning(f"{model_name} doesn't support `to(torch.bfloat16)` yet!"))
return
self.set_module(model)
def inputs_convert(example_inputs):
if isinstance(example_inputs, torch.Tensor) and example_inputs.dtype == torch.float32:
return example_inputs.to(torch.bfloat16)
elif isinstance(example_inputs, (tuple, list, dict)):
return tree_map(lambda x: inputs_convert(x), example_inputs)
else:
warnings.warn(UserWarning(f"{model_name} example inputs doesn't convert to `torch.bfloat16`!"))
return example_inputs
if hasattr(self, 'example_inputs'):
self.example_inputs = inputs_convert(self.example_inputs)
else:
warnings.warn(UserWarning(f"{model_name} example inputs doesn't convert to `torch.bfloat16`!"))
def enable_amp(self):
if not self.dynamo and self.opt_args.backend == 'cudagraph':
            raise NotImplementedError("AMP is not implemented for the cudagraph backend.")
if not hasattr(self, "amp_context"):
raise RuntimeError(f"{self.name} doesn't have amp_context support!")
if self.device == "cpu":
self.amp_context = lambda: torch.cpu.amp.autocast()
elif self.device == "cuda":
self.amp_context = lambda: torch.cuda.amp.autocast()
@property
def pt2_compilation_time(self):
from torch._dynamo.utils import compile_times
compile_time = dict(zip(*compile_times(repr="csv", aggregate=True)))["_compile.<locals>.compile_inner"]
return float(compile_time)
@property
def pt2_graph_breaks(self):
from torch._dynamo.utils import counters
num_graph_breaks = len(counters["graph_break"].keys())
return num_graph_breaks
|
"""
Return a list of recent PyTorch wheels published on download.pytorch.org.
Users can specify package name, python version, platform, and the number of days to return.
If one of the specified packages is missing on a given day, the script will skip outputting results for that day.
"""
import os
import re
import requests
import argparse
import urllib.parse
from datetime import date, timedelta
from bs4 import BeautifulSoup
from collections import defaultdict
import sys
from pathlib import Path
import subprocess
from typing import Dict, List, Optional
REPO_ROOT = Path(__file__).parent.parent.parent.resolve()
class add_path():
def __init__(self, path):
self.path = path
def __enter__(self):
sys.path.insert(0, self.path)
def __exit__(self, exc_type, exc_value, traceback):
try:
sys.path.remove(self.path)
except ValueError:
pass
with add_path(str(REPO_ROOT)):
from utils.cuda_utils import DEFAULT_CUDA_VERSION, CUDA_VERSION_MAP
from utils.python_utils import DEFAULT_PYTHON_VERSION, PYTHON_VERSION_MAP
PYTORCH_CUDA_VERSION = CUDA_VERSION_MAP[DEFAULT_CUDA_VERSION]["pytorch_url"]
PYTORCH_PYTHON_VERSION = PYTHON_VERSION_MAP[DEFAULT_PYTHON_VERSION]["pytorch_url"]
torch_wheel_nightly_base = f"https://download.pytorch.org/whl/nightly/{PYTORCH_CUDA_VERSION}/"
torch_nightly_wheel_index = f"https://download.pytorch.org/whl/nightly/{PYTORCH_CUDA_VERSION}/torch_nightly.html"
torch_nightly_wheel_index_override = "torch_nightly.html"
def memoize(function):
"""
"""
call_cache = {}
def memoized_function(*f_args):
if f_args in call_cache:
return call_cache[f_args]
call_cache[f_args] = result = function(*f_args)
return result
return memoized_function
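# Hypothetical usage sketch for the memoize decorator above: repeated calls with the
# same positional arguments return the cached value instead of recomputing. The cache
# key is the tuple of positional args, so keyword or unhashable arguments are not supported.
#
#   @memoize
#   def fetch(url):
#       return requests.get(url).text
#
#   fetch("https://download.pytorch.org/whl/nightly/")  # performs the request
#   fetch("https://download.pytorch.org/whl/nightly/")  # served from call_cache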
@memoize
def get_wheel_index_data(py_version, platform_version, url=torch_nightly_wheel_index, override_file=torch_nightly_wheel_index_override):
"""
"""
if os.path.isfile(override_file) and os.stat(override_file).st_size:
with open(override_file) as f:
data = f.read()
else:
r = requests.get(url)
r.raise_for_status()
data = r.text
soup = BeautifulSoup(data, 'html.parser')
data = defaultdict(dict)
for link in soup.find_all('a'):
        group_match = re.search(r"([a-z]*)-(.*)-(.*)-(.*)-(.*)\.whl", link.text)
        # some packages (e.g., torch-rec) don't follow this naming convention
if not group_match:
continue
pkg, version, py, py_m, platform = group_match.groups()
version = urllib.parse.unquote(version)
if py == py_version and platform == platform_version:
full_url = os.path.join(torch_wheel_nightly_base, link.text)
data[pkg][version] = full_url
return data
def get_nightly_wheel_urls(packages:list, date:date,
py_version=PYTORCH_PYTHON_VERSION, platform_version='linux_x86_64'):
"""Gets urls to wheels for specified packages matching the date, py_version, platform_version
"""
date_str = f"{date.year}{date.month:02}{date.day:02}"
data = get_wheel_index_data(py_version, platform_version)
rc = {}
for pkg in packages:
pkg_versions = data[pkg]
# multiple versions could happen when bumping the pytorch version number
# e.g., both torch-1.11.0.dev20220211%2Bcu113-cp38-cp38-linux_x86_64.whl and
# torch-1.12.0.dev20220212%2Bcu113-cp38-cp38-linux_x86_64.whl exist in the download link
keys = sorted([key for key in pkg_versions if date_str in key], reverse=True)
if len(keys) > 1:
print(f"Warning: multiple versions matching a single date: {keys}, using {keys[0]}")
if len(keys) == 0:
return None
full_url = pkg_versions[keys[0]]
rc[pkg] = {
"version": keys[0],
"wheel": full_url,
}
return rc
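# Illustrative call (an assumption about typical usage, not exercised directly by this
# script): look up the torch and torchvision wheels published on a given date.
#
#   wheels = get_nightly_wheel_urls(["torch", "torchvision"], date(2023, 6, 1))
#   if wheels is not None:
#       print(wheels["torch"]["version"], wheels["torch"]["wheel"])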
def get_nightly_wheels_in_range(packages:list, start_date:date, end_date:date,
py_version=PYTORCH_PYTHON_VERSION, platform_version='linux_x86_64', reverse=False):
rc = []
curr_date = start_date
while curr_date <= end_date:
curr_wheels = get_nightly_wheel_urls(packages, curr_date,
py_version=py_version,
platform_version=platform_version)
if curr_wheels is not None:
rc.append(curr_wheels)
curr_date += timedelta(days=1)
if reverse:
rc.reverse()
return rc
def get_n_prior_nightly_wheels(packages:list, n:int,
py_version=PYTORCH_PYTHON_VERSION, platform_version='linux_x86_64', reverse=False):
end_date = date.today()
start_date = end_date - timedelta(days=n)
return get_nightly_wheels_in_range(packages, start_date, end_date,
py_version=py_version, platform_version=platform_version, reverse=reverse)
def get_most_recent_successful_wheels(packages: list, pyver: str, platform: str) -> Optional[Dict[str, Dict[str, str]]]:
    """Get the most recent successful nightly wheels as a dict mapping each package name
    to its version and wheel URL. Returns None if nothing is found within the past year."""
curr_date = date.today()
date_limit = curr_date - timedelta(days=365)
while curr_date >= date_limit:
wheels = get_nightly_wheel_urls(packages, curr_date, py_version=pyver, platform_version=platform)
if wheels:
return wheels
curr_date = curr_date - timedelta(days=1)
# Can't find any valid pytorch package
return None
def install_wheels(wheels):
"""Install the wheels specified in the wheels."""
wheel_urls = list(map(lambda x: wheels[x]["wheel"], wheels.keys()))
work_dir = Path(__file__).parent.joinpath(".data")
work_dir.mkdir(parents=True, exist_ok=True)
requirements_file = work_dir.joinpath("requirements.txt").resolve()
with open(requirements_file, "w") as rf:
rf.write("\n".join(wheel_urls))
command = ["pip", "install", "-r", str(requirements_file)]
print(f"Installing pytorch nightly packages command: {command}")
subprocess.check_call(command)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--pyver", type=str, default=PYTORCH_PYTHON_VERSION, help="PyTorch Python version")
parser.add_argument("--platform", type=str, default="linux_x86_64", help="PyTorch platform")
parser.add_argument("--priordays", type=int, default=1, help="Number of days")
parser.add_argument("--reverse", action="store_true", help="Return reversed result")
parser.add_argument("--packages", required=True, type=str, nargs="+", help="List of package names")
parser.add_argument("--install-nightlies", action="store_true",
help="Install the most recent successfully built nightly packages")
args = parser.parse_args()
if args.install_nightlies:
wheels = get_most_recent_successful_wheels(args.packages, args.pyver, args.platform)
        assert wheels, f"Could not find any successful pytorch nightly build for packages: {args.packages}."
print(f"Found pytorch nightly wheels: {wheels} ")
install_wheels(wheels)
exit(0)
wheels = get_n_prior_nightly_wheels(packages=args.packages,
n=args.priordays,
py_version=args.pyver,
platform_version=args.platform,
reverse=args.reverse)
for wheelset in wheels:
for pkg in wheelset:
print(f"{pkg}-{wheelset[pkg]['version']}: {wheelset[pkg]['wheel']}")
|
"""
Utils for model metadata
"""
from typing import Any, List, Dict
def match_item(item_name: str, item_val: str, skip_item: Dict[str, Any]) -> bool:
if item_name not in skip_item:
return True
return skip_item[item_name] == item_val
def skip_by_metadata(test: str, device:str, extra_args: List[str], metadata: Dict[str, Any]) -> bool:
"Check if the test should be skipped based on model metadata."
if not "not_implemented" in metadata:
return False
for skip_item in metadata["not_implemented"]:
match = match_item("test", test, skip_item) and \
match_item("device", device, skip_item) and \
match_item("extra_args", extra_args, skip_item)
if match:
return True
return False
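# Hedged example of the metadata shape this helper expects (field names follow the
# metadata.yaml convention used elsewhere in this repo; the concrete values are made up):
#
#   metadata = {"not_implemented": [{"test": "train", "device": "cpu"}]}
#   skip_by_metadata("train", "cpu", [], metadata)   # True: every listed field matches
#   skip_by_metadata("eval", "cpu", [], metadata)    # False: "test" does not match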
|
def prefetch_loader(loader, device):
result = []
for data in loader:
items = []
for item in data:
items.append(item.to(device))
result.append(tuple(items))
return result |
"""
PyTorch benchmark env check utils.
This file may be loaded without torch packages installed, e.g., in OnDemand CI.
"""
import copy
import importlib
import os
import argparse
import logging
from contextlib import contextmanager, ExitStack
from typing import Any, Dict, List, Optional
MAIN_RANDOM_SEED = 1337
# rounds for stableness tests
STABLENESS_CHECK_ROUNDS: int = 3
# rounds for correctness tests
CORRECTNESS_CHECK_ROUNDS: int = 2
# Use the list from
# https://github.com/pytorch/pytorch/blob/6c7410ddc350fea625e47744da9d6be7ec74b628/benchmarks/dynamo/common.py#L2247
UNSUPPORTED_USE_DETERMINISTIC_ALGORITHMS = [
"alexnet",
"Background_Matting",
"pytorch_CycleGAN_and_pix2pix",
"pytorch_unet",
"sam",
"Super_SloMo",
"vgg16",
]
CI_SKIP_OPTIMIZER = {
# TIMM
"convmixer_768_32", # accuracy
"hrnet_w18", # Stack issue in fx
# TorchBench
"dlrm", # symbolic shapes error
# HF
"pnasnet5large", # Stack issue in fx
"MobileBertForMaskedLM", # Stack issue in fx
"MobileBertForQuestionAnswering", # Stack issue in fx
"PegasusForConditionalGeneration", # OOM
}
# These models need a higher tolerance on GPU because their GPU kernels are non-deterministic.
REQUIRE_HIGHER_TOLERANCE = {
"alexnet",
"densenet121",
"hf_Albert",
"vgg16",
"mobilenet_v3_large",
"nvidia_deeprecommender",
"timm_efficientdet",
}
# These models need >1e-3 tolerance
REQUIRE_EVEN_HIGHER_TOLERANCE = {
"soft_actor_critic",
"tacotron2",
}
REQUIRE_HIGHER_FP16_TOLERANCE = {
"drq",
}
REQUIRE_COSINE_TOLERANCE = {
    # Kept here even though it is empty, in case we need it in the future.
}
SKIP_ACCURACY_CHECK_AS_EAGER_NON_DETERMINISTIC_MODELS = {
    # Models for which deterministic algorithms cannot be turned on in eager mode.
"Background_Matting",
"detectron2_fasterrcnn_r_101_c4",
"detectron2_fasterrcnn_r_101_dc5",
"detectron2_fasterrcnn_r_101_fpn",
"detectron2_fasterrcnn_r_50_c4",
"detectron2_fasterrcnn_r_50_dc5",
"detectron2_fasterrcnn_r_50_fpn",
"detectron2_maskrcnn",
"stable_diffusion_unet",
}
# Use the list from
# https://github.com/pytorch/pytorch/blob/6c7410ddc350fea625e47744da9d6be7ec74b628/benchmarks/dynamo/torchbench.py#L382
USE_GRAD_IN_INFERENCE = [
"maml"
]
HAS_NUMPY = True
log = logging.getLogger(__name__)
@contextmanager
def nested(*contexts):
"""
Chain and apply a list of contexts
"""
with ExitStack() as stack:
for ctx in contexts:
stack.enter_context(ctx())
yield contexts
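# Hypothetical usage sketch: `nested` takes context-manager *factories* (callables that
# return fresh context managers), which is why the model code stores
# `lambda: torch.cuda.amp.autocast()` rather than an autocast instance. `run_inference`
# below is a placeholder, not a function defined in this file.
#
#   with nested(lambda: torch.no_grad(), lambda: torch.cuda.amp.autocast()):
#       run_inference()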
def pick_grad(name: str, is_training: bool):
import torch
if is_training or name in USE_GRAD_IN_INFERENCE:
return torch.enable_grad()
else:
return torch.no_grad()
def set_random_seed():
"""Make torch manual seed deterministic. Helps with accuracy testing."""
import torch
import random
import numpy
def deterministic_torch_manual_seed(*args, **kwargs):
from torch._C import default_generator
seed = MAIN_RANDOM_SEED
import torch.cuda
if not torch.cuda._is_in_bad_fork():
torch.cuda.manual_seed_all(seed)
return default_generator.manual_seed(seed)
torch.manual_seed(MAIN_RANDOM_SEED)
random.seed(MAIN_RANDOM_SEED)
numpy.random.seed(MAIN_RANDOM_SEED)
torch.manual_seed = deterministic_torch_manual_seed
def get_pkg_versions(packages: List[str]) -> Dict[str, str]:
versions = {}
    for pkg in packages:
        module = importlib.import_module(pkg)
        versions[pkg] = module.__version__
return versions
def has_native_amp() -> bool:
import torch
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
return True
except AttributeError:
pass
return False
def is_timm_model(model: 'torchbenchmark.util.model.BenchmarkModel') -> bool:
return hasattr(model, 'TIMM_MODEL') and model.TIMM_MODEL
def is_torchvision_model(model: 'torchbenchmark.util.model.BenchmarkModel') -> bool:
return hasattr(model, 'TORCHVISION_MODEL') and model.TORCHVISION_MODEL
def is_hf_model(model: 'torchbenchmark.util.model.BenchmarkModel') -> bool:
return hasattr(model, 'HF_MODEL') and model.HF_MODEL
def is_fambench_model(model: 'torchbenchmark.util.model.BenchmarkModel') -> bool:
return hasattr(model, 'FAMBENCH_MODEL') and model.FAMBENCH_MODEL
def is_staged_train_test(model: 'torchbenchmark.util.model.BenchmarkModel') -> bool:
return hasattr(model, 'forward') and hasattr(model, 'backward') and hasattr(model, 'optimizer_step')
def save_deterministic_dict(name: str):
determinism_dict = {}
if "CUBLAS_WORKSPACE_CONFIG" in os.environ:
determinism_dict["CUBLAS_WORKSPACE_CONFIG"] = os.environ["CUBLAS_WORKSPACE_CONFIG"]
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
import torch
determinism_dict["torch.use_deterministic_algorithms"] = torch.are_deterministic_algorithms_enabled()
determinism_dict["torch.backends.cudnn.allow_tf32"] = torch.backends.cudnn.allow_tf32
determinism_dict["torch.backends.cudnn.benchmark"] = torch.backends.cudnn.benchmark
determinism_dict["torch.backends.cuda.matmul.allow_tf32"] = torch.backends.cuda.matmul.allow_tf32
    if name not in UNSUPPORTED_USE_DETERMINISTIC_ALGORITHMS:
torch.use_deterministic_algorithms(True)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.allow_tf32 = False
torch.backends.cudnn.benchmark = False
torch.backends.cuda.matmul.allow_tf32 = False
return determinism_dict
def load_deterministic_dict(determinism_dict: Dict[str, bool]):
if "CUBLAS_WORKSPACE_CONFIG" in determinism_dict:
os.environ["CUBLAS_WORKSPACE_CONFIG"] = determinism_dict["CUBLAS_WORKSPACE_CONFIG"]
elif "CUBLAS_WORKSPACE_CONFIG" in os.environ:
del os.environ["CUBLAS_WORKSPACE_CONFIG"]
import torch
torch.use_deterministic_algorithms(determinism_dict["torch.use_deterministic_algorithms"])
torch.backends.cudnn.allow_tf32 = determinism_dict["torch.backends.cudnn.allow_tf32"]
torch.backends.cudnn.benchmark = determinism_dict["torch.backends.cudnn.benchmark"]
torch.backends.cuda.matmul.allow_tf32 = determinism_dict["torch.backends.cuda.matmul.allow_tf32"]
def cast_to(dtype, model, inputs):
import torch
from torch.utils._pytree import tree_map
# cast model and inputs to fp16
if dtype == torch.float16:
model = model.half()
else:
model = model.to(dtype)
inputs = tree_map(
lambda x: x.to(dtype)
if isinstance(x, torch.Tensor) and x.is_floating_point()
else x,
inputs,
)
return model, inputs
def collect_results(model, prediction, loss, example_inputs):
import torch
results = []
results.append(prediction)
results.append(loss)
# if isinstance(loss, torch.Tensor) and loss.item() > 1:
# log.warning(
# f"High loss value alert - {loss:.2f}. Can result in unstable gradients."
# )
grads = dict()
params = dict()
for name, param in model.named_parameters():
# if isinstance(model, eval_frame.OptimizedModule):
# name = remove_optimized_module_prefix(name)
param_copy = param
grad = param.grad
# Treat None and zero grad as same
if param.grad is None:
grad = torch.zeros_like(param)
grads[name + ".grad"] = grad
params[name] = param_copy
results.append(grads)
results.append(params)
buffers = dict()
for name, buffer in model.named_buffers():
# if isinstance(model, eval_frame.OptimizedModule):
# name = remove_optimized_module_prefix(name)
buffers[name] = buffer
results.append(buffers)
for example in example_inputs:
if isinstance(example, (tuple, list)):
for inp in example:
if isinstance(inp, torch.Tensor):
results.append(inp.grad)
else:
if isinstance(example, torch.Tensor):
results.append(example.grad)
return results
def clone_input(x, *, dtype=None):
"""copy while preserving strides"""
import torch
# TODO: this is questionable
if isinstance(x, torch._subclasses.FakeTensor):
# this func fails on fake tensors in __torch_dispatch__
return x
def torch_clone(x):
y = torch.clone(x)
if x.is_leaf:
y.requires_grad_(x.requires_grad)
if x.is_leaf and x.grad is not None:
y.grad = clone_input(x.grad, dtype=dtype)
if hasattr(x, "_dynamo_dynamic_indices"):
y._dynamo_dynamic_indices = x._dynamo_dynamic_indices.copy()
return y
with torch.no_grad():
if x.device.type == "xla":
# Access data_ptr() for a xla tensor will cause crash
return torch_clone(x)
needed_size = sum(
(shape - 1) * stride for shape, stride in zip(x.size(), x.stride())
)
if x.is_quantized:
result = torch.empty_quantized((needed_size + 32,), x)
else:
result = torch.empty(
needed_size + 32, dtype=dtype or x.dtype, device=x.device
)
cache_line_offset = (
(x.data_ptr() - result.data_ptr()) % 32
) // x.element_size()
result.as_strided_(x.size(), x.stride(), cache_line_offset)
try:
result.copy_(x.clone())
if x.is_leaf:
result.requires_grad_(x.requires_grad)
if x.is_leaf and x.grad is not None:
result.grad = clone_input(x.grad, dtype=dtype)
except RuntimeError:
# RuntimeError: unsupported operation: more than one element of the written-to
# tensor refers to a single memory location. Please clone() the tensor before
# performing the operation.
return torch_clone(x)
if hasattr(x, "_dynamo_dynamic_indices"):
result._dynamo_dynamic_indices = x._dynamo_dynamic_indices.copy()
return result
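# Small illustrative check (an assumption about the intended behavior, not a test that
# ships in this repo): the clone keeps the original's non-contiguous strides and does
# not share storage with it.
#
#   x = torch.randn(4, 4).t()          # transposed view, strides (1, 4)
#   y = clone_input(x)
#   assert y.stride() == x.stride() and y.data_ptr() != x.data_ptr()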
def clone_inputs(example_inputs):
import torch
if type(example_inputs) is dict:
res = dict(example_inputs)
for key, value in res.items():
assert isinstance(value, torch.Tensor)
res[key] = clone_input(value)
return res
res = list(example_inputs)
for i in range(len(res)):
if isinstance(res[i], torch.Tensor):
res[i] = clone_input(res[i])
return res
def init_optimizer(name, device, params, is_training):
import torch
if device == "cuda" and is_training and name not in CI_SKIP_OPTIMIZER:
optimizer = torch.optim.SGD(params, lr=0.01)
else:
optimizer = None
return optimizer
def reduce_to_scalar_loss(out):
"""Reduce the output of a model to get scalar loss"""
import torch
if isinstance(out, torch.Tensor):
# Mean does not work on integer tensors
return out.sum() / out.numel()
elif isinstance(out, (list, tuple)):
return sum([reduce_to_scalar_loss(x) for x in out]) / len(out)
elif type(out).__name__ in (
"MaskedLMOutput",
"Seq2SeqLMOutput",
"CausalLMOutputWithCrossAttentions",
):
return reduce_to_scalar_loss(out.logits)
elif type(out).__name__ == "SquashedNormal":
return out.mean.sum()
elif isinstance(out, dict):
return sum([reduce_to_scalar_loss(value) for value in out.values()]) / len(
out.keys()
)
    elif out is None:
return 0.0
raise NotImplementedError("Don't know how to reduce", type(out))
def compute_loss(pred):
return reduce_to_scalar_loss(pred)
def optimizer_zero_grad(optimizer, mod):
if optimizer is not None:
optimizer.zero_grad(True)
else:
mod.zero_grad(True)
def optimizer_step(optimizer):
if optimizer is not None:
optimizer.step()
def forward_pass(mod, inputs, contexts, _collect_outputs=True):
with nested(*contexts):
return mod(*inputs)
def forward_and_backward_pass(mod, inputs, contexts, optimizer, collect_outputs=True):
cloned_inputs = clone_inputs(inputs)
optimizer_zero_grad(optimizer, mod)
with nested(*contexts):
pred = mod(*cloned_inputs)
loss = compute_loss(pred)
loss.backward(retain_graph=True)
optimizer_step(optimizer)
if collect_outputs:
return collect_results(mod, pred, loss, cloned_inputs)
return None
def run_n_iterations(mod, inputs, contexts, optimizer=None, is_training=False, iterations=STABLENESS_CHECK_ROUNDS):
def _model_iter_fn(mod, inputs, contexts, optimizer, collect_outputs):
if is_training:
return forward_and_backward_pass(mod, inputs, contexts, optimizer, collect_outputs)
else:
return forward_pass(mod, inputs, contexts, collect_outputs)
for _ in range(iterations - 1):
_model_iter_fn(mod, inputs, contexts, optimizer, collect_outputs=False)
return _model_iter_fn(mod, inputs, contexts, optimizer, collect_outputs=True)
def get_tolerance_and_cosine_flag(model, is_training, current_device, name):
tolerance = 1e-4
cosine = model.dargs.use_cosine_similarity
# Increase the tolerance for torch allclose
if model.dargs.precision == "fp16" or model.dargs.precision == "amp":
if name in REQUIRE_HIGHER_FP16_TOLERANCE:
return 1e-2, cosine
return 1e-3, cosine
if is_training and current_device == "cuda":
tolerance = 1e-3
        if name in REQUIRE_COSINE_TOLERANCE:
cosine = True
elif name in REQUIRE_HIGHER_TOLERANCE:
tolerance = 1e-3
elif name in REQUIRE_EVEN_HIGHER_TOLERANCE:
tolerance = 8 * 1e-2
return tolerance, cosine
def skip_accuracy_check_as_eager_non_deterministic(is_training):
if is_training:
return SKIP_ACCURACY_CHECK_AS_EAGER_NON_DETERMINISTIC_MODELS
return set()
def check_accuracy(tbmodel: 'torchbenchmark.util.model.BenchmarkModel') -> str:
import torch
import functools
def _equal_nan_p(precision):
equal_nan = True
if precision == "fp32":
equal_nan = False
return equal_nan
def reset_rng_state():
set_random_seed()
def deepcopy_model(model, is_deepcopy):
if not is_deepcopy:
return model
try:
return copy.deepcopy(model)
except TypeError:
return model
def maybe_cast(tbmodel, model, example_inputs):
model = deepcopy_model(model, tbmodel.DEEPCOPY)
example_inputs = clone_inputs(example_inputs)
if tbmodel.dargs.precision == "fp32":
model, example_inputs = cast_to(torch.float32, model, example_inputs)
elif tbmodel.dargs.precision == "fp16":
model, example_inputs = cast_to(torch.float16, model, example_inputs)
elif tbmodel.dargs.precision == "bf16":
model, example_inputs = cast_to(torch.bfloat16, model, example_inputs)
return model, example_inputs
model, example_inputs = tbmodel.get_module()
name = tbmodel.name
current_device = tbmodel.device
optimizer = None
is_training = tbmodel.test == "train"
is_deepcopy = tbmodel.DEEPCOPY
accuracy_status = "pass"
contexts = []
equal_nan = _equal_nan_p(tbmodel.dargs.precision)
if tbmodel.device == "cuda" and tbmodel.dargs.precision == "amp" and is_training:
contexts.append(torch.cuda.amp.autocast)
    elif tbmodel.dargs.precision in ("amp", "bf16") and tbmodel.device == "cpu":
contexts.append(torch.cpu.amp.autocast)
# Collect the fp64 reference outputs to be used later for accuracy checking.
fp64_outputs = None
try:
model_fp64, inputs_fp64 = cast_to(
torch.float64,
deepcopy_model(model, is_deepcopy=True),
clone_inputs(example_inputs),
)
optimizer = init_optimizer(name, current_device, model_fp64.parameters(), is_training)
fp64_outputs = run_n_iterations(model_fp64, inputs_fp64, contexts, optimizer, is_training)
except Exception:
log.warning(
"fp64 golden ref were not generated for %s. Setting accuracy check to cosine",
tbmodel.name,
)
tbmodel.dargs.use_cosine_similarity = True
fp64_outputs = None
tolerance, cos_similarity = get_tolerance_and_cosine_flag(
tbmodel, is_training, current_device, name
)
    # Cast the model and inputs to the requested precision (fp32/fp16/bf16) as necessary
model, example_inputs = maybe_cast(tbmodel, model, example_inputs)
with pick_grad(name, is_training):
# Get results of native pytorch
reset_rng_state()
try:
model_copy = deepcopy_model(model, is_deepcopy)
optimizer = init_optimizer(name, current_device, model_copy.parameters(), is_training)
correct_result = run_n_iterations(
model_copy, clone_inputs(example_inputs), contexts, optimizer, is_training
)
except Exception as e:
accuracy_status = (
"eager_1st_run_OOM"
if isinstance(e, torch.cuda.OutOfMemoryError)
else "eager_1st_run_fail"
)
print(e)
log.exception(e)
return accuracy_status
# Rerun native pytorch
reset_rng_state()
try:
model_copy = deepcopy_model(model, is_deepcopy)
optimizer = init_optimizer(name, current_device, model_copy.parameters(), is_training)
correct_rerun_result = run_n_iterations(
model_copy, clone_inputs(example_inputs), contexts, optimizer, is_training
)
except Exception as e:
accuracy_status = (
"eager_2nd_run_OOM"
if isinstance(e, torch.cuda.OutOfMemoryError)
else "eager_2nd_run_fail"
)
return accuracy_status
        # Two eager runs should produce exactly the same result
is_same = True
try:
if (
name not in skip_accuracy_check_as_eager_non_deterministic(is_training)
and not same(
correct_result,
correct_rerun_result,
fp64_ref=None,
cos_similarity=False,
tol=0,
equal_nan=equal_nan,
)
):
is_same = False
except Exception as e:
# Sometimes torch.allclose may throw RuntimeError
is_same = False
if not is_same:
accuracy_status = "eager_two_runs_differ"
return accuracy_status
if not hasattr(tbmodel.opt_args, 'torchdynamo') or not tbmodel.opt_args.torchdynamo:
return accuracy_status
correct_rerun_result = None
# Run with Dynamo
        # Sometimes CI fails with a random Triton compilation failure, which will be skipped for now
# TODO: revisit this after switching to new Triton runtime
reset_rng_state()
torch._dynamo.reset()
optimize_ctx = functools.partial(
torch.compile,
backend=tbmodel.opt_args.torchdynamo,
)
try:
model_copy = deepcopy_model(model, is_deepcopy)
optimizer = init_optimizer(name, current_device, model_copy.parameters(), is_training)
optimized_model_iter_fn = optimize_ctx(run_n_iterations)
new_result = optimized_model_iter_fn(model_copy, example_inputs, contexts, optimizer, is_training)
except Exception as e:
log.exception(e)
accuracy_status = (
"OOM"
if isinstance(e, torch.cuda.OutOfMemoryError)
else "fail_to_run"
)
return accuracy_status
try:
if not same(
correct_result,
new_result,
fp64_outputs,
equal_nan=equal_nan,
cos_similarity=cos_similarity,
tol=tolerance,
):
is_same = False
except Exception as e:
# Sometimes torch.allclose may throw RuntimeError
is_same = False
if not is_same:
accuracy_status = "fail_accuracy"
return accuracy_status
return accuracy_status
def istype(obj, allowed_types):
"""isinstance() without subclasses"""
if isinstance(allowed_types, (tuple, list, set)):
return type(obj) in allowed_types
return type(obj) is allowed_types
def is_numpy_int_type(value):
if HAS_NUMPY:
import numpy as np
return istype(
value,
(
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
),
)
else:
return False
def is_numpy_float_type(value):
if HAS_NUMPY:
import numpy as np
return istype(
value,
(
np.float16,
np.float32,
np.float64,
),
)
else:
return False
def is_numpy_ndarray(value):
if HAS_NUMPY:
import numpy as np
return istype(value, np.ndarray)
else:
return False
def rmse(ref, res):
"""
Calculate root mean squared error
"""
import torch
return torch.sqrt(torch.mean(torch.square(ref - res)))
def same(
ref,
res,
fp64_ref=None,
cos_similarity=False,
tol=1e-4,
equal_nan=False,
exact_dtype=True,
relax_numpy_equality=False,
ignore_non_fp=False,
log_error=log.error,
):
"""Check correctness to see if ref and res match"""
import math
import torch
if fp64_ref is None:
fp64_ref = ref
if isinstance(ref, (list, tuple, torch.nn.ParameterList, torch.Size)):
assert isinstance(res, (list, tuple)), f"type mismatch {type(ref)} {type(res)}"
return len(ref) == len(res) and all(
same(
ai,
bi,
fp64_refi,
cos_similarity,
tol,
equal_nan,
exact_dtype,
relax_numpy_equality,
ignore_non_fp,
log_error=log_error,
)
for ai, bi, fp64_refi in zip(ref, res, fp64_ref)
)
elif isinstance(ref, dict):
assert isinstance(res, dict)
assert set(ref.keys()) == set(
res.keys()
), f"keys mismatch {set(ref.keys())} == {set(res.keys())}"
for k in sorted(ref.keys()):
if not (
same(
ref[k],
res[k],
fp64_ref[k],
cos_similarity=cos_similarity,
tol=tol,
equal_nan=equal_nan,
exact_dtype=exact_dtype,
relax_numpy_equality=relax_numpy_equality,
ignore_non_fp=ignore_non_fp,
log_error=log_error,
)
):
log_error("Accuracy failed for key name %s", k)
return False
return True
elif isinstance(ref, torch.Tensor):
assert not isinstance(ref, torch._subclasses.FakeTensor)
assert not isinstance(res, torch._subclasses.FakeTensor)
if ref.is_sparse:
assert res.is_sparse
ref = ref.to_dense()
res = res.to_dense()
assert isinstance(res, torch.Tensor), f"type mismatch {type(ref)} {type(res)}"
if exact_dtype:
if ref.dtype != res.dtype:
log_error("dtype mismatch %s, %s", ref.dtype, res.dtype)
return False
if ref.dtype == torch.bool:
if ignore_non_fp:
return True
# triton stores bool as int8, so add this for more accurate checking
r = torch.allclose(
ref.to(dtype=torch.uint8),
res.to(dtype=torch.uint8),
atol=tol,
rtol=tol,
equal_nan=equal_nan,
)
if not r:
log_error("Accuracy failed: uint8 tensor did not match")
return r
if cos_similarity:
ref = ref.flatten().to(torch.float32)
res = res.flatten().to(torch.float32)
if torch.allclose(ref, res, atol=tol, rtol=tol, equal_nan=True):
# early exit that handles zero/nan better
# cosine_similarity(zeros(10), zeros(10), dim=0) is 0
return True
score = torch.nn.functional.cosine_similarity(ref, res, dim=0, eps=1e-6)
if score < 0.99:
log.warning("Similarity score=%s", score.cpu().detach().item())
return score >= 0.99
else:
if not exact_dtype:
ref = ref.to(res.dtype)
# First try usual allclose
if torch.allclose(ref, res, atol=tol, rtol=tol, equal_nan=equal_nan):
return True
# Check error from fp64 version
if fp64_ref.dtype == torch.float64:
ref_error = rmse(fp64_ref, ref).item()
res_error = rmse(fp64_ref, res).item()
multiplier = 2.0
if (
fp64_ref.numel() < 1000
or (ref.ndim == 4 and ref.shape[-1] == ref.shape[-2] == 1)
# large tol means a benchmark has been specified as REQUIRE_HIGHER_TOLERANCE
or tol >= 2 * 1e-2
):
# In the presence of noise, noise might dominate our error
# metric for smaller tensors.
                        # Similarly, for 1x1 kernels, there seems to be high noise with amp.
multiplier = 3.0
passes_test = res_error <= (multiplier * ref_error + tol / 10.0)
if not passes_test:
log_error(
"RMSE (res-fp64): %.5f, (ref-fp64): %.5f and shape=%s",
res_error,
ref_error,
res.size(),
)
# import pdb; pdb.set_trace()
return passes_test
if ignore_non_fp:
return True
log_error("Accuracy failed: allclose not within tol=%s", tol)
return False
elif isinstance(ref, (str, int, type(None), bool, torch.device)):
if ignore_non_fp:
return True
r = ref == res
if not r:
log_error("Accuracy failed (%s): %s != %s", type(ref), ref, res)
return r
elif isinstance(ref, float):
r = math.isclose(ref, res, rel_tol=tol, abs_tol=tol)
if not r:
log_error(
"Accuracy failed (float): %s != %s (within tol=%s)", ref, res, tol
)
return r
elif is_numpy_int_type(ref) or is_numpy_float_type(ref):
if relax_numpy_equality and not (
is_numpy_int_type(res) or is_numpy_float_type(res)
):
ref = ref.item()
r = (type(ref) is type(res)) and (ref == res)
if not r:
log_error("Accuracy failed (numpy): %s != %s", ref, res)
return r
elif is_numpy_ndarray(ref):
return (type(ref) is type(res)) and (ref == res).all()
elif type(ref).__name__ in (
"MaskedLMOutput",
"Seq2SeqLMOutput",
"CausalLMOutputWithCrossAttentions",
"LongformerMaskedLMOutput",
"Instances",
"SquashedNormal",
"Boxes",
"Normal",
"TanhTransform",
"Foo",
"Variable",
):
assert type(ref) is type(res)
return all(
same(
getattr(ref, key),
getattr(res, key),
getattr(fp64_ref, key),
cos_similarity=cos_similarity,
tol=tol,
equal_nan=equal_nan,
exact_dtype=exact_dtype,
relax_numpy_equality=relax_numpy_equality,
ignore_non_fp=ignore_non_fp,
log_error=log_error,
)
for key in ref.__dict__.keys()
)
else:
raise RuntimeError(f"unsupported type: {type(ref).__name__}")
|
import re
import torch
from torch.ao.quantization import QuantWrapper, get_default_qconfig_mapping, get_default_qconfig_propagation_list
from torch.ao.quantization.quantize_fx import _fuse_fx, prepare_fx, convert_fx
from torchbenchmark.util.env_check import is_hf_model
def _append_attr(fx_module, module, fx_white_list=[]):
fx_attr = dir(fx_module)
org_attr = dir(module)
ignore_match_patterns = [r"_", r"quant", r"dequant", r"weight",
r"bias", r'activation_post_process']
ignore_search_patterns = [r"_scale_", r"_zero_point_",
r'_activation_post_process_']
add_special_patterns = [r"_forward_hooks", r"_forward_pre_hooks", r"_backward_hooks"]
attr_names = []
for i in org_attr:
if type(module) in fx_white_list and type(module) != torch.nn.Sequential \
and any([re.search(p, i) for p in add_special_patterns]):
continue
if any([re.search(p, i) for p in add_special_patterns]) \
or (i not in fx_attr \
and not any([re.match(p, i) for p in ignore_match_patterns]) \
and not any([re.search(p, i) for p in ignore_search_patterns])) :
attr_names.append(i)
for name in attr_names:
attr = getattr(module, name, None)
if isinstance(attr, torch.nn.Module) or \
isinstance(attr, torch.quantization.qconfig.QConfig):
continue
setattr(fx_module, name, attr)
return fx_module
def get_sub_module(model, module_dict, prefix):
fx_white_list = get_default_qconfig_propagation_list()
ignore_list = []
    # `is_hf_model` is the imported function object, so the original truthiness check was
    # always taken; keep that behavior but tolerate environments without transformers.
    try:
        import transformers
        ignore_list.extend([transformers.models.gpt2.modeling_gpt2.GPT2Attention, transformers.models.t5.modeling_t5.T5DenseActDense])
    except ImportError:
        pass
def _get_sub_module(model, module_dict, prefix, sub_module_list):
for name, module in model.named_children():
quant_wrap_flag = False
if type(module) in ignore_list:
continue
op_name = prefix + "." + name if prefix != "" else name
if op_name not in module_dict:
continue
if type(module) in fx_white_list and type(module) != torch.nn.Sequential:
module = QuantWrapper(module)
quant_wrap_flag = True
try:
graph_module = torch.fx.symbolic_trace(module)
if not quant_wrap_flag and str(module.get_submodule).count("\n") != str(graph_module.get_submodule).count("\n"):
continue
_fuse_fx(graph_module, False)
setattr(model, name, module)
sub_module_list.append(op_name)
            except Exception:
module = _get_sub_module(module, module_dict, op_name, sub_module_list)
setattr(model, name, module)
return model
sub_module_list = []
model = _get_sub_module(model, module_dict, prefix, sub_module_list)
return model, sub_module_list
def prepare_sub_module(sub_module_list, model, prefix, quant_engine:str='x86'):
qconfig_mapping = get_default_qconfig_mapping(quant_engine)
for name, module in model.named_children():
op_name = prefix + '.' + name if prefix != '' else name
if op_name in sub_module_list:
prepared_module = prepare_fx(module, qconfig_mapping, None)
_append_attr(prepared_module, module)
setattr(model, name, prepared_module)
else:
prepared_module = prepare_sub_module(sub_module_list, module, op_name, quant_engine)
_append_attr(prepared_module, module)
setattr(model, name, prepared_module)
return model
def convert_sub_module(sub_module_list, model, prefix):
for name, module in model.named_children():
op_name = prefix + '.' + name if prefix != '' else name
if op_name in sub_module_list:
convert_module = convert_fx(module)
setattr(model, name, convert_module)
else:
convert_module = convert_sub_module(sub_module_list, module, op_name)
setattr(model, name, convert_module)
return model
|
import json
import os
import pandas as pd
import typing
class BenchmarkData:
def __init__(self):
self._benchmark_data = {}
self._machine_info = {}
self._commit_info = {}
self._names_all = set()
self._names_common = set()
self._tags = []
self._json_raw = []
def add_json_data(self, tag, json_data):
names = set([b['name'] for b in json_data['benchmarks']])
self._names_all.update(names)
if len(self._benchmark_data) == 0:
self._names_common.update(names)
else:
self._names_common.intersection_update(names)
self._benchmark_data[tag] = {b['name']: b for b in json_data['benchmarks']}
self._machine_info[tag] = json_data['machine_info']
self._commit_info[tag] = json_data['commit_info']
self._tags.append(tag)
self._json_raw.append(json_data)
def tags(self):
return list(self._benchmark_data.keys())
def benchmark_names(self, mode='common', keyword_filter=None):
"""
Return the names of benchmarks across the dataset.
mode:
'common': intersection across dataset files - useful for comparison plot
'all': union across dataset files
'outliers': union - intersection across dataset files
"""
if mode == 'common':
names = self._names_common
elif mode == 'all':
names = self._names_all
elif mode == 'outliers':
names = self._names_all - self._names_common
if keyword_filter is not None:
if isinstance(keyword_filter, str):
keyword_filter = [keyword_filter]
for kw in keyword_filter:
names = [n for n in names if kw in n]
return names
    def as_dataframe(self, name, max_data=100):
        df = pd.DataFrame()
        for i, tag in enumerate(self._benchmark_data):
            benchmark = self._benchmark_data[tag][name]
            row = (pd.DataFrame()
                   .assign(time=benchmark['stats']['data'][:max_data])
                   .assign(tag=tag)
                   .assign(file_idx=i)
                   .assign(git_repo=self._commit_info[tag]['project'])
                   .assign(git_commit=self._commit_info[tag]['id'])
                   .assign(torch=self._machine_info[tag]['pytorch_version'])
                   .assign(torchvision=self._machine_info[tag]['torchvision_version'])
                   .assign(date=self._commit_info[tag]['time']))
            # DataFrame.append was removed in pandas 2.0; concatenate instead.
            df = pd.concat([df, row], ignore_index=True)
        return df
def load_data_dir(data_dir, most_recent_files:int =None, use_history_file=True):
"""
    Load all the benchmark JSON files in the given data dir, up to the N most recent.
    If use_history_file=True, determine recency from the ordering in the history file.
"""
history_file = os.path.join(data_dir, 'history')
if os.path.isfile(history_file):
with open(history_file) as hf:
history = hf.read().splitlines()
files = [os.path.join(data_dir, f) for f in history]
else:
files = sorted([os.path.join(data_dir, f) for f in os.listdir(data_dir) if os.path.splitext(f)[1] == '.json'])
if most_recent_files is not None:
files = files[:most_recent_files]
return load_data_files(files)
def load_data_files(files: typing.List[str]):
data = BenchmarkData()
for fname in files:
try:
with open(fname) as f:
data.add_json_data(fname, json.load(f))
except:
print(f"Error loading {fname}")
raise
return data
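# Illustrative usage (the directory path and benchmark keyword are hypothetical):
# aggregate the 5 most recent benchmark JSON files from a directory and pull one
# benchmark's timing samples into a DataFrame.
#
#   data = load_data_dir(".userbenchmark/results", most_recent_files=5)
#   names = data.benchmark_names(mode='common', keyword_filter='resnet50')
#   df = data.as_dataframe(names[0])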
|
import os
import sys
import subprocess
import traceback
from pathlib import Path
from torchbenchmark import REPO_PATH
LIT_LLAMA_PATH = os.path.join(REPO_PATH, "submodules", "lit-llama")
def update_lit_llama_submodule():
update_command = ["git", "submodule", "update",
"--init", "--recursive", os.path.join("submodules", "lit-llama")]
subprocess.check_call(update_command, cwd=REPO_PATH)
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', os.path.join(LIT_LLAMA_PATH, "requirements.txt")])
def openllama_download():
if os.path.exists(os.path.join(LIT_LLAMA_PATH, "checkpoints/lit-llama/7B/lit-llama.pth")):
return
subprocess.check_call([
sys.executable,
os.path.join(LIT_LLAMA_PATH, 'scripts/download.py'),
'--repo_id',
'openlm-research/open_llama_7b_700bt_preview',
'--local_dir',
os.path.join(LIT_LLAMA_PATH, 'checkpoints/open-llama/7B')
])
subprocess.check_call([
sys.executable,
os.path.join(LIT_LLAMA_PATH, 'scripts/convert_hf_checkpoint.py'),
'--checkpoint_dir', os.path.join(LIT_LLAMA_PATH, 'checkpoints/open-llama/7B'),
'--model_size', '7B',
], cwd=LIT_LLAMA_PATH)
def install_lit_llama():
import torch
update_lit_llama_submodule()
pip_install_requirements()
try:
from pynvml import nvmlDeviceGetMemoryInfo
info = nvmlDeviceGetMemoryInfo(torch.cuda._get_pynvml_handler())
if info.total < 40 * 1024 ** 3:
print("not enough GPU memory for 7B parameters, skipping llama (avail: {info.total / 1024 ** 3}GB)")
return
except Exception as e:
print("failed to test GPU memory, skipping llama weights")
traceback.print_exc()
return
openllama_download()
|
import subprocess
import os
import sys
from pathlib import Path
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
def pip_install_requirements():
requirements_file = os.path.join(CURRENT_DIR, "requirements.txt")
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', requirements_file])
def install_diffusers():
pip_install_requirements() |
import torch
from torchbenchmark.util.model import BenchmarkModel
from diffusers import StableDiffusionInstructPix2PixPipeline, EulerAncestralDiscreteScheduler
from typing import Optional, List
class DiffuserModel(BenchmarkModel):
DIFFUSER_MODEL = True
def __init__(self, name: str, test: str, device: str, batch_size: Optional[int] = None, extra_args: List[str] = ...):
super().__init__(test, device, batch_size, extra_args)
if self.device == "cpu":
raise NotImplementedError(f"Model {self.name} does not support CPU device.")
if not self.dargs.precision == "fp16":
raise NotImplementedError(f"Model {self.name} only supports fp16 precision.")
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(name, torch_dtype=torch.float16, safety_checker=None)
pipe.to(self.device)
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
self.pipe = pipe
prompt = "turn him into cyborg"
# use the same size as the example image
# https://raw.githubusercontent.com/timothybrooks/instruct-pix2pix/main/imgs/example.jpg
self.example_inputs = (prompt, torch.randn(self.batch_size, 3, 32, 32).to(self.device))
def enable_fp16_half(self):
pass
def get_module(self):
return self.pipe, self.example_inputs
def train(self):
raise NotImplementedError(f"Train is not implemented for model {self.name}")
def eval(self):
with torch.no_grad():
images = self.pipe(*self.example_inputs).images
return images
|