diff --git a/ckpts/universal/global_step120/zero/7.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/7.attention.query_key_value.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..01bae7899d2af797fb2066bef15e03ef0d3260ba --- /dev/null +++ b/ckpts/universal/global_step120/zero/7.attention.query_key_value.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14abea4c43e48d1f4affe5a9d8d7b0feee8ca83a5ed9f53867d7ae700ebd5487 +size 50332828 diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd0985ec0a45b39cbfd8bf34322494efdf27f1c8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/autotune_process.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/autotune_process.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c04c0a4c618bc7b1ce415df025712224654eadb Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/autotune_process.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/bounds.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/bounds.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bbead676516b7691a5f57f600a43159c62567b00 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/bounds.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/codecache.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/codecache.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a52055d761cce5c86cf1333cf614b28aef9da8f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/codecache.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/comms.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/comms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2706b9329d88da615180cc4bc25beb5ad50f850a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/comms.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/compile_fx.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/compile_fx.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e21204f088a7a4da0c5e301dde46eeada47312d9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/compile_fx.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/config.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..686ff03fdbd03492f9e411e0e829704b90275b37 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/config.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/cudagraph_trees.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/cudagraph_trees.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..381b30ade35ffac0acb818bc500671fb0e608575 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/cudagraph_trees.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/exc.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/exc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4e1c4d3c20f72ed23be2e0f6c1dfb99f23d282b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/exc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/fx_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/fx_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a998d8aac8e88503818ecb3ec45587a625d15d9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/fx_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/graph.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cddd31346000991f28875c3b5377aa68ef2f676f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/graph.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/hooks.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/hooks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff9e61a3577c689b665d4409a73100f02845b8f4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/hooks.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/index_propagation.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/index_propagation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..954c18e9f9c5c44acad5ae0c974d91e1049c93cf Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/index_propagation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/inductor_prims.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/inductor_prims.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d052695da7ab07275e2df374fc2f7375cec7918e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/inductor_prims.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/lowering.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/lowering.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dab93d3291c8aa55566d52fb7e4ab883eac29949 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/lowering.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/optimize_indexing.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/optimize_indexing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8595156b95ba895d54f46c7cba37e36285fddfea Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/optimize_indexing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/wrapper_benchmark.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/wrapper_benchmark.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2124d740a4f8429bbcb638bbee81bbb2db853a6e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/wrapper_benchmark.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributions/beta.py b/venv/lib/python3.10/site-packages/torch/distributions/beta.py new file mode 100644 index 0000000000000000000000000000000000000000..a802301a47edb6b63ffc0d16226c28dcbabedc0e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/beta.py @@ -0,0 +1,107 @@ +from numbers import Number, Real + +import torch +from torch.distributions import constraints +from torch.distributions.dirichlet import Dirichlet +from torch.distributions.exp_family import ExponentialFamily +from torch.distributions.utils import broadcast_all + +__all__ = ["Beta"] + + +class Beta(ExponentialFamily): + r""" + Beta distribution parameterized by :attr:`concentration1` and :attr:`concentration0`. + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = Beta(torch.tensor([0.5]), torch.tensor([0.5])) + >>> m.sample() # Beta distributed with concentration concentration1 and concentration0 + tensor([ 0.1046]) + + Args: + concentration1 (float or Tensor): 1st concentration parameter of the distribution + (often referred to as alpha) + concentration0 (float or Tensor): 2nd concentration parameter of the distribution + (often referred to as beta) + """ + arg_constraints = { + "concentration1": constraints.positive, + "concentration0": constraints.positive, + } + support = constraints.unit_interval + has_rsample = True + + def __init__(self, concentration1, concentration0, validate_args=None): + if isinstance(concentration1, Real) and isinstance(concentration0, Real): + concentration1_concentration0 = torch.tensor( + [float(concentration1), float(concentration0)] + ) + else: + concentration1, concentration0 = broadcast_all( + concentration1, concentration0 + ) + concentration1_concentration0 = torch.stack( + [concentration1, concentration0], -1 + ) + self._dirichlet = Dirichlet( + concentration1_concentration0, validate_args=validate_args + ) + super().__init__(self._dirichlet._batch_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Beta, _instance) + batch_shape = torch.Size(batch_shape) + new._dirichlet = self._dirichlet.expand(batch_shape) + super(Beta, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new + + @property + def mean(self): + return self.concentration1 / (self.concentration1 + self.concentration0) + + @property + def mode(self): + return self._dirichlet.mode[..., 0] + + @property + def variance(self): + total = self.concentration1 + self.concentration0 + return self.concentration1 * self.concentration0 / (total.pow(2) * (total + 1)) + + def rsample(self, sample_shape=()): + return 
self._dirichlet.rsample(sample_shape).select(-1, 0) + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + heads_tails = torch.stack([value, 1.0 - value], -1) + return self._dirichlet.log_prob(heads_tails) + + def entropy(self): + return self._dirichlet.entropy() + + @property + def concentration1(self): + result = self._dirichlet.concentration[..., 0] + if isinstance(result, Number): + return torch.tensor([result]) + else: + return result + + @property + def concentration0(self): + result = self._dirichlet.concentration[..., 1] + if isinstance(result, Number): + return torch.tensor([result]) + else: + return result + + @property + def _natural_params(self): + return (self.concentration1, self.concentration0) + + def _log_normalizer(self, x, y): + return torch.lgamma(x) + torch.lgamma(y) - torch.lgamma(x + y) diff --git a/venv/lib/python3.10/site-packages/torch/distributions/categorical.py b/venv/lib/python3.10/site-packages/torch/distributions/categorical.py new file mode 100644 index 0000000000000000000000000000000000000000..08d2fb3ac8e8762df227246612607b1602026c72 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/categorical.py @@ -0,0 +1,155 @@ +import torch +from torch import nan +from torch.distributions import constraints +from torch.distributions.distribution import Distribution +from torch.distributions.utils import lazy_property, logits_to_probs, probs_to_logits + +__all__ = ["Categorical"] + + +class Categorical(Distribution): + r""" + Creates a categorical distribution parameterized by either :attr:`probs` or + :attr:`logits` (but not both). + + .. note:: + It is equivalent to the distribution that :func:`torch.multinomial` + samples from. + + Samples are integers from :math:`\{0, \ldots, K-1\}` where `K` is ``probs.size(-1)``. + + If `probs` is 1-dimensional with length-`K`, each element is the relative probability + of sampling the class at that index. + + If `probs` is N-dimensional, the first N-1 dimensions are treated as a batch of + relative probability vectors. + + .. note:: The `probs` argument must be non-negative, finite and have a non-zero sum, + and it will be normalized to sum to 1 along the last dimension. :attr:`probs` + will return this normalized value. + The `logits` argument will be interpreted as unnormalized log probabilities + and can therefore be any real number. It will likewise be normalized so that + the resulting probabilities sum to 1 along the last dimension. :attr:`logits` + will return this normalized value. + + See also: :func:`torch.multinomial` + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = Categorical(torch.tensor([ 0.25, 0.25, 0.25, 0.25 ])) + >>> m.sample() # equal probability of 0, 1, 2, 3 + tensor(3) + + Args: + probs (Tensor): event probabilities + logits (Tensor): event log probabilities (unnormalized) + """ + arg_constraints = {"probs": constraints.simplex, "logits": constraints.real_vector} + has_enumerate_support = True + + def __init__(self, probs=None, logits=None, validate_args=None): + if (probs is None) == (logits is None): + raise ValueError( + "Either `probs` or `logits` must be specified, but not both." 
+ ) + if probs is not None: + if probs.dim() < 1: + raise ValueError("`probs` parameter must be at least one-dimensional.") + self.probs = probs / probs.sum(-1, keepdim=True) + else: + if logits.dim() < 1: + raise ValueError("`logits` parameter must be at least one-dimensional.") + # Normalize + self.logits = logits - logits.logsumexp(dim=-1, keepdim=True) + self._param = self.probs if probs is not None else self.logits + self._num_events = self._param.size()[-1] + batch_shape = ( + self._param.size()[:-1] if self._param.ndimension() > 1 else torch.Size() + ) + super().__init__(batch_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Categorical, _instance) + batch_shape = torch.Size(batch_shape) + param_shape = batch_shape + torch.Size((self._num_events,)) + if "probs" in self.__dict__: + new.probs = self.probs.expand(param_shape) + new._param = new.probs + if "logits" in self.__dict__: + new.logits = self.logits.expand(param_shape) + new._param = new.logits + new._num_events = self._num_events + super(Categorical, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new + + def _new(self, *args, **kwargs): + return self._param.new(*args, **kwargs) + + @constraints.dependent_property(is_discrete=True, event_dim=0) + def support(self): + return constraints.integer_interval(0, self._num_events - 1) + + @lazy_property + def logits(self): + return probs_to_logits(self.probs) + + @lazy_property + def probs(self): + return logits_to_probs(self.logits) + + @property + def param_shape(self): + return self._param.size() + + @property + def mean(self): + return torch.full( + self._extended_shape(), + nan, + dtype=self.probs.dtype, + device=self.probs.device, + ) + + @property + def mode(self): + return self.probs.argmax(axis=-1) + + @property + def variance(self): + return torch.full( + self._extended_shape(), + nan, + dtype=self.probs.dtype, + device=self.probs.device, + ) + + def sample(self, sample_shape=torch.Size()): + if not isinstance(sample_shape, torch.Size): + sample_shape = torch.Size(sample_shape) + probs_2d = self.probs.reshape(-1, self._num_events) + samples_2d = torch.multinomial(probs_2d, sample_shape.numel(), True).T + return samples_2d.reshape(self._extended_shape(sample_shape)) + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + value = value.long().unsqueeze(-1) + value, log_pmf = torch.broadcast_tensors(value, self.logits) + value = value[..., :1] + return log_pmf.gather(-1, value).squeeze(-1) + + def entropy(self): + min_real = torch.finfo(self.logits.dtype).min + logits = torch.clamp(self.logits, min=min_real) + p_log_p = logits * self.probs + return -p_log_p.sum(-1) + + def enumerate_support(self, expand=True): + num_events = self._num_events + values = torch.arange(num_events, dtype=torch.long, device=self._param.device) + values = values.view((-1,) + (1,) * len(self._batch_shape)) + if expand: + values = values.expand((-1,) + self._batch_shape) + return values diff --git a/venv/lib/python3.10/site-packages/torch/distributions/cauchy.py b/venv/lib/python3.10/site-packages/torch/distributions/cauchy.py new file mode 100644 index 0000000000000000000000000000000000000000..1a95dfe0d762d8cec467aca366f7ba13f1cb82a3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/cauchy.py @@ -0,0 +1,90 @@ +import math +from numbers import Number + +import torch +from torch import inf, nan +from torch.distributions 
import constraints +from torch.distributions.distribution import Distribution +from torch.distributions.utils import broadcast_all + +__all__ = ["Cauchy"] + + +class Cauchy(Distribution): + r""" + Samples from a Cauchy (Lorentz) distribution. The distribution of the ratio of + independent normally distributed random variables with means `0` follows a + Cauchy distribution. + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = Cauchy(torch.tensor([0.0]), torch.tensor([1.0])) + >>> m.sample() # sample from a Cauchy distribution with loc=0 and scale=1 + tensor([ 2.3214]) + + Args: + loc (float or Tensor): mode or median of the distribution. + scale (float or Tensor): half width at half maximum. + """ + arg_constraints = {"loc": constraints.real, "scale": constraints.positive} + support = constraints.real + has_rsample = True + + def __init__(self, loc, scale, validate_args=None): + self.loc, self.scale = broadcast_all(loc, scale) + if isinstance(loc, Number) and isinstance(scale, Number): + batch_shape = torch.Size() + else: + batch_shape = self.loc.size() + super().__init__(batch_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Cauchy, _instance) + batch_shape = torch.Size(batch_shape) + new.loc = self.loc.expand(batch_shape) + new.scale = self.scale.expand(batch_shape) + super(Cauchy, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new + + @property + def mean(self): + return torch.full( + self._extended_shape(), nan, dtype=self.loc.dtype, device=self.loc.device + ) + + @property + def mode(self): + return self.loc + + @property + def variance(self): + return torch.full( + self._extended_shape(), inf, dtype=self.loc.dtype, device=self.loc.device + ) + + def rsample(self, sample_shape=torch.Size()): + shape = self._extended_shape(sample_shape) + eps = self.loc.new(shape).cauchy_() + return self.loc + eps * self.scale + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + return ( + -math.log(math.pi) + - self.scale.log() + - (((value - self.loc) / self.scale) ** 2).log1p() + ) + + def cdf(self, value): + if self._validate_args: + self._validate_sample(value) + return torch.atan((value - self.loc) / self.scale) / math.pi + 0.5 + + def icdf(self, value): + return torch.tan(math.pi * (value - 0.5)) * self.scale + self.loc + + def entropy(self): + return math.log(4 * math.pi) + self.scale.log() diff --git a/venv/lib/python3.10/site-packages/torch/distributions/chi2.py b/venv/lib/python3.10/site-packages/torch/distributions/chi2.py new file mode 100644 index 0000000000000000000000000000000000000000..16d0d6d60fbeb93544d21127c57f4bebcfb2bd74 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/chi2.py @@ -0,0 +1,33 @@ +from torch.distributions import constraints +from torch.distributions.gamma import Gamma + +__all__ = ["Chi2"] + + +class Chi2(Gamma): + r""" + Creates a Chi-squared distribution parameterized by shape parameter :attr:`df`. 
+ This is exactly equivalent to ``Gamma(alpha=0.5*df, beta=0.5)`` + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = Chi2(torch.tensor([1.0])) + >>> m.sample() # Chi2 distributed with shape df=1 + tensor([ 0.1046]) + + Args: + df (float or Tensor): shape parameter of the distribution + """ + arg_constraints = {"df": constraints.positive} + + def __init__(self, df, validate_args=None): + super().__init__(0.5 * df, 0.5, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Chi2, _instance) + return super().expand(batch_shape, new) + + @property + def df(self): + return self.concentration * 2 diff --git a/venv/lib/python3.10/site-packages/torch/distributions/independent.py b/venv/lib/python3.10/site-packages/torch/distributions/independent.py new file mode 100644 index 0000000000000000000000000000000000000000..35b705fd0f29c7e95ba11c5d84a69a68822dfabb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/independent.py @@ -0,0 +1,125 @@ +from typing import Dict + +import torch +from torch.distributions import constraints +from torch.distributions.distribution import Distribution +from torch.distributions.utils import _sum_rightmost + +__all__ = ["Independent"] + + +class Independent(Distribution): + r""" + Reinterprets some of the batch dims of a distribution as event dims. + + This is mainly useful for changing the shape of the result of + :meth:`log_prob`. For example to create a diagonal Normal distribution with + the same shape as a Multivariate Normal distribution (so they are + interchangeable), you can:: + + >>> from torch.distributions.multivariate_normal import MultivariateNormal + >>> from torch.distributions.normal import Normal + >>> loc = torch.zeros(3) + >>> scale = torch.ones(3) + >>> mvn = MultivariateNormal(loc, scale_tril=torch.diag(scale)) + >>> [mvn.batch_shape, mvn.event_shape] + [torch.Size([]), torch.Size([3])] + >>> normal = Normal(loc, scale) + >>> [normal.batch_shape, normal.event_shape] + [torch.Size([3]), torch.Size([])] + >>> diagn = Independent(normal, 1) + >>> [diagn.batch_shape, diagn.event_shape] + [torch.Size([]), torch.Size([3])] + + Args: + base_distribution (torch.distributions.distribution.Distribution): a + base distribution + reinterpreted_batch_ndims (int): the number of batch dims to + reinterpret as event dims + """ + arg_constraints: Dict[str, constraints.Constraint] = {} + + def __init__( + self, base_distribution, reinterpreted_batch_ndims, validate_args=None + ): + if reinterpreted_batch_ndims > len(base_distribution.batch_shape): + raise ValueError( + "Expected reinterpreted_batch_ndims <= len(base_distribution.batch_shape), " + f"actual {reinterpreted_batch_ndims} vs {len(base_distribution.batch_shape)}" + ) + shape = base_distribution.batch_shape + base_distribution.event_shape + event_dim = reinterpreted_batch_ndims + len(base_distribution.event_shape) + batch_shape = shape[: len(shape) - event_dim] + event_shape = shape[len(shape) - event_dim :] + self.base_dist = base_distribution + self.reinterpreted_batch_ndims = reinterpreted_batch_ndims + super().__init__(batch_shape, event_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Independent, _instance) + batch_shape = torch.Size(batch_shape) + new.base_dist = self.base_dist.expand( + batch_shape + self.event_shape[: self.reinterpreted_batch_ndims] + ) + new.reinterpreted_batch_ndims = self.reinterpreted_batch_ndims + 
super(Independent, new).__init__( + batch_shape, self.event_shape, validate_args=False + ) + new._validate_args = self._validate_args + return new + + @property + def has_rsample(self): + return self.base_dist.has_rsample + + @property + def has_enumerate_support(self): + if self.reinterpreted_batch_ndims > 0: + return False + return self.base_dist.has_enumerate_support + + @constraints.dependent_property + def support(self): + result = self.base_dist.support + if self.reinterpreted_batch_ndims: + result = constraints.independent(result, self.reinterpreted_batch_ndims) + return result + + @property + def mean(self): + return self.base_dist.mean + + @property + def mode(self): + return self.base_dist.mode + + @property + def variance(self): + return self.base_dist.variance + + def sample(self, sample_shape=torch.Size()): + return self.base_dist.sample(sample_shape) + + def rsample(self, sample_shape=torch.Size()): + return self.base_dist.rsample(sample_shape) + + def log_prob(self, value): + log_prob = self.base_dist.log_prob(value) + return _sum_rightmost(log_prob, self.reinterpreted_batch_ndims) + + def entropy(self): + entropy = self.base_dist.entropy() + return _sum_rightmost(entropy, self.reinterpreted_batch_ndims) + + def enumerate_support(self, expand=True): + if self.reinterpreted_batch_ndims > 0: + raise NotImplementedError( + "Enumeration over cartesian product is not implemented" + ) + return self.base_dist.enumerate_support(expand=expand) + + def __repr__(self): + return ( + self.__class__.__name__ + + f"({self.base_dist}, {self.reinterpreted_batch_ndims})" + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributions/lkj_cholesky.py b/venv/lib/python3.10/site-packages/torch/distributions/lkj_cholesky.py new file mode 100644 index 0000000000000000000000000000000000000000..c1cb46f02fc24826ce8db3f079abd133652e8213 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/lkj_cholesky.py @@ -0,0 +1,142 @@ +""" +This closely follows the implementation in NumPyro (https://github.com/pyro-ppl/numpyro). + +Original copyright notice: + +# Copyright: Contributors to the Pyro project. +# SPDX-License-Identifier: Apache-2.0 +""" + +import math + +import torch +from torch.distributions import Beta, constraints +from torch.distributions.distribution import Distribution +from torch.distributions.utils import broadcast_all + +__all__ = ["LKJCholesky"] + + +class LKJCholesky(Distribution): + r""" + LKJ distribution for lower Cholesky factor of correlation matrices. + The distribution is controlled by ``concentration`` parameter :math:`\eta` + to make the probability of the correlation matrix :math:`M` generated from + a Cholesky factor proportional to :math:`\det(M)^{\eta - 1}`. Because of that, + when ``concentration == 1``, we have a uniform distribution over Cholesky + factors of correlation matrices:: + + L ~ LKJCholesky(dim, concentration) + X = L @ L' ~ LKJCorr(dim, concentration) + + Note that this distribution samples the + Cholesky factor of correlation matrices and not the correlation matrices + themselves and thereby differs slightly from the derivations in [1] for + the `LKJCorr` distribution. For sampling, this uses the Onion method from + [1] Section 3. 
+ + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> l = LKJCholesky(3, 0.5) + >>> l.sample() # l @ l.T is a sample of a correlation 3x3 matrix + tensor([[ 1.0000, 0.0000, 0.0000], + [ 0.3516, 0.9361, 0.0000], + [-0.1899, 0.4748, 0.8593]]) + + Args: + dimension (dim): dimension of the matrices + concentration (float or Tensor): concentration/shape parameter of the + distribution (often referred to as eta) + + **References** + + [1] `Generating random correlation matrices based on vines and extended onion method` (2009), + Daniel Lewandowski, Dorota Kurowicka, Harry Joe. + Journal of Multivariate Analysis. 100. 10.1016/j.jmva.2009.04.008 + """ + arg_constraints = {"concentration": constraints.positive} + support = constraints.corr_cholesky + + def __init__(self, dim, concentration=1.0, validate_args=None): + if dim < 2: + raise ValueError( + f"Expected dim to be an integer greater than or equal to 2. Found dim={dim}." + ) + self.dim = dim + (self.concentration,) = broadcast_all(concentration) + batch_shape = self.concentration.size() + event_shape = torch.Size((dim, dim)) + # This is used to draw vectorized samples from the beta distribution in Sec. 3.2 of [1]. + marginal_conc = self.concentration + 0.5 * (self.dim - 2) + offset = torch.arange( + self.dim - 1, + dtype=self.concentration.dtype, + device=self.concentration.device, + ) + offset = torch.cat([offset.new_zeros((1,)), offset]) + beta_conc1 = offset + 0.5 + beta_conc0 = marginal_conc.unsqueeze(-1) - 0.5 * offset + self._beta = Beta(beta_conc1, beta_conc0) + super().__init__(batch_shape, event_shape, validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(LKJCholesky, _instance) + batch_shape = torch.Size(batch_shape) + new.dim = self.dim + new.concentration = self.concentration.expand(batch_shape) + new._beta = self._beta.expand(batch_shape + (self.dim,)) + super(LKJCholesky, new).__init__( + batch_shape, self.event_shape, validate_args=False + ) + new._validate_args = self._validate_args + return new + + def sample(self, sample_shape=torch.Size()): + # This uses the Onion method, but there are a few differences from [1] Sec. 3.2: + # - This vectorizes the for loop and also works for heterogeneous eta. + # - Same algorithm generalizes to n=1. + # - The procedure is simplified since we are sampling the cholesky factor of + # the correlation matrix instead of the correlation matrix itself. As such, + # we only need to generate `w`. 
+        y = self._beta.sample(sample_shape).unsqueeze(-1)
+        u_normal = torch.randn(
+            self._extended_shape(sample_shape), dtype=y.dtype, device=y.device
+        ).tril(-1)
+        u_hypersphere = u_normal / u_normal.norm(dim=-1, keepdim=True)
+        # Replace NaNs in first row
+        u_hypersphere[..., 0, :].fill_(0.0)
+        w = torch.sqrt(y) * u_hypersphere
+        # Fill diagonal elements; clamp for numerical stability
+        eps = torch.finfo(w.dtype).tiny
+        diag_elems = torch.clamp(1 - torch.sum(w**2, dim=-1), min=eps).sqrt()
+        w += torch.diag_embed(diag_elems)
+        return w
+
+    def log_prob(self, value):
+        # See: https://mc-stan.org/docs/2_25/functions-reference/cholesky-lkj-correlation-distribution.html
+        # The probability of a correlation matrix is proportional to
+        #   determinant ** (concentration - 1) = prod(L_ii ^ 2(concentration - 1))
+        # Additionally, the Jacobian of the transformation from Cholesky factor to
+        # correlation matrix is:
+        #   prod(L_ii ^ (D - i))
+        # So the probability of a Cholesky factor is proportional to
+        #   prod(L_ii ^ (2 * concentration - 2 + D - i)) = prod(L_ii ^ order_i)
+        # with order_i = 2 * concentration - 2 + D - i
+        if self._validate_args:
+            self._validate_sample(value)
+        diag_elems = value.diagonal(dim1=-1, dim2=-2)[..., 1:]
+        order = torch.arange(2, self.dim + 1, device=self.concentration.device)
+        order = 2 * (self.concentration - 1).unsqueeze(-1) + self.dim - order
+        unnormalized_log_pdf = torch.sum(order * diag_elems.log(), dim=-1)
+        # Compute normalization constant (page 1999 of [1])
+        dm1 = self.dim - 1
+        alpha = self.concentration + 0.5 * dm1
+        denominator = torch.lgamma(alpha) * dm1
+        numerator = torch.mvlgamma(alpha - 0.5, dm1)
+        # pi_constant in [1] is D * (D - 1) / 4 * log(pi)
+        # pi_constant in multigammaln is (D - 1) * (D - 2) / 4 * log(pi)
+        # hence, we need to add a pi_constant = (D - 1) * log(pi) / 2
+        pi_constant = 0.5 * dm1 * math.log(math.pi)
+        normalize_term = pi_constant + numerator - denominator
+        return unnormalized_log_pdf - normalize_term
diff --git a/venv/lib/python3.10/site-packages/torch/distributions/log_normal.py b/venv/lib/python3.10/site-packages/torch/distributions/log_normal.py
new file mode 100644
index 0000000000000000000000000000000000000000..f6694cf9507f1b22e76fa3700e4f64240ac2ba99
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/distributions/log_normal.py
@@ -0,0 +1,62 @@
+from torch.distributions import constraints
+from torch.distributions.normal import Normal
+from torch.distributions.transformed_distribution import TransformedDistribution
+from torch.distributions.transforms import ExpTransform
+
+__all__ = ["LogNormal"]
+
+
+class LogNormal(TransformedDistribution):
+    r"""
+    Creates a log-normal distribution parameterized by
+    :attr:`loc` and :attr:`scale` where::
+
+        X ~ Normal(loc, scale)
+        Y = exp(X) ~ LogNormal(loc, scale)
+
+    Example::
+
+        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
+        >>> m = LogNormal(torch.tensor([0.0]), torch.tensor([1.0]))
+        >>> m.sample()  # log-normal distributed with mean=0 and stddev=1
+        tensor([ 0.1046])
+
+    Args:
+        loc (float or Tensor): mean of log of distribution
+        scale (float or Tensor): standard deviation of log of the distribution
+    """
+    arg_constraints = {"loc": constraints.real, "scale": constraints.positive}
+    support = constraints.positive
+    has_rsample = True
+
+    def __init__(self, loc, scale, validate_args=None):
+        base_dist = Normal(loc, scale, validate_args=validate_args)
+        super().__init__(base_dist, ExpTransform(), validate_args=validate_args)
+
+    def expand(self, batch_shape,
_instance=None): + new = self._get_checked_instance(LogNormal, _instance) + return super().expand(batch_shape, _instance=new) + + @property + def loc(self): + return self.base_dist.loc + + @property + def scale(self): + return self.base_dist.scale + + @property + def mean(self): + return (self.loc + self.scale.pow(2) / 2).exp() + + @property + def mode(self): + return (self.loc - self.scale.square()).exp() + + @property + def variance(self): + scale_sq = self.scale.pow(2) + return scale_sq.expm1() * (2 * self.loc + scale_sq).exp() + + def entropy(self): + return self.base_dist.entropy() + self.loc diff --git a/venv/lib/python3.10/site-packages/torch/distributions/multinomial.py b/venv/lib/python3.10/site-packages/torch/distributions/multinomial.py new file mode 100644 index 0000000000000000000000000000000000000000..3f316e823a79577c4cd42b2b1a23264844232663 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/multinomial.py @@ -0,0 +1,135 @@ +import torch +from torch import inf +from torch.distributions import Categorical, constraints +from torch.distributions.binomial import Binomial +from torch.distributions.distribution import Distribution +from torch.distributions.utils import broadcast_all + +__all__ = ["Multinomial"] + + +class Multinomial(Distribution): + r""" + Creates a Multinomial distribution parameterized by :attr:`total_count` and + either :attr:`probs` or :attr:`logits` (but not both). The innermost dimension of + :attr:`probs` indexes over categories. All other dimensions index over batches. + + Note that :attr:`total_count` need not be specified if only :meth:`log_prob` is + called (see example below) + + .. note:: The `probs` argument must be non-negative, finite and have a non-zero sum, + and it will be normalized to sum to 1 along the last dimension. :attr:`probs` + will return this normalized value. + The `logits` argument will be interpreted as unnormalized log probabilities + and can therefore be any real number. It will likewise be normalized so that + the resulting probabilities sum to 1 along the last dimension. :attr:`logits` + will return this normalized value. + + - :meth:`sample` requires a single shared `total_count` for all + parameters and samples. + - :meth:`log_prob` allows different `total_count` for each parameter and + sample. 
+ + Example:: + + >>> # xdoctest: +SKIP("FIXME: found invalid values") + >>> m = Multinomial(100, torch.tensor([ 1., 1., 1., 1.])) + >>> x = m.sample() # equal probability of 0, 1, 2, 3 + tensor([ 21., 24., 30., 25.]) + + >>> Multinomial(probs=torch.tensor([1., 1., 1., 1.])).log_prob(x) + tensor([-4.1338]) + + Args: + total_count (int): number of trials + probs (Tensor): event probabilities + logits (Tensor): event log probabilities (unnormalized) + """ + arg_constraints = {"probs": constraints.simplex, "logits": constraints.real_vector} + total_count: int + + @property + def mean(self): + return self.probs * self.total_count + + @property + def variance(self): + return self.total_count * self.probs * (1 - self.probs) + + def __init__(self, total_count=1, probs=None, logits=None, validate_args=None): + if not isinstance(total_count, int): + raise NotImplementedError("inhomogeneous total_count is not supported") + self.total_count = total_count + self._categorical = Categorical(probs=probs, logits=logits) + self._binomial = Binomial(total_count=total_count, probs=self.probs) + batch_shape = self._categorical.batch_shape + event_shape = self._categorical.param_shape[-1:] + super().__init__(batch_shape, event_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Multinomial, _instance) + batch_shape = torch.Size(batch_shape) + new.total_count = self.total_count + new._categorical = self._categorical.expand(batch_shape) + super(Multinomial, new).__init__( + batch_shape, self.event_shape, validate_args=False + ) + new._validate_args = self._validate_args + return new + + def _new(self, *args, **kwargs): + return self._categorical._new(*args, **kwargs) + + @constraints.dependent_property(is_discrete=True, event_dim=1) + def support(self): + return constraints.multinomial(self.total_count) + + @property + def logits(self): + return self._categorical.logits + + @property + def probs(self): + return self._categorical.probs + + @property + def param_shape(self): + return self._categorical.param_shape + + def sample(self, sample_shape=torch.Size()): + sample_shape = torch.Size(sample_shape) + samples = self._categorical.sample( + torch.Size((self.total_count,)) + sample_shape + ) + # samples.shape is (total_count, sample_shape, batch_shape), need to change it to + # (sample_shape, batch_shape, total_count) + shifted_idx = list(range(samples.dim())) + shifted_idx.append(shifted_idx.pop(0)) + samples = samples.permute(*shifted_idx) + counts = samples.new(self._extended_shape(sample_shape)).zero_() + counts.scatter_add_(-1, samples, torch.ones_like(samples)) + return counts.type_as(self.probs) + + def entropy(self): + n = torch.tensor(self.total_count) + + cat_entropy = self._categorical.entropy() + term1 = n * cat_entropy - torch.lgamma(n + 1) + + support = self._binomial.enumerate_support(expand=False)[1:] + binomial_probs = torch.exp(self._binomial.log_prob(support)) + weights = torch.lgamma(support + 1) + term2 = (binomial_probs * weights).sum([0, -1]) + + return term1 + term2 + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + logits, value = broadcast_all(self.logits, value) + logits = logits.clone(memory_format=torch.contiguous_format) + log_factorial_n = torch.lgamma(value.sum(-1) + 1) + log_factorial_xs = torch.lgamma(value + 1).sum(-1) + logits[(value == 0) & (logits == -inf)] = 0 + log_powers = (logits * value).sum(-1) + return log_factorial_n - log_factorial_xs + log_powers diff --git 
a/venv/lib/python3.10/site-packages/torch/distributions/pareto.py b/venv/lib/python3.10/site-packages/torch/distributions/pareto.py new file mode 100644 index 0000000000000000000000000000000000000000..07cfb417a814e18a327ca93a155f0fbd905f60d1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/pareto.py @@ -0,0 +1,60 @@ +from torch.distributions import constraints +from torch.distributions.exponential import Exponential +from torch.distributions.transformed_distribution import TransformedDistribution +from torch.distributions.transforms import AffineTransform, ExpTransform +from torch.distributions.utils import broadcast_all + +__all__ = ["Pareto"] + + +class Pareto(TransformedDistribution): + r""" + Samples from a Pareto Type 1 distribution. + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = Pareto(torch.tensor([1.0]), torch.tensor([1.0])) + >>> m.sample() # sample from a Pareto distribution with scale=1 and alpha=1 + tensor([ 1.5623]) + + Args: + scale (float or Tensor): Scale parameter of the distribution + alpha (float or Tensor): Shape parameter of the distribution + """ + arg_constraints = {"alpha": constraints.positive, "scale": constraints.positive} + + def __init__(self, scale, alpha, validate_args=None): + self.scale, self.alpha = broadcast_all(scale, alpha) + base_dist = Exponential(self.alpha, validate_args=validate_args) + transforms = [ExpTransform(), AffineTransform(loc=0, scale=self.scale)] + super().__init__(base_dist, transforms, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Pareto, _instance) + new.scale = self.scale.expand(batch_shape) + new.alpha = self.alpha.expand(batch_shape) + return super().expand(batch_shape, _instance=new) + + @property + def mean(self): + # mean is inf for alpha <= 1 + a = self.alpha.clamp(min=1) + return a * self.scale / (a - 1) + + @property + def mode(self): + return self.scale + + @property + def variance(self): + # var is inf for alpha <= 2 + a = self.alpha.clamp(min=2) + return self.scale.pow(2) * a / ((a - 1).pow(2) * (a - 2)) + + @constraints.dependent_property(is_discrete=False, event_dim=0) + def support(self): + return constraints.greater_than_eq(self.scale) + + def entropy(self): + return (self.scale / self.alpha).log() + (1 + self.alpha.reciprocal()) diff --git a/venv/lib/python3.10/site-packages/torch/distributions/relaxed_bernoulli.py b/venv/lib/python3.10/site-packages/torch/distributions/relaxed_bernoulli.py new file mode 100644 index 0000000000000000000000000000000000000000..05e0995e4a33690638e6cec7b2559843c2ccdd66 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/relaxed_bernoulli.py @@ -0,0 +1,149 @@ +from numbers import Number + +import torch +from torch.distributions import constraints +from torch.distributions.distribution import Distribution +from torch.distributions.transformed_distribution import TransformedDistribution +from torch.distributions.transforms import SigmoidTransform +from torch.distributions.utils import ( + broadcast_all, + clamp_probs, + lazy_property, + logits_to_probs, + probs_to_logits, +) + +__all__ = ["LogitRelaxedBernoulli", "RelaxedBernoulli"] + + +class LogitRelaxedBernoulli(Distribution): + r""" + Creates a LogitRelaxedBernoulli distribution parameterized by :attr:`probs` + or :attr:`logits` (but not both), which is the logit of a RelaxedBernoulli + distribution. + + Samples are logits of values in (0, 1). See [1] for more details. 
+ + Args: + temperature (Tensor): relaxation temperature + probs (Number, Tensor): the probability of sampling `1` + logits (Number, Tensor): the log-odds of sampling `1` + + [1] The Concrete Distribution: A Continuous Relaxation of Discrete Random + Variables (Maddison et al, 2017) + + [2] Categorical Reparametrization with Gumbel-Softmax + (Jang et al, 2017) + """ + arg_constraints = {"probs": constraints.unit_interval, "logits": constraints.real} + support = constraints.real + + def __init__(self, temperature, probs=None, logits=None, validate_args=None): + self.temperature = temperature + if (probs is None) == (logits is None): + raise ValueError( + "Either `probs` or `logits` must be specified, but not both." + ) + if probs is not None: + is_scalar = isinstance(probs, Number) + (self.probs,) = broadcast_all(probs) + else: + is_scalar = isinstance(logits, Number) + (self.logits,) = broadcast_all(logits) + self._param = self.probs if probs is not None else self.logits + if is_scalar: + batch_shape = torch.Size() + else: + batch_shape = self._param.size() + super().__init__(batch_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(LogitRelaxedBernoulli, _instance) + batch_shape = torch.Size(batch_shape) + new.temperature = self.temperature + if "probs" in self.__dict__: + new.probs = self.probs.expand(batch_shape) + new._param = new.probs + if "logits" in self.__dict__: + new.logits = self.logits.expand(batch_shape) + new._param = new.logits + super(LogitRelaxedBernoulli, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new + + def _new(self, *args, **kwargs): + return self._param.new(*args, **kwargs) + + @lazy_property + def logits(self): + return probs_to_logits(self.probs, is_binary=True) + + @lazy_property + def probs(self): + return logits_to_probs(self.logits, is_binary=True) + + @property + def param_shape(self): + return self._param.size() + + def rsample(self, sample_shape=torch.Size()): + shape = self._extended_shape(sample_shape) + probs = clamp_probs(self.probs.expand(shape)) + uniforms = clamp_probs( + torch.rand(shape, dtype=probs.dtype, device=probs.device) + ) + return ( + uniforms.log() - (-uniforms).log1p() + probs.log() - (-probs).log1p() + ) / self.temperature + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + logits, value = broadcast_all(self.logits, value) + diff = logits - value.mul(self.temperature) + return self.temperature.log() + diff - 2 * diff.exp().log1p() + + +class RelaxedBernoulli(TransformedDistribution): + r""" + Creates a RelaxedBernoulli distribution, parametrized by + :attr:`temperature`, and either :attr:`probs` or :attr:`logits` + (but not both). This is a relaxed version of the `Bernoulli` distribution, + so the values are in (0, 1), and has reparametrizable samples. + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = RelaxedBernoulli(torch.tensor([2.2]), + ... 
torch.tensor([0.1, 0.2, 0.3, 0.99])) + >>> m.sample() + tensor([ 0.2951, 0.3442, 0.8918, 0.9021]) + + Args: + temperature (Tensor): relaxation temperature + probs (Number, Tensor): the probability of sampling `1` + logits (Number, Tensor): the log-odds of sampling `1` + """ + arg_constraints = {"probs": constraints.unit_interval, "logits": constraints.real} + support = constraints.unit_interval + has_rsample = True + + def __init__(self, temperature, probs=None, logits=None, validate_args=None): + base_dist = LogitRelaxedBernoulli(temperature, probs, logits) + super().__init__(base_dist, SigmoidTransform(), validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(RelaxedBernoulli, _instance) + return super().expand(batch_shape, _instance=new) + + @property + def temperature(self): + return self.base_dist.temperature + + @property + def logits(self): + return self.base_dist.logits + + @property + def probs(self): + return self.base_dist.probs diff --git a/venv/lib/python3.10/site-packages/torch/distributions/transforms.py b/venv/lib/python3.10/site-packages/torch/distributions/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..f2907caa60180a93b9ebf62354479db4292e2c95 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/transforms.py @@ -0,0 +1,1245 @@ +import functools +import math +import numbers +import operator +import weakref +from typing import List + +import torch +import torch.nn.functional as F +from torch.distributions import constraints +from torch.distributions.utils import ( + _sum_rightmost, + broadcast_all, + lazy_property, + tril_matrix_to_vec, + vec_to_tril_matrix, +) +from torch.nn.functional import pad, softplus + +__all__ = [ + "AbsTransform", + "AffineTransform", + "CatTransform", + "ComposeTransform", + "CorrCholeskyTransform", + "CumulativeDistributionTransform", + "ExpTransform", + "IndependentTransform", + "LowerCholeskyTransform", + "PositiveDefiniteTransform", + "PowerTransform", + "ReshapeTransform", + "SigmoidTransform", + "SoftplusTransform", + "TanhTransform", + "SoftmaxTransform", + "StackTransform", + "StickBreakingTransform", + "Transform", + "identity_transform", +] + + +class Transform: + """ + Abstract class for invertable transformations with computable log + det jacobians. They are primarily used in + :class:`torch.distributions.TransformedDistribution`. + + Caching is useful for transforms whose inverses are either expensive or + numerically unstable. Note that care must be taken with memoized values + since the autograd graph may be reversed. For example while the following + works with or without caching:: + + y = t(x) + t.log_abs_det_jacobian(x, y).backward() # x will receive gradients. + + However the following will error when caching due to dependency reversal:: + + y = t(x) + z = t.inv(y) + grad(z.sum(), [y]) # error because z is x + + Derived classes should implement one or both of :meth:`_call` or + :meth:`_inverse`. Derived classes that set `bijective=True` should also + implement :meth:`log_abs_det_jacobian`. + + Args: + cache_size (int): Size of cache. If zero, no caching is done. If one, + the latest single value is cached. Only 0 and 1 are supported. + + Attributes: + domain (:class:`~torch.distributions.constraints.Constraint`): + The constraint representing valid inputs to this transform. 
+        codomain (:class:`~torch.distributions.constraints.Constraint`):
+            The constraint representing valid outputs to this transform
+            which are inputs to the inverse transform.
+        bijective (bool): Whether this transform is bijective. A transform
+            ``t`` is bijective iff ``t.inv(t(x)) == x`` and
+            ``t(t.inv(y)) == y`` for every ``x`` in the domain and ``y`` in
+            the codomain. Transforms that are not bijective should at least
+            maintain the weaker pseudoinverse properties
+            ``t(t.inv(t(x))) == t(x)`` and ``t.inv(t(t.inv(y))) == t.inv(y)``.
+        sign (int or Tensor): For bijective univariate transforms, this
+            should be +1 or -1 depending on whether transform is monotone
+            increasing or decreasing.
+    """
+
+    bijective = False
+    domain: constraints.Constraint
+    codomain: constraints.Constraint
+
+    def __init__(self, cache_size=0):
+        self._cache_size = cache_size
+        self._inv = None
+        if cache_size == 0:
+            pass  # default behavior
+        elif cache_size == 1:
+            self._cached_x_y = None, None
+        else:
+            raise ValueError("cache_size must be 0 or 1")
+        super().__init__()
+
+    def __getstate__(self):
+        state = self.__dict__.copy()
+        state["_inv"] = None
+        return state
+
+    @property
+    def event_dim(self):
+        if self.domain.event_dim == self.codomain.event_dim:
+            return self.domain.event_dim
+        raise ValueError("Please use either .domain.event_dim or .codomain.event_dim")
+
+    @property
+    def inv(self):
+        """
+        Returns the inverse :class:`Transform` of this transform.
+        This should satisfy ``t.inv.inv is t``.
+        """
+        inv = None
+        if self._inv is not None:
+            inv = self._inv()
+        if inv is None:
+            inv = _InverseTransform(self)
+            self._inv = weakref.ref(inv)
+        return inv
+
+    @property
+    def sign(self):
+        """
+        Returns the sign of the determinant of the Jacobian, if applicable.
+        In general this only makes sense for bijective transforms.
+        """
+        raise NotImplementedError
+
+    def with_cache(self, cache_size=1):
+        if self._cache_size == cache_size:
+            return self
+        if type(self).__init__ is Transform.__init__:
+            return type(self)(cache_size=cache_size)
+        raise NotImplementedError(f"{type(self)}.with_cache is not implemented")
+
+    def __eq__(self, other):
+        return self is other
+
+    def __ne__(self, other):
+        # Necessary for Python2
+        return not self.__eq__(other)
+
+    def __call__(self, x):
+        """
+        Computes the transform `x => y`.
+        """
+        if self._cache_size == 0:
+            return self._call(x)
+        x_old, y_old = self._cached_x_y
+        if x is x_old:
+            return y_old
+        y = self._call(x)
+        self._cached_x_y = x, y
+        return y
+
+    def _inv_call(self, y):
+        """
+        Inverts the transform `y => x`.
+        """
+        if self._cache_size == 0:
+            return self._inverse(y)
+        x_old, y_old = self._cached_x_y
+        if y is y_old:
+            return x_old
+        x = self._inverse(y)
+        self._cached_x_y = x, y
+        return x
+
+    def _call(self, x):
+        """
+        Abstract method to compute forward transformation.
+        """
+        raise NotImplementedError
+
+    def _inverse(self, y):
+        """
+        Abstract method to compute inverse transformation.
+        """
+        raise NotImplementedError
+
+    def log_abs_det_jacobian(self, x, y):
+        """
+        Computes the log det jacobian `log |dy/dx|` given input and output.
+        """
+        raise NotImplementedError
+
+    def __repr__(self):
+        return self.__class__.__name__ + "()"
+
+    def forward_shape(self, shape):
+        """
+        Infers the shape of the forward computation, given the input shape.
+        Defaults to preserving shape.
+        """
+        return shape
+
+    def inverse_shape(self, shape):
+        """
+        Infers the shapes of the inverse computation, given the output shape.
+        Defaults to preserving shape.
+ """ + return shape + + +class _InverseTransform(Transform): + """ + Inverts a single :class:`Transform`. + This class is private; please instead use the ``Transform.inv`` property. + """ + + def __init__(self, transform: Transform): + super().__init__(cache_size=transform._cache_size) + self._inv: Transform = transform + + @constraints.dependent_property(is_discrete=False) + def domain(self): + assert self._inv is not None + return self._inv.codomain + + @constraints.dependent_property(is_discrete=False) + def codomain(self): + assert self._inv is not None + return self._inv.domain + + @property + def bijective(self): + assert self._inv is not None + return self._inv.bijective + + @property + def sign(self): + assert self._inv is not None + return self._inv.sign + + @property + def inv(self): + return self._inv + + def with_cache(self, cache_size=1): + assert self._inv is not None + return self.inv.with_cache(cache_size).inv + + def __eq__(self, other): + if not isinstance(other, _InverseTransform): + return False + assert self._inv is not None + return self._inv == other._inv + + def __repr__(self): + return f"{self.__class__.__name__}({repr(self._inv)})" + + def __call__(self, x): + assert self._inv is not None + return self._inv._inv_call(x) + + def log_abs_det_jacobian(self, x, y): + assert self._inv is not None + return -self._inv.log_abs_det_jacobian(y, x) + + def forward_shape(self, shape): + return self._inv.inverse_shape(shape) + + def inverse_shape(self, shape): + return self._inv.forward_shape(shape) + + +class ComposeTransform(Transform): + """ + Composes multiple transforms in a chain. + The transforms being composed are responsible for caching. + + Args: + parts (list of :class:`Transform`): A list of transforms to compose. + cache_size (int): Size of cache. If zero, no caching is done. If one, + the latest single value is cached. Only 0 and 1 are supported. + """ + + def __init__(self, parts: List[Transform], cache_size=0): + if cache_size: + parts = [part.with_cache(cache_size) for part in parts] + super().__init__(cache_size=cache_size) + self.parts = parts + + def __eq__(self, other): + if not isinstance(other, ComposeTransform): + return False + return self.parts == other.parts + + @constraints.dependent_property(is_discrete=False) + def domain(self): + if not self.parts: + return constraints.real + domain = self.parts[0].domain + # Adjust event_dim to be maximum among all parts. + event_dim = self.parts[-1].codomain.event_dim + for part in reversed(self.parts): + event_dim += part.domain.event_dim - part.codomain.event_dim + event_dim = max(event_dim, part.domain.event_dim) + assert event_dim >= domain.event_dim + if event_dim > domain.event_dim: + domain = constraints.independent(domain, event_dim - domain.event_dim) + return domain + + @constraints.dependent_property(is_discrete=False) + def codomain(self): + if not self.parts: + return constraints.real + codomain = self.parts[-1].codomain + # Adjust event_dim to be maximum among all parts. 
+ event_dim = self.parts[0].domain.event_dim + for part in self.parts: + event_dim += part.codomain.event_dim - part.domain.event_dim + event_dim = max(event_dim, part.codomain.event_dim) + assert event_dim >= codomain.event_dim + if event_dim > codomain.event_dim: + codomain = constraints.independent(codomain, event_dim - codomain.event_dim) + return codomain + + @lazy_property + def bijective(self): + return all(p.bijective for p in self.parts) + + @lazy_property + def sign(self): + sign = 1 + for p in self.parts: + sign = sign * p.sign + return sign + + @property + def inv(self): + inv = None + if self._inv is not None: + inv = self._inv() + if inv is None: + inv = ComposeTransform([p.inv for p in reversed(self.parts)]) + self._inv = weakref.ref(inv) + inv._inv = weakref.ref(self) + return inv + + def with_cache(self, cache_size=1): + if self._cache_size == cache_size: + return self + return ComposeTransform(self.parts, cache_size=cache_size) + + def __call__(self, x): + for part in self.parts: + x = part(x) + return x + + def log_abs_det_jacobian(self, x, y): + if not self.parts: + return torch.zeros_like(x) + + # Compute intermediates. This will be free if parts[:-1] are all cached. + xs = [x] + for part in self.parts[:-1]: + xs.append(part(xs[-1])) + xs.append(y) + + terms = [] + event_dim = self.domain.event_dim + for part, x, y in zip(self.parts, xs[:-1], xs[1:]): + terms.append( + _sum_rightmost( + part.log_abs_det_jacobian(x, y), event_dim - part.domain.event_dim + ) + ) + event_dim += part.codomain.event_dim - part.domain.event_dim + return functools.reduce(operator.add, terms) + + def forward_shape(self, shape): + for part in self.parts: + shape = part.forward_shape(shape) + return shape + + def inverse_shape(self, shape): + for part in reversed(self.parts): + shape = part.inverse_shape(shape) + return shape + + def __repr__(self): + fmt_string = self.__class__.__name__ + "(\n " + fmt_string += ",\n ".join([p.__repr__() for p in self.parts]) + fmt_string += "\n)" + return fmt_string + + +identity_transform = ComposeTransform([]) + + +class IndependentTransform(Transform): + """ + Wrapper around another transform to treat + ``reinterpreted_batch_ndims``-many extra of the right most dimensions as + dependent. This has no effect on the forward or backward transforms, but + does sum out ``reinterpreted_batch_ndims``-many of the rightmost dimensions + in :meth:`log_abs_det_jacobian`. + + Args: + base_transform (:class:`Transform`): A base transform. + reinterpreted_batch_ndims (int): The number of extra rightmost + dimensions to treat as dependent. 
+ """ + + def __init__(self, base_transform, reinterpreted_batch_ndims, cache_size=0): + super().__init__(cache_size=cache_size) + self.base_transform = base_transform.with_cache(cache_size) + self.reinterpreted_batch_ndims = reinterpreted_batch_ndims + + def with_cache(self, cache_size=1): + if self._cache_size == cache_size: + return self + return IndependentTransform( + self.base_transform, self.reinterpreted_batch_ndims, cache_size=cache_size + ) + + @constraints.dependent_property(is_discrete=False) + def domain(self): + return constraints.independent( + self.base_transform.domain, self.reinterpreted_batch_ndims + ) + + @constraints.dependent_property(is_discrete=False) + def codomain(self): + return constraints.independent( + self.base_transform.codomain, self.reinterpreted_batch_ndims + ) + + @property + def bijective(self): + return self.base_transform.bijective + + @property + def sign(self): + return self.base_transform.sign + + def _call(self, x): + if x.dim() < self.domain.event_dim: + raise ValueError("Too few dimensions on input") + return self.base_transform(x) + + def _inverse(self, y): + if y.dim() < self.codomain.event_dim: + raise ValueError("Too few dimensions on input") + return self.base_transform.inv(y) + + def log_abs_det_jacobian(self, x, y): + result = self.base_transform.log_abs_det_jacobian(x, y) + result = _sum_rightmost(result, self.reinterpreted_batch_ndims) + return result + + def __repr__(self): + return f"{self.__class__.__name__}({repr(self.base_transform)}, {self.reinterpreted_batch_ndims})" + + def forward_shape(self, shape): + return self.base_transform.forward_shape(shape) + + def inverse_shape(self, shape): + return self.base_transform.inverse_shape(shape) + + +class ReshapeTransform(Transform): + """ + Unit Jacobian transform to reshape the rightmost part of a tensor. + + Note that ``in_shape`` and ``out_shape`` must have the same number of + elements, just as for :meth:`torch.Tensor.reshape`. + + Arguments: + in_shape (torch.Size): The input event shape. + out_shape (torch.Size): The output event shape. 
+ """ + + bijective = True + + def __init__(self, in_shape, out_shape, cache_size=0): + self.in_shape = torch.Size(in_shape) + self.out_shape = torch.Size(out_shape) + if self.in_shape.numel() != self.out_shape.numel(): + raise ValueError("in_shape, out_shape have different numbers of elements") + super().__init__(cache_size=cache_size) + + @constraints.dependent_property + def domain(self): + return constraints.independent(constraints.real, len(self.in_shape)) + + @constraints.dependent_property + def codomain(self): + return constraints.independent(constraints.real, len(self.out_shape)) + + def with_cache(self, cache_size=1): + if self._cache_size == cache_size: + return self + return ReshapeTransform(self.in_shape, self.out_shape, cache_size=cache_size) + + def _call(self, x): + batch_shape = x.shape[: x.dim() - len(self.in_shape)] + return x.reshape(batch_shape + self.out_shape) + + def _inverse(self, y): + batch_shape = y.shape[: y.dim() - len(self.out_shape)] + return y.reshape(batch_shape + self.in_shape) + + def log_abs_det_jacobian(self, x, y): + batch_shape = x.shape[: x.dim() - len(self.in_shape)] + return x.new_zeros(batch_shape) + + def forward_shape(self, shape): + if len(shape) < len(self.in_shape): + raise ValueError("Too few dimensions on input") + cut = len(shape) - len(self.in_shape) + if shape[cut:] != self.in_shape: + raise ValueError( + f"Shape mismatch: expected {shape[cut:]} but got {self.in_shape}" + ) + return shape[:cut] + self.out_shape + + def inverse_shape(self, shape): + if len(shape) < len(self.out_shape): + raise ValueError("Too few dimensions on input") + cut = len(shape) - len(self.out_shape) + if shape[cut:] != self.out_shape: + raise ValueError( + f"Shape mismatch: expected {shape[cut:]} but got {self.out_shape}" + ) + return shape[:cut] + self.in_shape + + +class ExpTransform(Transform): + r""" + Transform via the mapping :math:`y = \exp(x)`. + """ + domain = constraints.real + codomain = constraints.positive + bijective = True + sign = +1 + + def __eq__(self, other): + return isinstance(other, ExpTransform) + + def _call(self, x): + return x.exp() + + def _inverse(self, y): + return y.log() + + def log_abs_det_jacobian(self, x, y): + return x + + +class PowerTransform(Transform): + r""" + Transform via the mapping :math:`y = x^{\text{exponent}}`. 
+ """ + domain = constraints.positive + codomain = constraints.positive + bijective = True + + def __init__(self, exponent, cache_size=0): + super().__init__(cache_size=cache_size) + (self.exponent,) = broadcast_all(exponent) + + def with_cache(self, cache_size=1): + if self._cache_size == cache_size: + return self + return PowerTransform(self.exponent, cache_size=cache_size) + + @lazy_property + def sign(self): + return self.exponent.sign() + + def __eq__(self, other): + if not isinstance(other, PowerTransform): + return False + return self.exponent.eq(other.exponent).all().item() + + def _call(self, x): + return x.pow(self.exponent) + + def _inverse(self, y): + return y.pow(1 / self.exponent) + + def log_abs_det_jacobian(self, x, y): + return (self.exponent * y / x).abs().log() + + def forward_shape(self, shape): + return torch.broadcast_shapes(shape, getattr(self.exponent, "shape", ())) + + def inverse_shape(self, shape): + return torch.broadcast_shapes(shape, getattr(self.exponent, "shape", ())) + + +def _clipped_sigmoid(x): + finfo = torch.finfo(x.dtype) + return torch.clamp(torch.sigmoid(x), min=finfo.tiny, max=1.0 - finfo.eps) + + +class SigmoidTransform(Transform): + r""" + Transform via the mapping :math:`y = \frac{1}{1 + \exp(-x)}` and :math:`x = \text{logit}(y)`. + """ + domain = constraints.real + codomain = constraints.unit_interval + bijective = True + sign = +1 + + def __eq__(self, other): + return isinstance(other, SigmoidTransform) + + def _call(self, x): + return _clipped_sigmoid(x) + + def _inverse(self, y): + finfo = torch.finfo(y.dtype) + y = y.clamp(min=finfo.tiny, max=1.0 - finfo.eps) + return y.log() - (-y).log1p() + + def log_abs_det_jacobian(self, x, y): + return -F.softplus(-x) - F.softplus(x) + + +class SoftplusTransform(Transform): + r""" + Transform via the mapping :math:`\text{Softplus}(x) = \log(1 + \exp(x))`. + The implementation reverts to the linear function when :math:`x > 20`. + """ + domain = constraints.real + codomain = constraints.positive + bijective = True + sign = +1 + + def __eq__(self, other): + return isinstance(other, SoftplusTransform) + + def _call(self, x): + return softplus(x) + + def _inverse(self, y): + return (-y).expm1().neg().log() + y + + def log_abs_det_jacobian(self, x, y): + return -softplus(-x) + + +class TanhTransform(Transform): + r""" + Transform via the mapping :math:`y = \tanh(x)`. + + It is equivalent to + ``` + ComposeTransform([AffineTransform(0., 2.), SigmoidTransform(), AffineTransform(-1., 2.)]) + ``` + However this might not be numerically stable, thus it is recommended to use `TanhTransform` + instead. + + Note that one should use `cache_size=1` when it comes to `NaN/Inf` values. + + """ + domain = constraints.real + codomain = constraints.interval(-1.0, 1.0) + bijective = True + sign = +1 + + def __eq__(self, other): + return isinstance(other, TanhTransform) + + def _call(self, x): + return x.tanh() + + def _inverse(self, y): + # We do not clamp to the boundary here as it may degrade the performance of certain algorithms. + # one should use `cache_size=1` instead + return torch.atanh(y) + + def log_abs_det_jacobian(self, x, y): + # We use a formula that is more numerically stable, see details in the following link + # https://github.com/tensorflow/probability/blob/master/tensorflow_probability/python/bijectors/tanh.py#L69-L80 + return 2.0 * (math.log(2.0) - x - softplus(-2.0 * x)) + + +class AbsTransform(Transform): + r""" + Transform via the mapping :math:`y = |x|`. 
+ """ + domain = constraints.real + codomain = constraints.positive + + def __eq__(self, other): + return isinstance(other, AbsTransform) + + def _call(self, x): + return x.abs() + + def _inverse(self, y): + return y + + +class AffineTransform(Transform): + r""" + Transform via the pointwise affine mapping :math:`y = \text{loc} + \text{scale} \times x`. + + Args: + loc (Tensor or float): Location parameter. + scale (Tensor or float): Scale parameter. + event_dim (int): Optional size of `event_shape`. This should be zero + for univariate random variables, 1 for distributions over vectors, + 2 for distributions over matrices, etc. + """ + bijective = True + + def __init__(self, loc, scale, event_dim=0, cache_size=0): + super().__init__(cache_size=cache_size) + self.loc = loc + self.scale = scale + self._event_dim = event_dim + + @property + def event_dim(self): + return self._event_dim + + @constraints.dependent_property(is_discrete=False) + def domain(self): + if self.event_dim == 0: + return constraints.real + return constraints.independent(constraints.real, self.event_dim) + + @constraints.dependent_property(is_discrete=False) + def codomain(self): + if self.event_dim == 0: + return constraints.real + return constraints.independent(constraints.real, self.event_dim) + + def with_cache(self, cache_size=1): + if self._cache_size == cache_size: + return self + return AffineTransform( + self.loc, self.scale, self.event_dim, cache_size=cache_size + ) + + def __eq__(self, other): + if not isinstance(other, AffineTransform): + return False + + if isinstance(self.loc, numbers.Number) and isinstance( + other.loc, numbers.Number + ): + if self.loc != other.loc: + return False + else: + if not (self.loc == other.loc).all().item(): + return False + + if isinstance(self.scale, numbers.Number) and isinstance( + other.scale, numbers.Number + ): + if self.scale != other.scale: + return False + else: + if not (self.scale == other.scale).all().item(): + return False + + return True + + @property + def sign(self): + if isinstance(self.scale, numbers.Real): + return 1 if float(self.scale) > 0 else -1 if float(self.scale) < 0 else 0 + return self.scale.sign() + + def _call(self, x): + return self.loc + self.scale * x + + def _inverse(self, y): + return (y - self.loc) / self.scale + + def log_abs_det_jacobian(self, x, y): + shape = x.shape + scale = self.scale + if isinstance(scale, numbers.Real): + result = torch.full_like(x, math.log(abs(scale))) + else: + result = torch.abs(scale).log() + if self.event_dim: + result_size = result.size()[: -self.event_dim] + (-1,) + result = result.view(result_size).sum(-1) + shape = shape[: -self.event_dim] + return result.expand(shape) + + def forward_shape(self, shape): + return torch.broadcast_shapes( + shape, getattr(self.loc, "shape", ()), getattr(self.scale, "shape", ()) + ) + + def inverse_shape(self, shape): + return torch.broadcast_shapes( + shape, getattr(self.loc, "shape", ()), getattr(self.scale, "shape", ()) + ) + + +class CorrCholeskyTransform(Transform): + r""" + Transforms an uncontrained real vector :math:`x` with length :math:`D*(D-1)/2` into the + Cholesky factor of a D-dimension correlation matrix. This Cholesky factor is a lower + triangular matrix with positive diagonals and unit Euclidean norm for each row. + The transform is processed as follows: + + 1. First we convert x into a lower triangular matrix in row order. + 2. 
For each row :math:`X_i` of the lower triangular part, we apply a *signed* version of + class :class:`StickBreakingTransform` to transform :math:`X_i` into a + unit Euclidean length vector using the following steps: + - Scales into the interval :math:`(-1, 1)` domain: :math:`r_i = \tanh(X_i)`. + - Transforms into an unsigned domain: :math:`z_i = r_i^2`. + - Applies :math:`s_i = StickBreakingTransform(z_i)`. + - Transforms back into signed domain: :math:`y_i = sign(r_i) * \sqrt{s_i}`. + """ + domain = constraints.real_vector + codomain = constraints.corr_cholesky + bijective = True + + def _call(self, x): + x = torch.tanh(x) + eps = torch.finfo(x.dtype).eps + x = x.clamp(min=-1 + eps, max=1 - eps) + r = vec_to_tril_matrix(x, diag=-1) + # apply stick-breaking on the squared values + # Note that y = sign(r) * sqrt(z * z1m_cumprod) + # = (sign(r) * sqrt(z)) * sqrt(z1m_cumprod) = r * sqrt(z1m_cumprod) + z = r**2 + z1m_cumprod_sqrt = (1 - z).sqrt().cumprod(-1) + # Diagonal elements must be 1. + r = r + torch.eye(r.shape[-1], dtype=r.dtype, device=r.device) + y = r * pad(z1m_cumprod_sqrt[..., :-1], [1, 0], value=1) + return y + + def _inverse(self, y): + # inverse stick-breaking + # See: https://mc-stan.org/docs/2_18/reference-manual/cholesky-factors-of-correlation-matrices-1.html + y_cumsum = 1 - torch.cumsum(y * y, dim=-1) + y_cumsum_shifted = pad(y_cumsum[..., :-1], [1, 0], value=1) + y_vec = tril_matrix_to_vec(y, diag=-1) + y_cumsum_vec = tril_matrix_to_vec(y_cumsum_shifted, diag=-1) + t = y_vec / (y_cumsum_vec).sqrt() + # inverse of tanh + x = (t.log1p() - t.neg().log1p()) / 2 + return x + + def log_abs_det_jacobian(self, x, y, intermediates=None): + # Because domain and codomain are two spaces with different dimensions, determinant of + # Jacobian is not well-defined. We return `log_abs_det_jacobian` of `x` and the + # flattened lower triangular part of `y`. + + # See: https://mc-stan.org/docs/2_18/reference-manual/cholesky-factors-of-correlation-matrices-1.html + y1m_cumsum = 1 - (y * y).cumsum(dim=-1) + # by taking diagonal=-2, we don't need to shift z_cumprod to the right + # also works for 2 x 2 matrix + y1m_cumsum_tril = tril_matrix_to_vec(y1m_cumsum, diag=-2) + stick_breaking_logdet = 0.5 * (y1m_cumsum_tril).log().sum(-1) + tanh_logdet = -2 * (x + softplus(-2 * x) - math.log(2.0)).sum(dim=-1) + return stick_breaking_logdet + tanh_logdet + + def forward_shape(self, shape): + # Reshape from (..., N) to (..., D, D). + if len(shape) < 1: + raise ValueError("Too few dimensions on input") + N = shape[-1] + D = round((0.25 + 2 * N) ** 0.5 + 0.5) + if D * (D - 1) // 2 != N: + raise ValueError("Input is not a flattend lower-diagonal number") + return shape[:-1] + (D, D) + + def inverse_shape(self, shape): + # Reshape from (..., D, D) to (..., N). + if len(shape) < 2: + raise ValueError("Too few dimensions on input") + if shape[-2] != shape[-1]: + raise ValueError("Input is not square") + D = shape[-1] + N = D * (D - 1) // 2 + return shape[:-2] + (N,) + + +class SoftmaxTransform(Transform): + r""" + Transform from unconstrained space to the simplex via :math:`y = \exp(x)` then + normalizing. + + This is not bijective and cannot be used for HMC. However this acts mostly + coordinate-wise (except for the final normalization), and thus is + appropriate for coordinate-wise optimization algorithms. 
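A concrete sketch of the ``CorrCholeskyTransform`` construction above: six unconstrained numbers parameterize the Cholesky factor of a 4 x 4 correlation matrix::

    import torch
    from torch.distributions.transforms import CorrCholeskyTransform

    t = CorrCholeskyTransform()
    x = torch.randn(6)                       # D*(D-1)/2 = 6  ->  D = 4
    L = t(x)
    assert L.shape == (4, 4)
    # Rows have unit Euclidean norm, so L @ L.T has a unit diagonal, i.e. it
    # is a valid correlation matrix.
    assert torch.allclose(L.pow(2).sum(-1), torch.ones(4), atol=1e-5)
    assert torch.allclose(t.inv(L), x, atol=1e-4)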
+ """ + domain = constraints.real_vector + codomain = constraints.simplex + + def __eq__(self, other): + return isinstance(other, SoftmaxTransform) + + def _call(self, x): + logprobs = x + probs = (logprobs - logprobs.max(-1, True)[0]).exp() + return probs / probs.sum(-1, True) + + def _inverse(self, y): + probs = y + return probs.log() + + def forward_shape(self, shape): + if len(shape) < 1: + raise ValueError("Too few dimensions on input") + return shape + + def inverse_shape(self, shape): + if len(shape) < 1: + raise ValueError("Too few dimensions on input") + return shape + + +class StickBreakingTransform(Transform): + """ + Transform from unconstrained space to the simplex of one additional + dimension via a stick-breaking process. + + This transform arises as an iterated sigmoid transform in a stick-breaking + construction of the `Dirichlet` distribution: the first logit is + transformed via sigmoid to the first probability and the probability of + everything else, and then the process recurses. + + This is bijective and appropriate for use in HMC; however it mixes + coordinates together and is less appropriate for optimization. + """ + + domain = constraints.real_vector + codomain = constraints.simplex + bijective = True + + def __eq__(self, other): + return isinstance(other, StickBreakingTransform) + + def _call(self, x): + offset = x.shape[-1] + 1 - x.new_ones(x.shape[-1]).cumsum(-1) + z = _clipped_sigmoid(x - offset.log()) + z_cumprod = (1 - z).cumprod(-1) + y = pad(z, [0, 1], value=1) * pad(z_cumprod, [1, 0], value=1) + return y + + def _inverse(self, y): + y_crop = y[..., :-1] + offset = y.shape[-1] - y.new_ones(y_crop.shape[-1]).cumsum(-1) + sf = 1 - y_crop.cumsum(-1) + # we clamp to make sure that sf is positive which sometimes does not + # happen when y[-1] ~ 0 or y[:-1].sum() ~ 1 + sf = torch.clamp(sf, min=torch.finfo(y.dtype).tiny) + x = y_crop.log() - sf.log() + offset.log() + return x + + def log_abs_det_jacobian(self, x, y): + offset = x.shape[-1] + 1 - x.new_ones(x.shape[-1]).cumsum(-1) + x = x - offset.log() + # use the identity 1 - sigmoid(x) = exp(-x) * sigmoid(x) + detJ = (-x + F.logsigmoid(x) + y[..., :-1].log()).sum(-1) + return detJ + + def forward_shape(self, shape): + if len(shape) < 1: + raise ValueError("Too few dimensions on input") + return shape[:-1] + (shape[-1] + 1,) + + def inverse_shape(self, shape): + if len(shape) < 1: + raise ValueError("Too few dimensions on input") + return shape[:-1] + (shape[-1] - 1,) + + +class LowerCholeskyTransform(Transform): + """ + Transform from unconstrained matrices to lower-triangular matrices with + nonnegative diagonal entries. + + This is useful for parameterizing positive definite matrices in terms of + their Cholesky factorization. + """ + + domain = constraints.independent(constraints.real, 2) + codomain = constraints.lower_cholesky + + def __eq__(self, other): + return isinstance(other, LowerCholeskyTransform) + + def _call(self, x): + return x.tril(-1) + x.diagonal(dim1=-2, dim2=-1).exp().diag_embed() + + def _inverse(self, y): + return y.tril(-1) + y.diagonal(dim1=-2, dim2=-1).log().diag_embed() + + +class PositiveDefiniteTransform(Transform): + """ + Transform from unconstrained matrices to positive-definite matrices. 
+ """ + + domain = constraints.independent(constraints.real, 2) + codomain = constraints.positive_definite # type: ignore[assignment] + + def __eq__(self, other): + return isinstance(other, PositiveDefiniteTransform) + + def _call(self, x): + x = LowerCholeskyTransform()(x) + return x @ x.mT + + def _inverse(self, y): + y = torch.linalg.cholesky(y) + return LowerCholeskyTransform().inv(y) + + +class CatTransform(Transform): + """ + Transform functor that applies a sequence of transforms `tseq` + component-wise to each submatrix at `dim`, of length `lengths[dim]`, + in a way compatible with :func:`torch.cat`. + + Example:: + + x0 = torch.cat([torch.range(1, 10), torch.range(1, 10)], dim=0) + x = torch.cat([x0, x0], dim=0) + t0 = CatTransform([ExpTransform(), identity_transform], dim=0, lengths=[10, 10]) + t = CatTransform([t0, t0], dim=0, lengths=[20, 20]) + y = t(x) + """ + + transforms: List[Transform] + + def __init__(self, tseq, dim=0, lengths=None, cache_size=0): + assert all(isinstance(t, Transform) for t in tseq) + if cache_size: + tseq = [t.with_cache(cache_size) for t in tseq] + super().__init__(cache_size=cache_size) + self.transforms = list(tseq) + if lengths is None: + lengths = [1] * len(self.transforms) + self.lengths = list(lengths) + assert len(self.lengths) == len(self.transforms) + self.dim = dim + + @lazy_property + def event_dim(self): + return max(t.event_dim for t in self.transforms) + + @lazy_property + def length(self): + return sum(self.lengths) + + def with_cache(self, cache_size=1): + if self._cache_size == cache_size: + return self + return CatTransform(self.transforms, self.dim, self.lengths, cache_size) + + def _call(self, x): + assert -x.dim() <= self.dim < x.dim() + assert x.size(self.dim) == self.length + yslices = [] + start = 0 + for trans, length in zip(self.transforms, self.lengths): + xslice = x.narrow(self.dim, start, length) + yslices.append(trans(xslice)) + start = start + length # avoid += for jit compat + return torch.cat(yslices, dim=self.dim) + + def _inverse(self, y): + assert -y.dim() <= self.dim < y.dim() + assert y.size(self.dim) == self.length + xslices = [] + start = 0 + for trans, length in zip(self.transforms, self.lengths): + yslice = y.narrow(self.dim, start, length) + xslices.append(trans.inv(yslice)) + start = start + length # avoid += for jit compat + return torch.cat(xslices, dim=self.dim) + + def log_abs_det_jacobian(self, x, y): + assert -x.dim() <= self.dim < x.dim() + assert x.size(self.dim) == self.length + assert -y.dim() <= self.dim < y.dim() + assert y.size(self.dim) == self.length + logdetjacs = [] + start = 0 + for trans, length in zip(self.transforms, self.lengths): + xslice = x.narrow(self.dim, start, length) + yslice = y.narrow(self.dim, start, length) + logdetjac = trans.log_abs_det_jacobian(xslice, yslice) + if trans.event_dim < self.event_dim: + logdetjac = _sum_rightmost(logdetjac, self.event_dim - trans.event_dim) + logdetjacs.append(logdetjac) + start = start + length # avoid += for jit compat + # Decide whether to concatenate or sum. 
+ dim = self.dim + if dim >= 0: + dim = dim - x.dim() + dim = dim + self.event_dim + if dim < 0: + return torch.cat(logdetjacs, dim=dim) + else: + return sum(logdetjacs) + + @property + def bijective(self): + return all(t.bijective for t in self.transforms) + + @constraints.dependent_property + def domain(self): + return constraints.cat( + [t.domain for t in self.transforms], self.dim, self.lengths + ) + + @constraints.dependent_property + def codomain(self): + return constraints.cat( + [t.codomain for t in self.transforms], self.dim, self.lengths + ) + + +class StackTransform(Transform): + """ + Transform functor that applies a sequence of transforms `tseq` + component-wise to each submatrix at `dim` + in a way compatible with :func:`torch.stack`. + + Example:: + + x = torch.stack([torch.range(1, 10), torch.range(1, 10)], dim=1) + t = StackTransform([ExpTransform(), identity_transform], dim=1) + y = t(x) + """ + + transforms: List[Transform] + + def __init__(self, tseq, dim=0, cache_size=0): + assert all(isinstance(t, Transform) for t in tseq) + if cache_size: + tseq = [t.with_cache(cache_size) for t in tseq] + super().__init__(cache_size=cache_size) + self.transforms = list(tseq) + self.dim = dim + + def with_cache(self, cache_size=1): + if self._cache_size == cache_size: + return self + return StackTransform(self.transforms, self.dim, cache_size) + + def _slice(self, z): + return [z.select(self.dim, i) for i in range(z.size(self.dim))] + + def _call(self, x): + assert -x.dim() <= self.dim < x.dim() + assert x.size(self.dim) == len(self.transforms) + yslices = [] + for xslice, trans in zip(self._slice(x), self.transforms): + yslices.append(trans(xslice)) + return torch.stack(yslices, dim=self.dim) + + def _inverse(self, y): + assert -y.dim() <= self.dim < y.dim() + assert y.size(self.dim) == len(self.transforms) + xslices = [] + for yslice, trans in zip(self._slice(y), self.transforms): + xslices.append(trans.inv(yslice)) + return torch.stack(xslices, dim=self.dim) + + def log_abs_det_jacobian(self, x, y): + assert -x.dim() <= self.dim < x.dim() + assert x.size(self.dim) == len(self.transforms) + assert -y.dim() <= self.dim < y.dim() + assert y.size(self.dim) == len(self.transforms) + logdetjacs = [] + yslices = self._slice(y) + xslices = self._slice(x) + for xslice, yslice, trans in zip(xslices, yslices, self.transforms): + logdetjacs.append(trans.log_abs_det_jacobian(xslice, yslice)) + return torch.stack(logdetjacs, dim=self.dim) + + @property + def bijective(self): + return all(t.bijective for t in self.transforms) + + @constraints.dependent_property + def domain(self): + return constraints.stack([t.domain for t in self.transforms], self.dim) + + @constraints.dependent_property + def codomain(self): + return constraints.stack([t.codomain for t in self.transforms], self.dim) + + +class CumulativeDistributionTransform(Transform): + """ + Transform via the cumulative distribution function of a probability distribution. + + Args: + distribution (Distribution): Distribution whose cumulative distribution function to use for + the transformation. + + Example:: + + # Construct a Gaussian copula from a multivariate normal. 
+ base_dist = MultivariateNormal( + loc=torch.zeros(2), + scale_tril=LKJCholesky(2).sample(), + ) + transform = CumulativeDistributionTransform(Normal(0, 1)) + copula = TransformedDistribution(base_dist, [transform]) + """ + + bijective = True + codomain = constraints.unit_interval + sign = +1 + + def __init__(self, distribution, cache_size=0): + super().__init__(cache_size=cache_size) + self.distribution = distribution + + @property + def domain(self): + return self.distribution.support + + def _call(self, x): + return self.distribution.cdf(x) + + def _inverse(self, y): + return self.distribution.icdf(y) + + def log_abs_det_jacobian(self, x, y): + return self.distribution.log_prob(x) + + def with_cache(self, cache_size=1): + if self._cache_size == cache_size: + return self + return CumulativeDistributionTransform(self.distribution, cache_size=cache_size) diff --git a/venv/lib/python3.10/site-packages/torch/distributions/uniform.py b/venv/lib/python3.10/site-packages/torch/distributions/uniform.py new file mode 100644 index 0000000000000000000000000000000000000000..e939bb4aae39bef7e0c64bb5fb546a90b381c036 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/uniform.py @@ -0,0 +1,99 @@ +from numbers import Number + +import torch +from torch import nan +from torch.distributions import constraints +from torch.distributions.distribution import Distribution +from torch.distributions.utils import broadcast_all + +__all__ = ["Uniform"] + + +class Uniform(Distribution): + r""" + Generates uniformly distributed random samples from the half-open interval + ``[low, high)``. + + Example:: + + >>> m = Uniform(torch.tensor([0.0]), torch.tensor([5.0])) + >>> m.sample() # uniformly distributed in the range [0.0, 5.0) + >>> # xdoctest: +SKIP + tensor([ 2.3418]) + + Args: + low (float or Tensor): lower range (inclusive). + high (float or Tensor): upper range (exclusive). + """ + # TODO allow (loc,scale) parameterization to allow independent constraints. 
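Looping back to ``CumulativeDistributionTransform`` above: besides the copula construction in its docstring, it also gives the classic probability integral transform; a sketch::

    import torch
    from torch.distributions import Normal
    from torch.distributions.transforms import CumulativeDistributionTransform

    d = Normal(0.0, 1.0)
    t = CumulativeDistributionTransform(d)
    x = d.sample((1000,))
    u = t(x)
    # Pushing samples through their own CDF yields values in [0, 1]
    # (strictly inside for all but extreme samples) ...
    assert ((u >= 0) & (u <= 1)).all()
    # ... and log|det J| is just the log-density of the base distribution.
    assert torch.allclose(t.log_abs_det_jacobian(x, u), d.log_prob(x))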
+ arg_constraints = { + "low": constraints.dependent(is_discrete=False, event_dim=0), + "high": constraints.dependent(is_discrete=False, event_dim=0), + } + has_rsample = True + + @property + def mean(self): + return (self.high + self.low) / 2 + + @property + def mode(self): + return nan * self.high + + @property + def stddev(self): + return (self.high - self.low) / 12**0.5 + + @property + def variance(self): + return (self.high - self.low).pow(2) / 12 + + def __init__(self, low, high, validate_args=None): + self.low, self.high = broadcast_all(low, high) + + if isinstance(low, Number) and isinstance(high, Number): + batch_shape = torch.Size() + else: + batch_shape = self.low.size() + super().__init__(batch_shape, validate_args=validate_args) + + if self._validate_args and not torch.lt(self.low, self.high).all(): + raise ValueError("Uniform is not defined when low>= high") + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Uniform, _instance) + batch_shape = torch.Size(batch_shape) + new.low = self.low.expand(batch_shape) + new.high = self.high.expand(batch_shape) + super(Uniform, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new + + @constraints.dependent_property(is_discrete=False, event_dim=0) + def support(self): + return constraints.interval(self.low, self.high) + + def rsample(self, sample_shape=torch.Size()): + shape = self._extended_shape(sample_shape) + rand = torch.rand(shape, dtype=self.low.dtype, device=self.low.device) + return self.low + rand * (self.high - self.low) + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + lb = self.low.le(value).type_as(self.low) + ub = self.high.gt(value).type_as(self.low) + return torch.log(lb.mul(ub)) - torch.log(self.high - self.low) + + def cdf(self, value): + if self._validate_args: + self._validate_sample(value) + result = (value - self.low) / (self.high - self.low) + return result.clamp(min=0, max=1) + + def icdf(self, value): + result = value * (self.high - self.low) + self.low + return result + + def entropy(self): + return torch.log(self.high - self.low) diff --git a/venv/lib/python3.10/site-packages/torch/distributions/utils.py b/venv/lib/python3.10/site-packages/torch/distributions/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7a6d31a05722c182738ed25584de9ea75890eb28 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/utils.py @@ -0,0 +1,177 @@ +from functools import update_wrapper +from numbers import Number +from typing import Any, Dict + +import torch +import torch.nn.functional as F +from torch.overrides import is_tensor_like + +euler_constant = 0.57721566490153286060 # Euler Mascheroni Constant + +__all__ = [ + "broadcast_all", + "logits_to_probs", + "clamp_probs", + "probs_to_logits", + "lazy_property", + "tril_matrix_to_vec", + "vec_to_tril_matrix", +] + + +def broadcast_all(*values): + r""" + Given a list of values (possibly containing numbers), returns a list where each + value is broadcasted based on the following rules: + - `torch.*Tensor` instances are broadcasted as per :ref:`_broadcasting-semantics`. + - numbers.Number instances (scalars) are upcast to tensors having + the same size and type as the first tensor passed to `values`. If all the + values are scalars, then they are upcasted to scalar Tensors. 
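A quick numeric sketch of the ``Uniform`` class just added::

    import math
    import torch
    from torch.distributions import Uniform

    m = Uniform(torch.tensor([0.0]), torch.tensor([4.0]))
    x = m.sample((3,))
    # Density is 1/(high - low) on the support, so log_prob is -log(4).
    assert torch.allclose(m.log_prob(x), torch.full((3, 1), -math.log(4.0)))
    # cdf is linear on [low, high] and icdf undoes it.
    assert torch.allclose(m.icdf(m.cdf(x)), x, atol=1e-6)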
+ + Args: + values (list of `numbers.Number`, `torch.*Tensor` or objects implementing __torch_function__) + + Raises: + ValueError: if any of the values is not a `numbers.Number` instance, + a `torch.*Tensor` instance, or an instance implementing __torch_function__ + """ + if not all(is_tensor_like(v) or isinstance(v, Number) for v in values): + raise ValueError( + "Input arguments must all be instances of numbers.Number, " + "torch.Tensor or objects implementing __torch_function__." + ) + if not all(is_tensor_like(v) for v in values): + options: Dict[str, Any] = dict(dtype=torch.get_default_dtype()) + for value in values: + if isinstance(value, torch.Tensor): + options = dict(dtype=value.dtype, device=value.device) + break + new_values = [ + v if is_tensor_like(v) else torch.tensor(v, **options) for v in values + ] + return torch.broadcast_tensors(*new_values) + return torch.broadcast_tensors(*values) + + +def _standard_normal(shape, dtype, device): + if torch._C._get_tracing_state(): + # [JIT WORKAROUND] lack of support for .normal_() + return torch.normal( + torch.zeros(shape, dtype=dtype, device=device), + torch.ones(shape, dtype=dtype, device=device), + ) + return torch.empty(shape, dtype=dtype, device=device).normal_() + + +def _sum_rightmost(value, dim): + r""" + Sum out ``dim`` many rightmost dimensions of a given tensor. + + Args: + value (Tensor): A tensor of ``.dim()`` at least ``dim``. + dim (int): The number of rightmost dims to sum out. + """ + if dim == 0: + return value + required_shape = value.shape[:-dim] + (-1,) + return value.reshape(required_shape).sum(-1) + + +def logits_to_probs(logits, is_binary=False): + r""" + Converts a tensor of logits into probabilities. Note that for the + binary case, each value denotes log odds, whereas for the + multi-dimensional case, the values along the last dimension denote + the log probabilities (possibly unnormalized) of the events. + """ + if is_binary: + return torch.sigmoid(logits) + return F.softmax(logits, dim=-1) + + +def clamp_probs(probs): + eps = torch.finfo(probs.dtype).eps + return probs.clamp(min=eps, max=1 - eps) + + +def probs_to_logits(probs, is_binary=False): + r""" + Converts a tensor of probabilities into logits. For the binary case, + this denotes the probability of occurrence of the event indexed by `1`. + For the multi-dimensional case, the values along the last dimension + denote the probabilities of occurrence of each of the events. + """ + ps_clamped = clamp_probs(probs) + if is_binary: + return torch.log(ps_clamped) - torch.log1p(-ps_clamped) + return torch.log(ps_clamped) + + +class lazy_property: + r""" + Used as a decorator for lazy loading of class attributes. This uses a + non-data descriptor that calls the wrapped method to compute the property on + first call; thereafter replacing the wrapped method into an instance + attribute. + """ + + def __init__(self, wrapped): + self.wrapped = wrapped + update_wrapper(self, wrapped) + + def __get__(self, instance, obj_type=None): + if instance is None: + return _lazy_property_and_property(self.wrapped) + with torch.enable_grad(): + value = self.wrapped(instance) + setattr(instance, self.wrapped.__name__, value) + return value + + +class _lazy_property_and_property(lazy_property, property): + """We want lazy properties to look like multiple things. 
+ + * property when Sphinx autodoc looks + * lazy_property when Distribution validate_args looks + """ + + def __init__(self, wrapped): + property.__init__(self, wrapped) + + +def tril_matrix_to_vec(mat: torch.Tensor, diag: int = 0) -> torch.Tensor: + r""" + Convert a `D x D` matrix or a batch of matrices into a (batched) vector + which comprises of lower triangular elements from the matrix in row order. + """ + n = mat.shape[-1] + if not torch._C._get_tracing_state() and (diag < -n or diag >= n): + raise ValueError(f"diag ({diag}) provided is outside [{-n}, {n-1}].") + arange = torch.arange(n, device=mat.device) + tril_mask = arange < arange.view(-1, 1) + (diag + 1) + vec = mat[..., tril_mask] + return vec + + +def vec_to_tril_matrix(vec: torch.Tensor, diag: int = 0) -> torch.Tensor: + r""" + Convert a vector or a batch of vectors into a batched `D x D` + lower triangular matrix containing elements from the vector in row order. + """ + # +ve root of D**2 + (1+2*diag)*D - |diag| * (diag+1) - 2*vec.shape[-1] = 0 + n = ( + -(1 + 2 * diag) + + ((1 + 2 * diag) ** 2 + 8 * vec.shape[-1] + 4 * abs(diag) * (diag + 1)) ** 0.5 + ) / 2 + eps = torch.finfo(vec.dtype).eps + if not torch._C._get_tracing_state() and (round(n) - n > eps): + raise ValueError( + f"The size of last dimension is {vec.shape[-1]} which cannot be expressed as " + + "the lower triangular part of a square D x D matrix." + ) + n = round(n.item()) if isinstance(n, torch.Tensor) else round(n) + mat = vec.new_zeros(vec.shape[:-1] + torch.Size((n, n))) + arange = torch.arange(n, device=vec.device) + tril_mask = arange < arange.view(-1, 1) + (diag + 1) + mat[..., tril_mask] = vec + return mat diff --git a/venv/lib/python3.10/site-packages/torch/distributions/von_mises.py b/venv/lib/python3.10/site-packages/torch/distributions/von_mises.py new file mode 100644 index 0000000000000000000000000000000000000000..17f52fad25b3de6aa3455e4740269ab050dd4f08 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/von_mises.py @@ -0,0 +1,209 @@ +import math + +import torch +import torch.jit +from torch.distributions import constraints +from torch.distributions.distribution import Distribution +from torch.distributions.utils import broadcast_all, lazy_property + +__all__ = ["VonMises"] + + +def _eval_poly(y, coef): + coef = list(coef) + result = coef.pop() + while coef: + result = coef.pop() + y * result + return result + + +_I0_COEF_SMALL = [ + 1.0, + 3.5156229, + 3.0899424, + 1.2067492, + 0.2659732, + 0.360768e-1, + 0.45813e-2, +] +_I0_COEF_LARGE = [ + 0.39894228, + 0.1328592e-1, + 0.225319e-2, + -0.157565e-2, + 0.916281e-2, + -0.2057706e-1, + 0.2635537e-1, + -0.1647633e-1, + 0.392377e-2, +] +_I1_COEF_SMALL = [ + 0.5, + 0.87890594, + 0.51498869, + 0.15084934, + 0.2658733e-1, + 0.301532e-2, + 0.32411e-3, +] +_I1_COEF_LARGE = [ + 0.39894228, + -0.3988024e-1, + -0.362018e-2, + 0.163801e-2, + -0.1031555e-1, + 0.2282967e-1, + -0.2895312e-1, + 0.1787654e-1, + -0.420059e-2, +] + +_COEF_SMALL = [_I0_COEF_SMALL, _I1_COEF_SMALL] +_COEF_LARGE = [_I0_COEF_LARGE, _I1_COEF_LARGE] + + +def _log_modified_bessel_fn(x, order=0): + """ + Returns ``log(I_order(x))`` for ``x > 0``, + where `order` is either 0 or 1. 
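A combined sketch of the utility helpers above::

    import torch
    from torch.distributions.utils import (
        broadcast_all, logits_to_probs, probs_to_logits,
        tril_matrix_to_vec, vec_to_tril_matrix,
    )

    # Scalars are upcast and broadcast against the tensor argument.
    a, b = broadcast_all(torch.zeros(2, 3), 1.0)
    assert a.shape == b.shape == (2, 3)

    # probs_to_logits / logits_to_probs invert each other (up to clamping).
    p = torch.tensor([0.1, 0.2, 0.7])
    assert torch.allclose(logits_to_probs(probs_to_logits(p)), p, atol=1e-6)

    # The triangular packing helpers round-trip: 6 values fill a 3 x 3 lower
    # triangle (diag=0), or a strictly lower-triangular 4 x 4 (diag=-1).
    vec = torch.arange(1.0, 7.0)
    assert torch.equal(tril_matrix_to_vec(vec_to_tril_matrix(vec)), vec)
    assert vec_to_tril_matrix(vec, diag=-1).shape == (4, 4)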
+ """ + assert order == 0 or order == 1 + + # compute small solution + y = x / 3.75 + y = y * y + small = _eval_poly(y, _COEF_SMALL[order]) + if order == 1: + small = x.abs() * small + small = small.log() + + # compute large solution + y = 3.75 / x + large = x - 0.5 * x.log() + _eval_poly(y, _COEF_LARGE[order]).log() + + result = torch.where(x < 3.75, small, large) + return result + + +@torch.jit.script_if_tracing +def _rejection_sample(loc, concentration, proposal_r, x): + done = torch.zeros(x.shape, dtype=torch.bool, device=loc.device) + while not done.all(): + u = torch.rand((3,) + x.shape, dtype=loc.dtype, device=loc.device) + u1, u2, u3 = u.unbind() + z = torch.cos(math.pi * u1) + f = (1 + proposal_r * z) / (proposal_r + z) + c = concentration * (proposal_r - f) + accept = ((c * (2 - c) - u2) > 0) | ((c / u2).log() + 1 - c >= 0) + if accept.any(): + x = torch.where(accept, (u3 - 0.5).sign() * f.acos(), x) + done = done | accept + return (x + math.pi + loc) % (2 * math.pi) - math.pi + + +class VonMises(Distribution): + """ + A circular von Mises distribution. + + This implementation uses polar coordinates. The ``loc`` and ``value`` args + can be any real number (to facilitate unconstrained optimization), but are + interpreted as angles modulo 2 pi. + + Example:: + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = VonMises(torch.tensor([1.0]), torch.tensor([1.0])) + >>> m.sample() # von Mises distributed with loc=1 and concentration=1 + tensor([1.9777]) + + :param torch.Tensor loc: an angle in radians. + :param torch.Tensor concentration: concentration parameter + """ + + arg_constraints = {"loc": constraints.real, "concentration": constraints.positive} + support = constraints.real + has_rsample = False + + def __init__(self, loc, concentration, validate_args=None): + self.loc, self.concentration = broadcast_all(loc, concentration) + batch_shape = self.loc.shape + event_shape = torch.Size() + super().__init__(batch_shape, event_shape, validate_args) + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + log_prob = self.concentration * torch.cos(value - self.loc) + log_prob = ( + log_prob + - math.log(2 * math.pi) + - _log_modified_bessel_fn(self.concentration, order=0) + ) + return log_prob + + @lazy_property + def _loc(self): + return self.loc.to(torch.double) + + @lazy_property + def _concentration(self): + return self.concentration.to(torch.double) + + @lazy_property + def _proposal_r(self): + kappa = self._concentration + tau = 1 + (1 + 4 * kappa**2).sqrt() + rho = (tau - (2 * tau).sqrt()) / (2 * kappa) + _proposal_r = (1 + rho**2) / (2 * rho) + # second order Taylor expansion around 0 for small kappa + _proposal_r_taylor = 1 / kappa + kappa + return torch.where(kappa < 1e-5, _proposal_r_taylor, _proposal_r) + + @torch.no_grad() + def sample(self, sample_shape=torch.Size()): + """ + The sampling algorithm for the von Mises distribution is based on the + following paper: D.J. Best and N.I. Fisher, "Efficient simulation of the + von Mises distribution." Applied Statistics (1979): 152-157. + + Sampling is always done in double precision internally to avoid a hang + in _rejection_sample() for small values of the concentration, which + starts to happen for single precision around 1e-4 (see issue #88443). 
+ """ + shape = self._extended_shape(sample_shape) + x = torch.empty(shape, dtype=self._loc.dtype, device=self.loc.device) + return _rejection_sample( + self._loc, self._concentration, self._proposal_r, x + ).to(self.loc.dtype) + + def expand(self, batch_shape): + try: + return super().expand(batch_shape) + except NotImplementedError: + validate_args = self.__dict__.get("_validate_args") + loc = self.loc.expand(batch_shape) + concentration = self.concentration.expand(batch_shape) + return type(self)(loc, concentration, validate_args=validate_args) + + @property + def mean(self): + """ + The provided mean is the circular one. + """ + return self.loc + + @property + def mode(self): + return self.loc + + @lazy_property + def variance(self): + """ + The provided variance is the circular one. + """ + return ( + 1 + - ( + _log_modified_bessel_fn(self.concentration, order=1) + - _log_modified_bessel_fn(self.concentration, order=0) + ).exp() + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributions/weibull.py b/venv/lib/python3.10/site-packages/torch/distributions/weibull.py new file mode 100644 index 0000000000000000000000000000000000000000..39e07d580bc5d3cc9d72a2ce4037a547ec991d6c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/weibull.py @@ -0,0 +1,83 @@ +import torch +from torch.distributions import constraints +from torch.distributions.exponential import Exponential +from torch.distributions.gumbel import euler_constant +from torch.distributions.transformed_distribution import TransformedDistribution +from torch.distributions.transforms import AffineTransform, PowerTransform +from torch.distributions.utils import broadcast_all + +__all__ = ["Weibull"] + + +class Weibull(TransformedDistribution): + r""" + Samples from a two-parameter Weibull distribution. + + Example: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = Weibull(torch.tensor([1.0]), torch.tensor([1.0])) + >>> m.sample() # sample from a Weibull distribution with scale=1, concentration=1 + tensor([ 0.4784]) + + Args: + scale (float or Tensor): Scale parameter of distribution (lambda). + concentration (float or Tensor): Concentration parameter of distribution (k/shape). 
+ """ + arg_constraints = { + "scale": constraints.positive, + "concentration": constraints.positive, + } + support = constraints.positive + + def __init__(self, scale, concentration, validate_args=None): + self.scale, self.concentration = broadcast_all(scale, concentration) + self.concentration_reciprocal = self.concentration.reciprocal() + base_dist = Exponential( + torch.ones_like(self.scale), validate_args=validate_args + ) + transforms = [ + PowerTransform(exponent=self.concentration_reciprocal), + AffineTransform(loc=0, scale=self.scale), + ] + super().__init__(base_dist, transforms, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Weibull, _instance) + new.scale = self.scale.expand(batch_shape) + new.concentration = self.concentration.expand(batch_shape) + new.concentration_reciprocal = new.concentration.reciprocal() + base_dist = self.base_dist.expand(batch_shape) + transforms = [ + PowerTransform(exponent=new.concentration_reciprocal), + AffineTransform(loc=0, scale=new.scale), + ] + super(Weibull, new).__init__(base_dist, transforms, validate_args=False) + new._validate_args = self._validate_args + return new + + @property + def mean(self): + return self.scale * torch.exp(torch.lgamma(1 + self.concentration_reciprocal)) + + @property + def mode(self): + return ( + self.scale + * ((self.concentration - 1) / self.concentration) + ** self.concentration.reciprocal() + ) + + @property + def variance(self): + return self.scale.pow(2) * ( + torch.exp(torch.lgamma(1 + 2 * self.concentration_reciprocal)) + - torch.exp(2 * torch.lgamma(1 + self.concentration_reciprocal)) + ) + + def entropy(self): + return ( + euler_constant * (1 - self.concentration_reciprocal) + + torch.log(self.scale * self.concentration_reciprocal) + + 1 + ) diff --git a/venv/lib/python3.10/site-packages/torch/mps/__init__.py b/venv/lib/python3.10/site-packages/torch/mps/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..52cda4fb0c06c0b56c41cd031dea53dcaafc87ed --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/mps/__init__.py @@ -0,0 +1,130 @@ +r""" +This package enables an interface for accessing MPS (Metal Performance Shaders) backend in Python. +Metal is Apple's API for programming metal GPU (graphics processor unit). Using MPS means that increased +performance can be achieved, by running work on the metal GPU(s). +See https://developer.apple.com/documentation/metalperformanceshaders for more details. +""" +import torch +from .. import Tensor + +_is_in_bad_fork = getattr(torch._C, "_mps_is_in_bad_fork", lambda: False) +_default_mps_generator: torch._C.Generator = None # type: ignore[assignment] + + +# local helper function (not public or exported) +def _get_default_mps_generator() -> torch._C.Generator: + global _default_mps_generator + if _default_mps_generator is None: + _default_mps_generator = torch._C._mps_get_default_generator() + return _default_mps_generator + + +def synchronize() -> None: + r"""Waits for all kernels in all streams on a MPS device to complete.""" + return torch._C._mps_deviceSynchronize() + + +def get_rng_state() -> Tensor: + r"""Returns the random number generator state as a ByteTensor.""" + return _get_default_mps_generator().get_state() + + +def set_rng_state(new_state: Tensor) -> None: + r"""Sets the random number generator state. 
+ + Args: + new_state (torch.ByteTensor): The desired state + """ + new_state_copy = new_state.clone(memory_format=torch.contiguous_format) + _get_default_mps_generator().set_state(new_state_copy) + + +def manual_seed(seed: int) -> None: + r"""Sets the seed for generating random numbers. + + Args: + seed (int): The desired seed. + """ + # the torch.mps.manual_seed() can be called from the global + # torch.manual_seed() in torch/random.py. So we need to make + # sure mps is available (otherwise we just return without + # erroring out) + if not torch._C._has_mps: + return + seed = int(seed) + _get_default_mps_generator().manual_seed(seed) + + +def seed() -> None: + r"""Sets the seed for generating random numbers to a random number.""" + _get_default_mps_generator().seed() + + +def empty_cache() -> None: + r"""Releases all unoccupied cached memory currently held by the caching + allocator so that those can be used in other GPU applications. + """ + torch._C._mps_emptyCache() + + +def set_per_process_memory_fraction(fraction) -> None: + r"""Set memory fraction for limiting process's memory allocation on MPS device. + The allowed value equals the fraction multiplied by recommended maximum device memory + (obtained from Metal API device.recommendedMaxWorkingSetSize). + If trying to allocate more than the allowed value in a process, it will raise an out of + memory error in allocator. + + Args: + fraction(float): Range: 0~2. Allowed memory equals total_memory * fraction. + + .. note:: + Passing 0 to fraction means unlimited allocations + (may cause system failure if out of memory). + Passing fraction greater than 1.0 allows limits beyond the value + returned from device.recommendedMaxWorkingSetSize. + """ + + if not isinstance(fraction, float): + raise TypeError("Invalid type for fraction argument, must be `float`") + if fraction < 0 or fraction > 2: + raise ValueError(f"Invalid fraction value: {fraction}. Allowed range: 0~2") + + torch._C._mps_setMemoryFraction(fraction) + + +def current_allocated_memory() -> int: + r"""Returns the current GPU memory occupied by tensors in bytes. + + .. note:: + The returned size does not include cached allocations in + memory pools of MPSAllocator. + """ + return torch._C._mps_currentAllocatedMemory() + + +def driver_allocated_memory() -> int: + r"""Returns total GPU memory allocated by Metal driver for the process in bytes. + + .. note:: + The returned size includes cached allocations in MPSAllocator pools + as well as allocations from MPS/MPSGraph frameworks. + """ + return torch._C._mps_driverAllocatedMemory() + + +from . 
import profiler +from .event import Event + +__all__ = [ + "get_rng_state", + "manual_seed", + "seed", + "set_rng_state", + "synchronize", + "empty_cache", + "set_per_process_memory_fraction", + "current_allocated_memory", + "driver_allocated_memory", + "Event", + "profiler", +] diff --git a/venv/lib/python3.10/site-packages/torch/mps/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/mps/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..596811c1f930c62610be3dec13dea30a9cc61d98 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/mps/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/mps/__pycache__/event.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/mps/__pycache__/event.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71fea1deac221fb6f50ac307045e4894a795dec4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/mps/__pycache__/event.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/mps/__pycache__/profiler.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/mps/__pycache__/profiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eed310d98f6ec64e59b2ab3afdd927868a174187 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/mps/__pycache__/profiler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/mps/event.py b/venv/lib/python3.10/site-packages/torch/mps/event.py new file mode 100644 index 0000000000000000000000000000000000000000..a206b640ef4ad41c546564d3fa91ba257762c9c6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/mps/event.py @@ -0,0 +1,45 @@ +import torch + + +class Event: + r"""Wrapper around an MPS event. + + MPS events are synchronization markers that can be used to monitor the + device's progress, to accurately measure timing, and to synchronize MPS streams. + + Args: + enable_timing (bool, optional): indicates if the event should measure time + (default: ``False``) + """ + + def __init__(self, enable_timing=False): + self.__eventId = torch._C._mps_acquireEvent(enable_timing) + + def __del__(self): + # checks if torch._C is already destroyed + if hasattr(torch._C, "_mps_releaseEvent") and self.__eventId > 0: + torch._C._mps_releaseEvent(self.__eventId) + + def record(self): + r"""Records the event in the default stream.""" + torch._C._mps_recordEvent(self.__eventId) + + def wait(self): + r"""Makes all future work submitted to the default stream wait for this event.""" + torch._C._mps_waitForEvent(self.__eventId) + + def query(self): + r"""Returns True if all work currently captured by event has completed.""" + return torch._C._mps_queryEvent(self.__eventId) + + def synchronize(self): + r"""Waits until the completion of all work currently captured in this event. + This prevents the CPU thread from proceeding until the event completes. + """ + torch._C._mps_synchronizeEvent(self.__eventId) + + def elapsed_time(self, end_event): + r"""Returns the time elapsed in milliseconds after the event was + recorded and before the end_event was recorded. 
+ """ + return torch._C._mps_elapsedTimeOfEvents(self.__eventId, end_event.__eventId) diff --git a/venv/lib/python3.10/site-packages/torch/mps/profiler.py b/venv/lib/python3.10/site-packages/torch/mps/profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..9094a275136c2120a635e97f130fde3d8beb34cc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/mps/profiler.py @@ -0,0 +1,59 @@ +import contextlib + +import torch + +__all__ = ["start", "stop", "profile"] + + +def start(mode: str = "interval", wait_until_completed: bool = False) -> None: + r"""Start OS Signpost tracing from MPS backend. + + The generated OS Signposts could be recorded and viewed in + XCode Instruments Logging tool. + + Args: + mode(str): OS Signpost tracing mode could be "interval", "event", + or both "interval,event". + The interval mode traces the duration of execution of the operations, + whereas event mode marks the completion of executions. + See document `Recording Performance Data`_ for more info. + wait_until_completed(bool): Waits until the MPS Stream complete + executing each encoded GPU operation. This helps generating single + dispatches on the trace's timeline. + Note that enabling this option would affect the performance negatively. + + .. _Recording Performance Data: + https://developer.apple.com/documentation/os/logging/recording_performance_data + """ + mode_normalized = mode.lower().replace(" ", "") + torch._C._mps_profilerStartTrace(mode_normalized, wait_until_completed) + + +def stop(): + r"""Stops generating OS Signpost tracing from MPS backend.""" + torch._C._mps_profilerStopTrace() + + +@contextlib.contextmanager +def profile(mode: str = "interval", wait_until_completed: bool = False): + r"""Context Manager to enabling generating OS Signpost tracing from MPS backend. + + Args: + mode(str): OS Signpost tracing mode could be "interval", "event", + or both "interval,event". + The interval mode traces the duration of execution of the operations, + whereas event mode marks the completion of executions. + See document `Recording Performance Data`_ for more info. + wait_until_completed(bool): Waits until the MPS Stream complete + executing each encoded GPU operation. This helps generating single + dispatches on the trace's timeline. + Note that enabling this option would affect the performance negatively. + + .. 
_Recording Performance Data: + https://developer.apple.com/documentation/os/logging/recording_performance_data + """ + try: + start(mode, wait_until_completed) + yield + finally: + stop() diff --git a/venv/lib/python3.10/site-packages/torch/package/__init__.py b/venv/lib/python3.10/site-packages/torch/package/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..66cace5931ac17c548becfddbb0e56dbbdac3d38 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/package/__init__.py @@ -0,0 +1,12 @@ +from .analyze.is_from_package import is_from_package +from .file_structure_representation import Directory +from .glob_group import GlobGroup +from .importer import ( + Importer, + ObjMismatchError, + ObjNotFoundError, + OrderedImporter, + sys_importer, +) +from .package_exporter import EmptyMatchError, PackageExporter, PackagingError +from .package_importer import PackageImporter diff --git a/venv/lib/python3.10/site-packages/torch/package/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/package/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b16505ad2828ae057379429175250f52b21aa141 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/package/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/package/__pycache__/_digraph.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/package/__pycache__/_digraph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c093cb4fde5dcd8b1b6fbd73b222b25f5363fa5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/package/__pycache__/_digraph.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/package/__pycache__/_directory_reader.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/package/__pycache__/_directory_reader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..030d969055f48dbc37a382bea2fb4babd8b262b4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/package/__pycache__/_directory_reader.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/package/__pycache__/_importlib.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/package/__pycache__/_importlib.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d17c4fbe55e9b714af50035bb78698eb9a963238 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/package/__pycache__/_importlib.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/package/__pycache__/_mangling.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/package/__pycache__/_mangling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1365a943b99d460aaac250c23753916fe798a108 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/package/__pycache__/_mangling.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/package/__pycache__/_mock.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/package/__pycache__/_mock.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3899f6ca2f469d69b3e245b75bb5df915c2b56f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/package/__pycache__/_mock.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/package/__pycache__/_package_pickler.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/torch/package/__pycache__/_package_pickler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a36f2444c3e225308bc5ae8ebc25c12723282dc1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/package/__pycache__/_package_pickler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/package/__pycache__/_package_unpickler.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/package/__pycache__/_package_unpickler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b50d5629bdfdd8bfc5d7fddae21c88a6ba31c131 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/package/__pycache__/_package_unpickler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/package/__pycache__/_stdlib.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/package/__pycache__/_stdlib.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..447a95e88b5e4c4b80d1edec051822bb4fe5790c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/package/__pycache__/_stdlib.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/package/__pycache__/file_structure_representation.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/package/__pycache__/file_structure_representation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0aabd89c6fdf33ab8e4b82d93a371f356e05208e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/package/__pycache__/file_structure_representation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/package/__pycache__/find_file_dependencies.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/package/__pycache__/find_file_dependencies.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..667e69e9d86c0bacc8170e3b1bbd763d53b9819a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/package/__pycache__/find_file_dependencies.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/package/__pycache__/importer.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/package/__pycache__/importer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae796a8e11987ae0c414eb6101e045f751231d29 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/package/__pycache__/importer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/package/__pycache__/package_exporter.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/package/__pycache__/package_exporter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..667c7be13dbeee82162495df541842a29a6987a2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/package/__pycache__/package_exporter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/package/__pycache__/package_importer.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/package/__pycache__/package_importer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6adc0622e65353a9205cd59bbb27ed0b9098c22c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/package/__pycache__/package_importer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/package/_digraph.py 
b/venv/lib/python3.10/site-packages/torch/package/_digraph.py new file mode 100644 index 0000000000000000000000000000000000000000..f84a51398f005403a8e3a6e5610b5f721a0d4be7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/package/_digraph.py @@ -0,0 +1,173 @@ +from collections import deque +from typing import List, Set + + +class DiGraph: + """Really simple unweighted directed graph data structure to track dependencies. + + The API is pretty much the same as networkx so if you add something just + copy their API. + """ + + def __init__(self): + # Dict of node -> dict of arbitrary attributes + self._node = {} + # Nested dict of node -> successor node -> nothing. + # (didn't implement edge data) + self._succ = {} + # Nested dict of node -> predecessor node -> nothing. + self._pred = {} + + # Keep track of the order in which nodes are added to + # the graph. + self._node_order = {} + self._insertion_idx = 0 + + def add_node(self, n, **kwargs): + """Add a node to the graph. + + Args: + n: the node. Can we any object that is a valid dict key. + **kwargs: any attributes you want to attach to the node. + """ + if n not in self._node: + self._node[n] = kwargs + self._succ[n] = {} + self._pred[n] = {} + self._node_order[n] = self._insertion_idx + self._insertion_idx += 1 + else: + self._node[n].update(kwargs) + + def add_edge(self, u, v): + """Add an edge to graph between nodes ``u`` and ``v`` + + ``u`` and ``v`` will be created if they do not already exist. + """ + # add nodes + self.add_node(u) + self.add_node(v) + + # add the edge + self._succ[u][v] = True + self._pred[v][u] = True + + def successors(self, n): + """Returns an iterator over successor nodes of n.""" + try: + return iter(self._succ[n]) + except KeyError as e: + raise ValueError(f"The node {n} is not in the digraph.") from e + + def predecessors(self, n): + """Returns an iterator over predecessors nodes of n.""" + try: + return iter(self._pred[n]) + except KeyError as e: + raise ValueError(f"The node {n} is not in the digraph.") from e + + @property + def edges(self): + """Returns an iterator over all edges (u, v) in the graph""" + for n, successors in self._succ.items(): + for succ in successors: + yield n, succ + + @property + def nodes(self): + """Returns a dictionary of all nodes to their attributes.""" + return self._node + + def __iter__(self): + """Iterate over the nodes.""" + return iter(self._node) + + def __contains__(self, n): + """Returns True if ``n`` is a node in the graph, False otherwise.""" + try: + return n in self._node + except TypeError: + return False + + def forward_transitive_closure(self, src: str) -> Set[str]: + """Returns a set of nodes that are reachable from src""" + + result = set(src) + working_set = deque(src) + while len(working_set) > 0: + cur = working_set.popleft() + for n in self.successors(cur): + if n not in result: + result.add(n) + working_set.append(n) + return result + + def backward_transitive_closure(self, src: str) -> Set[str]: + """Returns a set of nodes that are reachable from src in reverse direction""" + + result = set(src) + working_set = deque(src) + while len(working_set) > 0: + cur = working_set.popleft() + for n in self.predecessors(cur): + if n not in result: + result.add(n) + working_set.append(n) + return result + + def all_paths(self, src: str, dst: str): + """Returns a subgraph rooted at src that shows all the paths to dst.""" + + result_graph = DiGraph() + # First compute forward transitive closure of src (all things reachable from src). 
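For reference, a minimal usage sketch of the DiGraph class being added above; the module names are hypothetical, and the expected outputs follow directly from the add_node/add_edge/successors/edges implementations shown.

g = DiGraph()
g.add_edge("app.main", "app.utils")      # "app.main" depends on "app.utils"
g.add_edge("app.main", "torch")
g.add_node("app.extra", error=None)      # nodes can carry arbitrary attributes

print(list(g.successors("app.main")))    # ['app.utils', 'torch']
print(list(g.edges))                     # [('app.main', 'app.utils'), ('app.main', 'torch')]
print("torch" in g)                      # True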
+ forward_reachable_from_src = self.forward_transitive_closure(src) + + if dst not in forward_reachable_from_src: + return result_graph + + # Second walk the reverse dependencies of dst, adding each node to + # the output graph iff it is also present in forward_reachable_from_src. + # we don't use backward_transitive_closures for optimization purposes + working_set = deque(dst) + while len(working_set) > 0: + cur = working_set.popleft() + for n in self.predecessors(cur): + if n in forward_reachable_from_src: + result_graph.add_edge(n, cur) + # only explore further if its reachable from src + working_set.append(n) + + return result_graph.to_dot() + + def first_path(self, dst: str) -> List[str]: + """Returns a list of nodes that show the first path that resulted in dst being added to the graph.""" + path = [] + + while dst: + path.append(dst) + candidates = self._pred[dst].keys() + dst, min_idx = "", None + for candidate in candidates: + idx = self._node_order.get(candidate, None) + if idx is None: + break + if min_idx is None or idx < min_idx: + min_idx = idx + dst = candidate + + return list(reversed(path)) + + def to_dot(self) -> str: + """Returns the dot representation of the graph. + + Returns: + A dot representation of the graph. + """ + edges = "\n".join(f'"{f}" -> "{t}";' for f, t in self.edges) + return f"""\ +digraph G {{ +rankdir = LR; +node [shape=box]; +{edges} +}} +""" diff --git a/venv/lib/python3.10/site-packages/torch/package/_directory_reader.py b/venv/lib/python3.10/site-packages/torch/package/_directory_reader.py new file mode 100644 index 0000000000000000000000000000000000000000..cec5333c3e3faf3268555b4d30e6072a3f5de349 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/package/_directory_reader.py @@ -0,0 +1,63 @@ +import os.path +from glob import glob +from typing import cast + +import torch +from torch.types import Storage + +__serialization_id_record_name__ = ".data/serialization_id" + + +# because get_storage_from_record returns a tensor!? +class _HasStorage: + def __init__(self, storage): + self._storage = storage + + def storage(self): + return self._storage + + +class DirectoryReader: + """ + Class to allow PackageImporter to operate on unzipped packages. Methods + copy the behavior of the internal PyTorchFileReader class (which is used for + accessing packages in all other cases). + + N.B.: ScriptObjects are not depickleable or accessible via this DirectoryReader + class due to ScriptObjects requiring an actual PyTorchFileReader instance. 
+ """ + + def __init__(self, directory): + self.directory = directory + + def get_record(self, name): + filename = f"{self.directory}/{name}" + with open(filename, "rb") as f: + return f.read() + + def get_storage_from_record(self, name, numel, dtype): + filename = f"{self.directory}/{name}" + nbytes = torch._utils._element_size(dtype) * numel + storage = cast(Storage, torch.UntypedStorage) + return _HasStorage(storage.from_file(filename=filename, nbytes=nbytes)) + + def has_record(self, path): + full_path = os.path.join(self.directory, path) + return os.path.isfile(full_path) + + def get_all_records( + self, + ): + files = [] + for filename in glob(f"{self.directory}/**", recursive=True): + if not os.path.isdir(filename): + files.append(filename[len(self.directory) + 1 :]) + return files + + def serialization_id( + self, + ): + if self.has_record(__serialization_id_record_name__): + return self.get_record(__serialization_id_record_name__) + else: + return "" diff --git a/venv/lib/python3.10/site-packages/torch/package/_importlib.py b/venv/lib/python3.10/site-packages/torch/package/_importlib.py new file mode 100644 index 0000000000000000000000000000000000000000..fd303b6141e7eeeeb891927c063ed5588927388a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/package/_importlib.py @@ -0,0 +1,93 @@ +import _warnings +import os.path + +# note: implementations +# copied from cpython's import code + + +# _zip_searchorder defines how we search for a module in the Zip +# archive: we first search for a package __init__, then for +# non-package .pyc, and .py entries. The .pyc entries +# are swapped by initzipimport() if we run in optimized mode. Also, +# '/' is replaced by path_sep there. + +_zip_searchorder = ( + ("/__init__.py", True), + (".py", False), +) + + +# Replace any occurrences of '\r\n?' in the input string with '\n'. +# This converts DOS and Mac line endings to Unix line endings. +def _normalize_line_endings(source): + source = source.replace(b"\r\n", b"\n") + source = source.replace(b"\r", b"\n") + return source + + +def _resolve_name(name, package, level): + """Resolve a relative module name to an absolute one.""" + bits = package.rsplit(".", level - 1) + if len(bits) < level: + raise ValueError("attempted relative import beyond top-level package") + base = bits[0] + return f"{base}.{name}" if name else base + + +def _sanity_check(name, package, level): + """Verify arguments are "sane".""" + if not isinstance(name, str): + raise TypeError(f"module name must be str, not {type(name)}") + if level < 0: + raise ValueError("level must be >= 0") + if level > 0: + if not isinstance(package, str): + raise TypeError("__package__ not set to a string") + elif not package: + raise ImportError("attempted relative import with no known parent package") + if not name and level == 0: + raise ValueError("Empty module name") + + +def _calc___package__(globals): + """Calculate what __package__ should be. + + __package__ is not guaranteed to be defined or could be set to None + to represent that its proper value is unknown. 
+ + """ + package = globals.get("__package__") + spec = globals.get("__spec__") + if package is not None: + if spec is not None and package != spec.parent: + _warnings.warn( # noqa: G010 + f"__package__ != __spec__.parent ({package!r} != {spec.parent!r})", # noqa: G004 + ImportWarning, + stacklevel=3, + ) + return package + elif spec is not None: + return spec.parent + else: + _warnings.warn( # noqa: G010 + "can't resolve package from __spec__ or __package__, " + "falling back on __name__ and __path__", + ImportWarning, + stacklevel=3, + ) + package = globals["__name__"] + if "__path__" not in globals: + package = package.rpartition(".")[0] + return package + + +def _normalize_path(path): + """Normalize a path by ensuring it is a string. + + If the resulting string contains path separators, an exception is raised. + """ + parent, file_name = os.path.split(path) + if parent: + raise ValueError(f"{path!r} must be only a file name") + else: + return file_name diff --git a/venv/lib/python3.10/site-packages/torch/package/_mangling.py b/venv/lib/python3.10/site-packages/torch/package/_mangling.py new file mode 100644 index 0000000000000000000000000000000000000000..0876d64664a2726bcb58d50b49326889f030f34e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/package/_mangling.py @@ -0,0 +1,62 @@ +"""Import mangling. +See mangling.md for details. +""" +import re + +_mangle_index = 0 + + +class PackageMangler: + """ + Used on import, to ensure that all modules imported have a shared mangle parent. + """ + + def __init__(self): + global _mangle_index + self._mangle_index = _mangle_index + # Increment the global index + _mangle_index += 1 + # Angle brackets are used so that there is almost no chance of + # confusing this module for a real module. Plus, it is Python's + # preferred way of denoting special modules. + self._mangle_parent = f"" + + def mangle(self, name) -> str: + assert len(name) != 0 + return self._mangle_parent + "." + name + + def demangle(self, mangled: str) -> str: + """ + Note: This only demangles names that were mangled by this specific + PackageMangler. It will pass through names created by a different + PackageMangler instance. + """ + if mangled.startswith(self._mangle_parent + "."): + return mangled.partition(".")[2] + + # wasn't a mangled name + return mangled + + def parent_name(self): + return self._mangle_parent + + +def is_mangled(name: str) -> bool: + return bool(re.match(r"", name)) + + +def demangle(name: str) -> str: + """ + Note: Unlike PackageMangler.demangle, this version works on any + mangled name, irrespective of which PackageMangler created it. + """ + if is_mangled(name): + first, sep, last = name.partition(".") + # If there is only a base mangle prefix, e.g. '', + # then return an empty string. 
+ return last if len(sep) != 0 else "" + return name + + +def get_mangle_prefix(name: str) -> str: + return name.partition(".")[0] if is_mangled(name) else name diff --git a/venv/lib/python3.10/site-packages/torch/package/_mock.py b/venv/lib/python3.10/site-packages/torch/package/_mock.py new file mode 100644 index 0000000000000000000000000000000000000000..b0bdb95cc48c4b1bfaa9edc07fceb1b16d733752 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/package/_mock.py @@ -0,0 +1,122 @@ +_magic_methods = [ + "__subclasscheck__", + "__hex__", + "__rmul__", + "__float__", + "__idiv__", + "__setattr__", + "__div__", + "__invert__", + "__nonzero__", + "__rshift__", + "__eq__", + "__pos__", + "__round__", + "__rand__", + "__or__", + "__complex__", + "__divmod__", + "__len__", + "__reversed__", + "__copy__", + "__reduce__", + "__deepcopy__", + "__rdivmod__", + "__rrshift__", + "__ifloordiv__", + "__hash__", + "__iand__", + "__xor__", + "__isub__", + "__oct__", + "__ceil__", + "__imod__", + "__add__", + "__truediv__", + "__unicode__", + "__le__", + "__delitem__", + "__sizeof__", + "__sub__", + "__ne__", + "__pow__", + "__bytes__", + "__mul__", + "__itruediv__", + "__bool__", + "__iter__", + "__abs__", + "__gt__", + "__iadd__", + "__enter__", + "__floordiv__", + "__call__", + "__neg__", + "__and__", + "__ixor__", + "__getitem__", + "__exit__", + "__cmp__", + "__getstate__", + "__index__", + "__contains__", + "__floor__", + "__lt__", + "__getattr__", + "__mod__", + "__trunc__", + "__delattr__", + "__instancecheck__", + "__setitem__", + "__ipow__", + "__ilshift__", + "__long__", + "__irshift__", + "__imul__", + "__lshift__", + "__dir__", + "__ge__", + "__int__", + "__ior__", +] + + +class MockedObject: + _name: str + + def __new__(cls, *args, **kwargs): + # _suppress_err is set by us in the mocked module impl, so that we can + # construct instances of MockedObject to hand out to people looking up + # module attributes. + + # Any other attempt to construct a MockedObject instance (say, in the + # unpickling process) should give an error. + if not kwargs.get("_suppress_err"): + raise NotImplementedError( + f"Object '{cls._name}' was mocked out during packaging " + f"but it is being used in '__new__'. If this error is " + "happening during 'load_pickle', please ensure that your " + "pickled object doesn't contain any mocked objects." + ) + # Otherwise, this is just a regular object creation + # (e.g. `x = MockedObject("foo")`), so pass it through normally. 
+ return super().__new__(cls) + + def __init__(self, name: str, _suppress_err: bool): + self.__dict__["_name"] = name + + def __repr__(self): + return f"MockedObject({self._name})" + + +def install_method(method_name): + def _not_implemented(self, *args, **kwargs): + raise NotImplementedError( + f"Object '{self._name}' was mocked out during packaging but it is being used in {method_name}" + ) + + setattr(MockedObject, method_name, _not_implemented) + + +for method_name in _magic_methods: + install_method(method_name) diff --git a/venv/lib/python3.10/site-packages/torch/package/_package_pickler.py b/venv/lib/python3.10/site-packages/torch/package/_package_pickler.py new file mode 100644 index 0000000000000000000000000000000000000000..cabc6a82164fb3aaf767f14cf60bca58535fcf61 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/package/_package_pickler.py @@ -0,0 +1,118 @@ +"""isort:skip_file""" +from pickle import ( # type: ignore[attr-defined] + _compat_pickle, + _extension_registry, + _getattribute, + _Pickler, + EXT1, + EXT2, + EXT4, + GLOBAL, + Pickler, + PicklingError, + STACK_GLOBAL, +) +from struct import pack +from types import FunctionType + +from .importer import Importer, ObjMismatchError, ObjNotFoundError, sys_importer + + +class PackagePickler(_Pickler): + """Package-aware pickler. + + This behaves the same as a normal pickler, except it uses an `Importer` + to find objects and modules to save. + """ + + def __init__(self, importer: Importer, *args, **kwargs): + self.importer = importer + super().__init__(*args, **kwargs) + + # Make sure the dispatch table copied from _Pickler is up-to-date. + # Previous issues have been encountered where a library (e.g. dill) + # mutates _Pickler.dispatch, PackagePickler makes a copy when this lib + # is imported, then the offending library removes its dispatch entries, + # leaving PackagePickler with a stale dispatch table that may cause + # unwanted behavior. + self.dispatch = _Pickler.dispatch.copy() # type: ignore[misc] + self.dispatch[FunctionType] = PackagePickler.save_global # type: ignore[assignment] + + def save_global(self, obj, name=None): + # unfortunately the pickler code is factored in a way that + # forces us to copy/paste this function. The only change is marked + # CHANGED below. + write = self.write # type: ignore[attr-defined] + memo = self.memo # type: ignore[attr-defined] + + # CHANGED: import module from module environment instead of __import__ + try: + module_name, name = self.importer.get_name(obj, name) + except (ObjNotFoundError, ObjMismatchError) as err: + raise PicklingError(f"Can't pickle {obj}: {str(err)}") from None + + module = self.importer.import_module(module_name) + _, parent = _getattribute(module, name) + # END CHANGED + + if self.proto >= 2: # type: ignore[attr-defined] + code = _extension_registry.get((module_name, name)) + if code: + assert code > 0 + if code <= 0xFF: + write(EXT1 + pack("<B", code)) + elif code <= 0xFFFF: + write(EXT2 + pack("<H", code)) + else: + write(EXT4 + pack("<i", code)) + return + lastname = name.rpartition(".")[2] + if parent is module: + name = lastname + # Non-ASCII identifiers are supported only with protocols >= 3.
+ if self.proto >= 4: # type: ignore[attr-defined] + self.save(module_name) # type: ignore[attr-defined] + self.save(name) # type: ignore[attr-defined] + write(STACK_GLOBAL) + elif parent is not module: + self.save_reduce(getattr, (parent, lastname)) # type: ignore[attr-defined] + elif self.proto >= 3: # type: ignore[attr-defined] + write( + GLOBAL + + bytes(module_name, "utf-8") + + b"\n" + + bytes(name, "utf-8") + + b"\n" + ) + else: + if self.fix_imports: # type: ignore[attr-defined] + r_name_mapping = _compat_pickle.REVERSE_NAME_MAPPING + r_import_mapping = _compat_pickle.REVERSE_IMPORT_MAPPING + if (module_name, name) in r_name_mapping: + module_name, name = r_name_mapping[(module_name, name)] + elif module_name in r_import_mapping: + module_name = r_import_mapping[module_name] + try: + write( + GLOBAL + + bytes(module_name, "ascii") + + b"\n" + + bytes(name, "ascii") + + b"\n" + ) + except UnicodeEncodeError: + raise PicklingError( + "can't pickle global identifier '%s.%s' using " + "pickle protocol %i" % (module, name, self.proto) # type: ignore[attr-defined] + ) from None + + self.memoize(obj) # type: ignore[attr-defined] + + +def create_pickler(data_buf, importer, protocol=4): + if importer is sys_importer: + # if we are using the normal import library system, then + # we can use the C implementation of pickle which is faster + return Pickler(data_buf, protocol=protocol) + else: + return PackagePickler(importer, data_buf, protocol=protocol) diff --git a/venv/lib/python3.10/site-packages/torch/package/_package_unpickler.py b/venv/lib/python3.10/site-packages/torch/package/_package_unpickler.py new file mode 100644 index 0000000000000000000000000000000000000000..b00210e3c191e5dfaf94170e083b56ebc44d5bf2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/package/_package_unpickler.py @@ -0,0 +1,26 @@ +import _compat_pickle +import pickle + +from .importer import Importer + + +class PackageUnpickler(pickle._Unpickler): # type: ignore[name-defined] + """Package-aware unpickler. + + This behaves the same as a normal unpickler, except it uses `importer` to + find any global names that it encounters while unpickling. + """ + + def __init__(self, importer: Importer, *args, **kwargs): + super().__init__(*args, **kwargs) + self._importer = importer + + def find_class(self, module, name): + # Subclasses may override this. + if self.proto < 3 and self.fix_imports: # type: ignore[attr-defined] + if (module, name) in _compat_pickle.NAME_MAPPING: + module, name = _compat_pickle.NAME_MAPPING[(module, name)] + elif module in _compat_pickle.IMPORT_MAPPING: + module = _compat_pickle.IMPORT_MAPPING[module] + mod = self._importer.import_module(module) + return getattr(mod, name) diff --git a/venv/lib/python3.10/site-packages/torch/package/_stdlib.py b/venv/lib/python3.10/site-packages/torch/package/_stdlib.py new file mode 100644 index 0000000000000000000000000000000000000000..a810d50661cb3ded86ee42ce623ec660276a754a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/package/_stdlib.py @@ -0,0 +1,464 @@ +"""List of Python standard library modules. + +Sadly, there is no reliable way to tell whether a module is part of the +standard library except by comparing to a canonical list. + +This is taken from https://github.com/PyCQA/isort/tree/develop/isort/stdlibs, +which itself is sourced from the Python documentation. 
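The is_stdlib_module helper defined just below can then be used as a simple membership test; a quick sketch with hypothetical inputs:

print(is_stdlib_module("os.path"))    # True  ('os' is in the standard library list)
print(is_stdlib_module("torch.nn"))   # False ('torch' is not)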
+""" + +import sys + + +def is_stdlib_module(module: str) -> bool: + base_module = module.partition(".")[0] + return base_module in _get_stdlib_modules() + + +def _get_stdlib_modules(): + if sys.version_info.major == 3: + if sys.version_info.minor == 8: + return stdlib3_8 + if sys.version_info.minor == 9: + return stdlib3_9 + if sys.version_info.minor >= 10: + return sys.stdlib_module_names # type: ignore[attr-defined] + elif sys.version_info.major > 3: + return sys.stdlib_module_names # type: ignore[attr-defined] + + raise RuntimeError(f"Unsupported Python version: {sys.version_info}") + + +stdlib3_8 = { + "_dummy_thread", + "_thread", + "abc", + "aifc", + "argparse", + "array", + "ast", + "asynchat", + "asyncio", + "asyncore", + "atexit", + "audioop", + "base64", + "bdb", + "binascii", + "binhex", + "bisect", + "builtins", + "bz2", + "cProfile", + "calendar", + "cgi", + "cgitb", + "chunk", + "cmath", + "cmd", + "code", + "codecs", + "codeop", + "collections", + "colorsys", + "compileall", + "concurrent", + "configparser", + "contextlib", + "contextvars", + "copy", + "copyreg", + "crypt", + "csv", + "ctypes", + "curses", + "dataclasses", + "datetime", + "dbm", + "decimal", + "difflib", + "dis", + "distutils", + "doctest", + "dummy_threading", + "email", + "encodings", + "ensurepip", + "enum", + "errno", + "faulthandler", + "fcntl", + "filecmp", + "fileinput", + "fnmatch", + "formatter", + "fractions", + "ftplib", + "functools", + "gc", + "getopt", + "getpass", + "gettext", + "glob", + "grp", + "gzip", + "hashlib", + "heapq", + "hmac", + "html", + "http", + "imaplib", + "imghdr", + "imp", + "importlib", + "inspect", + "io", + "ipaddress", + "itertools", + "json", + "keyword", + "lib2to3", + "linecache", + "locale", + "logging", + "lzma", + "mailbox", + "mailcap", + "marshal", + "math", + "mimetypes", + "mmap", + "modulefinder", + "msilib", + "msvcrt", + "multiprocessing", + "netrc", + "nis", + "nntplib", + "ntpath", + "numbers", + "operator", + "optparse", + "os", + "ossaudiodev", + "parser", + "pathlib", + "pdb", + "pickle", + "pickletools", + "pipes", + "pkgutil", + "platform", + "plistlib", + "poplib", + "posix", + "posixpath", + "pprint", + "profile", + "pstats", + "pty", + "pwd", + "py_compile", + "pyclbr", + "pydoc", + "queue", + "quopri", + "random", + "re", + "readline", + "reprlib", + "resource", + "rlcompleter", + "runpy", + "sched", + "secrets", + "select", + "selectors", + "shelve", + "shlex", + "shutil", + "signal", + "site", + "smtpd", + "smtplib", + "sndhdr", + "socket", + "socketserver", + "spwd", + "sqlite3", + "sre", + "sre_compile", + "sre_constants", + "sre_parse", + "ssl", + "stat", + "statistics", + "string", + "stringprep", + "struct", + "subprocess", + "sunau", + "symbol", + "symtable", + "sys", + "sysconfig", + "syslog", + "tabnanny", + "tarfile", + "telnetlib", + "tempfile", + "termios", + "test", + "textwrap", + "threading", + "time", + "timeit", + "tkinter", + "token", + "tokenize", + "trace", + "traceback", + "tracemalloc", + "tty", + "turtle", + "turtledemo", + "types", + "typing", + "unicodedata", + "unittest", + "urllib", + "uu", + "uuid", + "venv", + "warnings", + "wave", + "weakref", + "webbrowser", + "winreg", + "winsound", + "wsgiref", + "xdrlib", + "xml", + "xmlrpc", + "zipapp", + "zipfile", + "zipimport", + "zlib", +} + +stdlib3_9 = { + "_thread", + "abc", + "aifc", + "argparse", + "array", + "ast", + "asynchat", + "asyncio", + "asyncore", + "atexit", + "audioop", + "base64", + "bdb", + "binascii", + "binhex", + "bisect", + "builtins", + "bz2", + 
"cProfile", + "calendar", + "cgi", + "cgitb", + "chunk", + "cmath", + "cmd", + "code", + "codecs", + "codeop", + "collections", + "colorsys", + "compileall", + "concurrent", + "configparser", + "contextlib", + "contextvars", + "copy", + "copyreg", + "crypt", + "csv", + "ctypes", + "curses", + "dataclasses", + "datetime", + "dbm", + "decimal", + "difflib", + "dis", + "distutils", + "doctest", + "email", + "encodings", + "ensurepip", + "enum", + "errno", + "faulthandler", + "fcntl", + "filecmp", + "fileinput", + "fnmatch", + "formatter", + "fractions", + "ftplib", + "functools", + "gc", + "getopt", + "getpass", + "gettext", + "glob", + "graphlib", + "grp", + "gzip", + "hashlib", + "heapq", + "hmac", + "html", + "http", + "imaplib", + "imghdr", + "imp", + "importlib", + "inspect", + "io", + "ipaddress", + "itertools", + "json", + "keyword", + "lib2to3", + "linecache", + "locale", + "logging", + "lzma", + "mailbox", + "mailcap", + "marshal", + "math", + "mimetypes", + "mmap", + "modulefinder", + "msilib", + "msvcrt", + "multiprocessing", + "netrc", + "nis", + "nntplib", + "ntpath", + "numbers", + "operator", + "optparse", + "os", + "ossaudiodev", + "parser", + "pathlib", + "pdb", + "pickle", + "pickletools", + "pipes", + "pkgutil", + "platform", + "plistlib", + "poplib", + "posix", + "posixpath", + "pprint", + "profile", + "pstats", + "pty", + "pwd", + "py_compile", + "pyclbr", + "pydoc", + "queue", + "quopri", + "random", + "re", + "readline", + "reprlib", + "resource", + "rlcompleter", + "runpy", + "sched", + "secrets", + "select", + "selectors", + "shelve", + "shlex", + "shutil", + "signal", + "site", + "smtpd", + "smtplib", + "sndhdr", + "socket", + "socketserver", + "spwd", + "sqlite3", + "sre", + "sre_compile", + "sre_constants", + "sre_parse", + "ssl", + "stat", + "statistics", + "string", + "stringprep", + "struct", + "subprocess", + "sunau", + "symbol", + "symtable", + "sys", + "sysconfig", + "syslog", + "tabnanny", + "tarfile", + "telnetlib", + "tempfile", + "termios", + "test", + "textwrap", + "threading", + "time", + "timeit", + "tkinter", + "token", + "tokenize", + "trace", + "traceback", + "tracemalloc", + "tty", + "turtle", + "turtledemo", + "types", + "typing", + "unicodedata", + "unittest", + "urllib", + "uu", + "uuid", + "venv", + "warnings", + "wave", + "weakref", + "webbrowser", + "winreg", + "winsound", + "wsgiref", + "xdrlib", + "xml", + "xmlrpc", + "zipapp", + "zipfile", + "zipimport", + "zlib", + "zoneinfo", +} diff --git a/venv/lib/python3.10/site-packages/torch/package/analyze/__init__.py b/venv/lib/python3.10/site-packages/torch/package/analyze/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8ef7a1716af241e21f97f593abde2a2b75960814 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/package/analyze/__init__.py @@ -0,0 +1,2 @@ +from .find_first_use_of_broken_modules import find_first_use_of_broken_modules +from .trace_dependencies import trace_dependencies diff --git a/venv/lib/python3.10/site-packages/torch/package/analyze/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/package/analyze/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5b4c20cba945171b2d057a5856fa90c73bae8e9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/package/analyze/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/package/analyze/__pycache__/find_first_use_of_broken_modules.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/torch/package/analyze/__pycache__/find_first_use_of_broken_modules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fabe08a98b78579c301ecd0dc015a77910aec043 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/package/analyze/__pycache__/find_first_use_of_broken_modules.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/package/analyze/__pycache__/is_from_package.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/package/analyze/__pycache__/is_from_package.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50c131956319accc4d86e62945115dc14db230ca Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/package/analyze/__pycache__/is_from_package.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/package/analyze/__pycache__/trace_dependencies.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/package/analyze/__pycache__/trace_dependencies.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1bddcd7ed3361fb062b8f02e80118626e759752 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/package/analyze/__pycache__/trace_dependencies.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/package/analyze/find_first_use_of_broken_modules.py b/venv/lib/python3.10/site-packages/torch/package/analyze/find_first_use_of_broken_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..1910afdd98e34471325c087eacacfd05e98c3df1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/package/analyze/find_first_use_of_broken_modules.py @@ -0,0 +1,31 @@ +from typing import Dict, List + +from ..package_exporter import PackagingError + +__all__ = ["find_first_use_of_broken_modules"] + + +def find_first_use_of_broken_modules(exc: PackagingError) -> Dict[str, List[str]]: + """ + Find all broken modules in a PackagingError, and for each one, return the + dependency path in which the module was first encountered. + + E.g. broken module m.n.o was added to a dependency graph while processing a.b.c, + then re-encountered while processing d.e.f. This method would return + {'m.n.o': ['a', 'b', 'c']} + + Args: + exc: a PackagingError + + Returns: A dict from broken module names to lists of module names in the path. + """ + + assert isinstance(exc, PackagingError), "exception must be a PackagingError" + uses = {} + broken_module_names = [ + m for m, attr in exc.dependency_graph.nodes.items() if attr.get("error", False) + ] + for module_name in broken_module_names: + path = exc.dependency_graph.first_path(module_name) + uses[module_name] = path + return uses diff --git a/venv/lib/python3.10/site-packages/torch/package/analyze/is_from_package.py b/venv/lib/python3.10/site-packages/torch/package/analyze/is_from_package.py new file mode 100644 index 0000000000000000000000000000000000000000..82ff5896b6ffcc2dcb7b15dc169729aceb8b1d75 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/package/analyze/is_from_package.py @@ -0,0 +1,16 @@ +from types import ModuleType +from typing import Any + +from .._mangling import is_mangled + + +def is_from_package(obj: Any) -> bool: + """ + Return whether an object was loaded from a package. + + Note: packaged objects from externed modules will return ``False``. 
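A sketch of how find_first_use_of_broken_modules above might be used once a PackagingError has been caught during export; the exception object err is hypothetical.

# err: a PackagingError caught from a failed PackageExporter run (hypothetical)
for broken, path in find_first_use_of_broken_modules(err).items():
    print(f"{broken}: first reached via {' -> '.join(path)}")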
+ """ + if type(obj) == ModuleType: + return is_mangled(obj.__name__) + else: + return is_mangled(type(obj).__module__) diff --git a/venv/lib/python3.10/site-packages/torch/package/analyze/trace_dependencies.py b/venv/lib/python3.10/site-packages/torch/package/analyze/trace_dependencies.py new file mode 100644 index 0000000000000000000000000000000000000000..9f882fb33481e3a81c6afa061adf507d79c92628 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/package/analyze/trace_dependencies.py @@ -0,0 +1,62 @@ +import sys +from typing import Any, Callable, Iterable, List, Tuple + +__all__ = ["trace_dependencies"] + + +def trace_dependencies( + callable: Callable[[Any], Any], inputs: Iterable[Tuple[Any, ...]] +) -> List[str]: + """Trace the execution of a callable in order to determine which modules it uses. + + Args: + callable: The callable to execute and trace. + inputs: The input to use during tracing. The modules used by 'callable' when invoked by each set of inputs + are union-ed to determine all modules used by the callable for the purpooses of packaging. + + Returns: A list of the names of all modules used during callable execution. + """ + modules_used = set() + + def record_used_modules(frame, event, arg): + # If the event being profiled is not a Python function + # call, there is nothing to do. + if event != "call": + return + + # This is the name of the function that was called. + name = frame.f_code.co_name + module = None + + # Try to determine the name of the module that the function + # is in: + # 1) Check the global namespace of the frame. + # 2) Check the local namespace of the frame. + # 3) To handle class instance method calls, check + # the attribute named 'name' of the object + # in the local namespace corresponding to "self". + if name in frame.f_globals: + module = frame.f_globals[name].__module__ + elif name in frame.f_locals: + module = frame.f_locals[name].__module__ + elif "self" in frame.f_locals: + method = getattr(frame.f_locals["self"], name, None) + module = method.__module__ if method else None + + # If a module was found, add it to the set of used modules. + if module: + modules_used.add(module) + + try: + # Attach record_used_modules as the profiler function. + sys.setprofile(record_used_modules) + + # Execute the callable with all inputs. + for inp in inputs: + callable(*inp) + + finally: + # Detach the profiler function. + sys.setprofile(None) + + return list(modules_used) diff --git a/venv/lib/python3.10/site-packages/torch/package/file_structure_representation.py b/venv/lib/python3.10/site-packages/torch/package/file_structure_representation.py new file mode 100644 index 0000000000000000000000000000000000000000..cc5f055c1a20ef14a47e6a91127931b2a6ccebfe --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/package/file_structure_representation.py @@ -0,0 +1,133 @@ +from typing import Dict, List + +from .glob_group import GlobGroup, GlobPattern + +__all__ = ["Directory"] + + +class Directory: + """A file structure representation. Organized as Directory nodes that have lists of + their Directory children. Directories for a package are created by calling + :meth:`PackageImporter.file_structure`.""" + + def __init__(self, name: str, is_dir: bool): + self.name = name + self.is_dir = is_dir + self.children: Dict[str, Directory] = {} + + def _get_dir(self, dirs: List[str]) -> "Directory": + """Builds path of Directories if not yet built and returns last directory + in list. 
+ + Args: + dirs (List[str]): List of directory names that are treated like a path. + + Returns: + :class:`Directory`: The last Directory specified in the dirs list. + """ + if len(dirs) == 0: + return self + dir_name = dirs[0] + if dir_name not in self.children: + self.children[dir_name] = Directory(dir_name, True) + return self.children[dir_name]._get_dir(dirs[1:]) + + def _add_file(self, file_path: str): + """Adds a file to a Directory. + + Args: + file_path (str): Path of file to add. Last element is added as a file while + other paths items are added as directories. + """ + *dirs, file = file_path.split("/") + dir = self._get_dir(dirs) + dir.children[file] = Directory(file, False) + + def has_file(self, filename: str) -> bool: + """Checks if a file is present in a :class:`Directory`. + + Args: + filename (str): Path of file to search for. + Returns: + bool: If a :class:`Directory` contains the specified file. + """ + lineage = filename.split("/", maxsplit=1) + child = lineage[0] + grandchildren = lineage[1] if len(lineage) > 1 else None + if child in self.children.keys(): + if grandchildren is None: + return True + else: + return self.children[child].has_file(grandchildren) + return False + + def __str__(self): + str_list: List[str] = [] + self._stringify_tree(str_list) + return "".join(str_list) + + def _stringify_tree( + self, str_list: List[str], preamble: str = "", dir_ptr: str = "─── " + ): + """Recursive method to generate print-friendly version of a Directory.""" + space = " " + branch = "│ " + tee = "├── " + last = "└── " + + # add this directory's representation + str_list.append(f"{preamble}{dir_ptr}{self.name}\n") + + # add directory's children representations + if dir_ptr == tee: + preamble = preamble + branch + else: + preamble = preamble + space + + file_keys: List[str] = [] + dir_keys: List[str] = [] + for key, val in self.children.items(): + if val.is_dir: + dir_keys.append(key) + else: + file_keys.append(key) + + for index, key in enumerate(sorted(dir_keys)): + if (index == len(dir_keys) - 1) and len(file_keys) == 0: + self.children[key]._stringify_tree(str_list, preamble, last) + else: + self.children[key]._stringify_tree(str_list, preamble, tee) + for index, file in enumerate(sorted(file_keys)): + pointer = last if (index == len(file_keys) - 1) else tee + str_list.append(f"{preamble}{pointer}{file}\n") + + +def _create_directory_from_file_list( + filename: str, + file_list: List[str], + include: "GlobPattern" = "**", + exclude: "GlobPattern" = (), +) -> Directory: + """Return a :class:`Directory` file structure representation created from a list of files. + + Args: + filename (str): The name given to the top-level directory that will be the + relative root for all file paths found in the file_list. + + file_list (List[str]): List of files to add to the top-level directory. + + include (Union[List[str], str]): An optional pattern that limits what is included from the file_list to + files whose name matches the pattern. + + exclude (Union[List[str], str]): An optional pattern that excludes files whose name match the pattern. + + Returns: + :class:`Directory`: a :class:`Directory` file structure representation created from a list of files. 
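A short sketch of the Directory API shown above, using hypothetical file paths:

d = Directory("my_package", is_dir=True)
d._add_file("models/net.py")
d._add_file("models/weights.bin")

print(d.has_file("models/net.py"))      # True
print(d.has_file("models/missing.py"))  # False
print(d)                                # renders the tree using the connectors defined above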
+ """ + glob_pattern = GlobGroup(include, exclude=exclude, separator="/") + + top_dir = Directory(filename, True) + for file in file_list: + if glob_pattern.matches(file): + top_dir._add_file(file) + return top_dir diff --git a/venv/lib/python3.10/site-packages/torch/package/find_file_dependencies.py b/venv/lib/python3.10/site-packages/torch/package/find_file_dependencies.py new file mode 100644 index 0000000000000000000000000000000000000000..af8cd9fec84deb31b853045ecd077d58d45384fb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/package/find_file_dependencies.py @@ -0,0 +1,95 @@ +import ast +from typing import List, Optional, Tuple + +from ._importlib import _resolve_name + + +class _ExtractModuleReferences(ast.NodeVisitor): + """ + Extract the list of global variables a block of code will read and write + """ + + @classmethod + def run(cls, src: str, package: str) -> List[Tuple[str, Optional[str]]]: + visitor = cls(package) + tree = ast.parse(src) + visitor.visit(tree) + return list(visitor.references.keys()) + + def __init__(self, package): + super().__init__() + self.package = package + self.references = {} + + def _absmodule(self, module_name: str, level: int) -> str: + if level > 0: + return _resolve_name(module_name, self.package, level) + return module_name + + def visit_Import(self, node): + for alias in node.names: + self.references[(alias.name, None)] = True + + def visit_ImportFrom(self, node): + name = self._absmodule(node.module, 0 if node.level is None else node.level) + for alias in node.names: + # from my_package import foo + # foo may be a module, so we have to add it to the list of + # potential references, if import of it fails, we will ignore it + if alias.name != "*": + self.references[(name, alias.name)] = True + else: + self.references[(name, None)] = True + + def _grab_node_int(self, node): + return node.value + + def _grab_node_str(self, node): + return node.value + + def visit_Call(self, node): + # __import__ calls aren't routed to the visit_Import/From nodes + if hasattr(node.func, "id") and node.func.id == "__import__": + try: + name = self._grab_node_str(node.args[0]) + fromlist = [] + level = 0 + if len(node.args) > 3: + for v in node.args[3].elts: + fromlist.append(self._grab_node_str(v)) + elif hasattr(node, "keywords"): + for keyword in node.keywords: + if keyword.arg == "fromlist": + for v in keyword.value.elts: + fromlist.append(self._grab_node_str(v)) + if len(node.args) > 4: + level = self._grab_node_int(node.args[4]) + elif hasattr(node, "keywords"): + for keyword in node.keywords: + if keyword.arg == "level": + level = self._grab_node_int(keyword.value) + if fromlist == []: + # the top-level package (the name up till the first dot) is returned + # when the fromlist argument is empty in normal import system, + # we need to include top level package to match this behavior and last + # level package to capture the intended dependency of user + self.references[(name, None)] = True + top_name = name.rsplit(".", maxsplit=1)[0] + if top_name != name: + top_name = self._absmodule(top_name, level) + self.references[(top_name, None)] = True + else: + name = self._absmodule(name, level) + for alias in fromlist: + # fromlist args may be submodules, so we have to add the fromlist args + # to the list of potential references. 
If import of an arg fails we + # will ignore it, similar to visit_ImportFrom + if alias != "*": + self.references[(name, alias)] = True + else: + self.references[(name, None)] = True + except Exception as e: + return + + +find_files_source_depends_on = _ExtractModuleReferences.run diff --git a/venv/lib/python3.10/site-packages/torch/package/glob_group.py b/venv/lib/python3.10/site-packages/torch/package/glob_group.py new file mode 100644 index 0000000000000000000000000000000000000000..a8434788d016fd64cb03e1cf5cdebea5d65d6a59 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/package/glob_group.py @@ -0,0 +1,82 @@ +import re +from typing import Iterable, Union + +GlobPattern = Union[str, Iterable[str]] + + +class GlobGroup: + """A set of patterns that candidate strings will be matched against. + + A candidate is composed of a list of segments separated by ``separator``, e.g. "foo.bar.baz". + + A pattern contains one or more segments. Segments can be: + - A literal string (e.g. "foo"), which matches exactly. + - A string containing a wildcard (e.g. "torch*", or "foo*baz*"). The wildcard matches + any string, including the empty string. + - A double wildcard ("**"). This matches against zero or more complete segments. + + Examples: + ``torch.**``: matches ``torch`` and all its submodules, e.g. ``torch.nn`` and ``torch.nn.functional``. + ``torch.*``: matches ``torch.nn`` or ``torch.functional``, but not ``torch.nn.functional``. + ``torch*.**``: matches ``torch``, ``torchvision``, and all their submodules. + + A candidates will match the ``GlobGroup`` if it matches any of the ``include`` patterns and + none of the ``exclude`` patterns. + + Args: + include (Union[str, Iterable[str]]): A string or list of strings, + each representing a pattern to be matched against. A candidate + will match if it matches *any* include pattern + exclude (Union[str, Iterable[str]]): A string or list of strings, + each representing a pattern to be matched against. A candidate + will be excluded from matching if it matches *any* exclude pattern. + separator (str): A string that delimits segments in candidates and + patterns. By default this is "." which corresponds to how modules are + named in Python. Another common value for this is "/", which is + the Unix path separator. + """ + + def __init__( + self, include: GlobPattern, *, exclude: GlobPattern = (), separator: str = "." + ): + self._dbg = f"GlobGroup(include={include}, exclude={exclude})" + self.include = GlobGroup._glob_list(include, separator) + self.exclude = GlobGroup._glob_list(exclude, separator) + self.separator = separator + + def __str__(self): + return self._dbg + + def __repr__(self): + return self._dbg + + def matches(self, candidate: str) -> bool: + candidate = self.separator + candidate + return any(p.fullmatch(candidate) for p in self.include) and all( + not p.fullmatch(candidate) for p in self.exclude + ) + + @staticmethod + def _glob_list(elems: GlobPattern, separator: str = "."): + if isinstance(elems, str): + return [GlobGroup._glob_to_re(elems, separator)] + else: + return [GlobGroup._glob_to_re(e, separator) for e in elems] + + @staticmethod + def _glob_to_re(pattern: str, separator: str = "."): + # to avoid corner cases for the first component, we prefix the candidate string + # with '.' so `import torch` will regex against `.torch`, assuming '.' 
is the separator + def component_to_re(component): + if "**" in component: + if component == "**": + return "(" + re.escape(separator) + "[^" + separator + "]+)*" + else: + raise ValueError("** can only appear as an entire path segment") + else: + return re.escape(separator) + ("[^" + separator + "]*").join( + re.escape(x) for x in component.split("*") + ) + + result = "".join(component_to_re(c) for c in pattern.split(separator)) + return re.compile(result) diff --git a/venv/lib/python3.10/site-packages/torch/package/importer.py b/venv/lib/python3.10/site-packages/torch/package/importer.py new file mode 100644 index 0000000000000000000000000000000000000000..dd01d09209a8c69e8c7c03a724397c358a1c96ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/package/importer.py @@ -0,0 +1,237 @@ +import importlib +from abc import ABC, abstractmethod +from pickle import ( # type: ignore[attr-defined] # type: ignore[attr-defined] + _getattribute, + _Pickler, + whichmodule as _pickle_whichmodule, +) +from types import ModuleType +from typing import Any, Dict, List, Optional, Tuple + +from ._mangling import demangle, get_mangle_prefix, is_mangled + +__all__ = ["ObjNotFoundError", "ObjMismatchError", "Importer", "OrderedImporter"] + + +class ObjNotFoundError(Exception): + """Raised when an importer cannot find an object by searching for its name.""" + + pass + + +class ObjMismatchError(Exception): + """Raised when an importer found a different object with the same name as the user-provided one.""" + + pass + + +class Importer(ABC): + """Represents an environment to import modules from. + + By default, you can figure out what module an object belongs by checking + __module__ and importing the result using __import__ or importlib.import_module. + + torch.package introduces module importers other than the default one. + Each PackageImporter introduces a new namespace. Potentially a single + name (e.g. 'foo.bar') is present in multiple namespaces. + + It supports two main operations: + import_module: module_name -> module object + get_name: object -> (parent module name, name of obj within module) + + The guarantee is that following round-trip will succeed or throw an ObjNotFoundError/ObjMisMatchError. + module_name, obj_name = env.get_name(obj) + module = env.import_module(module_name) + obj2 = getattr(module, obj_name) + assert obj1 is obj2 + """ + + modules: Dict[str, ModuleType] + + @abstractmethod + def import_module(self, module_name: str) -> ModuleType: + """Import `module_name` from this environment. + + The contract is the same as for importlib.import_module. + """ + pass + + def get_name(self, obj: Any, name: Optional[str] = None) -> Tuple[str, str]: + """Given an object, return a name that can be used to retrieve the + object from this environment. + + Args: + obj: An object to get the module-environment-relative name for. + name: If set, use this name instead of looking up __name__ or __qualname__ on `obj`. + This is only here to match how Pickler handles __reduce__ functions that return a string, + don't use otherwise. + Returns: + A tuple (parent_module_name, attr_name) that can be used to retrieve `obj` from this environment. + Use it like: + mod = importer.import_module(parent_module_name) + obj = getattr(mod, attr_name) + + Raises: + ObjNotFoundError: we couldn't retrieve `obj by name. + ObjMisMatchError: we found a different object with the same name as `obj`. 
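To make the pattern semantics concrete, a small sketch against the GlobGroup class added earlier in this diff; the module names are hypothetical:

g = GlobGroup("torch.**", exclude="torch.nn.**")
print(g.matches("torch"))                # True
print(g.matches("torch.fx"))             # True
print(g.matches("torch.nn.functional"))  # False (excluded)
print(g.matches("numpy"))                # False (not included)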
+ """ + if name is None and obj and _Pickler.dispatch.get(type(obj)) is None: + # Honor the string return variant of __reduce__, which will give us + # a global name to search for in this environment. + # TODO: I guess we should do copyreg too? + reduce = getattr(obj, "__reduce__", None) + if reduce is not None: + try: + rv = reduce() + if isinstance(rv, str): + name = rv + except Exception: + pass + if name is None: + name = getattr(obj, "__qualname__", None) + if name is None: + name = obj.__name__ + + orig_module_name = self.whichmodule(obj, name) + # Demangle the module name before importing. If this obj came out of a + # PackageImporter, `__module__` will be mangled. See mangling.md for + # details. + module_name = demangle(orig_module_name) + + # Check that this name will indeed return the correct object + try: + module = self.import_module(module_name) + obj2, _ = _getattribute(module, name) + except (ImportError, KeyError, AttributeError): + raise ObjNotFoundError( + f"{obj} was not found as {module_name}.{name}" + ) from None + + if obj is obj2: + return module_name, name + + def get_obj_info(obj): + assert name is not None + module_name = self.whichmodule(obj, name) + is_mangled_ = is_mangled(module_name) + location = ( + get_mangle_prefix(module_name) + if is_mangled_ + else "the current Python environment" + ) + importer_name = ( + f"the importer for {get_mangle_prefix(module_name)}" + if is_mangled_ + else "'sys_importer'" + ) + return module_name, location, importer_name + + obj_module_name, obj_location, obj_importer_name = get_obj_info(obj) + obj2_module_name, obj2_location, obj2_importer_name = get_obj_info(obj2) + msg = ( + f"\n\nThe object provided is from '{obj_module_name}', " + f"which is coming from {obj_location}." + f"\nHowever, when we import '{obj2_module_name}', it's coming from {obj2_location}." + "\nTo fix this, make sure this 'PackageExporter's importer lists " + f"{obj_importer_name} before {obj2_importer_name}." + ) + raise ObjMismatchError(msg) + + def whichmodule(self, obj: Any, name: str) -> str: + """Find the module name an object belongs to. + + This should be considered internal for end-users, but developers of + an importer can override it to customize the behavior. + + Taken from pickle.py, but modified to exclude the search into sys.modules + """ + module_name = getattr(obj, "__module__", None) + if module_name is not None: + return module_name + + # Protect the iteration by using a list copy of self.modules against dynamic + # modules that trigger imports of other modules upon calls to getattr. + for module_name, module in self.modules.copy().items(): + if ( + module_name == "__main__" + or module_name == "__mp_main__" # bpo-42406 + or module is None + ): + continue + try: + if _getattribute(module, name)[0] is obj: + return module_name + except AttributeError: + pass + + return "__main__" + + +class _SysImporter(Importer): + """An importer that implements the default behavior of Python.""" + + def import_module(self, module_name: str): + return importlib.import_module(module_name) + + def whichmodule(self, obj: Any, name: str) -> str: + return _pickle_whichmodule(obj, name) + + +sys_importer = _SysImporter() + + +class OrderedImporter(Importer): + """A compound importer that takes a list of importers and tries them one at a time. + + The first importer in the list that returns a result "wins". 
+ """ + + def __init__(self, *args): + self._importers: List[Importer] = list(args) + + def _is_torchpackage_dummy(self, module): + """Returns true iff this module is an empty PackageNode in a torch.package. + + If you intern `a.b` but never use `a` in your code, then `a` will be an + empty module with no source. This can break cases where we are trying to + re-package an object after adding a real dependency on `a`, since + OrderedImportere will resolve `a` to the dummy package and stop there. + + See: https://github.com/pytorch/pytorch/pull/71520#issuecomment-1029603769 + """ + if not getattr(module, "__torch_package__", False): + return False + if not hasattr(module, "__path__"): + return False + if not hasattr(module, "__file__"): + return True + return module.__file__ is None + + def import_module(self, module_name: str) -> ModuleType: + last_err = None + for importer in self._importers: + if not isinstance(importer, Importer): + raise TypeError( + f"{importer} is not a Importer. " + "All importers in OrderedImporter must inherit from Importer." + ) + try: + module = importer.import_module(module_name) + if self._is_torchpackage_dummy(module): + continue + return module + except ModuleNotFoundError as err: + last_err = err + + if last_err is not None: + raise last_err + else: + raise ModuleNotFoundError(module_name) + + def whichmodule(self, obj: Any, name: str) -> str: + for importer in self._importers: + module_name = importer.whichmodule(obj, name) + if module_name != "__main__": + return module_name + + return "__main__" diff --git a/venv/lib/python3.10/site-packages/torch/package/package_exporter.py b/venv/lib/python3.10/site-packages/torch/package/package_exporter.py new file mode 100644 index 0000000000000000000000000000000000000000..37313487d70e9f007d887acc65695b9d52ead2b4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/package/package_exporter.py @@ -0,0 +1,1199 @@ +import collections +import importlib.machinery +import io +import linecache +import pickletools +import platform +import types +from collections import defaultdict, OrderedDict +from dataclasses import dataclass +from enum import Enum +from importlib.machinery import SourceFileLoader +from pathlib import Path +from typing import ( + Any, + BinaryIO, + Callable, + cast, + DefaultDict, + Dict, + List, + Optional, + Sequence, + Set, + Union, +) + +import torch +from torch.serialization import location_tag, normalize_storage_type +from torch.types import Storage +from torch.utils.hooks import RemovableHandle + +from ._digraph import DiGraph +from ._importlib import _normalize_path +from ._mangling import demangle, is_mangled +from ._package_pickler import create_pickler +from ._stdlib import is_stdlib_module +from .find_file_dependencies import find_files_source_depends_on +from .glob_group import GlobGroup, GlobPattern +from .importer import Importer, OrderedImporter, sys_importer + +__all__ = [ + "PackagingErrorReason", + "EmptyMatchError", + "PackagingError", + "PackageExporter", +] + +_gate_torchscript_serialization = True + +ActionHook = Callable[["PackageExporter", str], None] + + +class _ModuleProviderAction(Enum): + """Represents one of the actions that :class:`PackageExporter` can take on a module. + + See :meth:`PackageExporter.extern` and friends for a description of what the actions do. + """ + + INTERN = 1 + EXTERN = 2 + MOCK = 3 + DENY = 4 + # Special case: when a module is mocked, PackageExporter writes out a + # `_mock` module that implements our mocking stubs. 
If we re-package code, + # we may encounter a `_mock` module from the original package. If we do, + # just ignore it and write a `_mock` module once. + REPACKAGED_MOCK_MODULE = 5 + # Special case: PackageImporter adds a fake module + # (`torch_package_importer`) that allows packaged code to access it. Don't + # re-export this. + SKIP = 6 + + +class PackagingErrorReason(Enum): + """Listing of different reasons a dependency may fail to package. + + This enum is used to provide good error messages when + :class:`PackagingError` is raised. + """ + + def __repr__(self): + return f"<{self.__class__.__name__}.{self.name}>" + + IS_EXTENSION_MODULE = ( + "Module is a C extension module. torch.package supports Python modules only." + ) + NO_DUNDER_FILE = "Module had no __file__ defined." + SOURCE_FILE_NOT_FOUND = ( + "Module had a __file__, but we could not find it in your filesystem." + ) + DEPENDENCY_RESOLUTION_FAILED = "Dependency resolution failed." + NO_ACTION = ( + "Module did not match against any action pattern. Extern, mock, or intern it." + ) + DENIED = "Module was denied by a pattern." + MOCKED_BUT_STILL_USED = ( + "Module was mocked out, but is still being used in the package. " + "Please intern or extern the mocked modules if objects are supposed to be in " + "the package." + ) + + +@dataclass +class _PatternInfo: + """Holds :class:`PackageExporter`-specific info about how to execute matches against""" + + # What action to take on a module that matches this pattern. + action: _ModuleProviderAction + # The value of `allow_empty` the user gave when specifying the pattern. + allow_empty: bool + # Whether this pattern has been matched during packaging. + was_matched: bool + + def __init__(self, action, allow_empty): + self.action = action + self.allow_empty = allow_empty + self.was_matched = False + + +class EmptyMatchError(Exception): + """This is an exception that is thrown when a mock or extern is marked as + ``allow_empty=False``, and is not matched with any module during packaging. + """ + + pass + + +class PackagingError(Exception): + """This exception is raised when there is an issue with exporting a package. + ``PackageExporter`` will attempt to gather up all the errors and present + them to you at once. + """ + + def __init__(self, dependency_graph: DiGraph, debug=False): + # Group errors by reason. + broken: Dict[PackagingErrorReason, List[str]] = defaultdict(list) + for module_name, attrs in dependency_graph.nodes.items(): + error = attrs.get("error") + if error is None: + continue + if error == PackagingErrorReason.NO_ACTION: + assert "action" not in attrs + broken[error].append(module_name) + + message = io.StringIO() + message.write("\n") + + for reason, module_names in broken.items(): + message.write(f"* {reason.value}\n") + for module_name in module_names: + message.write(f" {module_name}\n") + + # Print additional context if it's provided. + error_context = dependency_graph.nodes[module_name].get("error_context") + if error_context is not None: + message.write(f" Context: {error_context}\n") + if module_name in _DISALLOWED_MODULES: + message.write( + " Note: While we usually use modules in the python standard library " + f"from the local environment, `{module_name}` has a lot of system " + "level access and therefore can pose a security risk. We heavily " + f"recommend removing `{module_name}` from your packaged code. 
However, if that " + "is not possible, add it to the extern list by calling " + f'PackageExporter.extern("`{module_name}`")\n' + ) + if debug: + module_path = dependency_graph.first_path(module_name) + message.write( + f" A path to {module_name}: {' -> '.join(module_path)}" + ) + if not debug: + message.write("\n") + message.write( + "Set debug=True when invoking PackageExporter for a visualization of where " + "broken modules are coming from!\n" + ) + # Save the dependency graph so that tooling can get at it. + self.dependency_graph = dependency_graph + super().__init__(message.getvalue()) + + +class PackageExporter: + """Exporters allow you to write packages of code, pickled Python data, and + arbitrary binary and text resources into a self-contained package. + + Imports can load this code in a hermetic way, such that code is loaded + from the package rather than the normal Python import system. This allows + for the packaging of PyTorch model code and data so that it can be run + on a server or used in the future for transfer learning. + + The code contained in packages is copied file-by-file from the original + source when it is created, and the file format is a specially organized + zip file. Future users of the package can unzip the package, and edit the code + in order to perform custom modifications to it. + + The importer for packages ensures that code in the module can only be loaded from + within the package, except for modules explicitly listed as external using :meth:`extern`. + The file ``extern_modules`` in the zip archive lists all the modules that a package externally depends on. + This prevents "implicit" dependencies where the package runs locally because it is importing + a locally-installed package, but then fails when the package is copied to another machine. + + When source code is added to the package, the exporter can optionally scan it + for further code dependencies (``dependencies=True``). It looks for import statements, + resolves relative references to qualified module names, and performs an action specified by the user + (See: :meth:`extern`, :meth:`mock`, and :meth:`intern`). + """ + + """A importer that will be searched in order to find the modules referenced by other modules or by + pickled objects. The default module environment just uses sys_importer, which searches the Python environment. + """ + importer: Importer + + def __init__( + self, + f: Union[str, Path, BinaryIO], + importer: Union[Importer, Sequence[Importer]] = sys_importer, + debug: bool = False, + ): + """ + Create an exporter. + + Args: + f: The location to export to. Can be a ``string``/``Path`` object containing a filename + or a binary I/O object. + importer: If a single Importer is passed, use that to search for modules. + If a sequence of importers are passed, an ``OrderedImporter`` will be constructed out of them. + debug: If set to True, add path of broken modules to PackagingErrors. + """ + torch._C._log_api_usage_once("torch.package.PackageExporter") + self.debug = debug + if isinstance(f, (Path, str)): + f = str(f) + self.buffer: Optional[BinaryIO] = None + else: # is a byte buffer + self.buffer = f + + self.zip_file = torch._C.PyTorchFileWriter(f) + self.zip_file.set_min_version(6) + self._written_files: Set[str] = set() + + self.serialized_reduces: Dict[int, Any] = {} + + # A graph tracking all the modules and pickle objects added to this + # package and the dependencies between them. 
+ # - Each node is a module name (or a pickle name that looks like '') + # - Each directed edge (u, v) means u depends on v. + # - Nodes may contain metadata that describe how to write the thing to the zipfile. + self.dependency_graph = DiGraph() + self.script_module_serializer = torch._C.ScriptModuleSerializer(self.zip_file) + self.storage_context = self.script_module_serializer.storage_context() + + # These are OrderedDicts for compatibility with RemovableHandle. + # Generic OrderedDict type annotations are not present until 3.7. + # The real type signature is OrderedDict[int, Callable[[PackageExporter, str], None]] + self._extern_hooks: OrderedDict = OrderedDict() + self._mock_hooks: OrderedDict = OrderedDict() + self._intern_hooks: OrderedDict = OrderedDict() + + if isinstance(importer, Importer): + self.importer = importer + else: + if not isinstance(importer, collections.abc.Sequence): + raise TypeError( + "importer arg should be an Importer or a sequence of Importers, " + f"got {type(importer)} instead." + ) + self.importer = OrderedImporter(*importer) + + self.patterns: Dict[GlobGroup, _PatternInfo] = {} + self._unique_id = 0 + + def save_source_file( + self, module_name: str, file_or_directory: str, dependencies=True + ): + """Adds the local file system ``file_or_directory`` to the source package to provide the code + for ``module_name``. + + Args: + module_name (str): e.g. ``"my_package.my_subpackage"``, code will be saved to provide code for this package. + file_or_directory (str): the path to a file or directory of code. When a directory, all python files in the directory + are recursively copied using :meth:`save_source_file`. If a file is named ``"/__init__.py"`` the code is treated + as a package. + dependencies (bool, optional): If ``True``, we scan the source for dependencies. + """ + path = Path(file_or_directory) + if path.is_dir(): + to_save = [] # list of tuples with arguments to save_source_string + module_path = module_name.replace(".", "/") + for filename in path.glob("**/*.py"): + relative_path = filename.relative_to(path).as_posix() + archivename = module_path + "/" + relative_path + submodule_name = None + if filename.name == "__init__.py": + submodule_name = archivename[: -len("/__init__.py")].replace( + "/", "." + ) + is_package = True + else: + submodule_name = archivename[: -len(".py")].replace("/", ".") + is_package = False + + # we delay the call to save_source_string so that we record all the source files + # being provided by this directory structure _before_ attempting to resolve the dependencies + # on the source. This makes sure we don't try to copy over modules that will just get + # overwritten by this directory blob + to_save.append( + ( + submodule_name, + _read_file(str(filename)), + is_package, + dependencies, + ) + ) + + for item in to_save: + self.save_source_string(*item) + else: + is_package = path.name == "__init__.py" + self.save_source_string( + module_name, + _read_file(file_or_directory), + is_package, + dependencies, + ) + + def get_unique_id(self) -> str: + """Get an id. This id is guaranteed to only be handed out once for this package.""" + ret = str(self._unique_id) + self._unique_id += 1 + return ret + + def _get_dependencies( + self, src: str, module_name: str, is_package: bool + ) -> List[str]: + """Return all modules that this source code depends on. + + Dependencies are found by scanning the source code for import-like statements. + + Arguments: + src: The Python source code to analyze for dependencies. 
+ module_name: The name of the module that ``src`` corresponds to. + is_package: Whether this module should be treated as a package. + See :py:meth:`save_source_string` for more info. + + Returns: + A list containing modules detected as direct dependencies in + ``src``. The items in the list are guaranteed to be unique. + """ + package_name = ( + module_name if is_package else module_name.rsplit(".", maxsplit=1)[0] + ) + try: + dep_pairs = find_files_source_depends_on(src, package_name) + except Exception as e: + self.dependency_graph.add_node( + module_name, + error=PackagingErrorReason.DEPENDENCY_RESOLUTION_FAILED, + error_context=str(e), + ) + return [] + + # Use a dict to get uniquing but also deterministic order + dependencies = {} + for dep_module_name, dep_module_obj in dep_pairs: + # handle the case where someone did something like `from pack import sub` + # where `sub` is a submodule. In this case we don't have to save pack, just sub. + # this ensures we don't pick up additional dependencies on pack. + # However, in the case where `sub` is not a submodule but an object, then we do have + # to save pack. + if dep_module_obj is not None: + possible_submodule = f"{dep_module_name}.{dep_module_obj}" + if self._module_exists(possible_submodule): + dependencies[possible_submodule] = True + # we don't need to save `pack` + continue + if self._module_exists(dep_module_name): + dependencies[dep_module_name] = True + + return list(dependencies.keys()) + + def save_source_string( + self, + module_name: str, + src: str, + is_package: bool = False, + dependencies: bool = True, + ): + """Adds ``src`` as the source code for ``module_name`` in the exported package. + + Args: + module_name (str): e.g. ``my_package.my_subpackage``, code will be saved to provide code for this package. + src (str): The Python source code to save for this package. + is_package (bool, optional): If ``True``, this module is treated as a package. Packages are allowed to have submodules + (e.g. ``my_package.my_subpackage.my_subsubpackage``), and resources can be saved inside them. Defaults to ``False``. + dependencies (bool, optional): If ``True``, we scan the source for dependencies. + """ + self.dependency_graph.add_node( + module_name, + source=src, + is_package=is_package, + provided=True, + action=_ModuleProviderAction.INTERN, + ) + + if dependencies: + deps = self._get_dependencies(src, module_name, is_package) + + for dep in deps: + self.dependency_graph.add_edge(module_name, dep) + self.add_dependency(dep) + + def _write_source_string( + self, + module_name: str, + src: str, + is_package: bool = False, + ): + """Write ``src`` as the source code for ``module_name`` in the zip archive. + + Arguments are otherwise the same as for :meth:`save_source_string`. + """ + extension = "/__init__.py" if is_package else ".py" + filename = module_name.replace(".", "/") + extension + + self._write(filename, src) + + def _import_module(self, module_name: str): + try: + return self.importer.import_module(module_name) + except ModuleNotFoundError as e: + if not is_mangled(module_name): + raise + msg = ( + f"Module not found: '{module_name}'. 
Make sure the PackageImporter that " + "created this module is present in `self.importer`" + ) + raise ModuleNotFoundError(msg) from None + + def _module_exists(self, module_name: str) -> bool: + try: + self._import_module(module_name) + return True + except Exception: + return False + + def _get_source_of_module(self, module: types.ModuleType) -> Optional[str]: + filename = None + spec = getattr(module, "__spec__", None) + if spec is not None: + loader = getattr(spec, "loader", None) + if loader is not None and isinstance(loader, SourceFileLoader): + try: + filename = loader.get_filename(module.__name__) + except ImportError: + pass + if filename is None: + filename = getattr(module, "__file__", None) + if isinstance(filename, str) and filename.endswith(".py"): + return "".join(linecache.getlines(filename, module.__dict__)) + return None + + def add_dependency(self, module_name: str, dependencies=True): + """Given a module, add it to the dependency graph according to patterns + specified by the user. + """ + if ( + module_name in self.dependency_graph + and self.dependency_graph.nodes[module_name].get("provided") is True + ): + return + + # Special case: PackageImporter provides a special module called + # `torch_package_importer` that allows packaged modules to reference + # their PackageImporter. We don't want to re-export this. + if module_name == "torch_package_importer": + self.dependency_graph.add_node( + module_name, + action=_ModuleProviderAction.SKIP, + provided=True, + ) + return + + if module_name == "_mock": + self.dependency_graph.add_node( + module_name, + action=_ModuleProviderAction.REPACKAGED_MOCK_MODULE, + provided=True, + ) + return + + if self._can_implicitly_extern(module_name): + self.dependency_graph.add_node( + module_name, action=_ModuleProviderAction.EXTERN, provided=True + ) + return + + for pattern, pattern_info in self.patterns.items(): + if pattern.matches(module_name): + pattern_info.was_matched = True + self.dependency_graph.add_node( + module_name, action=pattern_info.action, provided=True + ) + + if pattern_info.action == _ModuleProviderAction.DENY: + # Requiring a denied module just adds an error to the graph. + self.dependency_graph.add_node( + module_name, error=PackagingErrorReason.DENIED + ) + + # If we are interning this module, we need to retrieve its + # dependencies and package those as well. + if pattern_info.action == _ModuleProviderAction.INTERN: + self._intern_module(module_name, dependencies) + return + + # No patterns have matched. Explicitly add this as an error. + self.dependency_graph.add_node( + module_name, error=PackagingErrorReason.NO_ACTION + ) + + def save_module(self, module_name: str, dependencies=True): + """Save the code for ``module`` into the package. Code for the module is resolved using the ``importers`` path to find the + module object, and then using its ``__file__`` attribute to find the source code. + + Args: + module_name (str): e.g. ``my_package.my_subpackage``, code will be saved to provide code + for this package. + dependencies (bool, optional): If ``True``, we scan the source for dependencies. + """ + if not isinstance(module_name, str): + raise TypeError( + "save_module() expects a string input, did you perhaps mean to pass `__name__`?" 
+ ) + + self._intern_module(module_name, dependencies) + + def _intern_module( + self, + module_name: str, + dependencies: bool, + ): + """Adds the module to the dependency graph as an interned module, + along with any metadata needed to write it out to the zipfile at serialization time. + """ + module_obj = self._import_module(module_name) + # Subtle: if the import above succeeded, either: + # 1. The module name is not mangled, and this was just a regular import, or + # 2. The module name is mangled, but one of the importers was able to + # recognize the mangling and import it. + # Either way, it is now safe to demangle this name so that we don't + # serialize the mangled version to the package. + module_name = demangle(module_name) + + # Find dependencies of this module and require them as well. + is_package = hasattr(module_obj, "__path__") + source = self._get_source_of_module(module_obj) + if source is None: + # Couldn't find a source! Add it to our dependency graph as broken + # and continue. + filename = getattr(module_obj, "__file__", None) + error_context = None + if filename is None: + packaging_error = PackagingErrorReason.NO_DUNDER_FILE + elif filename.endswith(tuple(importlib.machinery.EXTENSION_SUFFIXES)): + packaging_error = PackagingErrorReason.IS_EXTENSION_MODULE + else: + packaging_error = PackagingErrorReason.SOURCE_FILE_NOT_FOUND + error_context = f"filename: {filename}" + self.dependency_graph.add_node( + module_name, + action=_ModuleProviderAction.INTERN, + is_package=is_package, + error=packaging_error, + error_context=error_context, + provided=True, + ) + return + + self.dependency_graph.add_node( + module_name, + action=_ModuleProviderAction.INTERN, + is_package=is_package, + source=source, + provided=True, + ) + + if dependencies: + deps = self._get_dependencies(source, module_name, is_package) + for dep in deps: + self.dependency_graph.add_edge(module_name, dep) + self.add_dependency(dep) + + def save_pickle( + self, + package: str, + resource: str, + obj: Any, + dependencies: bool = True, + pickle_protocol: int = 3, + ): + """Save a python object to the archive using pickle. Equivalent to :func:`torch.save` but saving into + the archive rather than a stand-alone file. Standard pickle does not save the code, only the objects. + If ``dependencies`` is true, this method will also scan the pickled objects for which modules are required + to reconstruct them and save the relevant code. + + To be able to save an object where ``type(obj).__name__`` is ``my_module.MyObject``, + ``my_module.MyObject`` must resolve to the class of the object according to the ``importer`` order. When saving objects that + have previously been packaged, the importer's ``import_module`` method will need to be present in the ``importer`` list + for this to work. + + Args: + package (str): The name of module package this resource should go in (e.g. ``"my_package.my_subpackage"``). + resource (str): A unique name for the resource, used to identify it to load. + obj (Any): The object to save, must be picklable. + dependencies (bool, optional): If ``True``, we scan the source for dependencies. 
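+
+        Example (an illustrative sketch; ``my_package``, ``my_model``, and the
+        file name are hypothetical placeholders, not names defined by this module)::
+
+            with PackageExporter("package.pt") as exporter:
+                exporter.intern("my_package.**")
+                exporter.save_pickle("model", "model.pkl", my_model)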
+ """ + + assert (pickle_protocol == 4) or ( + pickle_protocol == 3 + ), "torch.package only supports pickle protocols 3 and 4" + + filename = self._filename(package, resource) + # Write the pickle data for `obj` + data_buf = io.BytesIO() + pickler = create_pickler(data_buf, self.importer, protocol=pickle_protocol) + pickler.persistent_id = self._persistent_id + pickler.dump(obj) + data_value = data_buf.getvalue() + mocked_modules = defaultdict(list) + name_in_dependency_graph = f"<{package}.{resource}>" + self.dependency_graph.add_node( + name_in_dependency_graph, + action=_ModuleProviderAction.INTERN, + provided=True, + is_pickle=True, + ) + + def _check_mocked_error(module: Optional[str], field: Optional[str]): + """ + checks if an object (field) comes from a mocked module and then adds + the pair to mocked_modules which contains mocked modules paired with their + list of mocked objects present in the pickle. + + We also hold the invariant that the first user defined rule that applies + to the module is the one we use. + """ + + assert isinstance(module, str) + assert isinstance(field, str) + if self._can_implicitly_extern(module): + return + for pattern, pattern_info in self.patterns.items(): + if pattern.matches(module): + if pattern_info.action == _ModuleProviderAction.MOCK: + mocked_modules[module].append(field) + return + + if dependencies: + all_dependencies = [] + module = None + field = None + memo: DefaultDict[int, str] = defaultdict(None) + memo_count = 0 + # pickletools.dis(data_value) + for opcode, arg, pos in pickletools.genops(data_value): + if pickle_protocol == 4: + if ( + opcode.name == "SHORT_BINUNICODE" + or opcode.name == "BINUNICODE" + or opcode.name == "BINUNICODE8" + ): + assert isinstance(arg, str) + module = field + field = arg + memo[memo_count] = arg + elif ( + opcode.name == "LONG_BINGET" + or opcode.name == "BINGET" + or opcode.name == "GET" + ): + assert isinstance(arg, int) + module = field + field = memo.get(arg, None) + elif opcode.name == "MEMOIZE": + memo_count += 1 + elif opcode.name == "STACK_GLOBAL": + if module is None: + # If not module was passed on in the entries preceeding this one, continue. + continue + assert isinstance(module, str) + if module not in all_dependencies: + all_dependencies.append(module) + _check_mocked_error(module, field) + elif ( + pickle_protocol == 3 and opcode.name == "GLOBAL" + ): # a global reference + assert isinstance(arg, str) + module, field = arg.split(" ") + if module not in all_dependencies: + all_dependencies.append(module) + _check_mocked_error(module, field) + for module_name in all_dependencies: + self.dependency_graph.add_edge(name_in_dependency_graph, module_name) + + """ If an object happens to come from a mocked module, then we collect these errors and spit them + out with the other errors found by package exporter. + """ + if module in mocked_modules: + assert isinstance(module, str) + fields = mocked_modules[module] + self.dependency_graph.add_node( + module_name, + action=_ModuleProviderAction.MOCK, + error=PackagingErrorReason.MOCKED_BUT_STILL_USED, + error_context=f"Object(s) '{fields}' from module `{module_name}` was mocked out during packaging " + f"but is being used in resource - `{resource}` in package `{package}`. ", + provided=True, + ) + else: + self.add_dependency(module_name) + + self._write(filename, data_value) + + def save_text(self, package: str, resource: str, text: str): + """Save text data to the package. 
+ + Args: + package (str): The name of module package this resource should go it (e.g. ``"my_package.my_subpackage"``). + resource (str): A unique name for the resource, used to identify it to load. + text (str): The contents to save. + """ + return self.save_binary(package, resource, text.encode("utf-8")) + + def save_binary(self, package, resource, binary: bytes): + """Save raw bytes to the package. + + Args: + package (str): The name of module package this resource should go it (e.g. ``"my_package.my_subpackage"``). + resource (str): A unique name for the resource, used to identify it to load. + binary (str): The data to save. + """ + filename = self._filename(package, resource) + self._write(filename, binary) + + def register_extern_hook(self, hook: ActionHook) -> RemovableHandle: + """Registers an extern hook on the exporter. + + The hook will be called each time a module matches against an :meth:`extern` pattern. + It should have the following signature:: + + hook(exporter: PackageExporter, module_name: str) -> None + + Hooks will be called in order of registration. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + A handle that can be used to remove the added hook by calling + ``handle.remove()``. + """ + handle = RemovableHandle(self._extern_hooks) + self._extern_hooks[handle.id] = hook + return handle + + def register_mock_hook(self, hook: ActionHook) -> RemovableHandle: + """Registers a mock hook on the exporter. + + The hook will be called each time a module matches against a :meth:`mock` pattern. + It should have the following signature:: + + hook(exporter: PackageExporter, module_name: str) -> None + + Hooks will be called in order of registration. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + A handle that can be used to remove the added hook by calling + ``handle.remove()``. + """ + handle = RemovableHandle(self._mock_hooks) + self._mock_hooks[handle.id] = hook + return handle + + def register_intern_hook(self, hook: ActionHook) -> RemovableHandle: + """Registers an intern hook on the exporter. + + The hook will be called each time a module matches against an :meth:`intern` pattern. + It should have the following signature:: + + hook(exporter: PackageExporter, module_name: str) -> None + + Hooks will be called in order of registration. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + A handle that can be used to remove the added hook by calling + ``handle.remove()``. + """ + handle = RemovableHandle(self._intern_hooks) + self._intern_hooks[handle.id] = hook + return handle + + def intern( + self, + include: "GlobPattern", + *, + exclude: "GlobPattern" = (), + allow_empty: bool = True, + ): + """Specify modules that should be packaged. A module must match some ``intern`` pattern in order to be + included in the package and have its dependencies processed recursively. + + Args: + include (Union[List[str], str]): A string e.g. "my_package.my_subpackage", or list of strings + for the names of the modules to be externed. This can also be a glob-style pattern, as described in :meth:`mock`. + + exclude (Union[List[str], str]): An optional pattern that excludes some patterns that match the include string. + + allow_empty (bool): An optional flag that specifies whether the intern modules specified by this call + to the ``intern`` method must be matched to some module during packaging. 
If an ``intern`` module glob + pattern is added with ``allow_empty=False``, and :meth:`close` is called (either explicitly or via ``__exit__``) + before any modules match that pattern, an exception is thrown. If ``allow_empty=True``, no such exception is thrown. + + """ + self.patterns[GlobGroup(include, exclude=exclude)] = _PatternInfo( + _ModuleProviderAction.INTERN, allow_empty + ) + + def mock( + self, + include: "GlobPattern", + *, + exclude: "GlobPattern" = (), + allow_empty: bool = True, + ): + """Replace some required modules with a mock implementation. Mocked modules will return a fake + object for any attribute accessed from it. Because we copy file-by-file, the dependency resolution will sometimes + find files that are imported by model files but whose functionality is never used + (e.g. custom serialization code or training helpers). + Use this function to mock this functionality out without having to modify the original code. + + Args: + include (Union[List[str], str]): A string e.g. ``"my_package.my_subpackage"``, or list of strings + for the names of the modules to be mocked out. Strings can also be a glob-style pattern + string that may match multiple modules. Any required dependencies that match this pattern + string will be mocked out automatically. + + Examples : + ``'torch.**'`` -- matches ``torch`` and all submodules of torch, e.g. ``'torch.nn'`` + and ``'torch.nn.functional'`` + + ``'torch.*'`` -- matches ``'torch.nn'`` or ``'torch.functional'``, but not + ``'torch.nn.functional'`` + + exclude (Union[List[str], str]): An optional pattern that excludes some patterns that match the include string. + e.g. ``include='torch.**', exclude='torch.foo'`` will mock all torch packages except ``'torch.foo'``, + Default: is ``[]``. + + allow_empty (bool): An optional flag that specifies whether the mock implementation(s) specified by this call + to the :meth:`mock` method must be matched to some module during packaging. If a mock is added with + ``allow_empty=False``, and :meth:`close` is called (either explicitly or via ``__exit__``) and the mock has + not been matched to a module used by the package being exported, an exception is thrown. + If ``allow_empty=True``, no such exception is thrown. + + """ + self.patterns[GlobGroup(include, exclude=exclude)] = _PatternInfo( + _ModuleProviderAction.MOCK, allow_empty + ) + + def extern( + self, + include: "GlobPattern", + *, + exclude: "GlobPattern" = (), + allow_empty: bool = True, + ): + """Include ``module`` in the list of external modules the package can import. + This will prevent dependency discovery from saving + it in the package. The importer will load an external module directly from the standard import system. + Code for extern modules must also exist in the process loading the package. + + Args: + include (Union[List[str], str]): A string e.g. ``"my_package.my_subpackage"``, or list of strings + for the names of the modules to be externed. This can also be a glob-style pattern, as + described in :meth:`mock`. + + exclude (Union[List[str], str]): An optional pattern that excludes some patterns that match the + include string. + + allow_empty (bool): An optional flag that specifies whether the extern modules specified by this call + to the ``extern`` method must be matched to some module during packaging. If an extern module glob + pattern is added with ``allow_empty=False``, and :meth:`close` is called (either explicitly or via + ``__exit__``) before any modules match that pattern, an exception is thrown. 
If ``allow_empty=True``, + no such exception is thrown. + + """ + self.patterns[GlobGroup(include, exclude=exclude)] = _PatternInfo( + _ModuleProviderAction.EXTERN, allow_empty + ) + + def deny(self, include: "GlobPattern", *, exclude: "GlobPattern" = ()): + """Blocklist modules who names match the given glob patterns from the list of modules the package can import. + If a dependency on any matching packages is found, a :class:`PackagingError` is raised. + + Args: + include (Union[List[str], str]): A string e.g. ``"my_package.my_subpackage"``, or list of strings + for the names of the modules to be externed. This can also be a glob-style pattern, as described in :meth:`mock`. + + exclude (Union[List[str], str]): An optional pattern that excludes some patterns that match the include string. + """ + self.patterns[GlobGroup(include, exclude=exclude)] = _PatternInfo( + _ModuleProviderAction.DENY, allow_empty=True + ) + + def _persistent_id(self, obj): + if torch.is_storage(obj) or isinstance(obj, torch.storage.TypedStorage): + storage: Storage + if isinstance(obj, torch.storage.TypedStorage): + # TODO: Once we decide to break serialization FC, we can + # remove this case + untyped_storage = obj._untyped_storage + storage_type_str = obj.pickle_storage_type() + storage_type = getattr(torch, storage_type_str) + storage = cast(Storage, untyped_storage) + storage_numel = obj.size() + + elif isinstance(obj, torch.UntypedStorage): + untyped_storage = obj + storage = cast(Storage, untyped_storage) + storage_type = normalize_storage_type(type(storage)) + storage_numel = storage.nbytes() + else: + raise RuntimeError(f"storage type not recognized: {type(obj)}") + + location = location_tag(storage) + + # serialize storage if not already written + storage_present = self.storage_context.has_storage(storage) + storage_id = self.storage_context.get_or_add_storage(storage) + if not storage_present: + if storage.device.type != "cpu": + storage = storage.cpu() + num_bytes = storage.nbytes() + self.zip_file.write_record( + f".data/{storage_id}.storage", storage, num_bytes + ) + return ("storage", storage_type, storage_id, location, storage_numel) + + if hasattr(obj, "__reduce_package__"): + if _gate_torchscript_serialization and isinstance( + obj, torch.jit.RecursiveScriptModule + ): + raise Exception( + "Serializing ScriptModules directly into a package is a beta feature. " + "To use, set global " + "`torch.package.package_exporter._gate_torchscript_serialization` to `False`." + ) + if self.serialized_reduces.get(id(obj)) is None: + self.serialized_reduces[id(obj)] = ( + "reduce_package", + id(obj), + *obj.__reduce_package__(self), + ) + + return self.serialized_reduces[id(obj)] + + return None + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + # If __exit__ was called because an exception was raised, we do not + # attempt to finalize the package. Instead, control is returned to the + # caller to continue raising the exception. + if exc_type is not None: + # Do the bare minimum to leave the open buffer in a valid state. + self._finalize_zip() + return + + self.close() + + def _write(self, filename, str_or_bytes): + if filename in self._written_files: + raise AssertionError( + f"Tried to write file '{filename}', but it already exists in this archive. " + "Please file a bug." + ) + self._written_files.add(filename) + + if is_mangled(filename): + raise AssertionError( + f"Tried to save a torch.package'd module as '{filename}'. 
" + "Directly saving torch.package'd modules is not allowed." + ) + if isinstance(str_or_bytes, str): + str_or_bytes = str_or_bytes.encode("utf-8") + self.zip_file.write_record(filename, str_or_bytes, len(str_or_bytes)) + + def _validate_dependency_graph(self): + # 1. Check the graph for any errors inserted during dependency analysis. + for attrs in self.dependency_graph.nodes.values(): + if "error" in attrs: + raise PackagingError(self.dependency_graph, debug=self.debug) + + # 2. Check that all patterns for which allow_empty=False have been matched at least once. + for pattern, pattern_info in self.patterns.items(): + if not pattern_info.allow_empty and not pattern_info.was_matched: + raise EmptyMatchError( + f"Exporter did not match any modules to {pattern}, which was marked as allow_empty=False" + ) + + def _write_mock_file(self): + if "_mock.py" not in self._written_files: + mock_file = str(Path(__file__).parent / "_mock.py") + self._write_source_string("_mock", _read_file(mock_file), is_package=False) + + def _execute_dependency_graph(self): + """Takes a finalized dependency graph describing how to package all + modules and executes it, writing to the ZIP archive. + """ + self._validate_dependency_graph() + + extern_modules = [] + for module_name, attrs in self.dependency_graph.nodes.items(): + action = attrs["action"] + + if action == _ModuleProviderAction.EXTERN: + for hook in self._extern_hooks.values(): + hook(self, module_name) + + extern_modules.append(module_name) + + elif action == _ModuleProviderAction.MOCK: + for hook in self._mock_hooks.values(): + hook(self, module_name) + + self._write_mock_file() + + is_package = hasattr(self._import_module(module_name), "__path__") + self._write_source_string(module_name, _MOCK_IMPL, is_package) + + elif action == _ModuleProviderAction.INTERN: + for hook in self._intern_hooks.values(): + hook(self, module_name) + + # The node in the dependency graph contains metadata that tells us + # how to intern the module. + if "provided" not in attrs: + raise AssertionError( + f"Module was marked `intern` but not provided: {module_name}" + ) + + if attrs.get("is_pickle") is True: + # This node came from save_pickle, we don't need to write any source for it. + continue + + is_package = attrs["is_package"] + source = attrs["source"] + self._write_source_string(module_name, source, is_package) + + elif action == _ModuleProviderAction.REPACKAGED_MOCK_MODULE: + self._write_mock_file() + elif action == _ModuleProviderAction.SKIP: + continue + else: + raise AssertionError( + f"Invalid action: {module_name}, {action}. Please report a bug to PyTorch." + ) + + extern_file_contents = "\n".join(extern_modules) + "\n" + self._write(".data/extern_modules", extern_file_contents) + + def _write_python_version(self): + """Writes the python version that the package was created with to .data/python_version""" + self._write(".data/python_version", platform.python_version()) + + def close(self): + """Write the package to the filesystem. Any calls after :meth:`close` are now invalid. + It is preferable to use resource guard syntax instead:: + + with PackageExporter("file.zip") as e: + ... 
+ """ + self._execute_dependency_graph() + self._write_python_version() + + self.script_module_serializer.write_files() + self._finalize_zip() + + def _finalize_zip(self): + """Called at the very end of packaging to leave the zipfile in a closed but valid state.""" + del self.zip_file + if self.buffer: + self.buffer.flush() + + def _filename(self, package, resource): + package_path = package.replace(".", "/") + resource = _normalize_path(resource) + return f"{package_path}/{resource}" + + def _can_implicitly_extern(self, module_name: str): + top_level_package_name = module_name.partition(".")[0] + return top_level_package_name == "torch" or ( + top_level_package_name not in _DISALLOWED_MODULES + and is_stdlib_module(top_level_package_name) + ) + + def dependency_graph_string(self) -> str: + """Returns digraph string representation of dependencies in package. + + Returns: + A string representation of dependencies in package. + """ + return self.dependency_graph.to_dot() + + def _nodes_with_action_type( + self, action: Optional[_ModuleProviderAction] + ) -> List[str]: + result = [] + for name, node_dict in self.dependency_graph.nodes.items(): + node_action = node_dict.get("action", None) + if node_action == action and "is_pickle" not in node_dict: + result.append(name) + result.sort() + return result + + def externed_modules(self) -> List[str]: + """Return all modules that are currently externed. + + Returns: + A list containing the names of modules which will be + externed in this package. + """ + return self._nodes_with_action_type(_ModuleProviderAction.EXTERN) + + def interned_modules(self) -> List[str]: + """Return all modules that are currently interned. + + Returns: + A list containing the names of modules which will be + interned in this package. + """ + return self._nodes_with_action_type(_ModuleProviderAction.INTERN) + + def mocked_modules(self) -> List[str]: + """Return all modules that are currently mocked. + + Returns: + A list containing the names of modules which will be + mocked in this package. + """ + return self._nodes_with_action_type(_ModuleProviderAction.MOCK) + + def denied_modules(self) -> List[str]: + """Return all modules that are currently denied. + + Returns: + A list containing the names of modules which will be + denied in this package. + """ + return self._nodes_with_action_type(_ModuleProviderAction.DENY) + + def get_rdeps(self, module_name: str) -> List[str]: + """Return a list of all modules which depend on the module ``module_name``. + + Returns: + A list containing the names of modules which depend on ``module_name``. + """ + if module_name in self.dependency_graph._pred.keys(): + return list(self.dependency_graph._pred[module_name].keys()) + else: + return [] + + def all_paths(self, src: str, dst: str) -> str: + """Return a dot representation of the subgraph + that has all paths from src to dst. + + Returns: + A dot representation containing all paths from src to dst. + (https://graphviz.org/doc/info/lang.html) + """ + return self.dependency_graph.all_paths(src, dst) + + +# even though these are in the standard library, we do not allow them to be +# automatically externed since they offer a lot of system level access +_DISALLOWED_MODULES = ["sys", "io"] + +_MOCK_IMPL = """\ +from _mock import MockedObject +def __getattr__(attr: str): + return MockedObject(__name__ + '.' 
+ attr, _suppress_err=True) +""" + + +def _read_file(filename: str) -> str: + with open(filename, "rb") as f: + b = f.read() + return b.decode("utf-8") diff --git a/venv/lib/python3.10/site-packages/torch/package/package_importer.py b/venv/lib/python3.10/site-packages/torch/package/package_importer.py new file mode 100644 index 0000000000000000000000000000000000000000..93f2eee5e22aeab632ffc405d5c7f5f4b5f1099e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/package/package_importer.py @@ -0,0 +1,759 @@ +import builtins +import importlib +import importlib.machinery +import inspect +import io +import linecache +import os +import types +from contextlib import contextmanager +from typing import Any, BinaryIO, Callable, cast, Dict, Iterable, List, Optional, Union +from weakref import WeakValueDictionary + +import torch +from torch.serialization import _get_restore_location, _maybe_decode_ascii + +from ._directory_reader import DirectoryReader +from ._importlib import ( + _calc___package__, + _normalize_line_endings, + _normalize_path, + _resolve_name, + _sanity_check, +) +from ._mangling import demangle, PackageMangler +from ._package_unpickler import PackageUnpickler +from .file_structure_representation import _create_directory_from_file_list, Directory +from .glob_group import GlobPattern +from .importer import Importer + +__all__ = ["PackageImporter"] + + +# This is a list of imports that are implicitly allowed even if they haven't +# been marked as extern. This is to work around the fact that Torch implicitly +# depends on numpy and package can't track it. +# https://github.com/pytorch/MultiPy/issues/46 +IMPLICIT_IMPORT_ALLOWLIST: Iterable[str] = [ + "numpy", + "numpy.core", + "numpy.core._multiarray_umath", + # FX GraphModule might depend on builtins module and users usually + # don't extern builtins. Here we import it here by default. + "builtins", +] + + +class PackageImporter(Importer): + """Importers allow you to load code written to packages by :class:`PackageExporter`. + Code is loaded in a hermetic way, using files from the package + rather than the normal python import system. This allows + for the packaging of PyTorch model code and data so that it can be run + on a server or used in the future for transfer learning. + + The importer for packages ensures that code in the module can only be loaded from + within the package, except for modules explicitly listed as external during export. + The file ``extern_modules`` in the zip archive lists all the modules that a package externally depends on. + This prevents "implicit" dependencies where the package runs locally because it is importing + a locally-installed package, but then fails when the package is copied to another machine. + """ + + """The dictionary of already loaded modules from this package, equivalent to ``sys.modules`` but + local to this importer. + """ + + modules: Dict[str, types.ModuleType] + + def __init__( + self, + file_or_buffer: Union[str, torch._C.PyTorchFileReader, os.PathLike, BinaryIO], + module_allowed: Callable[[str], bool] = lambda module_name: True, + ): + """Open ``file_or_buffer`` for importing. This checks that the imported package only requires modules + allowed by ``module_allowed`` + + Args: + file_or_buffer: a file-like object (has to implement :meth:`read`, :meth:`readline`, :meth:`tell`, and :meth:`seek`), + a string, or an ``os.PathLike`` object containing a filename. 
+ module_allowed (Callable[[str], bool], optional): A method to determine if a externally provided module + should be allowed. Can be used to ensure packages loaded do not depend on modules that the server + does not support. Defaults to allowing anything. + + Raises: + ImportError: If the package will use a disallowed module. + """ + torch._C._log_api_usage_once("torch.package.PackageImporter") + + self.zip_reader: Any + if isinstance(file_or_buffer, torch._C.PyTorchFileReader): + self.filename = "" + self.zip_reader = file_or_buffer + elif isinstance(file_or_buffer, (os.PathLike, str)): + self.filename = os.fspath(file_or_buffer) + if not os.path.isdir(self.filename): + self.zip_reader = torch._C.PyTorchFileReader(self.filename) + else: + self.zip_reader = DirectoryReader(self.filename) + else: + self.filename = "" + self.zip_reader = torch._C.PyTorchFileReader(file_or_buffer) + + torch._C._log_api_usage_metadata( + "torch.package.PackageImporter.metadata", + { + "serialization_id": self.zip_reader.serialization_id(), + "file_name": self.filename, + }, + ) + + self.root = _PackageNode(None) + self.modules = {} + self.extern_modules = self._read_extern() + + for extern_module in self.extern_modules: + if not module_allowed(extern_module): + raise ImportError( + f"package '{file_or_buffer}' needs the external module '{extern_module}' " + f"but that module has been disallowed" + ) + self._add_extern(extern_module) + + for fname in self.zip_reader.get_all_records(): + self._add_file(fname) + + self.patched_builtins = builtins.__dict__.copy() + self.patched_builtins["__import__"] = self.__import__ + # Allow packaged modules to reference their PackageImporter + self.modules["torch_package_importer"] = self # type: ignore[assignment] + + self._mangler = PackageMangler() + + # used for reduce deserializaiton + self.storage_context: Any = None + self.last_map_location = None + + # used for torch.serialization._load + self.Unpickler = lambda *args, **kwargs: PackageUnpickler(self, *args, **kwargs) + + def import_module(self, name: str, package=None): + """Load a module from the package if it hasn't already been loaded, and then return + the module. Modules are loaded locally + to the importer and will appear in ``self.modules`` rather than ``sys.modules``. + + Args: + name (str): Fully qualified name of the module to load. + package ([type], optional): Unused, but present to match the signature of importlib.import_module. Defaults to ``None``. + + Returns: + types.ModuleType: The (possibly already) loaded module. + """ + # We should always be able to support importing modules from this package. + # This is to support something like: + # obj = importer.load_pickle(...) + # importer.import_module(obj.__module__) <- this string will be mangled + # + # Note that _mangler.demangle will not demangle any module names + # produced by a different PackageImporter instance. + name = self._mangler.demangle(name) + + return self._gcd_import(name) + + def load_binary(self, package: str, resource: str) -> bytes: + """Load raw bytes. + + Args: + package (str): The name of module package (e.g. ``"my_package.my_subpackage"``). + resource (str): The unique name for the resource. + + Returns: + bytes: The loaded data. + """ + + path = self._zipfile_path(package, resource) + return self.zip_reader.get_record(path) + + def load_text( + self, + package: str, + resource: str, + encoding: str = "utf-8", + errors: str = "strict", + ) -> str: + """Load a string. + + Args: + package (str): The name of module package (e.g. 
``"my_package.my_subpackage"``). + resource (str): The unique name for the resource. + encoding (str, optional): Passed to ``decode``. Defaults to ``'utf-8'``. + errors (str, optional): Passed to ``decode``. Defaults to ``'strict'``. + + Returns: + str: The loaded text. + """ + data = self.load_binary(package, resource) + return data.decode(encoding, errors) + + def load_pickle(self, package: str, resource: str, map_location=None) -> Any: + """Unpickles the resource from the package, loading any modules that are needed to construct the objects + using :meth:`import_module`. + + Args: + package (str): The name of module package (e.g. ``"my_package.my_subpackage"``). + resource (str): The unique name for the resource. + map_location: Passed to `torch.load` to determine how tensors are mapped to devices. Defaults to ``None``. + + Returns: + Any: The unpickled object. + """ + pickle_file = self._zipfile_path(package, resource) + restore_location = _get_restore_location(map_location) + loaded_storages = {} + loaded_reduces = {} + storage_context = torch._C.DeserializationStorageContext() + + def load_tensor(dtype, size, key, location, restore_location): + name = f"{key}.storage" + + if storage_context.has_storage(name): + storage = storage_context.get_storage(name, dtype)._typed_storage() + else: + tensor = self.zip_reader.get_storage_from_record( + ".data/" + name, size, dtype + ) + if isinstance(self.zip_reader, torch._C.PyTorchFileReader): + storage_context.add_storage(name, tensor) + storage = tensor._typed_storage() + loaded_storages[key] = restore_location(storage, location) + + def persistent_load(saved_id): + assert isinstance(saved_id, tuple) + typename = _maybe_decode_ascii(saved_id[0]) + data = saved_id[1:] + + if typename == "storage": + storage_type, key, location, size = data + dtype = storage_type.dtype + + if key not in loaded_storages: + load_tensor( + dtype, + size, + key, + _maybe_decode_ascii(location), + restore_location, + ) + storage = loaded_storages[key] + # TODO: Once we decide to break serialization FC, we can + # stop wrapping with TypedStorage + return torch.storage.TypedStorage( + wrap_storage=storage._untyped_storage, dtype=dtype, _internal=True + ) + elif typename == "reduce_package": + # to fix BC breaking change, objects on this load path + # will be loaded multiple times erroneously + if len(data) == 2: + func, args = data + return func(self, *args) + reduce_id, func, args = data + if reduce_id not in loaded_reduces: + loaded_reduces[reduce_id] = func(self, *args) + return loaded_reduces[reduce_id] + else: + f"Unknown typename for persistent_load, expected 'storage' or 'reduce_package' but got '{typename}'" + + # Load the data (which may in turn use `persistent_load` to load tensors) + data_file = io.BytesIO(self.zip_reader.get_record(pickle_file)) + unpickler = self.Unpickler(data_file) + unpickler.persistent_load = persistent_load # type: ignore[assignment] + + @contextmanager + def set_deserialization_context(): + # to let reduce_package access deserializaiton context + self.storage_context = storage_context + self.last_map_location = map_location + try: + yield + finally: + self.storage_context = None + self.last_map_location = None + + with set_deserialization_context(): + result = unpickler.load() + + # TODO from zdevito: + # This stateful weird function will need to be removed in our efforts + # to unify the format. 
It has a race condition if multiple python + # threads try to read independent files + torch._utils._validate_loaded_sparse_tensors() + + return result + + def id(self): + """ + Returns internal identifier that torch.package uses to distinguish :class:`PackageImporter` instances. + Looks like:: + + + """ + return self._mangler.parent_name() + + def file_structure( + self, *, include: "GlobPattern" = "**", exclude: "GlobPattern" = () + ) -> Directory: + """Returns a file structure representation of package's zipfile. + + Args: + include (Union[List[str], str]): An optional string e.g. ``"my_package.my_subpackage"``, or optional list of strings + for the names of the files to be included in the zipfile representation. This can also be + a glob-style pattern, as described in :meth:`PackageExporter.mock` + + exclude (Union[List[str], str]): An optional pattern that excludes files whose name match the pattern. + + Returns: + :class:`Directory` + """ + return _create_directory_from_file_list( + self.filename, self.zip_reader.get_all_records(), include, exclude + ) + + def python_version(self): + """Returns the version of python that was used to create this package. + + Note: this function is experimental and not Forward Compatible. The plan is to move this into a lock + file later on. + + Returns: + :class:`Optional[str]` a python version e.g. 3.8.9 or None if no version was stored with this package + """ + python_version_path = ".data/python_version" + return ( + self.zip_reader.get_record(python_version_path).decode("utf-8").strip() + if self.zip_reader.has_record(python_version_path) + else None + ) + + def _read_extern(self): + return ( + self.zip_reader.get_record(".data/extern_modules") + .decode("utf-8") + .splitlines(keepends=False) + ) + + def _make_module( + self, name: str, filename: Optional[str], is_package: bool, parent: str + ): + mangled_filename = self._mangler.mangle(filename) if filename else None + spec = importlib.machinery.ModuleSpec( + name, + self, # type: ignore[arg-type] + origin="", + is_package=is_package, + ) + module = importlib.util.module_from_spec(spec) + self.modules[name] = module + module.__name__ = self._mangler.mangle(name) + ns = module.__dict__ + ns["__spec__"] = spec + ns["__loader__"] = self + ns["__file__"] = mangled_filename + ns["__cached__"] = None + ns["__builtins__"] = self.patched_builtins + ns["__torch_package__"] = True + + # Add this module to our private global registry. It should be unique due to mangling. + assert module.__name__ not in _package_imported_modules + _package_imported_modules[module.__name__] = module + + # pre-emptively install on the parent to prevent IMPORT_FROM from trying to + # access sys.modules + self._install_on_parent(parent, name, module) + + if filename is not None: + assert mangled_filename is not None + # pre-emptively install the source in `linecache` so that stack traces, + # `inspect`, etc. work. 
+ assert filename not in linecache.cache # type: ignore[attr-defined] + linecache.lazycache(mangled_filename, ns) + + code = self._compile_source(filename, mangled_filename) + exec(code, ns) + + return module + + def _load_module(self, name: str, parent: str): + cur: _PathNode = self.root + for atom in name.split("."): + if not isinstance(cur, _PackageNode) or atom not in cur.children: + if name in IMPLICIT_IMPORT_ALLOWLIST: + module = self.modules[name] = importlib.import_module(name) + return module + raise ModuleNotFoundError( + f'No module named "{name}" in self-contained archive "{self.filename}"' + f" and the module is also not in the list of allowed external modules: {self.extern_modules}", + name=name, + ) + cur = cur.children[atom] + if isinstance(cur, _ExternNode): + module = self.modules[name] = importlib.import_module(name) + return module + return self._make_module(name, cur.source_file, isinstance(cur, _PackageNode), parent) # type: ignore[attr-defined] + + def _compile_source(self, fullpath: str, mangled_filename: str): + source = self.zip_reader.get_record(fullpath) + source = _normalize_line_endings(source) + return compile(source, mangled_filename, "exec", dont_inherit=True) + + # note: named `get_source` so that linecache can find the source + # when this is the __loader__ of a module. + def get_source(self, module_name) -> str: + # linecache calls `get_source` with the `module.__name__` as the argument, so we must demangle it here. + module = self.import_module(demangle(module_name)) + return self.zip_reader.get_record(demangle(module.__file__)).decode("utf-8") + + # note: named `get_resource_reader` so that importlib.resources can find it. + # This is otherwise considered an internal method. + def get_resource_reader(self, fullname): + try: + package = self._get_package(fullname) + except ImportError: + return None + if package.__loader__ is not self: + return None + return _PackageResourceReader(self, fullname) + + def _install_on_parent(self, parent: str, name: str, module: types.ModuleType): + if not parent: + return + # Set the module as an attribute on its parent. + parent_module = self.modules[parent] + if parent_module.__loader__ is self: + setattr(parent_module, name.rpartition(".")[2], module) + + # note: copied from cpython's import code, with call to create module replaced with _make_module + def _do_find_and_load(self, name): + path = None + parent = name.rpartition(".")[0] + module_name_no_parent = name.rpartition(".")[-1] + if parent: + if parent not in self.modules: + self._gcd_import(parent) + # Crazy side-effects! + if name in self.modules: + return self.modules[name] + parent_module = self.modules[parent] + + try: + path = parent_module.__path__ # type: ignore[attr-defined] + + except AttributeError: + # when we attempt to import a package only containing pybinded files, + # the parent directory isn't always a package as defined by python, + # so we search if the package is actually there or not before calling the error. + if isinstance( + parent_module.__loader__, + importlib.machinery.ExtensionFileLoader, + ): + if name not in self.extern_modules: + msg = ( + _ERR_MSG + + "; {!r} is a c extension module which was not externed. C extension modules \ + need to be externed by the PackageExporter in order to be used as we do not support interning them.}." 
+ ).format(name, name) + raise ModuleNotFoundError(msg, name=name) from None + if not isinstance( + parent_module.__dict__.get(module_name_no_parent), + types.ModuleType, + ): + msg = ( + _ERR_MSG + + "; {!r} is a c extension package which does not contain {!r}." + ).format(name, parent, name) + raise ModuleNotFoundError(msg, name=name) from None + else: + msg = (_ERR_MSG + "; {!r} is not a package").format(name, parent) + raise ModuleNotFoundError(msg, name=name) from None + + module = self._load_module(name, parent) + + self._install_on_parent(parent, name, module) + + return module + + # note: copied from cpython's import code + def _find_and_load(self, name): + module = self.modules.get(name, _NEEDS_LOADING) + if module is _NEEDS_LOADING: + return self._do_find_and_load(name) + + if module is None: + message = f"import of {name} halted; None in sys.modules" + raise ModuleNotFoundError(message, name=name) + + # To handle https://github.com/pytorch/pytorch/issues/57490, where std's + # creation of fake submodules via the hacking of sys.modules is not import + # friendly + if name == "os": + self.modules["os.path"] = cast(Any, module).path + elif name == "typing": + self.modules["typing.io"] = cast(Any, module).io + self.modules["typing.re"] = cast(Any, module).re + + return module + + def _gcd_import(self, name, package=None, level=0): + """Import and return the module based on its name, the package the call is + being made from, and the level adjustment. + + This function represents the greatest common denominator of functionality + between import_module and __import__. This includes setting __package__ if + the loader did not. + + """ + _sanity_check(name, package, level) + if level > 0: + name = _resolve_name(name, package, level) + + return self._find_and_load(name) + + # note: copied from cpython's import code + def _handle_fromlist(self, module, fromlist, *, recursive=False): + """Figure out what __import__ should return. + + The import_ parameter is a callable which takes the name of module to + import. It is required to decouple the function from assuming importlib's + import implementation is desired. + + """ + module_name = demangle(module.__name__) + # The hell that is fromlist ... + # If a package was imported, try to import stuff from fromlist. + if hasattr(module, "__path__"): + for x in fromlist: + if not isinstance(x, str): + if recursive: + where = module_name + ".__all__" + else: + where = "``from list''" + raise TypeError( + f"Item in {where} must be str, " f"not {type(x).__name__}" + ) + elif x == "*": + if not recursive and hasattr(module, "__all__"): + self._handle_fromlist(module, module.__all__, recursive=True) + elif not hasattr(module, x): + from_name = f"{module_name}.{x}" + try: + self._gcd_import(from_name) + except ModuleNotFoundError as exc: + # Backwards-compatibility dictates we ignore failed + # imports triggered by fromlist for modules that don't + # exist. + if ( + exc.name == from_name + and self.modules.get(from_name, _NEEDS_LOADING) is not None + ): + continue + raise + return module + + def __import__(self, name, globals=None, locals=None, fromlist=(), level=0): + if level == 0: + module = self._gcd_import(name) + else: + globals_ = globals if globals is not None else {} + package = _calc___package__(globals_) + module = self._gcd_import(name, package, level) + if not fromlist: + # Return up to the first dot in 'name'. This is complicated by the fact + # that 'name' may be relative. 
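+            # For example, a plain ``import a.b.c`` (hypothetical names) binds ``a``
+            # in the caller's namespace, so the top-level module is what gets returned here.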
+            if level == 0:
+                return self._gcd_import(name.partition(".")[0])
+            elif not name:
+                return module
+            else:
+                # Figure out where to slice the module's name up to the first dot
+                # in 'name'.
+                cut_off = len(name) - len(name.partition(".")[0])
+                # Slice end needs to be positive to alleviate need to special-case
+                # when ``'.' not in name``.
+                module_name = demangle(module.__name__)
+                return self.modules[module_name[: len(module_name) - cut_off]]
+        else:
+            return self._handle_fromlist(module, fromlist)
+
+    def _get_package(self, package):
+        """Take a package name or module object and return the module.
+
+        If a name, the module is imported. If the passed or imported module
+        object is not a package, raise an exception.
+        """
+        if hasattr(package, "__spec__"):
+            if package.__spec__.submodule_search_locations is None:
+                raise TypeError(f"{package.__spec__.name!r} is not a package")
+            else:
+                return package
+        else:
+            module = self.import_module(package)
+            if module.__spec__.submodule_search_locations is None:
+                raise TypeError(f"{package!r} is not a package")
+            else:
+                return module
+
+    def _zipfile_path(self, package, resource=None):
+        package = self._get_package(package)
+        assert package.__loader__ is self
+        name = demangle(package.__name__)
+        if resource is not None:
+            resource = _normalize_path(resource)
+            return f"{name.replace('.', '/')}/{resource}"
+        else:
+            return f"{name.replace('.', '/')}"
+
+    def _get_or_create_package(
+        self, atoms: List[str]
+    ) -> "Union[_PackageNode, _ExternNode]":
+        cur = self.root
+        for i, atom in enumerate(atoms):
+            node = cur.children.get(atom, None)
+            if node is None:
+                node = cur.children[atom] = _PackageNode(None)
+            if isinstance(node, _ExternNode):
+                return node
+            if isinstance(node, _ModuleNode):
+                name = ".".join(atoms[:i])
+                raise ImportError(
+                    f"inconsistent module structure. module {name} is not a package, but has submodules"
+                )
+            assert isinstance(node, _PackageNode)
+            cur = node
+        return cur
+
+    def _add_file(self, filename: str):
+        """Assembles a Python module out of the given file. Will ignore files in the .data directory.
+
+        Args:
+            filename (str): the name of the file inside of the package archive to be added
+        """
+        *prefix, last = filename.split("/")
+        if len(prefix) > 1 and prefix[0] == ".data":
+            return
+        package = self._get_or_create_package(prefix)
+        if isinstance(package, _ExternNode):
+            raise ImportError(
+                f"inconsistent module structure. package contains a module file {filename}"
+                f" that is a subpackage of a module marked external."
+            )
+        if last == "__init__.py":
+            package.source_file = filename
+        elif last.endswith(".py"):
+            package_name = last[: -len(".py")]
+            package.children[package_name] = _ModuleNode(filename)
+
+    def _add_extern(self, extern_name: str):
+        *prefix, last = extern_name.split(".")
+        package = self._get_or_create_package(prefix)
+        if isinstance(package, _ExternNode):
+            return  # the shorter extern covers this extern case
+        package.children[last] = _ExternNode()
+
+
+_NEEDS_LOADING = object()
+_ERR_MSG_PREFIX = "No module named "
+_ERR_MSG = _ERR_MSG_PREFIX + "{!r}"
+
+
+class _PathNode:
+    pass
+
+
+class _PackageNode(_PathNode):
+    def __init__(self, source_file: Optional[str]):
+        self.source_file = source_file
+        self.children: Dict[str, _PathNode] = {}
+
+
+class _ModuleNode(_PathNode):
+    __slots__ = ["source_file"]
+
+    def __init__(self, source_file: str):
+        self.source_file = source_file
+
+
+class _ExternNode(_PathNode):
+    pass
+
+
+# A private global registry of all modules that have been package-imported.
+_package_imported_modules: WeakValueDictionary = WeakValueDictionary()
+
+# `inspect` by default only looks in `sys.modules` to find source files for classes.
+# Patch it to check our private registry of package-imported modules as well.
+_orig_getfile = inspect.getfile
+
+
+def _patched_getfile(object):
+    if inspect.isclass(object):
+        if object.__module__ in _package_imported_modules:
+            return _package_imported_modules[object.__module__].__file__
+    return _orig_getfile(object)
+
+
+inspect.getfile = _patched_getfile
+
+
+class _PackageResourceReader:
+    """Private class used to support PackageImporter.get_resource_reader().
+
+    Conforms to the importlib.abc.ResourceReader interface. Allowed to access
+    the innards of PackageImporter.
+    """
+
+    def __init__(self, importer, fullname):
+        self.importer = importer
+        self.fullname = fullname
+
+    def open_resource(self, resource):
+        from io import BytesIO
+
+        return BytesIO(self.importer.load_binary(self.fullname, resource))
+
+    def resource_path(self, resource):
+        # The contract for resource_path is that it either returns a concrete
+        # file system path or raises FileNotFoundError.
+        if isinstance(
+            self.importer.zip_reader, DirectoryReader
+        ) and self.importer.zip_reader.has_record(
+            os.path.join(self.fullname, resource)
+        ):
+            return os.path.join(
+                self.importer.zip_reader.directory, self.fullname, resource
+            )
+        raise FileNotFoundError
+
+    def is_resource(self, name):
+        path = self.importer._zipfile_path(self.fullname, name)
+        return self.importer.zip_reader.has_record(path)
+
+    def contents(self):
+        from pathlib import Path
+
+        filename = self.fullname.replace(".", "/")
+
+        fullname_path = Path(self.importer._zipfile_path(self.fullname))
+        files = self.importer.zip_reader.get_all_records()
+        subdirs_seen = set()
+        for filename in files:
+            try:
+                relative = Path(filename).relative_to(fullname_path)
+            except ValueError:
+                continue
+            # If the path of the file (which is relative to the top of the zip
+            # namespace), relative to the package given when the resource
+            # reader was created, has a parent, then it's a name in a
+            # subdirectory and thus we skip it.
+            parent_name = relative.parent.name
+            if len(parent_name) == 0:
+                yield relative.name
+            elif parent_name not in subdirs_seen:
+                subdirs_seen.add(parent_name)
+                yield parent_name
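
For context, the importer added in the hunk above is the class behind torch.package loading. The sketch below (not part of the diff) shows how such an archive is typically opened and read back through this importer; the archive path "model.pt" and the module/resource names "my_pkg.my_module", "res", "model.pkl", and "config.txt" are hypothetical placeholders, assuming the archive was previously written with torch.package.PackageExporter.

    # Minimal usage sketch, under the assumptions stated above.
    from torch.package import PackageImporter

    importer = PackageImporter("model.pt")

    # Interned modules are loaded through the importer's own module table,
    # not sys.modules, so they do not collide with locally installed
    # packages of the same name.
    mod = importer.import_module("my_pkg.my_module")

    # Pickled objects and raw resources stored in the archive are read back
    # through the same importer.
    obj = importer.load_pickle("res", "model.pkl")
    text = importer.load_text("res", "config.txt")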