Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- ckpts/universal/global_step120/zero/7.attention.query_key_value.weight/exp_avg.pt +3 -0
- venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/autotune_process.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/bounds.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/codecache.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/comms.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/compile_fx.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/config.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/cudagraph_trees.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/exc.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/fx_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/graph.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/hooks.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/index_propagation.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/inductor_prims.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/lowering.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/optimize_indexing.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/wrapper_benchmark.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/distributions/beta.py +107 -0
- venv/lib/python3.10/site-packages/torch/distributions/categorical.py +155 -0
- venv/lib/python3.10/site-packages/torch/distributions/cauchy.py +90 -0
- venv/lib/python3.10/site-packages/torch/distributions/chi2.py +33 -0
- venv/lib/python3.10/site-packages/torch/distributions/independent.py +125 -0
- venv/lib/python3.10/site-packages/torch/distributions/lkj_cholesky.py +142 -0
- venv/lib/python3.10/site-packages/torch/distributions/log_normal.py +62 -0
- venv/lib/python3.10/site-packages/torch/distributions/multinomial.py +135 -0
- venv/lib/python3.10/site-packages/torch/distributions/pareto.py +60 -0
- venv/lib/python3.10/site-packages/torch/distributions/relaxed_bernoulli.py +149 -0
- venv/lib/python3.10/site-packages/torch/distributions/transforms.py +1245 -0
- venv/lib/python3.10/site-packages/torch/distributions/uniform.py +99 -0
- venv/lib/python3.10/site-packages/torch/distributions/utils.py +177 -0
- venv/lib/python3.10/site-packages/torch/distributions/von_mises.py +209 -0
- venv/lib/python3.10/site-packages/torch/distributions/weibull.py +83 -0
- venv/lib/python3.10/site-packages/torch/mps/__init__.py +130 -0
- venv/lib/python3.10/site-packages/torch/mps/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/mps/__pycache__/event.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/mps/__pycache__/profiler.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/mps/event.py +45 -0
- venv/lib/python3.10/site-packages/torch/mps/profiler.py +59 -0
- venv/lib/python3.10/site-packages/torch/package/__init__.py +12 -0
- venv/lib/python3.10/site-packages/torch/package/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/package/__pycache__/_digraph.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/package/__pycache__/_directory_reader.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/package/__pycache__/_importlib.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/package/__pycache__/_mangling.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/package/__pycache__/_mock.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/package/__pycache__/_package_pickler.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/package/__pycache__/_package_unpickler.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/package/__pycache__/_stdlib.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/package/__pycache__/file_structure_representation.cpython-310.pyc +0 -0
ckpts/universal/global_step120/zero/7.attention.query_key_value.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:14abea4c43e48d1f4affe5a9d8d7b0feee8ca83a5ed9f53867d7ae700ebd5487
size 50332828
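The three lines above form a Git LFS pointer: the checkpoint tensor itself is stored out of band and referenced only by its SHA-256 and byte size. A purely illustrative sketch (not part of the diff, hypothetical helper name) of splitting such a pointer into its key/value fields:

# Hypothetical helper, shown only to illustrate the pointer layout.
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:14abea4c43e48d1f4affe5a9d8d7b0feee8ca83a5ed9f53867d7ae700ebd5487\n"
    "size 50332828\n"
)
print(parse_lfs_pointer(pointer)["size"])  # -> '50332828'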
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (3.75 kB)
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/autotune_process.cpython-310.pyc
ADDED
Binary file (18 kB)
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/bounds.cpython-310.pyc
ADDED
Binary file (4.56 kB)
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/codecache.cpython-310.pyc
ADDED
Binary file (76.9 kB)
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/comms.cpython-310.pyc
ADDED
Binary file (9.99 kB)
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/compile_fx.cpython-310.pyc
ADDED
Binary file (34.4 kB)
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/config.cpython-310.pyc
ADDED
Binary file (10.5 kB)
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/cudagraph_trees.cpython-310.pyc
ADDED
Binary file (60.8 kB)
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/exc.cpython-310.pyc
ADDED
Binary file (4.31 kB)
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/fx_utils.cpython-310.pyc
ADDED
Binary file (7.11 kB)
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/graph.cpython-310.pyc
ADDED
Binary file (34.8 kB)
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/hooks.cpython-310.pyc
ADDED
Binary file (792 Bytes)
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/index_propagation.cpython-310.pyc
ADDED
Binary file (10.7 kB)
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/inductor_prims.cpython-310.pyc
ADDED
Binary file (3.57 kB)
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/lowering.cpython-310.pyc
ADDED
Binary file (145 kB)
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/optimize_indexing.cpython-310.pyc
ADDED
Binary file (2.65 kB)
venv/lib/python3.10/site-packages/torch/_inductor/__pycache__/wrapper_benchmark.cpython-310.pyc
ADDED
Binary file (8.91 kB)
venv/lib/python3.10/site-packages/torch/distributions/beta.py
ADDED
@@ -0,0 +1,107 @@
from numbers import Number, Real

import torch
from torch.distributions import constraints
from torch.distributions.dirichlet import Dirichlet
from torch.distributions.exp_family import ExponentialFamily
from torch.distributions.utils import broadcast_all

__all__ = ["Beta"]


class Beta(ExponentialFamily):
    r"""
    Beta distribution parameterized by :attr:`concentration1` and :attr:`concentration0`.

    Example::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = Beta(torch.tensor([0.5]), torch.tensor([0.5]))
        >>> m.sample()  # Beta distributed with concentration concentration1 and concentration0
        tensor([ 0.1046])

    Args:
        concentration1 (float or Tensor): 1st concentration parameter of the distribution
            (often referred to as alpha)
        concentration0 (float or Tensor): 2nd concentration parameter of the distribution
            (often referred to as beta)
    """
    arg_constraints = {
        "concentration1": constraints.positive,
        "concentration0": constraints.positive,
    }
    support = constraints.unit_interval
    has_rsample = True

    def __init__(self, concentration1, concentration0, validate_args=None):
        if isinstance(concentration1, Real) and isinstance(concentration0, Real):
            concentration1_concentration0 = torch.tensor(
                [float(concentration1), float(concentration0)]
            )
        else:
            concentration1, concentration0 = broadcast_all(
                concentration1, concentration0
            )
            concentration1_concentration0 = torch.stack(
                [concentration1, concentration0], -1
            )
        self._dirichlet = Dirichlet(
            concentration1_concentration0, validate_args=validate_args
        )
        super().__init__(self._dirichlet._batch_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Beta, _instance)
        batch_shape = torch.Size(batch_shape)
        new._dirichlet = self._dirichlet.expand(batch_shape)
        super(Beta, new).__init__(batch_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    @property
    def mean(self):
        return self.concentration1 / (self.concentration1 + self.concentration0)

    @property
    def mode(self):
        return self._dirichlet.mode[..., 0]

    @property
    def variance(self):
        total = self.concentration1 + self.concentration0
        return self.concentration1 * self.concentration0 / (total.pow(2) * (total + 1))

    def rsample(self, sample_shape=()):
        return self._dirichlet.rsample(sample_shape).select(-1, 0)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        heads_tails = torch.stack([value, 1.0 - value], -1)
        return self._dirichlet.log_prob(heads_tails)

    def entropy(self):
        return self._dirichlet.entropy()

    @property
    def concentration1(self):
        result = self._dirichlet.concentration[..., 0]
        if isinstance(result, Number):
            return torch.tensor([result])
        else:
            return result

    @property
    def concentration0(self):
        result = self._dirichlet.concentration[..., 1]
        if isinstance(result, Number):
            return torch.tensor([result])
        else:
            return result

    @property
    def _natural_params(self):
        return (self.concentration1, self.concentration0)

    def _log_normalizer(self, x, y):
        return torch.lgamma(x) + torch.lgamma(y) - torch.lgamma(x + y)
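Not part of the file above: a minimal usage sketch of the Beta class as added in this diff (assumes only a working torch install; the concentration values are illustrative).

import torch
from torch.distributions.beta import Beta

# concentration1 plays the role of alpha, concentration0 the role of beta.
b = Beta(torch.tensor([2.0]), torch.tensor([5.0]))
x = b.rsample()                           # reparameterized draw in (0, 1)
print(b.mean, b.variance)                 # alpha / (alpha + beta) and the matching variance
print(b.log_prob(torch.tensor([0.3])))    # density evaluated through the underlying Dirichlet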
venv/lib/python3.10/site-packages/torch/distributions/categorical.py
ADDED
@@ -0,0 +1,155 @@
import torch
from torch import nan
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import lazy_property, logits_to_probs, probs_to_logits

__all__ = ["Categorical"]


class Categorical(Distribution):
    r"""
    Creates a categorical distribution parameterized by either :attr:`probs` or
    :attr:`logits` (but not both).

    .. note::
        It is equivalent to the distribution that :func:`torch.multinomial`
        samples from.

    Samples are integers from :math:`\{0, \ldots, K-1\}` where `K` is ``probs.size(-1)``.

    If `probs` is 1-dimensional with length-`K`, each element is the relative probability
    of sampling the class at that index.

    If `probs` is N-dimensional, the first N-1 dimensions are treated as a batch of
    relative probability vectors.

    .. note:: The `probs` argument must be non-negative, finite and have a non-zero sum,
        and it will be normalized to sum to 1 along the last dimension. :attr:`probs`
        will return this normalized value.
        The `logits` argument will be interpreted as unnormalized log probabilities
        and can therefore be any real number. It will likewise be normalized so that
        the resulting probabilities sum to 1 along the last dimension. :attr:`logits`
        will return this normalized value.

    See also: :func:`torch.multinomial`

    Example::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = Categorical(torch.tensor([ 0.25, 0.25, 0.25, 0.25 ]))
        >>> m.sample()  # equal probability of 0, 1, 2, 3
        tensor(3)

    Args:
        probs (Tensor): event probabilities
        logits (Tensor): event log probabilities (unnormalized)
    """
    arg_constraints = {"probs": constraints.simplex, "logits": constraints.real_vector}
    has_enumerate_support = True

    def __init__(self, probs=None, logits=None, validate_args=None):
        if (probs is None) == (logits is None):
            raise ValueError(
                "Either `probs` or `logits` must be specified, but not both."
            )
        if probs is not None:
            if probs.dim() < 1:
                raise ValueError("`probs` parameter must be at least one-dimensional.")
            self.probs = probs / probs.sum(-1, keepdim=True)
        else:
            if logits.dim() < 1:
                raise ValueError("`logits` parameter must be at least one-dimensional.")
            # Normalize
            self.logits = logits - logits.logsumexp(dim=-1, keepdim=True)
        self._param = self.probs if probs is not None else self.logits
        self._num_events = self._param.size()[-1]
        batch_shape = (
            self._param.size()[:-1] if self._param.ndimension() > 1 else torch.Size()
        )
        super().__init__(batch_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Categorical, _instance)
        batch_shape = torch.Size(batch_shape)
        param_shape = batch_shape + torch.Size((self._num_events,))
        if "probs" in self.__dict__:
            new.probs = self.probs.expand(param_shape)
            new._param = new.probs
        if "logits" in self.__dict__:
            new.logits = self.logits.expand(param_shape)
            new._param = new.logits
        new._num_events = self._num_events
        super(Categorical, new).__init__(batch_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    def _new(self, *args, **kwargs):
        return self._param.new(*args, **kwargs)

    @constraints.dependent_property(is_discrete=True, event_dim=0)
    def support(self):
        return constraints.integer_interval(0, self._num_events - 1)

    @lazy_property
    def logits(self):
        return probs_to_logits(self.probs)

    @lazy_property
    def probs(self):
        return logits_to_probs(self.logits)

    @property
    def param_shape(self):
        return self._param.size()

    @property
    def mean(self):
        return torch.full(
            self._extended_shape(),
            nan,
            dtype=self.probs.dtype,
            device=self.probs.device,
        )

    @property
    def mode(self):
        return self.probs.argmax(axis=-1)

    @property
    def variance(self):
        return torch.full(
            self._extended_shape(),
            nan,
            dtype=self.probs.dtype,
            device=self.probs.device,
        )

    def sample(self, sample_shape=torch.Size()):
        if not isinstance(sample_shape, torch.Size):
            sample_shape = torch.Size(sample_shape)
        probs_2d = self.probs.reshape(-1, self._num_events)
        samples_2d = torch.multinomial(probs_2d, sample_shape.numel(), True).T
        return samples_2d.reshape(self._extended_shape(sample_shape))

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        value = value.long().unsqueeze(-1)
        value, log_pmf = torch.broadcast_tensors(value, self.logits)
        value = value[..., :1]
        return log_pmf.gather(-1, value).squeeze(-1)

    def entropy(self):
        min_real = torch.finfo(self.logits.dtype).min
        logits = torch.clamp(self.logits, min=min_real)
        p_log_p = logits * self.probs
        return -p_log_p.sum(-1)

    def enumerate_support(self, expand=True):
        num_events = self._num_events
        values = torch.arange(num_events, dtype=torch.long, device=self._param.device)
        values = values.view((-1,) + (1,) * len(self._batch_shape))
        if expand:
            values = values.expand((-1,) + self._batch_shape)
        return values
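Not part of the file above: a short usage sketch of the Categorical class added in this diff (illustrative probabilities; assumes torch is available).

import torch
from torch.distributions.categorical import Categorical

c = Categorical(probs=torch.tensor([0.1, 0.2, 0.7]))  # normalized along the last dim
idx = c.sample(torch.Size([4]))                        # four integer samples in {0, 1, 2}
print(c.log_prob(idx))                                 # log-pmf of each sampled index
print(c.entropy())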
venv/lib/python3.10/site-packages/torch/distributions/cauchy.py
ADDED
@@ -0,0 +1,90 @@
import math
from numbers import Number

import torch
from torch import inf, nan
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all

__all__ = ["Cauchy"]


class Cauchy(Distribution):
    r"""
    Samples from a Cauchy (Lorentz) distribution. The distribution of the ratio of
    independent normally distributed random variables with means `0` follows a
    Cauchy distribution.

    Example::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = Cauchy(torch.tensor([0.0]), torch.tensor([1.0]))
        >>> m.sample()  # sample from a Cauchy distribution with loc=0 and scale=1
        tensor([ 2.3214])

    Args:
        loc (float or Tensor): mode or median of the distribution.
        scale (float or Tensor): half width at half maximum.
    """
    arg_constraints = {"loc": constraints.real, "scale": constraints.positive}
    support = constraints.real
    has_rsample = True

    def __init__(self, loc, scale, validate_args=None):
        self.loc, self.scale = broadcast_all(loc, scale)
        if isinstance(loc, Number) and isinstance(scale, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = self.loc.size()
        super().__init__(batch_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Cauchy, _instance)
        batch_shape = torch.Size(batch_shape)
        new.loc = self.loc.expand(batch_shape)
        new.scale = self.scale.expand(batch_shape)
        super(Cauchy, new).__init__(batch_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    @property
    def mean(self):
        return torch.full(
            self._extended_shape(), nan, dtype=self.loc.dtype, device=self.loc.device
        )

    @property
    def mode(self):
        return self.loc

    @property
    def variance(self):
        return torch.full(
            self._extended_shape(), inf, dtype=self.loc.dtype, device=self.loc.device
        )

    def rsample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        eps = self.loc.new(shape).cauchy_()
        return self.loc + eps * self.scale

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return (
            -math.log(math.pi)
            - self.scale.log()
            - (((value - self.loc) / self.scale) ** 2).log1p()
        )

    def cdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return torch.atan((value - self.loc) / self.scale) / math.pi + 0.5

    def icdf(self, value):
        return torch.tan(math.pi * (value - 0.5)) * self.scale + self.loc

    def entropy(self):
        return math.log(4 * math.pi) + self.scale.log()
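Not part of the file above: a usage sketch of the Cauchy class added in this diff (illustrative parameters).

import torch
from torch.distributions.cauchy import Cauchy

c = Cauchy(torch.tensor([0.0]), torch.tensor([1.0]))
s = c.rsample(torch.Size([3]))         # heavy-tailed draws; mean is nan and variance is inf by design
print(c.cdf(torch.tensor([0.0])))      # 0.5 at the median
print(c.icdf(torch.tensor([0.975])))   # quantile from the closed-form inverse CDF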
venv/lib/python3.10/site-packages/torch/distributions/chi2.py
ADDED
@@ -0,0 +1,33 @@
from torch.distributions import constraints
from torch.distributions.gamma import Gamma

__all__ = ["Chi2"]


class Chi2(Gamma):
    r"""
    Creates a Chi-squared distribution parameterized by shape parameter :attr:`df`.
    This is exactly equivalent to ``Gamma(alpha=0.5*df, beta=0.5)``

    Example::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = Chi2(torch.tensor([1.0]))
        >>> m.sample()  # Chi2 distributed with shape df=1
        tensor([ 0.1046])

    Args:
        df (float or Tensor): shape parameter of the distribution
    """
    arg_constraints = {"df": constraints.positive}

    def __init__(self, df, validate_args=None):
        super().__init__(0.5 * df, 0.5, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Chi2, _instance)
        return super().expand(batch_shape, new)

    @property
    def df(self):
        return self.concentration * 2
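Not part of the file above: a usage sketch of the Chi2 class added in this diff (illustrative degrees of freedom).

import torch
from torch.distributions.chi2 import Chi2

chi2 = Chi2(torch.tensor([4.0]))       # same as Gamma(concentration=2.0, rate=0.5)
print(chi2.df)                         # recovered as concentration * 2
print(chi2.sample(torch.Size([2])))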
venv/lib/python3.10/site-packages/torch/distributions/independent.py
ADDED
@@ -0,0 +1,125 @@
from typing import Dict

import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import _sum_rightmost

__all__ = ["Independent"]


class Independent(Distribution):
    r"""
    Reinterprets some of the batch dims of a distribution as event dims.

    This is mainly useful for changing the shape of the result of
    :meth:`log_prob`. For example to create a diagonal Normal distribution with
    the same shape as a Multivariate Normal distribution (so they are
    interchangeable), you can::

        >>> from torch.distributions.multivariate_normal import MultivariateNormal
        >>> from torch.distributions.normal import Normal
        >>> loc = torch.zeros(3)
        >>> scale = torch.ones(3)
        >>> mvn = MultivariateNormal(loc, scale_tril=torch.diag(scale))
        >>> [mvn.batch_shape, mvn.event_shape]
        [torch.Size([]), torch.Size([3])]
        >>> normal = Normal(loc, scale)
        >>> [normal.batch_shape, normal.event_shape]
        [torch.Size([3]), torch.Size([])]
        >>> diagn = Independent(normal, 1)
        >>> [diagn.batch_shape, diagn.event_shape]
        [torch.Size([]), torch.Size([3])]

    Args:
        base_distribution (torch.distributions.distribution.Distribution): a
            base distribution
        reinterpreted_batch_ndims (int): the number of batch dims to
            reinterpret as event dims
    """
    arg_constraints: Dict[str, constraints.Constraint] = {}

    def __init__(
        self, base_distribution, reinterpreted_batch_ndims, validate_args=None
    ):
        if reinterpreted_batch_ndims > len(base_distribution.batch_shape):
            raise ValueError(
                "Expected reinterpreted_batch_ndims <= len(base_distribution.batch_shape), "
                f"actual {reinterpreted_batch_ndims} vs {len(base_distribution.batch_shape)}"
            )
        shape = base_distribution.batch_shape + base_distribution.event_shape
        event_dim = reinterpreted_batch_ndims + len(base_distribution.event_shape)
        batch_shape = shape[: len(shape) - event_dim]
        event_shape = shape[len(shape) - event_dim :]
        self.base_dist = base_distribution
        self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
        super().__init__(batch_shape, event_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Independent, _instance)
        batch_shape = torch.Size(batch_shape)
        new.base_dist = self.base_dist.expand(
            batch_shape + self.event_shape[: self.reinterpreted_batch_ndims]
        )
        new.reinterpreted_batch_ndims = self.reinterpreted_batch_ndims
        super(Independent, new).__init__(
            batch_shape, self.event_shape, validate_args=False
        )
        new._validate_args = self._validate_args
        return new

    @property
    def has_rsample(self):
        return self.base_dist.has_rsample

    @property
    def has_enumerate_support(self):
        if self.reinterpreted_batch_ndims > 0:
            return False
        return self.base_dist.has_enumerate_support

    @constraints.dependent_property
    def support(self):
        result = self.base_dist.support
        if self.reinterpreted_batch_ndims:
            result = constraints.independent(result, self.reinterpreted_batch_ndims)
        return result

    @property
    def mean(self):
        return self.base_dist.mean

    @property
    def mode(self):
        return self.base_dist.mode

    @property
    def variance(self):
        return self.base_dist.variance

    def sample(self, sample_shape=torch.Size()):
        return self.base_dist.sample(sample_shape)

    def rsample(self, sample_shape=torch.Size()):
        return self.base_dist.rsample(sample_shape)

    def log_prob(self, value):
        log_prob = self.base_dist.log_prob(value)
        return _sum_rightmost(log_prob, self.reinterpreted_batch_ndims)

    def entropy(self):
        entropy = self.base_dist.entropy()
        return _sum_rightmost(entropy, self.reinterpreted_batch_ndims)

    def enumerate_support(self, expand=True):
        if self.reinterpreted_batch_ndims > 0:
            raise NotImplementedError(
                "Enumeration over cartesian product is not implemented"
            )
        return self.base_dist.enumerate_support(expand=expand)

    def __repr__(self):
        return (
            self.__class__.__name__
            + f"({self.base_dist}, {self.reinterpreted_batch_ndims})"
        )
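Not part of the file above: a usage sketch showing how Independent moves batch dims into the event shape (illustrative shapes; assumes torch is installed).

import torch
from torch.distributions.independent import Independent
from torch.distributions.normal import Normal

base = Normal(torch.zeros(3), torch.ones(3))   # batch_shape=[3], event_shape=[]
diag = Independent(base, 1)                    # batch_shape=[],  event_shape=[3]
x = diag.rsample()
print(base.log_prob(x).shape)                  # torch.Size([3]): one log-density per dimension
print(diag.log_prob(x).shape)                  # torch.Size([]): summed over the event dimension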
venv/lib/python3.10/site-packages/torch/distributions/lkj_cholesky.py
ADDED
@@ -0,0 +1,142 @@
"""
This closely follows the implementation in NumPyro (https://github.com/pyro-ppl/numpyro).

Original copyright notice:

# Copyright: Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
"""

import math

import torch
from torch.distributions import Beta, constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all

__all__ = ["LKJCholesky"]


class LKJCholesky(Distribution):
    r"""
    LKJ distribution for lower Cholesky factor of correlation matrices.
    The distribution is controlled by ``concentration`` parameter :math:`\eta`
    to make the probability of the correlation matrix :math:`M` generated from
    a Cholesky factor proportional to :math:`\det(M)^{\eta - 1}`. Because of that,
    when ``concentration == 1``, we have a uniform distribution over Cholesky
    factors of correlation matrices::

        L ~ LKJCholesky(dim, concentration)
        X = L @ L' ~ LKJCorr(dim, concentration)

    Note that this distribution samples the
    Cholesky factor of correlation matrices and not the correlation matrices
    themselves and thereby differs slightly from the derivations in [1] for
    the `LKJCorr` distribution. For sampling, this uses the Onion method from
    [1] Section 3.

    Example::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> l = LKJCholesky(3, 0.5)
        >>> l.sample()  # l @ l.T is a sample of a correlation 3x3 matrix
        tensor([[ 1.0000,  0.0000,  0.0000],
                [ 0.3516,  0.9361,  0.0000],
                [-0.1899,  0.4748,  0.8593]])

    Args:
        dimension (dim): dimension of the matrices
        concentration (float or Tensor): concentration/shape parameter of the
            distribution (often referred to as eta)

    **References**

    [1] `Generating random correlation matrices based on vines and extended onion method` (2009),
    Daniel Lewandowski, Dorota Kurowicka, Harry Joe.
    Journal of Multivariate Analysis. 100. 10.1016/j.jmva.2009.04.008
    """
    arg_constraints = {"concentration": constraints.positive}
    support = constraints.corr_cholesky

    def __init__(self, dim, concentration=1.0, validate_args=None):
        if dim < 2:
            raise ValueError(
                f"Expected dim to be an integer greater than or equal to 2. Found dim={dim}."
            )
        self.dim = dim
        (self.concentration,) = broadcast_all(concentration)
        batch_shape = self.concentration.size()
        event_shape = torch.Size((dim, dim))
        # This is used to draw vectorized samples from the beta distribution in Sec. 3.2 of [1].
        marginal_conc = self.concentration + 0.5 * (self.dim - 2)
        offset = torch.arange(
            self.dim - 1,
            dtype=self.concentration.dtype,
            device=self.concentration.device,
        )
        offset = torch.cat([offset.new_zeros((1,)), offset])
        beta_conc1 = offset + 0.5
        beta_conc0 = marginal_conc.unsqueeze(-1) - 0.5 * offset
        self._beta = Beta(beta_conc1, beta_conc0)
        super().__init__(batch_shape, event_shape, validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(LKJCholesky, _instance)
        batch_shape = torch.Size(batch_shape)
        new.dim = self.dim
        new.concentration = self.concentration.expand(batch_shape)
        new._beta = self._beta.expand(batch_shape + (self.dim,))
        super(LKJCholesky, new).__init__(
            batch_shape, self.event_shape, validate_args=False
        )
        new._validate_args = self._validate_args
        return new

    def sample(self, sample_shape=torch.Size()):
        # This uses the Onion method, but there are a few differences from [1] Sec. 3.2:
        # - This vectorizes the for loop and also works for heterogeneous eta.
        # - Same algorithm generalizes to n=1.
        # - The procedure is simplified since we are sampling the cholesky factor of
        #   the correlation matrix instead of the correlation matrix itself. As such,
        #   we only need to generate `w`.
        y = self._beta.sample(sample_shape).unsqueeze(-1)
        u_normal = torch.randn(
            self._extended_shape(sample_shape), dtype=y.dtype, device=y.device
        ).tril(-1)
        u_hypersphere = u_normal / u_normal.norm(dim=-1, keepdim=True)
        # Replace NaNs in first row
        u_hypersphere[..., 0, :].fill_(0.0)
        w = torch.sqrt(y) * u_hypersphere
        # Fill diagonal elements; clamp for numerical stability
        eps = torch.finfo(w.dtype).tiny
        diag_elems = torch.clamp(1 - torch.sum(w**2, dim=-1), min=eps).sqrt()
        w += torch.diag_embed(diag_elems)
        return w

    def log_prob(self, value):
        # See: https://mc-stan.org/docs/2_25/functions-reference/cholesky-lkj-correlation-distribution.html
        # The probability of a correlation matrix is proportional to
        #   determinant ** (concentration - 1) = prod(L_ii ^ 2(concentration - 1))
        # Additionally, the Jacobian of the transformation from Cholesky factor to
        # correlation matrix is:
        #   prod(L_ii ^ (D - i))
        # So the probability of a Cholesky factor is propotional to
        #   prod(L_ii ^ (2 * concentration - 2 + D - i)) = prod(L_ii ^ order_i)
        # with order_i = 2 * concentration - 2 + D - i
        if self._validate_args:
            self._validate_sample(value)
        diag_elems = value.diagonal(dim1=-1, dim2=-2)[..., 1:]
        order = torch.arange(2, self.dim + 1, device=self.concentration.device)
        order = 2 * (self.concentration - 1).unsqueeze(-1) + self.dim - order
        unnormalized_log_pdf = torch.sum(order * diag_elems.log(), dim=-1)
        # Compute normalization constant (page 1999 of [1])
        dm1 = self.dim - 1
        alpha = self.concentration + 0.5 * dm1
        denominator = torch.lgamma(alpha) * dm1
        numerator = torch.mvlgamma(alpha - 0.5, dm1)
        # pi_constant in [1] is D * (D - 1) / 4 * log(pi)
        # pi_constant in multigammaln is (D - 1) * (D - 2) / 4 * log(pi)
        # hence, we need to add a pi_constant = (D - 1) * log(pi) / 2
        pi_constant = 0.5 * dm1 * math.log(math.pi)
        normalize_term = pi_constant + numerator - denominator
        return unnormalized_log_pdf - normalize_term
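Not part of the file above: a usage sketch of the LKJCholesky class added in this diff (illustrative dim and concentration).

import torch
from torch.distributions.lkj_cholesky import LKJCholesky

lkj = LKJCholesky(dim=3, concentration=1.0)    # concentration == 1 is uniform over Cholesky factors
L = lkj.sample()
corr = L @ L.mT                                # reassemble the 3x3 correlation matrix
print(torch.diagonal(corr))                    # ones on the diagonal (up to floating-point error)
print(lkj.log_prob(L))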
venv/lib/python3.10/site-packages/torch/distributions/log_normal.py
ADDED
@@ -0,0 +1,62 @@
from torch.distributions import constraints
from torch.distributions.normal import Normal
from torch.distributions.transformed_distribution import TransformedDistribution
from torch.distributions.transforms import ExpTransform

__all__ = ["LogNormal"]


class LogNormal(TransformedDistribution):
    r"""
    Creates a log-normal distribution parameterized by
    :attr:`loc` and :attr:`scale` where::

        X ~ Normal(loc, scale)
        Y = exp(X) ~ LogNormal(loc, scale)

    Example::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = LogNormal(torch.tensor([0.0]), torch.tensor([1.0]))
        >>> m.sample()  # log-normal distributed with mean=0 and stddev=1
        tensor([ 0.1046])

    Args:
        loc (float or Tensor): mean of log of distribution
        scale (float or Tensor): standard deviation of log of the distribution
    """
    arg_constraints = {"loc": constraints.real, "scale": constraints.positive}
    support = constraints.positive
    has_rsample = True

    def __init__(self, loc, scale, validate_args=None):
        base_dist = Normal(loc, scale, validate_args=validate_args)
        super().__init__(base_dist, ExpTransform(), validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(LogNormal, _instance)
        return super().expand(batch_shape, _instance=new)

    @property
    def loc(self):
        return self.base_dist.loc

    @property
    def scale(self):
        return self.base_dist.scale

    @property
    def mean(self):
        return (self.loc + self.scale.pow(2) / 2).exp()

    @property
    def mode(self):
        return (self.loc - self.scale.square()).exp()

    @property
    def variance(self):
        scale_sq = self.scale.pow(2)
        return scale_sq.expm1() * (2 * self.loc + scale_sq).exp()

    def entropy(self):
        return self.base_dist.entropy() + self.loc
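Not part of the file above: a usage sketch of the LogNormal class added in this diff (illustrative loc and scale).

import torch
from torch.distributions.log_normal import LogNormal

ln = LogNormal(torch.tensor([0.0]), torch.tensor([0.25]))
y = ln.rsample(torch.Size([5]))        # exp of Normal(0, 0.25) draws, strictly positive
print(ln.mean)                         # exp(loc + scale**2 / 2)
print(ln.log_prob(y))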
venv/lib/python3.10/site-packages/torch/distributions/multinomial.py
ADDED
@@ -0,0 +1,135 @@
import torch
from torch import inf
from torch.distributions import Categorical, constraints
from torch.distributions.binomial import Binomial
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all

__all__ = ["Multinomial"]


class Multinomial(Distribution):
    r"""
    Creates a Multinomial distribution parameterized by :attr:`total_count` and
    either :attr:`probs` or :attr:`logits` (but not both). The innermost dimension of
    :attr:`probs` indexes over categories. All other dimensions index over batches.

    Note that :attr:`total_count` need not be specified if only :meth:`log_prob` is
    called (see example below)

    .. note:: The `probs` argument must be non-negative, finite and have a non-zero sum,
        and it will be normalized to sum to 1 along the last dimension. :attr:`probs`
        will return this normalized value.
        The `logits` argument will be interpreted as unnormalized log probabilities
        and can therefore be any real number. It will likewise be normalized so that
        the resulting probabilities sum to 1 along the last dimension. :attr:`logits`
        will return this normalized value.

    -   :meth:`sample` requires a single shared `total_count` for all
        parameters and samples.
    -   :meth:`log_prob` allows different `total_count` for each parameter and
        sample.

    Example::

        >>> # xdoctest: +SKIP("FIXME: found invalid values")
        >>> m = Multinomial(100, torch.tensor([ 1., 1., 1., 1.]))
        >>> x = m.sample()  # equal probability of 0, 1, 2, 3
        tensor([ 21., 24., 30., 25.])

        >>> Multinomial(probs=torch.tensor([1., 1., 1., 1.])).log_prob(x)
        tensor([-4.1338])

    Args:
        total_count (int): number of trials
        probs (Tensor): event probabilities
        logits (Tensor): event log probabilities (unnormalized)
    """
    arg_constraints = {"probs": constraints.simplex, "logits": constraints.real_vector}
    total_count: int

    @property
    def mean(self):
        return self.probs * self.total_count

    @property
    def variance(self):
        return self.total_count * self.probs * (1 - self.probs)

    def __init__(self, total_count=1, probs=None, logits=None, validate_args=None):
        if not isinstance(total_count, int):
            raise NotImplementedError("inhomogeneous total_count is not supported")
        self.total_count = total_count
        self._categorical = Categorical(probs=probs, logits=logits)
        self._binomial = Binomial(total_count=total_count, probs=self.probs)
        batch_shape = self._categorical.batch_shape
        event_shape = self._categorical.param_shape[-1:]
        super().__init__(batch_shape, event_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Multinomial, _instance)
        batch_shape = torch.Size(batch_shape)
        new.total_count = self.total_count
        new._categorical = self._categorical.expand(batch_shape)
        super(Multinomial, new).__init__(
            batch_shape, self.event_shape, validate_args=False
        )
        new._validate_args = self._validate_args
        return new

    def _new(self, *args, **kwargs):
        return self._categorical._new(*args, **kwargs)

    @constraints.dependent_property(is_discrete=True, event_dim=1)
    def support(self):
        return constraints.multinomial(self.total_count)

    @property
    def logits(self):
        return self._categorical.logits

    @property
    def probs(self):
        return self._categorical.probs

    @property
    def param_shape(self):
        return self._categorical.param_shape

    def sample(self, sample_shape=torch.Size()):
        sample_shape = torch.Size(sample_shape)
        samples = self._categorical.sample(
            torch.Size((self.total_count,)) + sample_shape
        )
        # samples.shape is (total_count, sample_shape, batch_shape), need to change it to
        # (sample_shape, batch_shape, total_count)
        shifted_idx = list(range(samples.dim()))
        shifted_idx.append(shifted_idx.pop(0))
        samples = samples.permute(*shifted_idx)
        counts = samples.new(self._extended_shape(sample_shape)).zero_()
        counts.scatter_add_(-1, samples, torch.ones_like(samples))
        return counts.type_as(self.probs)

    def entropy(self):
        n = torch.tensor(self.total_count)

        cat_entropy = self._categorical.entropy()
        term1 = n * cat_entropy - torch.lgamma(n + 1)

        support = self._binomial.enumerate_support(expand=False)[1:]
        binomial_probs = torch.exp(self._binomial.log_prob(support))
        weights = torch.lgamma(support + 1)
        term2 = (binomial_probs * weights).sum([0, -1])

        return term1 + term2

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        logits, value = broadcast_all(self.logits, value)
        logits = logits.clone(memory_format=torch.contiguous_format)
        log_factorial_n = torch.lgamma(value.sum(-1) + 1)
        log_factorial_xs = torch.lgamma(value + 1).sum(-1)
        logits[(value == 0) & (logits == -inf)] = 0
        log_powers = (logits * value).sum(-1)
        return log_factorial_n - log_factorial_xs + log_powers
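Not part of the file above: a usage sketch of the Multinomial class added in this diff (illustrative counts and probabilities).

import torch
from torch.distributions.multinomial import Multinomial

m = Multinomial(total_count=20, probs=torch.tensor([0.2, 0.3, 0.5]))
counts = m.sample()                    # category counts that sum to total_count
print(counts, counts.sum())
print(m.log_prob(counts))              # log_prob tolerates a different total per sample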
venv/lib/python3.10/site-packages/torch/distributions/pareto.py
ADDED
@@ -0,0 +1,60 @@
from torch.distributions import constraints
from torch.distributions.exponential import Exponential
from torch.distributions.transformed_distribution import TransformedDistribution
from torch.distributions.transforms import AffineTransform, ExpTransform
from torch.distributions.utils import broadcast_all

__all__ = ["Pareto"]


class Pareto(TransformedDistribution):
    r"""
    Samples from a Pareto Type 1 distribution.

    Example::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = Pareto(torch.tensor([1.0]), torch.tensor([1.0]))
        >>> m.sample()  # sample from a Pareto distribution with scale=1 and alpha=1
        tensor([ 1.5623])

    Args:
        scale (float or Tensor): Scale parameter of the distribution
        alpha (float or Tensor): Shape parameter of the distribution
    """
    arg_constraints = {"alpha": constraints.positive, "scale": constraints.positive}

    def __init__(self, scale, alpha, validate_args=None):
        self.scale, self.alpha = broadcast_all(scale, alpha)
        base_dist = Exponential(self.alpha, validate_args=validate_args)
        transforms = [ExpTransform(), AffineTransform(loc=0, scale=self.scale)]
        super().__init__(base_dist, transforms, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Pareto, _instance)
        new.scale = self.scale.expand(batch_shape)
        new.alpha = self.alpha.expand(batch_shape)
        return super().expand(batch_shape, _instance=new)

    @property
    def mean(self):
        # mean is inf for alpha <= 1
        a = self.alpha.clamp(min=1)
        return a * self.scale / (a - 1)

    @property
    def mode(self):
        return self.scale

    @property
    def variance(self):
        # var is inf for alpha <= 2
        a = self.alpha.clamp(min=2)
        return self.scale.pow(2) * a / ((a - 1).pow(2) * (a - 2))

    @constraints.dependent_property(is_discrete=False, event_dim=0)
    def support(self):
        return constraints.greater_than_eq(self.scale)

    def entropy(self):
        return (self.scale / self.alpha).log() + (1 + self.alpha.reciprocal())
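Not part of the file above: a usage sketch of the Pareto class added in this diff (illustrative scale and alpha).

import torch
from torch.distributions.pareto import Pareto

p = Pareto(scale=torch.tensor([1.0]), alpha=torch.tensor([3.0]))
x = p.sample(torch.Size([4]))          # every draw is >= scale
print(p.mean)                          # alpha * scale / (alpha - 1), finite because alpha > 1
print(p.log_prob(x))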
venv/lib/python3.10/site-packages/torch/distributions/relaxed_bernoulli.py
ADDED
@@ -0,0 +1,149 @@
from numbers import Number

import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.transformed_distribution import TransformedDistribution
from torch.distributions.transforms import SigmoidTransform
from torch.distributions.utils import (
    broadcast_all,
    clamp_probs,
    lazy_property,
    logits_to_probs,
    probs_to_logits,
)

__all__ = ["LogitRelaxedBernoulli", "RelaxedBernoulli"]


class LogitRelaxedBernoulli(Distribution):
    r"""
    Creates a LogitRelaxedBernoulli distribution parameterized by :attr:`probs`
    or :attr:`logits` (but not both), which is the logit of a RelaxedBernoulli
    distribution.

    Samples are logits of values in (0, 1). See [1] for more details.

    Args:
        temperature (Tensor): relaxation temperature
        probs (Number, Tensor): the probability of sampling `1`
        logits (Number, Tensor): the log-odds of sampling `1`

    [1] The Concrete Distribution: A Continuous Relaxation of Discrete Random
    Variables (Maddison et al, 2017)

    [2] Categorical Reparametrization with Gumbel-Softmax
    (Jang et al, 2017)
    """
    arg_constraints = {"probs": constraints.unit_interval, "logits": constraints.real}
    support = constraints.real

    def __init__(self, temperature, probs=None, logits=None, validate_args=None):
        self.temperature = temperature
        if (probs is None) == (logits is None):
            raise ValueError(
                "Either `probs` or `logits` must be specified, but not both."
            )
        if probs is not None:
            is_scalar = isinstance(probs, Number)
            (self.probs,) = broadcast_all(probs)
        else:
            is_scalar = isinstance(logits, Number)
            (self.logits,) = broadcast_all(logits)
        self._param = self.probs if probs is not None else self.logits
        if is_scalar:
            batch_shape = torch.Size()
        else:
            batch_shape = self._param.size()
        super().__init__(batch_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(LogitRelaxedBernoulli, _instance)
        batch_shape = torch.Size(batch_shape)
        new.temperature = self.temperature
        if "probs" in self.__dict__:
            new.probs = self.probs.expand(batch_shape)
            new._param = new.probs
        if "logits" in self.__dict__:
            new.logits = self.logits.expand(batch_shape)
            new._param = new.logits
        super(LogitRelaxedBernoulli, new).__init__(batch_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    def _new(self, *args, **kwargs):
        return self._param.new(*args, **kwargs)

    @lazy_property
    def logits(self):
        return probs_to_logits(self.probs, is_binary=True)

    @lazy_property
    def probs(self):
        return logits_to_probs(self.logits, is_binary=True)

    @property
    def param_shape(self):
        return self._param.size()

    def rsample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        probs = clamp_probs(self.probs.expand(shape))
        uniforms = clamp_probs(
            torch.rand(shape, dtype=probs.dtype, device=probs.device)
        )
        return (
            uniforms.log() - (-uniforms).log1p() + probs.log() - (-probs).log1p()
        ) / self.temperature

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        logits, value = broadcast_all(self.logits, value)
        diff = logits - value.mul(self.temperature)
        return self.temperature.log() + diff - 2 * diff.exp().log1p()


class RelaxedBernoulli(TransformedDistribution):
    r"""
    Creates a RelaxedBernoulli distribution, parametrized by
    :attr:`temperature`, and either :attr:`probs` or :attr:`logits`
    (but not both). This is a relaxed version of the `Bernoulli` distribution,
    so the values are in (0, 1), and has reparametrizable samples.

    Example::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = RelaxedBernoulli(torch.tensor([2.2]),
        ...                      torch.tensor([0.1, 0.2, 0.3, 0.99]))
        >>> m.sample()
        tensor([ 0.2951, 0.3442, 0.8918, 0.9021])

    Args:
        temperature (Tensor): relaxation temperature
        probs (Number, Tensor): the probability of sampling `1`
        logits (Number, Tensor): the log-odds of sampling `1`
    """
    arg_constraints = {"probs": constraints.unit_interval, "logits": constraints.real}
    support = constraints.unit_interval
    has_rsample = True

    def __init__(self, temperature, probs=None, logits=None, validate_args=None):
        base_dist = LogitRelaxedBernoulli(temperature, probs, logits)
        super().__init__(base_dist, SigmoidTransform(), validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(RelaxedBernoulli, _instance)
        return super().expand(batch_shape, _instance=new)

    @property
    def temperature(self):
        return self.base_dist.temperature

    @property
    def logits(self):
        return self.base_dist.logits

    @property
    def probs(self):
        return self.base_dist.probs
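Not part of the file above: a usage sketch of the RelaxedBernoulli class added in this diff (illustrative temperature and probs).

import torch
from torch.distributions.relaxed_bernoulli import RelaxedBernoulli

rb = RelaxedBernoulli(torch.tensor([0.5]), probs=torch.tensor([0.1, 0.9]))
s = rb.rsample()                       # differentiable values in (0, 1); lower temperature pushes them toward {0, 1}
print(s)
print(rb.temperature, rb.probs)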
venv/lib/python3.10/site-packages/torch/distributions/transforms.py
ADDED
@@ -0,0 +1,1245 @@
1 |
+
import functools
|
2 |
+
import math
|
3 |
+
import numbers
|
4 |
+
import operator
|
5 |
+
import weakref
|
6 |
+
from typing import List
|
7 |
+
|
8 |
+
import torch
|
9 |
+
import torch.nn.functional as F
|
10 |
+
from torch.distributions import constraints
|
11 |
+
from torch.distributions.utils import (
|
12 |
+
_sum_rightmost,
|
13 |
+
broadcast_all,
|
14 |
+
lazy_property,
|
15 |
+
tril_matrix_to_vec,
|
16 |
+
vec_to_tril_matrix,
|
17 |
+
)
|
18 |
+
from torch.nn.functional import pad, softplus
|
19 |
+
|
20 |
+
__all__ = [
|
21 |
+
"AbsTransform",
|
22 |
+
"AffineTransform",
|
23 |
+
"CatTransform",
|
24 |
+
"ComposeTransform",
|
25 |
+
"CorrCholeskyTransform",
|
26 |
+
"CumulativeDistributionTransform",
|
27 |
+
"ExpTransform",
|
28 |
+
"IndependentTransform",
|
29 |
+
"LowerCholeskyTransform",
|
30 |
+
"PositiveDefiniteTransform",
|
31 |
+
"PowerTransform",
|
32 |
+
"ReshapeTransform",
|
33 |
+
"SigmoidTransform",
|
34 |
+
"SoftplusTransform",
|
35 |
+
"TanhTransform",
|
36 |
+
"SoftmaxTransform",
|
37 |
+
"StackTransform",
|
38 |
+
"StickBreakingTransform",
|
39 |
+
"Transform",
|
40 |
+
"identity_transform",
|
41 |
+
]
|
42 |
+
|
43 |
+
|
44 |
+
class Transform:
|
45 |
+
"""
|
46 |
+
Abstract class for invertible transformations with computable log
|
47 |
+
det jacobians. They are primarily used in
|
48 |
+
:class:`torch.distributions.TransformedDistribution`.
|
49 |
+
|
50 |
+
Caching is useful for transforms whose inverses are either expensive or
|
51 |
+
numerically unstable. Note that care must be taken with memoized values
|
52 |
+
since the autograd graph may be reversed. For example while the following
|
53 |
+
works with or without caching::
|
54 |
+
|
55 |
+
y = t(x)
|
56 |
+
t.log_abs_det_jacobian(x, y).backward() # x will receive gradients.
|
57 |
+
|
58 |
+
However the following will error when caching due to dependency reversal::
|
59 |
+
|
60 |
+
y = t(x)
|
61 |
+
z = t.inv(y)
|
62 |
+
grad(z.sum(), [y]) # error because z is x
|
63 |
+
|
64 |
+
Derived classes should implement one or both of :meth:`_call` or
|
65 |
+
:meth:`_inverse`. Derived classes that set `bijective=True` should also
|
66 |
+
implement :meth:`log_abs_det_jacobian`.
|
67 |
+
|
68 |
+
Args:
|
69 |
+
cache_size (int): Size of cache. If zero, no caching is done. If one,
|
70 |
+
the latest single value is cached. Only 0 and 1 are supported.
|
71 |
+
|
72 |
+
Attributes:
|
73 |
+
domain (:class:`~torch.distributions.constraints.Constraint`):
|
74 |
+
The constraint representing valid inputs to this transform.
|
75 |
+
codomain (:class:`~torch.distributions.constraints.Constraint`):
|
76 |
+
The constraint representing valid outputs to this transform
|
77 |
+
which are inputs to the inverse transform.
|
78 |
+
bijective (bool): Whether this transform is bijective. A transform
|
79 |
+
``t`` is bijective iff ``t.inv(t(x)) == x`` and
|
80 |
+
``t(t.inv(y)) == y`` for every ``x`` in the domain and ``y`` in
|
81 |
+
the codomain. Transforms that are not bijective should at least
|
82 |
+
maintain the weaker pseudoinverse properties
|
83 |
+
``t(t.inv(t(x))) == t(x)`` and ``t.inv(t(t.inv(y))) == t.inv(y)``.
|
84 |
+
sign (int or Tensor): For bijective univariate transforms, this
|
85 |
+
should be +1 or -1 depending on whether transform is monotone
|
86 |
+
increasing or decreasing.
|
87 |
+
"""
|
88 |
+
|
89 |
+
bijective = False
|
90 |
+
domain: constraints.Constraint
|
91 |
+
codomain: constraints.Constraint
|
92 |
+
|
93 |
+
def __init__(self, cache_size=0):
|
94 |
+
self._cache_size = cache_size
|
95 |
+
self._inv = None
|
96 |
+
if cache_size == 0:
|
97 |
+
pass # default behavior
|
98 |
+
elif cache_size == 1:
|
99 |
+
self._cached_x_y = None, None
|
100 |
+
else:
|
101 |
+
raise ValueError("cache_size must be 0 or 1")
|
102 |
+
super().__init__()
|
103 |
+
|
104 |
+
def __getstate__(self):
|
105 |
+
state = self.__dict__.copy()
|
106 |
+
state["_inv"] = None
|
107 |
+
return state
|
108 |
+
|
109 |
+
@property
|
110 |
+
def event_dim(self):
|
111 |
+
if self.domain.event_dim == self.codomain.event_dim:
|
112 |
+
return self.domain.event_dim
|
113 |
+
raise ValueError("Please use either .domain.event_dim or .codomain.event_dim")
|
114 |
+
|
115 |
+
@property
|
116 |
+
def inv(self):
|
117 |
+
"""
|
118 |
+
Returns the inverse :class:`Transform` of this transform.
|
119 |
+
This should satisfy ``t.inv.inv is t``.
|
120 |
+
"""
|
121 |
+
inv = None
|
122 |
+
if self._inv is not None:
|
123 |
+
inv = self._inv()
|
124 |
+
if inv is None:
|
125 |
+
inv = _InverseTransform(self)
|
126 |
+
self._inv = weakref.ref(inv)
|
127 |
+
return inv
|
128 |
+
|
129 |
+
@property
|
130 |
+
def sign(self):
|
131 |
+
"""
|
132 |
+
Returns the sign of the determinant of the Jacobian, if applicable.
|
133 |
+
In general this only makes sense for bijective transforms.
|
134 |
+
"""
|
135 |
+
raise NotImplementedError
|
136 |
+
|
137 |
+
def with_cache(self, cache_size=1):
|
138 |
+
if self._cache_size == cache_size:
|
139 |
+
return self
|
140 |
+
if type(self).__init__ is Transform.__init__:
|
141 |
+
return type(self)(cache_size=cache_size)
|
142 |
+
raise NotImplementedError(f"{type(self)}.with_cache is not implemented")
|
143 |
+
|
144 |
+
def __eq__(self, other):
|
145 |
+
return self is other
|
146 |
+
|
147 |
+
def __ne__(self, other):
|
148 |
+
# Necessary for Python2
|
149 |
+
return not self.__eq__(other)
|
150 |
+
|
151 |
+
def __call__(self, x):
|
152 |
+
"""
|
153 |
+
Computes the transform `x => y`.
|
154 |
+
"""
|
155 |
+
if self._cache_size == 0:
|
156 |
+
return self._call(x)
|
157 |
+
x_old, y_old = self._cached_x_y
|
158 |
+
if x is x_old:
|
159 |
+
return y_old
|
160 |
+
y = self._call(x)
|
161 |
+
self._cached_x_y = x, y
|
162 |
+
return y
|
163 |
+
|
164 |
+
def _inv_call(self, y):
|
165 |
+
"""
|
166 |
+
Inverts the transform `y => x`.
|
167 |
+
"""
|
168 |
+
if self._cache_size == 0:
|
169 |
+
return self._inverse(y)
|
170 |
+
x_old, y_old = self._cached_x_y
|
171 |
+
if y is y_old:
|
172 |
+
return x_old
|
173 |
+
x = self._inverse(y)
|
174 |
+
self._cached_x_y = x, y
|
175 |
+
return x
|
176 |
+
|
177 |
+
def _call(self, x):
|
178 |
+
"""
|
179 |
+
Abstract method to compute forward transformation.
|
180 |
+
"""
|
181 |
+
raise NotImplementedError
|
182 |
+
|
183 |
+
def _inverse(self, y):
|
184 |
+
"""
|
185 |
+
Abstract method to compute inverse transformation.
|
186 |
+
"""
|
187 |
+
raise NotImplementedError
|
188 |
+
|
189 |
+
def log_abs_det_jacobian(self, x, y):
|
190 |
+
"""
|
191 |
+
Computes the log det jacobian `log |dy/dx|` given input and output.
|
192 |
+
"""
|
193 |
+
raise NotImplementedError
|
194 |
+
|
195 |
+
def __repr__(self):
|
196 |
+
return self.__class__.__name__ + "()"
|
197 |
+
|
198 |
+
def forward_shape(self, shape):
|
199 |
+
"""
|
200 |
+
Infers the shape of the forward computation, given the input shape.
|
201 |
+
Defaults to preserving shape.
|
202 |
+
"""
|
203 |
+
return shape
|
204 |
+
|
205 |
+
def inverse_shape(self, shape):
|
206 |
+
"""
|
207 |
+
Infers the shapes of the inverse computation, given the output shape.
|
208 |
+
Defaults to preserving shape.
|
209 |
+
"""
|
210 |
+
return shape
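A minimal sketch of the subclassing pattern described in the docstring above: implement `_call`, `_inverse`, and `log_abs_det_jacobian` for a bijective transform. The class name `ScaleBy2` is purely illustrative, not part of the library.

import math
import torch
from torch.distributions import constraints
from torch.distributions.transforms import Transform

class ScaleBy2(Transform):
    """Toy bijection y = 2 * x."""
    domain = constraints.real
    codomain = constraints.real
    bijective = True
    sign = +1

    def _call(self, x):
        return 2 * x

    def _inverse(self, y):
        return y / 2

    def log_abs_det_jacobian(self, x, y):
        # |dy/dx| = 2 everywhere, so the log-determinant is a constant log(2).
        return torch.full_like(x, math.log(2.0))

t = ScaleBy2()
x = torch.randn(3)
assert torch.allclose(t.inv(t(x)), x)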
|
211 |
+
|
212 |
+
|
213 |
+
class _InverseTransform(Transform):
|
214 |
+
"""
|
215 |
+
Inverts a single :class:`Transform`.
|
216 |
+
This class is private; please instead use the ``Transform.inv`` property.
|
217 |
+
"""
|
218 |
+
|
219 |
+
def __init__(self, transform: Transform):
|
220 |
+
super().__init__(cache_size=transform._cache_size)
|
221 |
+
self._inv: Transform = transform
|
222 |
+
|
223 |
+
@constraints.dependent_property(is_discrete=False)
|
224 |
+
def domain(self):
|
225 |
+
assert self._inv is not None
|
226 |
+
return self._inv.codomain
|
227 |
+
|
228 |
+
@constraints.dependent_property(is_discrete=False)
|
229 |
+
def codomain(self):
|
230 |
+
assert self._inv is not None
|
231 |
+
return self._inv.domain
|
232 |
+
|
233 |
+
@property
|
234 |
+
def bijective(self):
|
235 |
+
assert self._inv is not None
|
236 |
+
return self._inv.bijective
|
237 |
+
|
238 |
+
@property
|
239 |
+
def sign(self):
|
240 |
+
assert self._inv is not None
|
241 |
+
return self._inv.sign
|
242 |
+
|
243 |
+
@property
|
244 |
+
def inv(self):
|
245 |
+
return self._inv
|
246 |
+
|
247 |
+
def with_cache(self, cache_size=1):
|
248 |
+
assert self._inv is not None
|
249 |
+
return self.inv.with_cache(cache_size).inv
|
250 |
+
|
251 |
+
def __eq__(self, other):
|
252 |
+
if not isinstance(other, _InverseTransform):
|
253 |
+
return False
|
254 |
+
assert self._inv is not None
|
255 |
+
return self._inv == other._inv
|
256 |
+
|
257 |
+
def __repr__(self):
|
258 |
+
return f"{self.__class__.__name__}({repr(self._inv)})"
|
259 |
+
|
260 |
+
def __call__(self, x):
|
261 |
+
assert self._inv is not None
|
262 |
+
return self._inv._inv_call(x)
|
263 |
+
|
264 |
+
def log_abs_det_jacobian(self, x, y):
|
265 |
+
assert self._inv is not None
|
266 |
+
return -self._inv.log_abs_det_jacobian(y, x)
|
267 |
+
|
268 |
+
def forward_shape(self, shape):
|
269 |
+
return self._inv.inverse_shape(shape)
|
270 |
+
|
271 |
+
def inverse_shape(self, shape):
|
272 |
+
return self._inv.forward_shape(shape)
|
273 |
+
|
274 |
+
|
275 |
+
class ComposeTransform(Transform):
|
276 |
+
"""
|
277 |
+
Composes multiple transforms in a chain.
|
278 |
+
The transforms being composed are responsible for caching.
|
279 |
+
|
280 |
+
Args:
|
281 |
+
parts (list of :class:`Transform`): A list of transforms to compose.
|
282 |
+
cache_size (int): Size of cache. If zero, no caching is done. If one,
|
283 |
+
the latest single value is cached. Only 0 and 1 are supported.
|
284 |
+
"""
|
285 |
+
|
286 |
+
def __init__(self, parts: List[Transform], cache_size=0):
|
287 |
+
if cache_size:
|
288 |
+
parts = [part.with_cache(cache_size) for part in parts]
|
289 |
+
super().__init__(cache_size=cache_size)
|
290 |
+
self.parts = parts
|
291 |
+
|
292 |
+
def __eq__(self, other):
|
293 |
+
if not isinstance(other, ComposeTransform):
|
294 |
+
return False
|
295 |
+
return self.parts == other.parts
|
296 |
+
|
297 |
+
@constraints.dependent_property(is_discrete=False)
|
298 |
+
def domain(self):
|
299 |
+
if not self.parts:
|
300 |
+
return constraints.real
|
301 |
+
domain = self.parts[0].domain
|
302 |
+
# Adjust event_dim to be maximum among all parts.
|
303 |
+
event_dim = self.parts[-1].codomain.event_dim
|
304 |
+
for part in reversed(self.parts):
|
305 |
+
event_dim += part.domain.event_dim - part.codomain.event_dim
|
306 |
+
event_dim = max(event_dim, part.domain.event_dim)
|
307 |
+
assert event_dim >= domain.event_dim
|
308 |
+
if event_dim > domain.event_dim:
|
309 |
+
domain = constraints.independent(domain, event_dim - domain.event_dim)
|
310 |
+
return domain
|
311 |
+
|
312 |
+
@constraints.dependent_property(is_discrete=False)
|
313 |
+
def codomain(self):
|
314 |
+
if not self.parts:
|
315 |
+
return constraints.real
|
316 |
+
codomain = self.parts[-1].codomain
|
317 |
+
# Adjust event_dim to be maximum among all parts.
|
318 |
+
event_dim = self.parts[0].domain.event_dim
|
319 |
+
for part in self.parts:
|
320 |
+
event_dim += part.codomain.event_dim - part.domain.event_dim
|
321 |
+
event_dim = max(event_dim, part.codomain.event_dim)
|
322 |
+
assert event_dim >= codomain.event_dim
|
323 |
+
if event_dim > codomain.event_dim:
|
324 |
+
codomain = constraints.independent(codomain, event_dim - codomain.event_dim)
|
325 |
+
return codomain
|
326 |
+
|
327 |
+
@lazy_property
|
328 |
+
def bijective(self):
|
329 |
+
return all(p.bijective for p in self.parts)
|
330 |
+
|
331 |
+
@lazy_property
|
332 |
+
def sign(self):
|
333 |
+
sign = 1
|
334 |
+
for p in self.parts:
|
335 |
+
sign = sign * p.sign
|
336 |
+
return sign
|
337 |
+
|
338 |
+
@property
|
339 |
+
def inv(self):
|
340 |
+
inv = None
|
341 |
+
if self._inv is not None:
|
342 |
+
inv = self._inv()
|
343 |
+
if inv is None:
|
344 |
+
inv = ComposeTransform([p.inv for p in reversed(self.parts)])
|
345 |
+
self._inv = weakref.ref(inv)
|
346 |
+
inv._inv = weakref.ref(self)
|
347 |
+
return inv
|
348 |
+
|
349 |
+
def with_cache(self, cache_size=1):
|
350 |
+
if self._cache_size == cache_size:
|
351 |
+
return self
|
352 |
+
return ComposeTransform(self.parts, cache_size=cache_size)
|
353 |
+
|
354 |
+
def __call__(self, x):
|
355 |
+
for part in self.parts:
|
356 |
+
x = part(x)
|
357 |
+
return x
|
358 |
+
|
359 |
+
def log_abs_det_jacobian(self, x, y):
|
360 |
+
if not self.parts:
|
361 |
+
return torch.zeros_like(x)
|
362 |
+
|
363 |
+
# Compute intermediates. This will be free if parts[:-1] are all cached.
|
364 |
+
xs = [x]
|
365 |
+
for part in self.parts[:-1]:
|
366 |
+
xs.append(part(xs[-1]))
|
367 |
+
xs.append(y)
|
368 |
+
|
369 |
+
terms = []
|
370 |
+
event_dim = self.domain.event_dim
|
371 |
+
for part, x, y in zip(self.parts, xs[:-1], xs[1:]):
|
372 |
+
terms.append(
|
373 |
+
_sum_rightmost(
|
374 |
+
part.log_abs_det_jacobian(x, y), event_dim - part.domain.event_dim
|
375 |
+
)
|
376 |
+
)
|
377 |
+
event_dim += part.codomain.event_dim - part.domain.event_dim
|
378 |
+
return functools.reduce(operator.add, terms)
|
379 |
+
|
380 |
+
def forward_shape(self, shape):
|
381 |
+
for part in self.parts:
|
382 |
+
shape = part.forward_shape(shape)
|
383 |
+
return shape
|
384 |
+
|
385 |
+
def inverse_shape(self, shape):
|
386 |
+
for part in reversed(self.parts):
|
387 |
+
shape = part.inverse_shape(shape)
|
388 |
+
return shape
|
389 |
+
|
390 |
+
def __repr__(self):
|
391 |
+
fmt_string = self.__class__.__name__ + "(\n "
|
392 |
+
fmt_string += ",\n ".join([p.__repr__() for p in self.parts])
|
393 |
+
fmt_string += "\n)"
|
394 |
+
return fmt_string
|
395 |
+
|
396 |
+
|
397 |
+
identity_transform = ComposeTransform([])
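A short sketch of composing transforms defined in this file; `ComposeTransform` applies the parts left to right and adds up their per-part log-determinants (the specific chain below is made up for illustration):

import math
import torch
from torch.distributions.transforms import (
    AffineTransform,
    ComposeTransform,
    ExpTransform,
)

# y = exp(2 * x + 1), built as an affine map followed by exp.
t = ComposeTransform([AffineTransform(loc=1.0, scale=2.0), ExpTransform()])
x = torch.randn(5)
y = t(x)

assert torch.allclose(t.inv(y), x, atol=1e-6)
# log|dy/dx| = log(2) + (2 * x + 1): the affine and exp contributions added together.
expected = math.log(2.0) + (2 * x + 1)
assert torch.allclose(t.log_abs_det_jacobian(x, y), expected, atol=1e-6)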
|
398 |
+
|
399 |
+
|
400 |
+
class IndependentTransform(Transform):
|
401 |
+
"""
|
402 |
+
Wrapper around another transform to treat
|
403 |
+
``reinterpreted_batch_ndims``-many extra of the right most dimensions as
|
404 |
+
dependent. This has no effect on the forward or backward transforms, but
|
405 |
+
does sum out ``reinterpreted_batch_ndims``-many of the rightmost dimensions
|
406 |
+
in :meth:`log_abs_det_jacobian`.
|
407 |
+
|
408 |
+
Args:
|
409 |
+
base_transform (:class:`Transform`): A base transform.
|
410 |
+
reinterpreted_batch_ndims (int): The number of extra rightmost
|
411 |
+
dimensions to treat as dependent.
|
412 |
+
"""
|
413 |
+
|
414 |
+
def __init__(self, base_transform, reinterpreted_batch_ndims, cache_size=0):
|
415 |
+
super().__init__(cache_size=cache_size)
|
416 |
+
self.base_transform = base_transform.with_cache(cache_size)
|
417 |
+
self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
|
418 |
+
|
419 |
+
def with_cache(self, cache_size=1):
|
420 |
+
if self._cache_size == cache_size:
|
421 |
+
return self
|
422 |
+
return IndependentTransform(
|
423 |
+
self.base_transform, self.reinterpreted_batch_ndims, cache_size=cache_size
|
424 |
+
)
|
425 |
+
|
426 |
+
@constraints.dependent_property(is_discrete=False)
|
427 |
+
def domain(self):
|
428 |
+
return constraints.independent(
|
429 |
+
self.base_transform.domain, self.reinterpreted_batch_ndims
|
430 |
+
)
|
431 |
+
|
432 |
+
@constraints.dependent_property(is_discrete=False)
|
433 |
+
def codomain(self):
|
434 |
+
return constraints.independent(
|
435 |
+
self.base_transform.codomain, self.reinterpreted_batch_ndims
|
436 |
+
)
|
437 |
+
|
438 |
+
@property
|
439 |
+
def bijective(self):
|
440 |
+
return self.base_transform.bijective
|
441 |
+
|
442 |
+
@property
|
443 |
+
def sign(self):
|
444 |
+
return self.base_transform.sign
|
445 |
+
|
446 |
+
def _call(self, x):
|
447 |
+
if x.dim() < self.domain.event_dim:
|
448 |
+
raise ValueError("Too few dimensions on input")
|
449 |
+
return self.base_transform(x)
|
450 |
+
|
451 |
+
def _inverse(self, y):
|
452 |
+
if y.dim() < self.codomain.event_dim:
|
453 |
+
raise ValueError("Too few dimensions on input")
|
454 |
+
return self.base_transform.inv(y)
|
455 |
+
|
456 |
+
def log_abs_det_jacobian(self, x, y):
|
457 |
+
result = self.base_transform.log_abs_det_jacobian(x, y)
|
458 |
+
result = _sum_rightmost(result, self.reinterpreted_batch_ndims)
|
459 |
+
return result
|
460 |
+
|
461 |
+
def __repr__(self):
|
462 |
+
return f"{self.__class__.__name__}({repr(self.base_transform)}, {self.reinterpreted_batch_ndims})"
|
463 |
+
|
464 |
+
def forward_shape(self, shape):
|
465 |
+
return self.base_transform.forward_shape(shape)
|
466 |
+
|
467 |
+
def inverse_shape(self, shape):
|
468 |
+
return self.base_transform.inverse_shape(shape)
|
469 |
+
|
470 |
+
|
471 |
+
class ReshapeTransform(Transform):
|
472 |
+
"""
|
473 |
+
Unit Jacobian transform to reshape the rightmost part of a tensor.
|
474 |
+
|
475 |
+
Note that ``in_shape`` and ``out_shape`` must have the same number of
|
476 |
+
elements, just as for :meth:`torch.Tensor.reshape`.
|
477 |
+
|
478 |
+
Arguments:
|
479 |
+
in_shape (torch.Size): The input event shape.
|
480 |
+
out_shape (torch.Size): The output event shape.
|
481 |
+
"""
|
482 |
+
|
483 |
+
bijective = True
|
484 |
+
|
485 |
+
def __init__(self, in_shape, out_shape, cache_size=0):
|
486 |
+
self.in_shape = torch.Size(in_shape)
|
487 |
+
self.out_shape = torch.Size(out_shape)
|
488 |
+
if self.in_shape.numel() != self.out_shape.numel():
|
489 |
+
raise ValueError("in_shape, out_shape have different numbers of elements")
|
490 |
+
super().__init__(cache_size=cache_size)
|
491 |
+
|
492 |
+
@constraints.dependent_property
|
493 |
+
def domain(self):
|
494 |
+
return constraints.independent(constraints.real, len(self.in_shape))
|
495 |
+
|
496 |
+
@constraints.dependent_property
|
497 |
+
def codomain(self):
|
498 |
+
return constraints.independent(constraints.real, len(self.out_shape))
|
499 |
+
|
500 |
+
def with_cache(self, cache_size=1):
|
501 |
+
if self._cache_size == cache_size:
|
502 |
+
return self
|
503 |
+
return ReshapeTransform(self.in_shape, self.out_shape, cache_size=cache_size)
|
504 |
+
|
505 |
+
def _call(self, x):
|
506 |
+
batch_shape = x.shape[: x.dim() - len(self.in_shape)]
|
507 |
+
return x.reshape(batch_shape + self.out_shape)
|
508 |
+
|
509 |
+
def _inverse(self, y):
|
510 |
+
batch_shape = y.shape[: y.dim() - len(self.out_shape)]
|
511 |
+
return y.reshape(batch_shape + self.in_shape)
|
512 |
+
|
513 |
+
def log_abs_det_jacobian(self, x, y):
|
514 |
+
batch_shape = x.shape[: x.dim() - len(self.in_shape)]
|
515 |
+
return x.new_zeros(batch_shape)
|
516 |
+
|
517 |
+
def forward_shape(self, shape):
|
518 |
+
if len(shape) < len(self.in_shape):
|
519 |
+
raise ValueError("Too few dimensions on input")
|
520 |
+
cut = len(shape) - len(self.in_shape)
|
521 |
+
if shape[cut:] != self.in_shape:
|
522 |
+
raise ValueError(
|
523 |
+
f"Shape mismatch: expected {shape[cut:]} but got {self.in_shape}"
|
524 |
+
)
|
525 |
+
return shape[:cut] + self.out_shape
|
526 |
+
|
527 |
+
def inverse_shape(self, shape):
|
528 |
+
if len(shape) < len(self.out_shape):
|
529 |
+
raise ValueError("Too few dimensions on input")
|
530 |
+
cut = len(shape) - len(self.out_shape)
|
531 |
+
if shape[cut:] != self.out_shape:
|
532 |
+
raise ValueError(
|
533 |
+
f"Shape mismatch: expected {shape[cut:]} but got {self.out_shape}"
|
534 |
+
)
|
535 |
+
return shape[:cut] + self.in_shape
|
536 |
+
|
537 |
+
|
538 |
+
class ExpTransform(Transform):
|
539 |
+
r"""
|
540 |
+
Transform via the mapping :math:`y = \exp(x)`.
|
541 |
+
"""
|
542 |
+
domain = constraints.real
|
543 |
+
codomain = constraints.positive
|
544 |
+
bijective = True
|
545 |
+
sign = +1
|
546 |
+
|
547 |
+
def __eq__(self, other):
|
548 |
+
return isinstance(other, ExpTransform)
|
549 |
+
|
550 |
+
def _call(self, x):
|
551 |
+
return x.exp()
|
552 |
+
|
553 |
+
def _inverse(self, y):
|
554 |
+
return y.log()
|
555 |
+
|
556 |
+
def log_abs_det_jacobian(self, x, y):
|
557 |
+
return x
|
558 |
+
|
559 |
+
|
560 |
+
class PowerTransform(Transform):
|
561 |
+
r"""
|
562 |
+
Transform via the mapping :math:`y = x^{\text{exponent}}`.
|
563 |
+
"""
|
564 |
+
domain = constraints.positive
|
565 |
+
codomain = constraints.positive
|
566 |
+
bijective = True
|
567 |
+
|
568 |
+
def __init__(self, exponent, cache_size=0):
|
569 |
+
super().__init__(cache_size=cache_size)
|
570 |
+
(self.exponent,) = broadcast_all(exponent)
|
571 |
+
|
572 |
+
def with_cache(self, cache_size=1):
|
573 |
+
if self._cache_size == cache_size:
|
574 |
+
return self
|
575 |
+
return PowerTransform(self.exponent, cache_size=cache_size)
|
576 |
+
|
577 |
+
@lazy_property
|
578 |
+
def sign(self):
|
579 |
+
return self.exponent.sign()
|
580 |
+
|
581 |
+
def __eq__(self, other):
|
582 |
+
if not isinstance(other, PowerTransform):
|
583 |
+
return False
|
584 |
+
return self.exponent.eq(other.exponent).all().item()
|
585 |
+
|
586 |
+
def _call(self, x):
|
587 |
+
return x.pow(self.exponent)
|
588 |
+
|
589 |
+
def _inverse(self, y):
|
590 |
+
return y.pow(1 / self.exponent)
|
591 |
+
|
592 |
+
def log_abs_det_jacobian(self, x, y):
|
593 |
+
return (self.exponent * y / x).abs().log()
|
594 |
+
|
595 |
+
def forward_shape(self, shape):
|
596 |
+
return torch.broadcast_shapes(shape, getattr(self.exponent, "shape", ()))
|
597 |
+
|
598 |
+
def inverse_shape(self, shape):
|
599 |
+
return torch.broadcast_shapes(shape, getattr(self.exponent, "shape", ()))
|
600 |
+
|
601 |
+
|
602 |
+
def _clipped_sigmoid(x):
|
603 |
+
finfo = torch.finfo(x.dtype)
|
604 |
+
return torch.clamp(torch.sigmoid(x), min=finfo.tiny, max=1.0 - finfo.eps)
|
605 |
+
|
606 |
+
|
607 |
+
class SigmoidTransform(Transform):
|
608 |
+
r"""
|
609 |
+
Transform via the mapping :math:`y = \frac{1}{1 + \exp(-x)}` and :math:`x = \text{logit}(y)`.
|
610 |
+
"""
|
611 |
+
domain = constraints.real
|
612 |
+
codomain = constraints.unit_interval
|
613 |
+
bijective = True
|
614 |
+
sign = +1
|
615 |
+
|
616 |
+
def __eq__(self, other):
|
617 |
+
return isinstance(other, SigmoidTransform)
|
618 |
+
|
619 |
+
def _call(self, x):
|
620 |
+
return _clipped_sigmoid(x)
|
621 |
+
|
622 |
+
def _inverse(self, y):
|
623 |
+
finfo = torch.finfo(y.dtype)
|
624 |
+
y = y.clamp(min=finfo.tiny, max=1.0 - finfo.eps)
|
625 |
+
return y.log() - (-y).log1p()
|
626 |
+
|
627 |
+
def log_abs_det_jacobian(self, x, y):
|
628 |
+
return -F.softplus(-x) - F.softplus(x)
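Since dy/dx = sigmoid(x) * sigmoid(-x), the expression above is just log sigmoid(x) + log sigmoid(-x). A quick sketch checking it against autograd (not part of the library):

import torch
from torch.distributions.transforms import SigmoidTransform

t = SigmoidTransform()
x = torch.randn(4, dtype=torch.double, requires_grad=True)
y = t(x)

# Autograd derivative of the sigmoid vs. the closed-form log-determinant above.
(dy_dx,) = torch.autograd.grad(y.sum(), x)
assert torch.allclose(dy_dx.log(), t.log_abs_det_jacobian(x, y), atol=1e-6)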
|
629 |
+
|
630 |
+
|
631 |
+
class SoftplusTransform(Transform):
|
632 |
+
r"""
|
633 |
+
Transform via the mapping :math:`\text{Softplus}(x) = \log(1 + \exp(x))`.
|
634 |
+
The implementation reverts to the linear function when :math:`x > 20`.
|
635 |
+
"""
|
636 |
+
domain = constraints.real
|
637 |
+
codomain = constraints.positive
|
638 |
+
bijective = True
|
639 |
+
sign = +1
|
640 |
+
|
641 |
+
def __eq__(self, other):
|
642 |
+
return isinstance(other, SoftplusTransform)
|
643 |
+
|
644 |
+
def _call(self, x):
|
645 |
+
return softplus(x)
|
646 |
+
|
647 |
+
def _inverse(self, y):
|
648 |
+
return (-y).expm1().neg().log() + y
|
649 |
+
|
650 |
+
def log_abs_det_jacobian(self, x, y):
|
651 |
+
return -softplus(-x)
|
652 |
+
|
653 |
+
|
654 |
+
class TanhTransform(Transform):
|
655 |
+
r"""
|
656 |
+
Transform via the mapping :math:`y = \tanh(x)`.
|
657 |
+
|
658 |
+
It is equivalent to
|
659 |
+
```
|
660 |
+
ComposeTransform([AffineTransform(0., 2.), SigmoidTransform(), AffineTransform(-1., 2.)])
|
661 |
+
```
|
662 |
+
However this might not be numerically stable, thus it is recommended to use `TanhTransform`
|
663 |
+
instead.
|
664 |
+
|
665 |
+
Note that one should use `cache_size=1` when `NaN`/`Inf` values can arise, so the inverse returns the cached input rather than recomputing `atanh` at the boundary.
|
666 |
+
|
667 |
+
"""
|
668 |
+
domain = constraints.real
|
669 |
+
codomain = constraints.interval(-1.0, 1.0)
|
670 |
+
bijective = True
|
671 |
+
sign = +1
|
672 |
+
|
673 |
+
def __eq__(self, other):
|
674 |
+
return isinstance(other, TanhTransform)
|
675 |
+
|
676 |
+
def _call(self, x):
|
677 |
+
return x.tanh()
|
678 |
+
|
679 |
+
def _inverse(self, y):
|
680 |
+
# We do not clamp to the boundary here as it may degrade the performance of certain algorithms.
|
681 |
+
# one should use `cache_size=1` instead
|
682 |
+
return torch.atanh(y)
|
683 |
+
|
684 |
+
def log_abs_det_jacobian(self, x, y):
|
685 |
+
# We use a formula that is more numerically stable, see details in the following link
|
686 |
+
# https://github.com/tensorflow/probability/blob/master/tensorflow_probability/python/bijectors/tanh.py#L69-L80
|
687 |
+
return 2.0 * (math.log(2.0) - x - softplus(-2.0 * x))
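The constant-and-softplus form above follows from the identity log(1 - tanh(x)^2) = 2 * (log 2 - x - softplus(-2x)), which avoids the cancellation in 1 - tanh(x)^2 for large |x|. A small sketch verifying the identity:

import math
import torch
from torch.nn.functional import softplus

x = torch.linspace(-5.0, 5.0, steps=11, dtype=torch.double)
naive = torch.log1p(-torch.tanh(x) ** 2)                  # loses precision for large |x|
stable = 2.0 * (math.log(2.0) - x - softplus(-2.0 * x))   # form used above
assert torch.allclose(naive, stable, atol=1e-9)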
|
688 |
+
|
689 |
+
|
690 |
+
class AbsTransform(Transform):
|
691 |
+
r"""
|
692 |
+
Transform via the mapping :math:`y = |x|`.
|
693 |
+
"""
|
694 |
+
domain = constraints.real
|
695 |
+
codomain = constraints.positive
|
696 |
+
|
697 |
+
def __eq__(self, other):
|
698 |
+
return isinstance(other, AbsTransform)
|
699 |
+
|
700 |
+
def _call(self, x):
|
701 |
+
return x.abs()
|
702 |
+
|
703 |
+
def _inverse(self, y):
|
704 |
+
return y
|
705 |
+
|
706 |
+
|
707 |
+
class AffineTransform(Transform):
|
708 |
+
r"""
|
709 |
+
Transform via the pointwise affine mapping :math:`y = \text{loc} + \text{scale} \times x`.
|
710 |
+
|
711 |
+
Args:
|
712 |
+
loc (Tensor or float): Location parameter.
|
713 |
+
scale (Tensor or float): Scale parameter.
|
714 |
+
event_dim (int): Optional size of `event_shape`. This should be zero
|
715 |
+
for univariate random variables, 1 for distributions over vectors,
|
716 |
+
2 for distributions over matrices, etc.
|
717 |
+
"""
|
718 |
+
bijective = True
|
719 |
+
|
720 |
+
def __init__(self, loc, scale, event_dim=0, cache_size=0):
|
721 |
+
super().__init__(cache_size=cache_size)
|
722 |
+
self.loc = loc
|
723 |
+
self.scale = scale
|
724 |
+
self._event_dim = event_dim
|
725 |
+
|
726 |
+
@property
|
727 |
+
def event_dim(self):
|
728 |
+
return self._event_dim
|
729 |
+
|
730 |
+
@constraints.dependent_property(is_discrete=False)
|
731 |
+
def domain(self):
|
732 |
+
if self.event_dim == 0:
|
733 |
+
return constraints.real
|
734 |
+
return constraints.independent(constraints.real, self.event_dim)
|
735 |
+
|
736 |
+
@constraints.dependent_property(is_discrete=False)
|
737 |
+
def codomain(self):
|
738 |
+
if self.event_dim == 0:
|
739 |
+
return constraints.real
|
740 |
+
return constraints.independent(constraints.real, self.event_dim)
|
741 |
+
|
742 |
+
def with_cache(self, cache_size=1):
|
743 |
+
if self._cache_size == cache_size:
|
744 |
+
return self
|
745 |
+
return AffineTransform(
|
746 |
+
self.loc, self.scale, self.event_dim, cache_size=cache_size
|
747 |
+
)
|
748 |
+
|
749 |
+
def __eq__(self, other):
|
750 |
+
if not isinstance(other, AffineTransform):
|
751 |
+
return False
|
752 |
+
|
753 |
+
if isinstance(self.loc, numbers.Number) and isinstance(
|
754 |
+
other.loc, numbers.Number
|
755 |
+
):
|
756 |
+
if self.loc != other.loc:
|
757 |
+
return False
|
758 |
+
else:
|
759 |
+
if not (self.loc == other.loc).all().item():
|
760 |
+
return False
|
761 |
+
|
762 |
+
if isinstance(self.scale, numbers.Number) and isinstance(
|
763 |
+
other.scale, numbers.Number
|
764 |
+
):
|
765 |
+
if self.scale != other.scale:
|
766 |
+
return False
|
767 |
+
else:
|
768 |
+
if not (self.scale == other.scale).all().item():
|
769 |
+
return False
|
770 |
+
|
771 |
+
return True
|
772 |
+
|
773 |
+
@property
|
774 |
+
def sign(self):
|
775 |
+
if isinstance(self.scale, numbers.Real):
|
776 |
+
return 1 if float(self.scale) > 0 else -1 if float(self.scale) < 0 else 0
|
777 |
+
return self.scale.sign()
|
778 |
+
|
779 |
+
def _call(self, x):
|
780 |
+
return self.loc + self.scale * x
|
781 |
+
|
782 |
+
def _inverse(self, y):
|
783 |
+
return (y - self.loc) / self.scale
|
784 |
+
|
785 |
+
def log_abs_det_jacobian(self, x, y):
|
786 |
+
shape = x.shape
|
787 |
+
scale = self.scale
|
788 |
+
if isinstance(scale, numbers.Real):
|
789 |
+
result = torch.full_like(x, math.log(abs(scale)))
|
790 |
+
else:
|
791 |
+
result = torch.abs(scale).log()
|
792 |
+
if self.event_dim:
|
793 |
+
result_size = result.size()[: -self.event_dim] + (-1,)
|
794 |
+
result = result.view(result_size).sum(-1)
|
795 |
+
shape = shape[: -self.event_dim]
|
796 |
+
return result.expand(shape)
|
797 |
+
|
798 |
+
def forward_shape(self, shape):
|
799 |
+
return torch.broadcast_shapes(
|
800 |
+
shape, getattr(self.loc, "shape", ()), getattr(self.scale, "shape", ())
|
801 |
+
)
|
802 |
+
|
803 |
+
def inverse_shape(self, shape):
|
804 |
+
return torch.broadcast_shapes(
|
805 |
+
shape, getattr(self.loc, "shape", ()), getattr(self.scale, "shape", ())
|
806 |
+
)
|
807 |
+
|
808 |
+
|
809 |
+
class CorrCholeskyTransform(Transform):
|
810 |
+
r"""
|
811 |
+
Transforms an unconstrained real vector :math:`x` with length :math:`D*(D-1)/2` into the
|
812 |
+
Cholesky factor of a D-dimensional correlation matrix. This Cholesky factor is a lower
|
813 |
+
triangular matrix with positive diagonals and unit Euclidean norm for each row.
|
814 |
+
The transform is processed as follows:
|
815 |
+
|
816 |
+
1. First we convert x into a lower triangular matrix in row order.
|
817 |
+
2. For each row :math:`X_i` of the lower triangular part, we apply a *signed* version of
|
818 |
+
class :class:`StickBreakingTransform` to transform :math:`X_i` into a
|
819 |
+
unit Euclidean length vector using the following steps:
|
820 |
+
- Scales into the interval :math:`(-1, 1)` domain: :math:`r_i = \tanh(X_i)`.
|
821 |
+
- Transforms into an unsigned domain: :math:`z_i = r_i^2`.
|
822 |
+
- Applies :math:`s_i = StickBreakingTransform(z_i)`.
|
823 |
+
- Transforms back into signed domain: :math:`y_i = sign(r_i) * \sqrt{s_i}`.
|
824 |
+
"""
|
825 |
+
domain = constraints.real_vector
|
826 |
+
codomain = constraints.corr_cholesky
|
827 |
+
bijective = True
|
828 |
+
|
829 |
+
def _call(self, x):
|
830 |
+
x = torch.tanh(x)
|
831 |
+
eps = torch.finfo(x.dtype).eps
|
832 |
+
x = x.clamp(min=-1 + eps, max=1 - eps)
|
833 |
+
r = vec_to_tril_matrix(x, diag=-1)
|
834 |
+
# apply stick-breaking on the squared values
|
835 |
+
# Note that y = sign(r) * sqrt(z * z1m_cumprod)
|
836 |
+
# = (sign(r) * sqrt(z)) * sqrt(z1m_cumprod) = r * sqrt(z1m_cumprod)
|
837 |
+
z = r**2
|
838 |
+
z1m_cumprod_sqrt = (1 - z).sqrt().cumprod(-1)
|
839 |
+
# Diagonal elements must be 1.
|
840 |
+
r = r + torch.eye(r.shape[-1], dtype=r.dtype, device=r.device)
|
841 |
+
y = r * pad(z1m_cumprod_sqrt[..., :-1], [1, 0], value=1)
|
842 |
+
return y
|
843 |
+
|
844 |
+
def _inverse(self, y):
|
845 |
+
# inverse stick-breaking
|
846 |
+
# See: https://mc-stan.org/docs/2_18/reference-manual/cholesky-factors-of-correlation-matrices-1.html
|
847 |
+
y_cumsum = 1 - torch.cumsum(y * y, dim=-1)
|
848 |
+
y_cumsum_shifted = pad(y_cumsum[..., :-1], [1, 0], value=1)
|
849 |
+
y_vec = tril_matrix_to_vec(y, diag=-1)
|
850 |
+
y_cumsum_vec = tril_matrix_to_vec(y_cumsum_shifted, diag=-1)
|
851 |
+
t = y_vec / (y_cumsum_vec).sqrt()
|
852 |
+
# inverse of tanh
|
853 |
+
x = (t.log1p() - t.neg().log1p()) / 2
|
854 |
+
return x
|
855 |
+
|
856 |
+
def log_abs_det_jacobian(self, x, y, intermediates=None):
|
857 |
+
# Because domain and codomain are two spaces with different dimensions, determinant of
|
858 |
+
# Jacobian is not well-defined. We return `log_abs_det_jacobian` of `x` and the
|
859 |
+
# flattened lower triangular part of `y`.
|
860 |
+
|
861 |
+
# See: https://mc-stan.org/docs/2_18/reference-manual/cholesky-factors-of-correlation-matrices-1.html
|
862 |
+
y1m_cumsum = 1 - (y * y).cumsum(dim=-1)
|
863 |
+
# by taking diagonal=-2, we don't need to shift z_cumprod to the right
|
864 |
+
# also works for 2 x 2 matrix
|
865 |
+
y1m_cumsum_tril = tril_matrix_to_vec(y1m_cumsum, diag=-2)
|
866 |
+
stick_breaking_logdet = 0.5 * (y1m_cumsum_tril).log().sum(-1)
|
867 |
+
tanh_logdet = -2 * (x + softplus(-2 * x) - math.log(2.0)).sum(dim=-1)
|
868 |
+
return stick_breaking_logdet + tanh_logdet
|
869 |
+
|
870 |
+
def forward_shape(self, shape):
|
871 |
+
# Reshape from (..., N) to (..., D, D).
|
872 |
+
if len(shape) < 1:
|
873 |
+
raise ValueError("Too few dimensions on input")
|
874 |
+
N = shape[-1]
|
875 |
+
D = round((0.25 + 2 * N) ** 0.5 + 0.5)
|
876 |
+
if D * (D - 1) // 2 != N:
|
877 |
+
raise ValueError("Input is not a flattend lower-diagonal number")
|
878 |
+
return shape[:-1] + (D, D)
|
879 |
+
|
880 |
+
def inverse_shape(self, shape):
|
881 |
+
# Reshape from (..., D, D) to (..., N).
|
882 |
+
if len(shape) < 2:
|
883 |
+
raise ValueError("Too few dimensions on input")
|
884 |
+
if shape[-2] != shape[-1]:
|
885 |
+
raise ValueError("Input is not square")
|
886 |
+
D = shape[-1]
|
887 |
+
N = D * (D - 1) // 2
|
888 |
+
return shape[:-2] + (N,)
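A sketch of the properties promised by the docstring: for an unconstrained vector of length D*(D-1)/2, the output is a lower-triangular matrix with positive diagonal and unit-norm rows (the choice D = 4 is arbitrary):

import torch
from torch.distributions.transforms import CorrCholeskyTransform

D = 4
t = CorrCholeskyTransform()
x = torch.randn(D * (D - 1) // 2)   # unconstrained input of length D*(D-1)/2
L = t(x)                            # Cholesky factor of a correlation matrix

assert L.shape == (D, D)
assert torch.allclose(L, L.tril())                               # lower triangular
assert (L.diagonal() > 0).all()                                  # positive diagonal
assert torch.allclose(L.norm(dim=-1), torch.ones(D), atol=1e-6)  # unit-norm rows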
|
889 |
+
|
890 |
+
|
891 |
+
class SoftmaxTransform(Transform):
|
892 |
+
r"""
|
893 |
+
Transform from unconstrained space to the simplex via :math:`y = \exp(x)` then
|
894 |
+
normalizing.
|
895 |
+
|
896 |
+
This is not bijective and cannot be used for HMC. However this acts mostly
|
897 |
+
coordinate-wise (except for the final normalization), and thus is
|
898 |
+
appropriate for coordinate-wise optimization algorithms.
|
899 |
+
"""
|
900 |
+
domain = constraints.real_vector
|
901 |
+
codomain = constraints.simplex
|
902 |
+
|
903 |
+
def __eq__(self, other):
|
904 |
+
return isinstance(other, SoftmaxTransform)
|
905 |
+
|
906 |
+
def _call(self, x):
|
907 |
+
logprobs = x
|
908 |
+
probs = (logprobs - logprobs.max(-1, True)[0]).exp()
|
909 |
+
return probs / probs.sum(-1, True)
|
910 |
+
|
911 |
+
def _inverse(self, y):
|
912 |
+
probs = y
|
913 |
+
return probs.log()
|
914 |
+
|
915 |
+
def forward_shape(self, shape):
|
916 |
+
if len(shape) < 1:
|
917 |
+
raise ValueError("Too few dimensions on input")
|
918 |
+
return shape
|
919 |
+
|
920 |
+
def inverse_shape(self, shape):
|
921 |
+
if len(shape) < 1:
|
922 |
+
raise ValueError("Too few dimensions on input")
|
923 |
+
return shape
|
924 |
+
|
925 |
+
|
926 |
+
class StickBreakingTransform(Transform):
|
927 |
+
"""
|
928 |
+
Transform from unconstrained space to the simplex of one additional
|
929 |
+
dimension via a stick-breaking process.
|
930 |
+
|
931 |
+
This transform arises as an iterated sigmoid transform in a stick-breaking
|
932 |
+
construction of the `Dirichlet` distribution: the first logit is
|
933 |
+
transformed via sigmoid to the first probability and the probability of
|
934 |
+
everything else, and then the process recurses.
|
935 |
+
|
936 |
+
This is bijective and appropriate for use in HMC; however it mixes
|
937 |
+
coordinates together and is less appropriate for optimization.
|
938 |
+
"""
|
939 |
+
|
940 |
+
domain = constraints.real_vector
|
941 |
+
codomain = constraints.simplex
|
942 |
+
bijective = True
|
943 |
+
|
944 |
+
def __eq__(self, other):
|
945 |
+
return isinstance(other, StickBreakingTransform)
|
946 |
+
|
947 |
+
def _call(self, x):
|
948 |
+
offset = x.shape[-1] + 1 - x.new_ones(x.shape[-1]).cumsum(-1)
|
949 |
+
z = _clipped_sigmoid(x - offset.log())
|
950 |
+
z_cumprod = (1 - z).cumprod(-1)
|
951 |
+
y = pad(z, [0, 1], value=1) * pad(z_cumprod, [1, 0], value=1)
|
952 |
+
return y
|
953 |
+
|
954 |
+
def _inverse(self, y):
|
955 |
+
y_crop = y[..., :-1]
|
956 |
+
offset = y.shape[-1] - y.new_ones(y_crop.shape[-1]).cumsum(-1)
|
957 |
+
sf = 1 - y_crop.cumsum(-1)
|
958 |
+
# we clamp to make sure that sf is positive which sometimes does not
|
959 |
+
# happen when y[-1] ~ 0 or y[:-1].sum() ~ 1
|
960 |
+
sf = torch.clamp(sf, min=torch.finfo(y.dtype).tiny)
|
961 |
+
x = y_crop.log() - sf.log() + offset.log()
|
962 |
+
return x
|
963 |
+
|
964 |
+
def log_abs_det_jacobian(self, x, y):
|
965 |
+
offset = x.shape[-1] + 1 - x.new_ones(x.shape[-1]).cumsum(-1)
|
966 |
+
x = x - offset.log()
|
967 |
+
# use the identity 1 - sigmoid(x) = exp(-x) * sigmoid(x)
|
968 |
+
detJ = (-x + F.logsigmoid(x) + y[..., :-1].log()).sum(-1)
|
969 |
+
return detJ
|
970 |
+
|
971 |
+
def forward_shape(self, shape):
|
972 |
+
if len(shape) < 1:
|
973 |
+
raise ValueError("Too few dimensions on input")
|
974 |
+
return shape[:-1] + (shape[-1] + 1,)
|
975 |
+
|
976 |
+
def inverse_shape(self, shape):
|
977 |
+
if len(shape) < 1:
|
978 |
+
raise ValueError("Too few dimensions on input")
|
979 |
+
return shape[:-1] + (shape[-1] - 1,)
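A sketch of the dimension bookkeeping above: a real vector with K coordinates maps to a point on the simplex with K + 1 coordinates, and the transform round-trips (tolerances are illustrative):

import torch
from torch.distributions.transforms import StickBreakingTransform

t = StickBreakingTransform()
x = torch.randn(3)     # unconstrained, 3 coordinates
y = t(x)               # simplex point, 4 coordinates

assert y.shape == (4,)
assert (y > 0).all()
assert torch.allclose(y.sum(), torch.tensor(1.0), atol=1e-6)
assert torch.allclose(t.inv(y), x, atol=1e-4)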
|
980 |
+
|
981 |
+
|
982 |
+
class LowerCholeskyTransform(Transform):
|
983 |
+
"""
|
984 |
+
Transform from unconstrained matrices to lower-triangular matrices with
|
985 |
+
nonnegative diagonal entries.
|
986 |
+
|
987 |
+
This is useful for parameterizing positive definite matrices in terms of
|
988 |
+
their Cholesky factorization.
|
989 |
+
"""
|
990 |
+
|
991 |
+
domain = constraints.independent(constraints.real, 2)
|
992 |
+
codomain = constraints.lower_cholesky
|
993 |
+
|
994 |
+
def __eq__(self, other):
|
995 |
+
return isinstance(other, LowerCholeskyTransform)
|
996 |
+
|
997 |
+
def _call(self, x):
|
998 |
+
return x.tril(-1) + x.diagonal(dim1=-2, dim2=-1).exp().diag_embed()
|
999 |
+
|
1000 |
+
def _inverse(self, y):
|
1001 |
+
return y.tril(-1) + y.diagonal(dim1=-2, dim2=-1).log().diag_embed()
|
1002 |
+
|
1003 |
+
|
1004 |
+
class PositiveDefiniteTransform(Transform):
|
1005 |
+
"""
|
1006 |
+
Transform from unconstrained matrices to positive-definite matrices.
|
1007 |
+
"""
|
1008 |
+
|
1009 |
+
domain = constraints.independent(constraints.real, 2)
|
1010 |
+
codomain = constraints.positive_definite # type: ignore[assignment]
|
1011 |
+
|
1012 |
+
def __eq__(self, other):
|
1013 |
+
return isinstance(other, PositiveDefiniteTransform)
|
1014 |
+
|
1015 |
+
def _call(self, x):
|
1016 |
+
x = LowerCholeskyTransform()(x)
|
1017 |
+
return x @ x.mT
|
1018 |
+
|
1019 |
+
def _inverse(self, y):
|
1020 |
+
y = torch.linalg.cholesky(y)
|
1021 |
+
return LowerCholeskyTransform().inv(y)
|
1022 |
+
|
1023 |
+
|
1024 |
+
class CatTransform(Transform):
|
1025 |
+
"""
|
1026 |
+
Transform functor that applies a sequence of transforms `tseq`
|
1027 |
+
component-wise to each submatrix at `dim`, of length `lengths[dim]`,
|
1028 |
+
in a way compatible with :func:`torch.cat`.
|
1029 |
+
|
1030 |
+
Example::
|
1031 |
+
|
1032 |
+
x0 = torch.cat([torch.range(1, 10), torch.range(1, 10)], dim=0)
|
1033 |
+
x = torch.cat([x0, x0], dim=0)
|
1034 |
+
t0 = CatTransform([ExpTransform(), identity_transform], dim=0, lengths=[10, 10])
|
1035 |
+
t = CatTransform([t0, t0], dim=0, lengths=[20, 20])
|
1036 |
+
y = t(x)
|
1037 |
+
"""
|
1038 |
+
|
1039 |
+
transforms: List[Transform]
|
1040 |
+
|
1041 |
+
def __init__(self, tseq, dim=0, lengths=None, cache_size=0):
|
1042 |
+
assert all(isinstance(t, Transform) for t in tseq)
|
1043 |
+
if cache_size:
|
1044 |
+
tseq = [t.with_cache(cache_size) for t in tseq]
|
1045 |
+
super().__init__(cache_size=cache_size)
|
1046 |
+
self.transforms = list(tseq)
|
1047 |
+
if lengths is None:
|
1048 |
+
lengths = [1] * len(self.transforms)
|
1049 |
+
self.lengths = list(lengths)
|
1050 |
+
assert len(self.lengths) == len(self.transforms)
|
1051 |
+
self.dim = dim
|
1052 |
+
|
1053 |
+
@lazy_property
|
1054 |
+
def event_dim(self):
|
1055 |
+
return max(t.event_dim for t in self.transforms)
|
1056 |
+
|
1057 |
+
@lazy_property
|
1058 |
+
def length(self):
|
1059 |
+
return sum(self.lengths)
|
1060 |
+
|
1061 |
+
def with_cache(self, cache_size=1):
|
1062 |
+
if self._cache_size == cache_size:
|
1063 |
+
return self
|
1064 |
+
return CatTransform(self.transforms, self.dim, self.lengths, cache_size)
|
1065 |
+
|
1066 |
+
def _call(self, x):
|
1067 |
+
assert -x.dim() <= self.dim < x.dim()
|
1068 |
+
assert x.size(self.dim) == self.length
|
1069 |
+
yslices = []
|
1070 |
+
start = 0
|
1071 |
+
for trans, length in zip(self.transforms, self.lengths):
|
1072 |
+
xslice = x.narrow(self.dim, start, length)
|
1073 |
+
yslices.append(trans(xslice))
|
1074 |
+
start = start + length # avoid += for jit compat
|
1075 |
+
return torch.cat(yslices, dim=self.dim)
|
1076 |
+
|
1077 |
+
def _inverse(self, y):
|
1078 |
+
assert -y.dim() <= self.dim < y.dim()
|
1079 |
+
assert y.size(self.dim) == self.length
|
1080 |
+
xslices = []
|
1081 |
+
start = 0
|
1082 |
+
for trans, length in zip(self.transforms, self.lengths):
|
1083 |
+
yslice = y.narrow(self.dim, start, length)
|
1084 |
+
xslices.append(trans.inv(yslice))
|
1085 |
+
start = start + length # avoid += for jit compat
|
1086 |
+
return torch.cat(xslices, dim=self.dim)
|
1087 |
+
|
1088 |
+
def log_abs_det_jacobian(self, x, y):
|
1089 |
+
assert -x.dim() <= self.dim < x.dim()
|
1090 |
+
assert x.size(self.dim) == self.length
|
1091 |
+
assert -y.dim() <= self.dim < y.dim()
|
1092 |
+
assert y.size(self.dim) == self.length
|
1093 |
+
logdetjacs = []
|
1094 |
+
start = 0
|
1095 |
+
for trans, length in zip(self.transforms, self.lengths):
|
1096 |
+
xslice = x.narrow(self.dim, start, length)
|
1097 |
+
yslice = y.narrow(self.dim, start, length)
|
1098 |
+
logdetjac = trans.log_abs_det_jacobian(xslice, yslice)
|
1099 |
+
if trans.event_dim < self.event_dim:
|
1100 |
+
logdetjac = _sum_rightmost(logdetjac, self.event_dim - trans.event_dim)
|
1101 |
+
logdetjacs.append(logdetjac)
|
1102 |
+
start = start + length # avoid += for jit compat
|
1103 |
+
# Decide whether to concatenate or sum.
|
1104 |
+
dim = self.dim
|
1105 |
+
if dim >= 0:
|
1106 |
+
dim = dim - x.dim()
|
1107 |
+
dim = dim + self.event_dim
|
1108 |
+
if dim < 0:
|
1109 |
+
return torch.cat(logdetjacs, dim=dim)
|
1110 |
+
else:
|
1111 |
+
return sum(logdetjacs)
|
1112 |
+
|
1113 |
+
@property
|
1114 |
+
def bijective(self):
|
1115 |
+
return all(t.bijective for t in self.transforms)
|
1116 |
+
|
1117 |
+
@constraints.dependent_property
|
1118 |
+
def domain(self):
|
1119 |
+
return constraints.cat(
|
1120 |
+
[t.domain for t in self.transforms], self.dim, self.lengths
|
1121 |
+
)
|
1122 |
+
|
1123 |
+
@constraints.dependent_property
|
1124 |
+
def codomain(self):
|
1125 |
+
return constraints.cat(
|
1126 |
+
[t.codomain for t in self.transforms], self.dim, self.lengths
|
1127 |
+
)
|
1128 |
+
|
1129 |
+
|
1130 |
+
class StackTransform(Transform):
|
1131 |
+
"""
|
1132 |
+
Transform functor that applies a sequence of transforms `tseq`
|
1133 |
+
component-wise to each submatrix at `dim`
|
1134 |
+
in a way compatible with :func:`torch.stack`.
|
1135 |
+
|
1136 |
+
Example::
|
1137 |
+
|
1138 |
+
x = torch.stack([torch.range(1, 10), torch.range(1, 10)], dim=1)
|
1139 |
+
t = StackTransform([ExpTransform(), identity_transform], dim=1)
|
1140 |
+
y = t(x)
|
1141 |
+
"""
|
1142 |
+
|
1143 |
+
transforms: List[Transform]
|
1144 |
+
|
1145 |
+
def __init__(self, tseq, dim=0, cache_size=0):
|
1146 |
+
assert all(isinstance(t, Transform) for t in tseq)
|
1147 |
+
if cache_size:
|
1148 |
+
tseq = [t.with_cache(cache_size) for t in tseq]
|
1149 |
+
super().__init__(cache_size=cache_size)
|
1150 |
+
self.transforms = list(tseq)
|
1151 |
+
self.dim = dim
|
1152 |
+
|
1153 |
+
def with_cache(self, cache_size=1):
|
1154 |
+
if self._cache_size == cache_size:
|
1155 |
+
return self
|
1156 |
+
return StackTransform(self.transforms, self.dim, cache_size)
|
1157 |
+
|
1158 |
+
def _slice(self, z):
|
1159 |
+
return [z.select(self.dim, i) for i in range(z.size(self.dim))]
|
1160 |
+
|
1161 |
+
def _call(self, x):
|
1162 |
+
assert -x.dim() <= self.dim < x.dim()
|
1163 |
+
assert x.size(self.dim) == len(self.transforms)
|
1164 |
+
yslices = []
|
1165 |
+
for xslice, trans in zip(self._slice(x), self.transforms):
|
1166 |
+
yslices.append(trans(xslice))
|
1167 |
+
return torch.stack(yslices, dim=self.dim)
|
1168 |
+
|
1169 |
+
def _inverse(self, y):
|
1170 |
+
assert -y.dim() <= self.dim < y.dim()
|
1171 |
+
assert y.size(self.dim) == len(self.transforms)
|
1172 |
+
xslices = []
|
1173 |
+
for yslice, trans in zip(self._slice(y), self.transforms):
|
1174 |
+
xslices.append(trans.inv(yslice))
|
1175 |
+
return torch.stack(xslices, dim=self.dim)
|
1176 |
+
|
1177 |
+
def log_abs_det_jacobian(self, x, y):
|
1178 |
+
assert -x.dim() <= self.dim < x.dim()
|
1179 |
+
assert x.size(self.dim) == len(self.transforms)
|
1180 |
+
assert -y.dim() <= self.dim < y.dim()
|
1181 |
+
assert y.size(self.dim) == len(self.transforms)
|
1182 |
+
logdetjacs = []
|
1183 |
+
yslices = self._slice(y)
|
1184 |
+
xslices = self._slice(x)
|
1185 |
+
for xslice, yslice, trans in zip(xslices, yslices, self.transforms):
|
1186 |
+
logdetjacs.append(trans.log_abs_det_jacobian(xslice, yslice))
|
1187 |
+
return torch.stack(logdetjacs, dim=self.dim)
|
1188 |
+
|
1189 |
+
@property
|
1190 |
+
def bijective(self):
|
1191 |
+
return all(t.bijective for t in self.transforms)
|
1192 |
+
|
1193 |
+
@constraints.dependent_property
|
1194 |
+
def domain(self):
|
1195 |
+
return constraints.stack([t.domain for t in self.transforms], self.dim)
|
1196 |
+
|
1197 |
+
@constraints.dependent_property
|
1198 |
+
def codomain(self):
|
1199 |
+
return constraints.stack([t.codomain for t in self.transforms], self.dim)
|
1200 |
+
|
1201 |
+
|
1202 |
+
class CumulativeDistributionTransform(Transform):
|
1203 |
+
"""
|
1204 |
+
Transform via the cumulative distribution function of a probability distribution.
|
1205 |
+
|
1206 |
+
Args:
|
1207 |
+
distribution (Distribution): Distribution whose cumulative distribution function to use for
|
1208 |
+
the transformation.
|
1209 |
+
|
1210 |
+
Example::
|
1211 |
+
|
1212 |
+
# Construct a Gaussian copula from a multivariate normal.
|
1213 |
+
base_dist = MultivariateNormal(
|
1214 |
+
loc=torch.zeros(2),
|
1215 |
+
scale_tril=LKJCholesky(2).sample(),
|
1216 |
+
)
|
1217 |
+
transform = CumulativeDistributionTransform(Normal(0, 1))
|
1218 |
+
copula = TransformedDistribution(base_dist, [transform])
|
1219 |
+
"""
|
1220 |
+
|
1221 |
+
bijective = True
|
1222 |
+
codomain = constraints.unit_interval
|
1223 |
+
sign = +1
|
1224 |
+
|
1225 |
+
def __init__(self, distribution, cache_size=0):
|
1226 |
+
super().__init__(cache_size=cache_size)
|
1227 |
+
self.distribution = distribution
|
1228 |
+
|
1229 |
+
@property
|
1230 |
+
def domain(self):
|
1231 |
+
return self.distribution.support
|
1232 |
+
|
1233 |
+
def _call(self, x):
|
1234 |
+
return self.distribution.cdf(x)
|
1235 |
+
|
1236 |
+
def _inverse(self, y):
|
1237 |
+
return self.distribution.icdf(y)
|
1238 |
+
|
1239 |
+
def log_abs_det_jacobian(self, x, y):
|
1240 |
+
return self.distribution.log_prob(x)
|
1241 |
+
|
1242 |
+
def with_cache(self, cache_size=1):
|
1243 |
+
if self._cache_size == cache_size:
|
1244 |
+
return self
|
1245 |
+
return CumulativeDistributionTransform(self.distribution, cache_size=cache_size)
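Besides the copula construction in the docstring, the same transform expresses the probability integral transform: pushing a distribution through its own CDF yields Uniform(0, 1) samples. A hedged sketch (the sample count is arbitrary):

import torch
from torch.distributions import Normal, TransformedDistribution
from torch.distributions.transforms import CumulativeDistributionTransform

base = Normal(0.0, 1.0)
pit = TransformedDistribution(base, [CumulativeDistributionTransform(base)])

u = pit.sample((10000,))
assert (u > 0).all() and (u < 1).all()
print(u.mean(), u.var())   # empirically close to 1/2 and 1/12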
|
venv/lib/python3.10/site-packages/torch/distributions/uniform.py
ADDED
@@ -0,0 +1,99 @@
1 |
+
from numbers import Number
|
2 |
+
|
3 |
+
import torch
|
4 |
+
from torch import nan
|
5 |
+
from torch.distributions import constraints
|
6 |
+
from torch.distributions.distribution import Distribution
|
7 |
+
from torch.distributions.utils import broadcast_all
|
8 |
+
|
9 |
+
__all__ = ["Uniform"]
|
10 |
+
|
11 |
+
|
12 |
+
class Uniform(Distribution):
|
13 |
+
r"""
|
14 |
+
Generates uniformly distributed random samples from the half-open interval
|
15 |
+
``[low, high)``.
|
16 |
+
|
17 |
+
Example::
|
18 |
+
|
19 |
+
>>> m = Uniform(torch.tensor([0.0]), torch.tensor([5.0]))
|
20 |
+
>>> m.sample() # uniformly distributed in the range [0.0, 5.0)
|
21 |
+
>>> # xdoctest: +SKIP
|
22 |
+
tensor([ 2.3418])
|
23 |
+
|
24 |
+
Args:
|
25 |
+
low (float or Tensor): lower range (inclusive).
|
26 |
+
high (float or Tensor): upper range (exclusive).
|
27 |
+
"""
|
28 |
+
# TODO allow (loc,scale) parameterization to allow independent constraints.
|
29 |
+
arg_constraints = {
|
30 |
+
"low": constraints.dependent(is_discrete=False, event_dim=0),
|
31 |
+
"high": constraints.dependent(is_discrete=False, event_dim=0),
|
32 |
+
}
|
33 |
+
has_rsample = True
|
34 |
+
|
35 |
+
@property
|
36 |
+
def mean(self):
|
37 |
+
return (self.high + self.low) / 2
|
38 |
+
|
39 |
+
@property
|
40 |
+
def mode(self):
|
41 |
+
return nan * self.high
|
42 |
+
|
43 |
+
@property
|
44 |
+
def stddev(self):
|
45 |
+
return (self.high - self.low) / 12**0.5
|
46 |
+
|
47 |
+
@property
|
48 |
+
def variance(self):
|
49 |
+
return (self.high - self.low).pow(2) / 12
|
50 |
+
|
51 |
+
def __init__(self, low, high, validate_args=None):
|
52 |
+
self.low, self.high = broadcast_all(low, high)
|
53 |
+
|
54 |
+
if isinstance(low, Number) and isinstance(high, Number):
|
55 |
+
batch_shape = torch.Size()
|
56 |
+
else:
|
57 |
+
batch_shape = self.low.size()
|
58 |
+
super().__init__(batch_shape, validate_args=validate_args)
|
59 |
+
|
60 |
+
if self._validate_args and not torch.lt(self.low, self.high).all():
|
61 |
+
raise ValueError("Uniform is not defined when low>= high")
|
62 |
+
|
63 |
+
def expand(self, batch_shape, _instance=None):
|
64 |
+
new = self._get_checked_instance(Uniform, _instance)
|
65 |
+
batch_shape = torch.Size(batch_shape)
|
66 |
+
new.low = self.low.expand(batch_shape)
|
67 |
+
new.high = self.high.expand(batch_shape)
|
68 |
+
super(Uniform, new).__init__(batch_shape, validate_args=False)
|
69 |
+
new._validate_args = self._validate_args
|
70 |
+
return new
|
71 |
+
|
72 |
+
@constraints.dependent_property(is_discrete=False, event_dim=0)
|
73 |
+
def support(self):
|
74 |
+
return constraints.interval(self.low, self.high)
|
75 |
+
|
76 |
+
def rsample(self, sample_shape=torch.Size()):
|
77 |
+
shape = self._extended_shape(sample_shape)
|
78 |
+
rand = torch.rand(shape, dtype=self.low.dtype, device=self.low.device)
|
79 |
+
return self.low + rand * (self.high - self.low)
|
80 |
+
|
81 |
+
def log_prob(self, value):
|
82 |
+
if self._validate_args:
|
83 |
+
self._validate_sample(value)
|
84 |
+
lb = self.low.le(value).type_as(self.low)
|
85 |
+
ub = self.high.gt(value).type_as(self.low)
|
86 |
+
return torch.log(lb.mul(ub)) - torch.log(self.high - self.low)
|
87 |
+
|
88 |
+
def cdf(self, value):
|
89 |
+
if self._validate_args:
|
90 |
+
self._validate_sample(value)
|
91 |
+
result = (value - self.low) / (self.high - self.low)
|
92 |
+
return result.clamp(min=0, max=1)
|
93 |
+
|
94 |
+
def icdf(self, value):
|
95 |
+
result = value * (self.high - self.low) + self.low
|
96 |
+
return result
|
97 |
+
|
98 |
+
def entropy(self):
|
99 |
+
return torch.log(self.high - self.low)
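A sketch of the reparameterized sampling path defined by `rsample` above (`low + rand * (high - low)`), showing that gradients flow back to the bounds (the sample shape is arbitrary):

import torch
from torch.distributions import Uniform

low = torch.tensor(0.0, requires_grad=True)
high = torch.tensor(5.0, requires_grad=True)
dist = Uniform(low, high)

x = dist.rsample((3,))          # x = low + U * (high - low), U ~ Uniform(0, 1)
x.sum().backward()              # gradients flow to `low` and `high`
print(low.grad, high.grad)      # sums of (1 - U) and of U respectively
print(dist.log_prob(x))         # constant -log(high - low) inside the support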
|
venv/lib/python3.10/site-packages/torch/distributions/utils.py
ADDED
@@ -0,0 +1,177 @@
1 |
+
from functools import update_wrapper
|
2 |
+
from numbers import Number
|
3 |
+
from typing import Any, Dict
|
4 |
+
|
5 |
+
import torch
|
6 |
+
import torch.nn.functional as F
|
7 |
+
from torch.overrides import is_tensor_like
|
8 |
+
|
9 |
+
euler_constant = 0.57721566490153286060 # Euler Mascheroni Constant
|
10 |
+
|
11 |
+
__all__ = [
|
12 |
+
"broadcast_all",
|
13 |
+
"logits_to_probs",
|
14 |
+
"clamp_probs",
|
15 |
+
"probs_to_logits",
|
16 |
+
"lazy_property",
|
17 |
+
"tril_matrix_to_vec",
|
18 |
+
"vec_to_tril_matrix",
|
19 |
+
]
|
20 |
+
|
21 |
+
|
22 |
+
def broadcast_all(*values):
|
23 |
+
r"""
|
24 |
+
Given a list of values (possibly containing numbers), returns a list where each
|
25 |
+
value is broadcasted based on the following rules:
|
26 |
+
- `torch.*Tensor` instances are broadcasted as per :ref:`_broadcasting-semantics`.
|
27 |
+
- numbers.Number instances (scalars) are upcast to tensors having
|
28 |
+
the same size and type as the first tensor passed to `values`. If all the
|
29 |
+
values are scalars, then they are upcasted to scalar Tensors.
|
30 |
+
|
31 |
+
Args:
|
32 |
+
values (list of `numbers.Number`, `torch.*Tensor` or objects implementing __torch_function__)
|
33 |
+
|
34 |
+
Raises:
|
35 |
+
ValueError: if any of the values is not a `numbers.Number` instance,
|
36 |
+
a `torch.*Tensor` instance, or an instance implementing __torch_function__
|
37 |
+
"""
|
38 |
+
if not all(is_tensor_like(v) or isinstance(v, Number) for v in values):
|
39 |
+
raise ValueError(
|
40 |
+
"Input arguments must all be instances of numbers.Number, "
|
41 |
+
"torch.Tensor or objects implementing __torch_function__."
|
42 |
+
)
|
43 |
+
if not all(is_tensor_like(v) for v in values):
|
44 |
+
options: Dict[str, Any] = dict(dtype=torch.get_default_dtype())
|
45 |
+
for value in values:
|
46 |
+
if isinstance(value, torch.Tensor):
|
47 |
+
options = dict(dtype=value.dtype, device=value.device)
|
48 |
+
break
|
49 |
+
new_values = [
|
50 |
+
v if is_tensor_like(v) else torch.tensor(v, **options) for v in values
|
51 |
+
]
|
52 |
+
return torch.broadcast_tensors(*new_values)
|
53 |
+
return torch.broadcast_tensors(*values)
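A small sketch of the broadcasting rules documented above, mixing Python numbers and tensors:

import torch
from torch.distributions.utils import broadcast_all

# The scalar 0.0 is promoted to the tensor's dtype/device, then broadcast.
loc, scale = broadcast_all(0.0, torch.ones(2, 3))
assert loc.shape == scale.shape == (2, 3)
assert loc.dtype == scale.dtype == torch.float32   # assumes the default dtype is float32

# All scalars: upcast to 0-dim tensors of the default dtype.
a, b = broadcast_all(1, 2.5)
assert a.shape == b.shape == torch.Size([])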
|
54 |
+
|
55 |
+
|
56 |
+
def _standard_normal(shape, dtype, device):
|
57 |
+
if torch._C._get_tracing_state():
|
58 |
+
# [JIT WORKAROUND] lack of support for .normal_()
|
59 |
+
return torch.normal(
|
60 |
+
torch.zeros(shape, dtype=dtype, device=device),
|
61 |
+
torch.ones(shape, dtype=dtype, device=device),
|
62 |
+
)
|
63 |
+
return torch.empty(shape, dtype=dtype, device=device).normal_()
|
64 |
+
|
65 |
+
|
66 |
+
def _sum_rightmost(value, dim):
|
67 |
+
r"""
|
68 |
+
Sum out ``dim`` many rightmost dimensions of a given tensor.
|
69 |
+
|
70 |
+
Args:
|
71 |
+
value (Tensor): A tensor of ``.dim()`` at least ``dim``.
|
72 |
+
dim (int): The number of rightmost dims to sum out.
|
73 |
+
"""
|
74 |
+
if dim == 0:
|
75 |
+
return value
|
76 |
+
required_shape = value.shape[:-dim] + (-1,)
|
77 |
+
return value.reshape(required_shape).sum(-1)
|
78 |
+
|
79 |
+
|
80 |
+
def logits_to_probs(logits, is_binary=False):
|
81 |
+
r"""
|
82 |
+
Converts a tensor of logits into probabilities. Note that for the
|
83 |
+
binary case, each value denotes log odds, whereas for the
|
84 |
+
multi-dimensional case, the values along the last dimension denote
|
85 |
+
the log probabilities (possibly unnormalized) of the events.
|
86 |
+
"""
|
87 |
+
if is_binary:
|
88 |
+
return torch.sigmoid(logits)
|
89 |
+
return F.softmax(logits, dim=-1)
|
90 |
+
|
91 |
+
|
92 |
+
def clamp_probs(probs):
|
93 |
+
eps = torch.finfo(probs.dtype).eps
|
94 |
+
return probs.clamp(min=eps, max=1 - eps)
|
95 |
+
|
96 |
+
|
97 |
+
def probs_to_logits(probs, is_binary=False):
|
98 |
+
r"""
|
99 |
+
Converts a tensor of probabilities into logits. For the binary case,
|
100 |
+
this denotes the probability of occurrence of the event indexed by `1`.
|
101 |
+
For the multi-dimensional case, the values along the last dimension
|
102 |
+
denote the probabilities of occurrence of each of the events.
|
103 |
+
"""
|
104 |
+
ps_clamped = clamp_probs(probs)
|
105 |
+
if is_binary:
|
106 |
+
return torch.log(ps_clamped) - torch.log1p(-ps_clamped)
|
107 |
+
return torch.log(ps_clamped)
|
108 |
+
|
109 |
+
|
110 |
+
class lazy_property:
|
111 |
+
r"""
|
112 |
+
Used as a decorator for lazy loading of class attributes. This uses a
|
113 |
+
non-data descriptor that calls the wrapped method to compute the property on
|
114 |
+
first call; thereafter replacing the wrapped method into an instance
|
115 |
+
attribute.
|
116 |
+
"""
|
117 |
+
|
118 |
+
def __init__(self, wrapped):
|
119 |
+
self.wrapped = wrapped
|
120 |
+
update_wrapper(self, wrapped)
|
121 |
+
|
122 |
+
def __get__(self, instance, obj_type=None):
|
123 |
+
if instance is None:
|
124 |
+
return _lazy_property_and_property(self.wrapped)
|
125 |
+
with torch.enable_grad():
|
126 |
+
value = self.wrapped(instance)
|
127 |
+
setattr(instance, self.wrapped.__name__, value)
|
128 |
+
return value
|
129 |
+
|
130 |
+
|
131 |
+
class _lazy_property_and_property(lazy_property, property):
|
132 |
+
"""We want lazy properties to look like multiple things.
|
133 |
+
|
134 |
+
* property when Sphinx autodoc looks
|
135 |
+
* lazy_property when Distribution validate_args looks
|
136 |
+
"""
|
137 |
+
|
138 |
+
def __init__(self, wrapped):
|
139 |
+
property.__init__(self, wrapped)
|
140 |
+
|
141 |
+
|
142 |
+
def tril_matrix_to_vec(mat: torch.Tensor, diag: int = 0) -> torch.Tensor:
|
143 |
+
r"""
|
144 |
+
Convert a `D x D` matrix or a batch of matrices into a (batched) vector
|
145 |
+
which comprises of lower triangular elements from the matrix in row order.
|
146 |
+
"""
|
147 |
+
n = mat.shape[-1]
|
148 |
+
if not torch._C._get_tracing_state() and (diag < -n or diag >= n):
|
149 |
+
raise ValueError(f"diag ({diag}) provided is outside [{-n}, {n-1}].")
|
150 |
+
arange = torch.arange(n, device=mat.device)
|
151 |
+
tril_mask = arange < arange.view(-1, 1) + (diag + 1)
|
152 |
+
vec = mat[..., tril_mask]
|
153 |
+
return vec
|
154 |
+
|
155 |
+
|
156 |
+
def vec_to_tril_matrix(vec: torch.Tensor, diag: int = 0) -> torch.Tensor:
|
157 |
+
r"""
|
158 |
+
Convert a vector or a batch of vectors into a batched `D x D`
|
159 |
+
lower triangular matrix containing elements from the vector in row order.
|
160 |
+
"""
|
161 |
+
# +ve root of D**2 + (1+2*diag)*D - |diag| * (diag+1) - 2*vec.shape[-1] = 0
|
162 |
+
n = (
|
163 |
+
-(1 + 2 * diag)
|
164 |
+
+ ((1 + 2 * diag) ** 2 + 8 * vec.shape[-1] + 4 * abs(diag) * (diag + 1)) ** 0.5
|
165 |
+
) / 2
|
166 |
+
eps = torch.finfo(vec.dtype).eps
|
167 |
+
if not torch._C._get_tracing_state() and (round(n) - n > eps):
|
168 |
+
raise ValueError(
|
169 |
+
f"The size of last dimension is {vec.shape[-1]} which cannot be expressed as "
|
170 |
+
+ "the lower triangular part of a square D x D matrix."
|
171 |
+
)
|
172 |
+
n = round(n.item()) if isinstance(n, torch.Tensor) else round(n)
|
173 |
+
mat = vec.new_zeros(vec.shape[:-1] + torch.Size((n, n)))
|
174 |
+
arange = torch.arange(n, device=vec.device)
|
175 |
+
tril_mask = arange < arange.view(-1, 1) + (diag + 1)
|
176 |
+
mat[..., tril_mask] = vec
|
177 |
+
return mat
|
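A short sketch of how the helpers above compose, assuming the module is importable as torch.distributions.utils; probs_to_logits/logits_to_probs invert each other up to clamping, and tril_matrix_to_vec/vec_to_tril_matrix are inverses for diag=0:

import torch
from torch.distributions.utils import (
    broadcast_all,
    logits_to_probs,
    probs_to_logits,
    tril_matrix_to_vec,
    vec_to_tril_matrix,
)

# Scalars are promoted to tensors matching the first tensor's dtype/device.
loc, scale = broadcast_all(torch.zeros(3), 1.0)
print(loc.shape, scale.shape)        # torch.Size([3]) torch.Size([3])

# Binary probs <-> logits round trip (log-odds in the binary case).
p = torch.tensor([0.1, 0.9])
print(logits_to_probs(probs_to_logits(p, is_binary=True), is_binary=True))

# Pack the lower triangle into a vector and unpack it again.
m = torch.tril(torch.randn(3, 3))
v = tril_matrix_to_vec(m)            # 6 elements in row order
print(torch.equal(vec_to_tril_matrix(v), m))
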
venv/lib/python3.10/site-packages/torch/distributions/von_mises.py
ADDED
@@ -0,0 +1,209 @@
import math

import torch
import torch.jit
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all, lazy_property

__all__ = ["VonMises"]


def _eval_poly(y, coef):
    coef = list(coef)
    result = coef.pop()
    while coef:
        result = coef.pop() + y * result
    return result


_I0_COEF_SMALL = [
    1.0,
    3.5156229,
    3.0899424,
    1.2067492,
    0.2659732,
    0.360768e-1,
    0.45813e-2,
]
_I0_COEF_LARGE = [
    0.39894228,
    0.1328592e-1,
    0.225319e-2,
    -0.157565e-2,
    0.916281e-2,
    -0.2057706e-1,
    0.2635537e-1,
    -0.1647633e-1,
    0.392377e-2,
]
_I1_COEF_SMALL = [
    0.5,
    0.87890594,
    0.51498869,
    0.15084934,
    0.2658733e-1,
    0.301532e-2,
    0.32411e-3,
]
_I1_COEF_LARGE = [
    0.39894228,
    -0.3988024e-1,
    -0.362018e-2,
    0.163801e-2,
    -0.1031555e-1,
    0.2282967e-1,
    -0.2895312e-1,
    0.1787654e-1,
    -0.420059e-2,
]

_COEF_SMALL = [_I0_COEF_SMALL, _I1_COEF_SMALL]
_COEF_LARGE = [_I0_COEF_LARGE, _I1_COEF_LARGE]


def _log_modified_bessel_fn(x, order=0):
    """
    Returns ``log(I_order(x))`` for ``x > 0``,
    where `order` is either 0 or 1.
    """
    assert order == 0 or order == 1

    # compute small solution
    y = x / 3.75
    y = y * y
    small = _eval_poly(y, _COEF_SMALL[order])
    if order == 1:
        small = x.abs() * small
    small = small.log()

    # compute large solution
    y = 3.75 / x
    large = x - 0.5 * x.log() + _eval_poly(y, _COEF_LARGE[order]).log()

    result = torch.where(x < 3.75, small, large)
    return result


@torch.jit.script_if_tracing
def _rejection_sample(loc, concentration, proposal_r, x):
    done = torch.zeros(x.shape, dtype=torch.bool, device=loc.device)
    while not done.all():
        u = torch.rand((3,) + x.shape, dtype=loc.dtype, device=loc.device)
        u1, u2, u3 = u.unbind()
        z = torch.cos(math.pi * u1)
        f = (1 + proposal_r * z) / (proposal_r + z)
        c = concentration * (proposal_r - f)
        accept = ((c * (2 - c) - u2) > 0) | ((c / u2).log() + 1 - c >= 0)
        if accept.any():
            x = torch.where(accept, (u3 - 0.5).sign() * f.acos(), x)
            done = done | accept
    return (x + math.pi + loc) % (2 * math.pi) - math.pi


class VonMises(Distribution):
    """
    A circular von Mises distribution.

    This implementation uses polar coordinates. The ``loc`` and ``value`` args
    can be any real number (to facilitate unconstrained optimization), but are
    interpreted as angles modulo 2 pi.

    Example::
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = VonMises(torch.tensor([1.0]), torch.tensor([1.0]))
        >>> m.sample()  # von Mises distributed with loc=1 and concentration=1
        tensor([1.9777])

    :param torch.Tensor loc: an angle in radians.
    :param torch.Tensor concentration: concentration parameter
    """

    arg_constraints = {"loc": constraints.real, "concentration": constraints.positive}
    support = constraints.real
    has_rsample = False

    def __init__(self, loc, concentration, validate_args=None):
        self.loc, self.concentration = broadcast_all(loc, concentration)
        batch_shape = self.loc.shape
        event_shape = torch.Size()
        super().__init__(batch_shape, event_shape, validate_args)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        log_prob = self.concentration * torch.cos(value - self.loc)
        log_prob = (
            log_prob
            - math.log(2 * math.pi)
            - _log_modified_bessel_fn(self.concentration, order=0)
        )
        return log_prob

    @lazy_property
    def _loc(self):
        return self.loc.to(torch.double)

    @lazy_property
    def _concentration(self):
        return self.concentration.to(torch.double)

    @lazy_property
    def _proposal_r(self):
        kappa = self._concentration
        tau = 1 + (1 + 4 * kappa**2).sqrt()
        rho = (tau - (2 * tau).sqrt()) / (2 * kappa)
        _proposal_r = (1 + rho**2) / (2 * rho)
        # second order Taylor expansion around 0 for small kappa
        _proposal_r_taylor = 1 / kappa + kappa
        return torch.where(kappa < 1e-5, _proposal_r_taylor, _proposal_r)

    @torch.no_grad()
    def sample(self, sample_shape=torch.Size()):
        """
        The sampling algorithm for the von Mises distribution is based on the
        following paper: D.J. Best and N.I. Fisher, "Efficient simulation of the
        von Mises distribution." Applied Statistics (1979): 152-157.

        Sampling is always done in double precision internally to avoid a hang
        in _rejection_sample() for small values of the concentration, which
        starts to happen for single precision around 1e-4 (see issue #88443).
        """
        shape = self._extended_shape(sample_shape)
        x = torch.empty(shape, dtype=self._loc.dtype, device=self.loc.device)
        return _rejection_sample(
            self._loc, self._concentration, self._proposal_r, x
        ).to(self.loc.dtype)

    def expand(self, batch_shape):
        try:
            return super().expand(batch_shape)
        except NotImplementedError:
            validate_args = self.__dict__.get("_validate_args")
            loc = self.loc.expand(batch_shape)
            concentration = self.concentration.expand(batch_shape)
            return type(self)(loc, concentration, validate_args=validate_args)

    @property
    def mean(self):
        """
        The provided mean is the circular one.
        """
        return self.loc

    @property
    def mode(self):
        return self.loc

    @lazy_property
    def variance(self):
        """
        The provided variance is the circular one.
        """
        return (
            1
            - (
                _log_modified_bessel_fn(self.concentration, order=1)
                - _log_modified_bessel_fn(self.concentration, order=0)
            ).exp()
        )

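A brief usage sketch of the VonMises class above, assuming torch.distributions.VonMises is exported as in a standard install; samples are drawn by rejection sampling and wrapped to roughly [-pi, pi):

import torch
from torch.distributions import VonMises

m = VonMises(loc=torch.tensor([0.0]), concentration=torch.tensor([4.0]))
samples = m.sample((1000,))              # no rsample: has_rsample is False
print(samples.min(), samples.max())      # wrapped to roughly [-pi, pi)
print(m.log_prob(torch.tensor([0.0])))   # density is highest at loc
print(m.mean, m.variance)                # circular mean and variance
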
venv/lib/python3.10/site-packages/torch/distributions/weibull.py
ADDED
@@ -0,0 +1,83 @@
import torch
from torch.distributions import constraints
from torch.distributions.exponential import Exponential
from torch.distributions.gumbel import euler_constant
from torch.distributions.transformed_distribution import TransformedDistribution
from torch.distributions.transforms import AffineTransform, PowerTransform
from torch.distributions.utils import broadcast_all

__all__ = ["Weibull"]


class Weibull(TransformedDistribution):
    r"""
    Samples from a two-parameter Weibull distribution.

    Example:

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = Weibull(torch.tensor([1.0]), torch.tensor([1.0]))
        >>> m.sample()  # sample from a Weibull distribution with scale=1, concentration=1
        tensor([ 0.4784])

    Args:
        scale (float or Tensor): Scale parameter of distribution (lambda).
        concentration (float or Tensor): Concentration parameter of distribution (k/shape).
    """
    arg_constraints = {
        "scale": constraints.positive,
        "concentration": constraints.positive,
    }
    support = constraints.positive

    def __init__(self, scale, concentration, validate_args=None):
        self.scale, self.concentration = broadcast_all(scale, concentration)
        self.concentration_reciprocal = self.concentration.reciprocal()
        base_dist = Exponential(
            torch.ones_like(self.scale), validate_args=validate_args
        )
        transforms = [
            PowerTransform(exponent=self.concentration_reciprocal),
            AffineTransform(loc=0, scale=self.scale),
        ]
        super().__init__(base_dist, transforms, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Weibull, _instance)
        new.scale = self.scale.expand(batch_shape)
        new.concentration = self.concentration.expand(batch_shape)
        new.concentration_reciprocal = new.concentration.reciprocal()
        base_dist = self.base_dist.expand(batch_shape)
        transforms = [
            PowerTransform(exponent=new.concentration_reciprocal),
            AffineTransform(loc=0, scale=new.scale),
        ]
        super(Weibull, new).__init__(base_dist, transforms, validate_args=False)
        new._validate_args = self._validate_args
        return new

    @property
    def mean(self):
        return self.scale * torch.exp(torch.lgamma(1 + self.concentration_reciprocal))

    @property
    def mode(self):
        return (
            self.scale
            * ((self.concentration - 1) / self.concentration)
            ** self.concentration.reciprocal()
        )

    @property
    def variance(self):
        return self.scale.pow(2) * (
            torch.exp(torch.lgamma(1 + 2 * self.concentration_reciprocal))
            - torch.exp(2 * torch.lgamma(1 + self.concentration_reciprocal))
        )

    def entropy(self):
        return (
            euler_constant * (1 - self.concentration_reciprocal)
            + torch.log(self.scale * self.concentration_reciprocal)
            + 1
        )

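A minimal sketch of using the Weibull distribution above, assuming the standard torch.distributions export; sampling goes through the Exponential base and the PowerTransform/AffineTransform chain:

import torch
from torch.distributions import Weibull

w = Weibull(scale=torch.tensor([1.0]), concentration=torch.tensor([1.5]))
x = w.sample((5,))                         # strictly positive samples
print(x)
print(w.mean, w.variance, w.entropy())     # closed forms defined above
print(w.log_prob(torch.tensor([0.5])))     # via TransformedDistribution
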
venv/lib/python3.10/site-packages/torch/mps/__init__.py
ADDED
@@ -0,0 +1,130 @@
r"""
This package enables an interface for accessing MPS (Metal Performance Shaders) backend in Python.
Metal is Apple's API for programming metal GPU (graphics processor unit). Using MPS means that increased
performance can be achieved, by running work on the metal GPU(s).
See https://developer.apple.com/documentation/metalperformanceshaders for more details.
"""
import torch
from .. import Tensor

_is_in_bad_fork = getattr(torch._C, "_mps_is_in_bad_fork", lambda: False)
_default_mps_generator: torch._C.Generator = None  # type: ignore[assignment]


# local helper function (not public or exported)
def _get_default_mps_generator() -> torch._C.Generator:
    global _default_mps_generator
    if _default_mps_generator is None:
        _default_mps_generator = torch._C._mps_get_default_generator()
    return _default_mps_generator


def synchronize() -> None:
    r"""Waits for all kernels in all streams on a MPS device to complete."""
    return torch._C._mps_deviceSynchronize()


def get_rng_state() -> Tensor:
    r"""Returns the random number generator state as a ByteTensor."""
    return _get_default_mps_generator().get_state()


def set_rng_state(new_state: Tensor) -> None:
    r"""Sets the random number generator state.

    Args:
        new_state (torch.ByteTensor): The desired state
    """
    new_state_copy = new_state.clone(memory_format=torch.contiguous_format)
    _get_default_mps_generator().set_state(new_state_copy)


def manual_seed(seed: int) -> None:
    r"""Sets the seed for generating random numbers.

    Args:
        seed (int): The desired seed.
    """
    # the torch.mps.manual_seed() can be called from the global
    # torch.manual_seed() in torch/random.py. So we need to make
    # sure mps is available (otherwise we just return without
    # erroring out)
    if not torch._C._has_mps:
        return
    seed = int(seed)
    _get_default_mps_generator().manual_seed(seed)


def seed() -> None:
    r"""Sets the seed for generating random numbers to a random number."""
    _get_default_mps_generator().seed()


def empty_cache() -> None:
    r"""Releases all unoccupied cached memory currently held by the caching
    allocator so that those can be used in other GPU applications.
    """
    torch._C._mps_emptyCache()


def set_per_process_memory_fraction(fraction) -> None:
    r"""Set memory fraction for limiting process's memory allocation on MPS device.
    The allowed value equals the fraction multiplied by recommended maximum device memory
    (obtained from Metal API device.recommendedMaxWorkingSetSize).
    If trying to allocate more than the allowed value in a process, it will raise an out of
    memory error in allocator.

    Args:
        fraction(float): Range: 0~2. Allowed memory equals total_memory * fraction.

    .. note::
       Passing 0 to fraction means unlimited allocations
       (may cause system failure if out of memory).
       Passing fraction greater than 1.0 allows limits beyond the value
       returned from device.recommendedMaxWorkingSetSize.
    """

    if not isinstance(fraction, float):
        raise TypeError("Invalid type for fraction argument, must be `float`")
    if fraction < 0 or fraction > 2:
        raise ValueError(f"Invalid fraction value: {fraction}. Allowed range: 0~2")

    torch._C._mps_setMemoryFraction(fraction)


def current_allocated_memory() -> int:
    r"""Returns the current GPU memory occupied by tensors in bytes.

    .. note::
       The returned size does not include cached allocations in
       memory pools of MPSAllocator.
    """
    return torch._C._mps_currentAllocatedMemory()


def driver_allocated_memory() -> int:
    r"""Returns total GPU memory allocated by Metal driver for the process in bytes.

    .. note::
       The returned size includes cached allocations in MPSAllocator pools
       as well as allocations from MPS/MPSGraph frameworks.
    """
    return torch._C._mps_driverAllocatedMemory()


from . import profiler
from .event import Event

__all__ = [
    "get_rng_state",
    "manual_seed",
    "seed",
    "set_rng_state",
    "synchronize",
    "empty_cache",
    "set_per_process_memory_fraction",
    "current_allocated_memory",
    "driver_allocated_memory",
    "Event",
    "profiler",
]

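An end-to-end sketch of the module above; it assumes an MPS-enabled PyTorch build on Apple silicon, guarded by torch.backends.mps.is_available():

import torch

if torch.backends.mps.is_available():
    torch.mps.manual_seed(0)                      # seed the default MPS generator
    x = torch.randn(1024, 1024, device="mps")
    y = x @ x
    torch.mps.synchronize()                       # wait for queued kernels
    print(torch.mps.current_allocated_memory())   # bytes held by live tensors
    print(torch.mps.driver_allocated_memory())    # total driver-side allocation
    torch.mps.empty_cache()                       # release cached blocks
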
venv/lib/python3.10/site-packages/torch/mps/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (4.54 kB).

venv/lib/python3.10/site-packages/torch/mps/__pycache__/event.cpython-310.pyc
ADDED
Binary file (2.33 kB).

venv/lib/python3.10/site-packages/torch/mps/__pycache__/profiler.cpython-310.pyc
ADDED
Binary file (2.67 kB).

venv/lib/python3.10/site-packages/torch/mps/event.py
ADDED
@@ -0,0 +1,45 @@
import torch


class Event:
    r"""Wrapper around an MPS event.

    MPS events are synchronization markers that can be used to monitor the
    device's progress, to accurately measure timing, and to synchronize MPS streams.

    Args:
        enable_timing (bool, optional): indicates if the event should measure time
            (default: ``False``)
    """

    def __init__(self, enable_timing=False):
        self.__eventId = torch._C._mps_acquireEvent(enable_timing)

    def __del__(self):
        # checks if torch._C is already destroyed
        if hasattr(torch._C, "_mps_releaseEvent") and self.__eventId > 0:
            torch._C._mps_releaseEvent(self.__eventId)

    def record(self):
        r"""Records the event in the default stream."""
        torch._C._mps_recordEvent(self.__eventId)

    def wait(self):
        r"""Makes all future work submitted to the default stream wait for this event."""
        torch._C._mps_waitForEvent(self.__eventId)

    def query(self):
        r"""Returns True if all work currently captured by event has completed."""
        return torch._C._mps_queryEvent(self.__eventId)

    def synchronize(self):
        r"""Waits until the completion of all work currently captured in this event.
        This prevents the CPU thread from proceeding until the event completes.
        """
        torch._C._mps_synchronizeEvent(self.__eventId)

    def elapsed_time(self, end_event):
        r"""Returns the time elapsed in milliseconds after the event was
        recorded and before the end_event was recorded.
        """
        return torch._C._mps_elapsedTimeOfEvents(self.__eventId, end_event.__eventId)

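A small timing sketch using the Event wrapper above; it assumes an MPS-enabled build, and enable_timing=True is required for elapsed_time to be meaningful:

import torch

if torch.backends.mps.is_available():
    start = torch.mps.Event(enable_timing=True)
    end = torch.mps.Event(enable_timing=True)

    start.record()
    x = torch.randn(2048, 2048, device="mps")
    y = x @ x
    end.record()

    end.synchronize()                       # block until captured work finishes
    print(start.elapsed_time(end), "ms")    # time between the two records
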
venv/lib/python3.10/site-packages/torch/mps/profiler.py
ADDED
@@ -0,0 +1,59 @@
import contextlib

import torch

__all__ = ["start", "stop", "profile"]


def start(mode: str = "interval", wait_until_completed: bool = False) -> None:
    r"""Start OS Signpost tracing from MPS backend.

    The generated OS Signposts could be recorded and viewed in
    XCode Instruments Logging tool.

    Args:
        mode(str): OS Signpost tracing mode could be "interval", "event",
            or both "interval,event".
            The interval mode traces the duration of execution of the operations,
            whereas event mode marks the completion of executions.
            See document `Recording Performance Data`_ for more info.
        wait_until_completed(bool): Waits until the MPS Stream complete
            executing each encoded GPU operation. This helps generating single
            dispatches on the trace's timeline.
            Note that enabling this option would affect the performance negatively.

    .. _Recording Performance Data:
       https://developer.apple.com/documentation/os/logging/recording_performance_data
    """
    mode_normalized = mode.lower().replace(" ", "")
    torch._C._mps_profilerStartTrace(mode_normalized, wait_until_completed)


def stop():
    r"""Stops generating OS Signpost tracing from MPS backend."""
    torch._C._mps_profilerStopTrace()


@contextlib.contextmanager
def profile(mode: str = "interval", wait_until_completed: bool = False):
    r"""Context Manager to enabling generating OS Signpost tracing from MPS backend.

    Args:
        mode(str): OS Signpost tracing mode could be "interval", "event",
            or both "interval,event".
            The interval mode traces the duration of execution of the operations,
            whereas event mode marks the completion of executions.
            See document `Recording Performance Data`_ for more info.
        wait_until_completed(bool): Waits until the MPS Stream complete
            executing each encoded GPU operation. This helps generating single
            dispatches on the trace's timeline.
            Note that enabling this option would affect the performance negatively.

    .. _Recording Performance Data:
       https://developer.apple.com/documentation/os/logging/recording_performance_data
    """
    try:
        start(mode, wait_until_completed)
        yield
    finally:
        stop()

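A usage sketch for the profiler module above; it assumes an MPS device and that the emitted signposts are inspected in Xcode Instruments:

import torch
from torch.mps import profiler

if torch.backends.mps.is_available():
    # Scoped tracing via the context manager.
    with profiler.profile(mode="interval", wait_until_completed=False):
        x = torch.randn(512, 512, device="mps")
        (x @ x).sum().item()

    # Equivalent explicit form.
    profiler.start(mode="interval,event")
    torch.mps.synchronize()
    profiler.stop()
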
venv/lib/python3.10/site-packages/torch/package/__init__.py
ADDED
@@ -0,0 +1,12 @@
from .analyze.is_from_package import is_from_package
from .file_structure_representation import Directory
from .glob_group import GlobGroup
from .importer import (
    Importer,
    ObjMismatchError,
    ObjNotFoundError,
    OrderedImporter,
    sys_importer,
)
from .package_exporter import EmptyMatchError, PackageExporter, PackagingError
from .package_importer import PackageImporter

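A round-trip sketch with the re-exported PackageExporter/PackageImporter; the archive name example.pt and the package/resource names are placeholders chosen for illustration:

import torch
from torch.package import PackageExporter, PackageImporter

obj = {"weights": torch.randn(4)}

with PackageExporter("example.pt") as exporter:
    exporter.save_pickle("my_pkg", "obj.pkl", obj)     # pickle obj into the archive

restored = PackageImporter("example.pt").load_pickle("my_pkg", "obj.pkl")
print(torch.equal(restored["weights"], obj["weights"]))
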
venv/lib/python3.10/site-packages/torch/package/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (658 Bytes).

venv/lib/python3.10/site-packages/torch/package/__pycache__/_digraph.cpython-310.pyc
ADDED
Binary file (5.1 kB).

venv/lib/python3.10/site-packages/torch/package/__pycache__/_directory_reader.cpython-310.pyc
ADDED
Binary file (2.74 kB).

venv/lib/python3.10/site-packages/torch/package/__pycache__/_importlib.cpython-310.pyc
ADDED
Binary file (2.37 kB).

venv/lib/python3.10/site-packages/torch/package/__pycache__/_mangling.cpython-310.pyc
ADDED
Binary file (2.17 kB).

venv/lib/python3.10/site-packages/torch/package/__pycache__/_mock.cpython-310.pyc
ADDED
Binary file (2.5 kB).

venv/lib/python3.10/site-packages/torch/package/__pycache__/_package_pickler.cpython-310.pyc
ADDED
Binary file (2.8 kB).

venv/lib/python3.10/site-packages/torch/package/__pycache__/_package_unpickler.cpython-310.pyc
ADDED
Binary file (1.22 kB).

venv/lib/python3.10/site-packages/torch/package/__pycache__/_stdlib.cpython-310.pyc
ADDED
Binary file (4.06 kB).

venv/lib/python3.10/site-packages/torch/package/__pycache__/file_structure_representation.cpython-310.pyc
ADDED
Binary file (4.62 kB).