applied-ai-018 committed
Commit a4fdea9 · verified · 1 Parent(s): eb19839

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.

Files changed (50)
  1. ckpts/universal/global_step120/zero/10.input_layernorm.weight/fp32.pt +3 -0
  2. ckpts/universal/global_step120/zero/26.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step120/zero/26.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  4. ckpts/universal/global_step120/zero/26.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  5. ckpts/universal/global_step120/zero/4.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  6. ckpts/universal/global_step120/zero/4.post_attention_layernorm.weight/fp32.pt +3 -0
  7. ckpts/universal/global_step120/zero/7.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  8. venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/common.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/cudagraphs.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/debugging.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/distributed.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/inductor.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/registry.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/tensorrt.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/tvm.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/torch/nn/__init__.py +53 -0
  17. venv/lib/python3.10/site-packages/torch/nn/__pycache__/__init__.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/torch/nn/__pycache__/_reduction.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/torch/nn/__pycache__/common_types.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/torch/nn/__pycache__/cpp.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/torch/nn/__pycache__/functional.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/torch/nn/__pycache__/grad.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/torch/nn/__pycache__/init.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/torch/nn/__pycache__/parameter.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/torch/nn/_reduction.py +47 -0
  26. venv/lib/python3.10/site-packages/torch/nn/common_types.py +42 -0
  27. venv/lib/python3.10/site-packages/torch/nn/cpp.py +88 -0
  28. venv/lib/python3.10/site-packages/torch/nn/functional.py +0 -0
  29. venv/lib/python3.10/site-packages/torch/nn/functional.pyi +682 -0
  30. venv/lib/python3.10/site-packages/torch/nn/grad.py +189 -0
  31. venv/lib/python3.10/site-packages/torch/nn/init.py +626 -0
  32. venv/lib/python3.10/site-packages/torch/nn/intrinsic/__init__.py +35 -0
  33. venv/lib/python3.10/site-packages/torch/nn/intrinsic/__pycache__/__init__.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__init__.py +31 -0
  35. venv/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__pycache__/fused.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/torch/nn/intrinsic/modules/fused.py +30 -0
  38. venv/lib/python3.10/site-packages/torch/nn/intrinsic/qat/__init__.py +1 -0
  39. venv/lib/python3.10/site-packages/torch/nn/intrinsic/qat/__pycache__/__init__.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__init__.py +31 -0
  41. venv/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/conv_fused.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/linear_fused.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/linear_relu.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/conv_fused.py +37 -0
  46. venv/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/linear_fused.py +15 -0
  47. venv/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/linear_relu.py +15 -0
  48. venv/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/__init__.py +13 -0
  49. venv/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/__pycache__/__init__.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/__init__.py +1 -0
ckpts/universal/global_step120/zero/10.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:753db498886b0970ef05041cab3603b75cc30f6ba852156452ada9e0c25a2977
+ size 9293
ckpts/universal/global_step120/zero/26.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ba863251b7c23925746dff9082ae599badeb4067575eeeaa22dd51242660040
+ size 33555612
ckpts/universal/global_step120/zero/26.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c553688fbf3bc565373940f4c5024d8c58e3461784e2400b1f92fe224acaaaa0
+ size 33555627
ckpts/universal/global_step120/zero/26.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:41d7924df01a2b0ff858ad0228241b8e948f8429d76619518aabfb278b94a5b5
+ size 33555533
ckpts/universal/global_step120/zero/4.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7d9f4229e89115daf3fbd9dfcd302b87934f98c66c9d939d62a329af7e1c2e91
+ size 9387
ckpts/universal/global_step120/zero/4.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:92192f6e4909580afb7198c0ccd041ef87360774e17ba574c8275b84e7156067
+ size 9293
ckpts/universal/global_step120/zero/7.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:045ba55ef5492f12a1502f8fb90bb7cc9643867a1ebee814d028c0e1b9e1257e
+ size 33555627
venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/common.cpython-310.pyc ADDED
Binary file (3.11 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/cudagraphs.cpython-310.pyc ADDED
Binary file (6.9 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/debugging.cpython-310.pyc ADDED
Binary file (8.67 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/distributed.cpython-310.pyc ADDED
Binary file (18.4 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/inductor.cpython-310.pyc ADDED
Binary file (555 Bytes).
 
venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/registry.cpython-310.pyc ADDED
Binary file (3.73 kB).
 
venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/tensorrt.cpython-310.pyc ADDED
Binary file (266 Bytes).
 
venv/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/tvm.cpython-310.pyc ADDED
Binary file (5.2 kB).
 
venv/lib/python3.10/site-packages/torch/nn/__init__.py ADDED
@@ -0,0 +1,53 @@
+ from .modules import * # noqa: F403
+ from .parameter import (
+     Parameter as Parameter,
+     UninitializedParameter as UninitializedParameter,
+     UninitializedBuffer as UninitializedBuffer,
+ )
+ from .parallel import DataParallel as DataParallel
+ from . import init
+ from . import functional
+ from . import utils
+ from . import attention
+
+
+ def factory_kwargs(kwargs):
+     r"""Return a canonicalized dict of factory kwargs.
+
+     Given kwargs, returns a canonicalized dict of factory kwargs that can be directly passed
+     to factory functions like torch.empty, or errors if unrecognized kwargs are present.
+
+     This function makes it simple to write code like this::
+
+         class MyModule(nn.Module):
+             def __init__(self, **kwargs):
+                 factory_kwargs = torch.nn.factory_kwargs(kwargs)
+                 self.weight = Parameter(torch.empty(10, **factory_kwargs))
+
+     Why should you use this function instead of just passing `kwargs` along directly?
+
+     1. This function does error validation, so if there are unexpected kwargs we will
+        immediately report an error, instead of deferring it to the factory call
+     2. This function supports a special `factory_kwargs` argument, which can be used to
+        explicitly specify a kwarg to be used for factory functions, in the event one of the
+        factory kwargs conflicts with an already existing argument in the signature (e.g.
+        in the signature ``def f(dtype, **kwargs)``, you can specify ``dtype`` for factory
+        functions, as distinct from the dtype argument, by saying
+        ``f(dtype1, factory_kwargs={"dtype": dtype2})``)
+     """
+     if kwargs is None:
+         return {}
+     simple_keys = {"device", "dtype", "memory_format"}
+     expected_keys = simple_keys | {"factory_kwargs"}
+     if not kwargs.keys() <= expected_keys:
+         raise TypeError(f"unexpected kwargs {kwargs.keys() - expected_keys}")
+
+     # guarantee no input kwargs is untouched
+     r = dict(kwargs.get("factory_kwargs", {}))
+     for k in simple_keys:
+         if k in kwargs:
+             if k in r:
+                 raise TypeError(f"{k} specified twice, in **kwargs and in factory_kwargs")
+             r[k] = kwargs[k]
+
+     return r
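Note: the factory_kwargs helper above collects device/dtype/memory_format keyword arguments (or an explicit factory_kwargs dict) into one dict that can be forwarded to factory functions such as torch.empty. A minimal usage sketch, assuming a torch build that exposes torch.nn.factory_kwargs as in the file above; the MyLinearish class is hypothetical and only for illustration:

import torch
from torch.nn import Parameter

class MyLinearish(torch.nn.Module):
    # Hypothetical module, not part of this commit.
    def __init__(self, out_features=10, **kwargs):
        super().__init__()
        # Canonicalize device/dtype/memory_format kwargs into one dict.
        fk = torch.nn.factory_kwargs(kwargs)
        self.weight = Parameter(torch.empty(out_features, **fk))

m = MyLinearish(dtype=torch.float64)   # weight is created as float64
# MyLinearish(foo=1) would raise TypeError("unexpected kwargs {'foo'}") immediately,
# instead of failing later inside torch.empty.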
venv/lib/python3.10/site-packages/torch/nn/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.15 kB).
 
venv/lib/python3.10/site-packages/torch/nn/__pycache__/_reduction.cpython-310.pyc ADDED
Binary file (1.29 kB).
 
venv/lib/python3.10/site-packages/torch/nn/__pycache__/common_types.cpython-310.pyc ADDED
Binary file (1.02 kB).
 
venv/lib/python3.10/site-packages/torch/nn/__pycache__/cpp.cpython-310.pyc ADDED
Binary file (3.45 kB).
 
venv/lib/python3.10/site-packages/torch/nn/__pycache__/functional.cpython-310.pyc ADDED
Binary file (177 kB).
 
venv/lib/python3.10/site-packages/torch/nn/__pycache__/grad.cpython-310.pyc ADDED
Binary file (8.46 kB).
 
venv/lib/python3.10/site-packages/torch/nn/__pycache__/init.cpython-310.pyc ADDED
Binary file (19.3 kB).
 
venv/lib/python3.10/site-packages/torch/nn/__pycache__/parameter.cpython-310.pyc ADDED
Binary file (9.01 kB).
 
venv/lib/python3.10/site-packages/torch/nn/_reduction.py ADDED
@@ -0,0 +1,47 @@
+ from typing import Optional
+ import warnings
+
+ # NB: Keep this file in sync with enums in aten/src/ATen/core/Reduction.h
+
+
+ def get_enum(reduction: str) -> int:
+     if reduction == 'none':
+         ret = 0
+     elif reduction == 'mean':
+         ret = 1
+     elif reduction == 'elementwise_mean':
+         warnings.warn("reduction='elementwise_mean' is deprecated, please use reduction='mean' instead.")
+         ret = 1
+     elif reduction == 'sum':
+         ret = 2
+     else:
+         ret = -1  # TODO: remove once JIT exceptions support control flow
+         raise ValueError(f"{reduction} is not a valid value for reduction")
+     return ret
+
+ # In order to support previous versions, accept boolean size_average and reduce
+ # and convert them into the new constants for now
+
+
+ # We use these functions in torch/legacy as well, in which case we'll silence the warning
+ def legacy_get_string(size_average: Optional[bool], reduce: Optional[bool], emit_warning: bool = True) -> str:
+     warning = "size_average and reduce args will be deprecated, please use reduction='{}' instead."
+
+     if size_average is None:
+         size_average = True
+     if reduce is None:
+         reduce = True
+
+     if size_average and reduce:
+         ret = 'mean'
+     elif reduce:
+         ret = 'sum'
+     else:
+         ret = 'none'
+     if emit_warning:
+         warnings.warn(warning.format(ret))
+     return ret
+
+
+ def legacy_get_enum(size_average: Optional[bool], reduce: Optional[bool], emit_warning: bool = True) -> int:
+     return get_enum(legacy_get_string(size_average, reduce, emit_warning))
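Note: a small sketch of how these helpers map the legacy size_average/reduce booleans onto the newer reduction strings and enum values; the values are taken directly from the code above:

from torch.nn import _reduction as _Reduction

_Reduction.get_enum('none')   # 0
_Reduction.get_enum('mean')   # 1
_Reduction.get_enum('sum')    # 2

# Legacy boolean flags collapse onto the same strings:
_Reduction.legacy_get_string(True, True, emit_warning=False)    # 'mean'
_Reduction.legacy_get_string(False, True, emit_warning=False)   # 'sum'
_Reduction.legacy_get_string(True, False, emit_warning=False)   # 'none'
_Reduction.legacy_get_enum(None, None, emit_warning=False)      # 1, i.e. 'mean'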
venv/lib/python3.10/site-packages/torch/nn/common_types.py ADDED
@@ -0,0 +1,42 @@
+ from typing import TypeVar, Union, Tuple, Optional
+ from .. import Tensor
+
+ # Create some useful type aliases
+
+ # Template for arguments which can be supplied as a tuple, or which can be a scalar which PyTorch will internally
+ # broadcast to a tuple.
+ # Comes in several variants: A tuple of unknown size, and a fixed-size tuple for 1d, 2d, or 3d operations.
+ T = TypeVar('T')
+ _scalar_or_tuple_any_t = Union[T, Tuple[T, ...]]
+ _scalar_or_tuple_1_t = Union[T, Tuple[T]]
+ _scalar_or_tuple_2_t = Union[T, Tuple[T, T]]
+ _scalar_or_tuple_3_t = Union[T, Tuple[T, T, T]]
+ _scalar_or_tuple_4_t = Union[T, Tuple[T, T, T, T]]
+ _scalar_or_tuple_5_t = Union[T, Tuple[T, T, T, T, T]]
+ _scalar_or_tuple_6_t = Union[T, Tuple[T, T, T, T, T, T]]
+
+ # For arguments which represent size parameters (eg, kernel size, padding)
+ _size_any_t = _scalar_or_tuple_any_t[int]
+ _size_1_t = _scalar_or_tuple_1_t[int]
+ _size_2_t = _scalar_or_tuple_2_t[int]
+ _size_3_t = _scalar_or_tuple_3_t[int]
+ _size_4_t = _scalar_or_tuple_4_t[int]
+ _size_5_t = _scalar_or_tuple_5_t[int]
+ _size_6_t = _scalar_or_tuple_6_t[int]
+
+ # For arguments which represent optional size parameters (eg, adaptive pool parameters)
+ _size_any_opt_t = _scalar_or_tuple_any_t[Optional[int]]
+ _size_2_opt_t = _scalar_or_tuple_2_t[Optional[int]]
+ _size_3_opt_t = _scalar_or_tuple_3_t[Optional[int]]
+
+ # For arguments that represent a ratio to adjust each dimension of an input with (eg, upsampling parameters)
+ _ratio_2_t = _scalar_or_tuple_2_t[float]
+ _ratio_3_t = _scalar_or_tuple_3_t[float]
+ _ratio_any_t = _scalar_or_tuple_any_t[float]
+
+ _tensor_list_t = _scalar_or_tuple_any_t[Tensor]
+
+ # For the return value of max pooling operations that may or may not return indices.
+ # With the proposed 'Literal' feature to Python typing, it might be possible to
+ # eventually eliminate this.
+ _maybe_indices_t = _scalar_or_tuple_2_t[Tensor]
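Note: these aliases encode "scalar or tuple" parameters such as kernel sizes. A minimal sketch of how such an alias is typically consumed; the pool_config helper is hypothetical and only illustrates the alias:

from torch.nn.common_types import _size_2_t

def pool_config(kernel_size: _size_2_t) -> tuple:
    # Accept either an int or an (h, w) pair, as _size_2_t allows.
    if isinstance(kernel_size, int):
        return (kernel_size, kernel_size)
    return tuple(kernel_size)

pool_config(3)       # (3, 3)
pool_config((2, 4))  # (2, 4)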
venv/lib/python3.10/site-packages/torch/nn/cpp.py ADDED
@@ -0,0 +1,88 @@
+ """Functionality for Python <-> C++ frontend inter-op."""
+
+ from torch import nn
+
+
+ class OrderedDictWrapper:
+     """A wrapper around a C++ OrderedDict.
+
+     It dynamically evaluates the OrderedDict getter on a bound C++ module, such
+     that new changes on the C++ side are picked up. Otherwise accessing e.g.
+     ``cpp_module._parameters`` just once would get a frozen copy of the parameters
+     at the time of access. ``torch.nn.Module`` accesses ``_parameters`` et al. via ``self.__dict__``
+     so using properties does not work.
+     """
+
+     def __init__(self, cpp_module, attr):
+         self.cpp_module = cpp_module
+         self.attr = attr
+
+     @property
+     def cpp_dict(self):
+         return getattr(self.cpp_module, self.attr)
+
+     # Magic methods cannot be assigned dynamically and bypass ``getattr``, so we
+     # must manually override them.
+
+     def items(self):
+         return self.cpp_dict.items()
+
+     def keys(self):
+         return self.cpp_dict.keys()
+
+     def values(self):
+         return self.cpp_dict.values()
+
+     def __iter__(self):
+         return self.cpp_dict.__iter__()
+
+     def __len__(self):
+         return self.cpp_dict.__len__()
+
+     def __contains__(self, key):
+         return self.cpp_dict.__contains__(key)
+
+     def __getitem__(self, key):
+         return self.cpp_dict.__getitem__(key)
+
+
+ class ModuleWrapper(nn.Module):
+     """A subclass of ``torch.nn.Module`` that wraps a C++ frontend module and delegates all access."""
+
+     def __init__(self, cpp_module):
+         # Assign before the super class constructor so ``self.training`` can be
+         # assigned to in the super class constructor.
+         self.cpp_module = cpp_module
+         super().__init__()
+         self._parameters = OrderedDictWrapper(cpp_module, "_parameters")  # type: ignore[assignment]
+         self._buffers: OrderedDictWrapper = OrderedDictWrapper(cpp_module, "_buffers")  # type: ignore[assignment]
+         self._modules: OrderedDictWrapper = OrderedDictWrapper(cpp_module, "_modules")  # type: ignore[assignment]
+         for attr in dir(cpp_module):
+             # Skip magic methods and the three attributes above.
+             if not attr.startswith("_"):
+                 setattr(self, attr, getattr(self.cpp_module, attr))
+
+     def _apply(self, fn, recurse=True):
+         for param in self.parameters():
+             # Tensors stored in modules are graph leaves, and we don't
+             # want to create copy nodes, so we have to unpack the data.
+             param.data = fn(param.data)
+             if param._grad is not None:
+                 param._grad.data = fn(param._grad.data)
+
+         for buf in self.buffers():
+             buf.data = fn(buf.data)
+
+         return self
+
+     # nn.Module defines training as a boolean
+     @property  # type: ignore[override]
+     def training(self):
+         return self.cpp_module.training
+
+     @training.setter
+     def training(self, mode):
+         self.cpp_module.train(mode)
+
+     def __repr__(self):
+         return self.cpp_module.__repr__()
venv/lib/python3.10/site-packages/torch/nn/functional.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/torch/nn/functional.pyi ADDED
@@ -0,0 +1,682 @@
+ from typing import (
+     Any,
+     Callable,
+     Dict,
+     List,
+     Literal,
+     Optional,
+     overload,
+     Sequence,
+     Tuple,
+     Union,
+ )
+
+ from torch import Tensor
+ from torch.types import _dtype, _int, _size
+
+ from .common_types import (
+     _ratio_any_t,
+     _size_1_t,
+     _size_2_opt_t,
+     _size_2_t,
+     _size_3_opt_t,
+     _size_3_t,
+     _size_any_t,
+ )
+
+ # 'TypedDict' is a new accepted type that represents a dictionary with a fixed set of allowed keys.
+ # It is standards-track but not in `typing` yet. We leave this hear to be uncommented once the feature
+ # is wide-spread.
+
+ # from mypy_extensions import TypedDict
+
+ # GRID_SAMPLE_INTERPOLATION_MODES = TypedDict('GRID_SAMPLE_INTERPOLATION_MODES', {'bilinear': int, 'nearest': int})
+ # GRID_SAMPLE_PADDING_MODES = TypedDict('GRID_SAMPLE_PADDING_MODES', {'zeros': int, 'border': int, 'reflection': int})
+
+ GRID_SAMPLE_INTERPOLATION_MODES = Dict[str, int]
+ GRID_SAMPLE_PADDING_MODES = Dict[str, int]
+
+ # These stubs were generated by running stubgen (`stubgen --parse-only functional.py`), followed by manual cleaning.
+ #
+ # The 'BroadcastingList{1,2,3}' types were replaced by `_size` or _output_ratio, as appropriate.
+ # This was necessary since the JIT uses BroadcastingList* types but static checking with mypy etc requires a `Sequence`
+ # type. There is no way to express the expected lengths of these lists in the current Python typing system.
+ #
+ # Functions created via `_add_docstr` in `functional.py` where merely typed as `Any` by `stubgen`, so those were
+ # deleted from the stub and replaced by generated declarations. See `gen_pyi` for the implementation of the code
+ # generation logic for those functions. In the future, it might be worth looking into using the mypy plugin system
+ # to encode the type semantics of `_add_docstr`, should that system ever become widespread.
+ def fractional_max_pool2d_with_indices(
+     input: Tensor,
+     kernel_size: _size,
+     output_size: Optional[_size] = ...,
+     output_ratio: Optional[_ratio_any_t] = ...,
+     return_indices: bool = ...,
+     _random_samples: Optional[Tensor] = ...,
+ ) -> Tuple[Tensor, Tensor]: ...
+ def fractional_max_pool3d_with_indices(
+     input: Tensor,
+     kernel_size: _size,
+     output_size: Optional[_size] = ...,
+     output_ratio: Optional[_ratio_any_t] = ...,
+     return_indices: bool = ...,
+     _random_samples: Optional[Tensor] = ...,
+ ) -> Tuple[Tensor, Tensor]: ...
+ def max_pool1d_with_indices(
+     input: Tensor,
+     kernel_size: _size,
+     stride: Optional[_size] = ...,
+     padding: _size = ...,
+     dilation: _size = ...,
+     ceil_mode: bool = ...,
+     return_indices: bool = ...,
+ ) -> Tuple[Tensor, Tensor]: ...
+ def max_pool2d_with_indices(
+     input: Tensor,
+     kernel_size: _size,
+     stride: Optional[_size] = ...,
+     padding: _size = ...,
+     dilation: _size = ...,
+     ceil_mode: bool = ...,
+     return_indices: bool = ...,
+ ) -> Tuple[Tensor, Tensor]: ...
+ def max_pool3d_with_indices(
+     input: Tensor,
+     kernel_size: _size,
+     stride: Optional[_size] = ...,
+     padding: _size = ...,
+     dilation: _size = ...,
+     ceil_mode: bool = ...,
+     return_indices: bool = ...,
+ ) -> Tuple[Tensor, Tensor]: ...
+ def max_unpool1d(
+     input: Tensor,
+     indices: Tensor,
+     kernel_size: _size,
+     stride: Optional[_size] = ...,
+     padding: _size = ...,
+     output_size: Optional[_size] = ...,
+ ) -> Tensor: ...
+ def max_unpool2d(
+     input: Tensor,
+     indices: Tensor,
+     kernel_size: _size,
+     stride: Optional[_size] = ...,
+     padding: _size = ...,
+     output_size: Optional[_size] = ...,
+ ) -> Tensor: ...
+ def max_unpool3d(
+     input: Tensor,
+     indices: Tensor,
+     kernel_size: _size,
+     stride: Optional[_size] = ...,
+     padding: _size = ...,
+     output_size: Optional[_size] = ...,
+ ) -> Tensor: ...
+ def lp_pool1d(
+     input: Tensor,
+     norm_type: float,
+     kernel_size: _size_1_t,
+     stride: Union[Optional[_size], Optional[int]] = ...,
+     ceil_mode: bool = ...,
+ ) -> Tensor: ...
+ def lp_pool2d(
+     input: Tensor,
+     norm_type: float,
+     kernel_size: _size_2_t,
+     stride: Union[Optional[_size], Optional[int]] = ...,
+     ceil_mode: bool = ...,
+ ) -> Tensor: ...
+ def lp_pool3d(
+     input: Tensor,
+     norm_type: float,
+     kernel_size: _size_3_t,
+     stride: Union[Optional[_size], Optional[int]] = ...,
+     ceil_mode: bool = ...,
+ ) -> Tensor: ...
+ def adaptive_max_pool1d_with_indices(
+     input: Tensor,
+     output_size: _size,
+     return_indices: bool = ...,
+ ) -> Tuple[Tensor, Tensor]: ...
+ def adaptive_max_pool2d_with_indices(
+     input: Tensor,
+     output_size: _size_2_opt_t,
+     return_indices: bool = ...,
+ ) -> Tuple[Tensor, Tensor]: ...
+ def adaptive_max_pool3d_with_indices(
+     input: Tensor,
+     output_size: _size_3_opt_t,
+     return_indices: bool = ...,
+ ) -> Tuple[Tensor, Tensor]: ...
+ def adaptive_avg_pool2d(input: Tensor, output_size: _size_2_opt_t) -> Tensor: ...
+ def adaptive_avg_pool3d(input: Tensor, output_size: _size_3_opt_t) -> Tensor: ...
+ def dropout(
+     input: Tensor,
+     p: float = ...,
+     training: bool = ...,
+     inplace: bool = ...,
+ ) -> Tensor: ...
+ def alpha_dropout(
+     input: Tensor,
+     p: float = ...,
+     training: bool = ...,
+     inplace: bool = ...,
+ ) -> Tensor: ...
+ def dropout1d(
+     input: Tensor,
+     p: float = ...,
+     training: bool = ...,
+     inplace: bool = ...,
+ ) -> Tensor: ...
+ def dropout2d(
+     input: Tensor,
+     p: float = ...,
+     training: bool = ...,
+     inplace: bool = ...,
+ ) -> Tensor: ...
+ def dropout3d(
+     input: Tensor,
+     p: float = ...,
+     training: bool = ...,
+     inplace: bool = ...,
+ ) -> Tensor: ...
+ def feature_alpha_dropout(
+     input: Tensor,
+     p: float = ...,
+     training: bool = ...,
+     inplace: bool = ...,
+ ) -> Tensor: ...
+ def threshold(
+     input: Tensor,
+     threshold: float,
+     value: float,
+     inplace: bool = ...,
+ ) -> Tensor: ...
+ def relu(input: Tensor, inplace: bool = ...) -> Tensor: ...
+ def glu(input: Tensor, dim: int = ...) -> Tensor: ...
+ def hardtanh(
+     input: Tensor,
+     min_val: float = ...,
+     max_val: float = ...,
+     inplace: bool = ...,
+ ) -> Tensor: ...
+ def relu6(input: Tensor, inplace: bool = ...) -> Tensor: ...
+ def elu(input: Tensor, alpha: float = ..., inplace: bool = ...) -> Tensor: ...
+ def selu(input: Tensor, inplace: bool = ...) -> Tensor: ...
+ def celu(input: Tensor, alpha: float = ..., inplace: bool = ...) -> Tensor: ...
+ def leaky_relu(
+     input: Tensor,
+     negative_slope: float = ...,
+     inplace: bool = ...,
+ ) -> Tensor: ...
+ def rrelu(
+     input: Tensor,
+     lower: float = ...,
+     upper: float = ...,
+     training: bool = ...,
+     inplace: bool = ...,
+ ) -> Tensor: ...
+ def tanhshrink(input: Any): ...
+ def softsign(input: Any): ...
+ def softmin(
+     input: Tensor,
+     dim: Optional[int] = ...,
+     _stacklevel: int = ...,
+     dtype: Optional[_dtype] = ...,
+ ) -> Tensor: ...
+ def softmax(
+     input: Tensor,
+     dim: Optional[int] = ...,
+     _stacklevel: int = ...,
+     dtype: Optional[_dtype] = ...,
+ ) -> Tensor: ...
+ def gumbel_softmax(
+     logits: Tensor,
+     tau: float = ...,
+     hard: bool = ...,
+     eps: float = ...,
+     dim: int = ...,
+ ) -> Tensor: ...
+ def log_softmax(
+     input: Tensor,
+     dim: Optional[int] = ...,
+     _stacklevel: int = ...,
+     dtype: Optional[_dtype] = ...,
+ ) -> Tensor: ...
+ def tanh(input: Any): ...
+ def sigmoid(input: Any) -> Tensor: ...
+ def hardsigmoid(input: Tensor, inplace: bool = False) -> Tensor: ...
+ def silu(input: Tensor, inplace: bool = False) -> Tensor: ...
+ def mish(input: Tensor, inplace: bool = False) -> Tensor: ...
+ def hardswish(input: Tensor, inplace: bool = False) -> Tensor: ...
+ def embedding(
+     input: Tensor,
+     weight: Tensor,
+     padding_idx: Optional[int] = ...,
+     max_norm: Optional[float] = ...,
+     norm_type: float = ...,
+     scale_grad_by_freq: bool = ...,
+     sparse: bool = ...,
+ ) -> Tensor: ...
+ def embedding_bag(
+     input: Tensor,
+     weight: Tensor,
+     offsets: Optional[Tensor] = ...,
+     max_norm: Optional[float] = ...,
+     norm_type: float = ...,
+     scale_grad_by_freq: bool = ...,
+     mode: str = ...,
+     sparse: bool = ...,
+     per_sample_weights: Optional[Tensor] = ...,
+     include_last_offset: bool = ...,
+     padding_idx: Optional[int] = ...,
+ ) -> Tensor: ...
+ def batch_norm(
+     input: Tensor,
+     running_mean: Optional[Tensor],
+     running_var: Optional[Tensor],
+     weight: Optional[Tensor] = ...,
+     bias: Optional[Tensor] = ...,
+     training: bool = ...,
+     momentum: float = ...,
+     eps: float = ...,
+ ) -> Tensor: ...
+ def instance_norm(
+     input: Tensor,
+     running_mean: Optional[Tensor] = ...,
+     running_var: Optional[Tensor] = ...,
+     weight: Optional[Tensor] = ...,
+     bias: Optional[Tensor] = ...,
+     use_input_stats: bool = ...,
+     momentum: float = ...,
+     eps: float = ...,
+ ) -> Tensor: ...
+ def layer_norm(
+     input: Tensor,
+     normalized_shape: Sequence[int],
+     weight: Optional[Tensor] = ...,
+     bias: Optional[Tensor] = ...,
+     eps: float = ...,
+ ) -> Tensor: ...
+ def group_norm(
+     input: Tensor,
+     num_groups: int,
+     weight: Optional[Tensor] = ...,
+     bias: Optional[Tensor] = ...,
+     eps: float = ...,
+ ) -> Tensor: ...
+ def local_response_norm(
+     input: Tensor,
+     size: int,
+     alpha: float = ...,
+     beta: float = ...,
+     k: float = ...,
+ ) -> Tensor: ...
+ def ctc_loss(
+     log_probs: Tensor,
+     targets: Tensor,
+     input_lengths: Tensor,
+     target_lengths: Tensor,
+     blank: int = ...,
+     reduction: str = ...,
+     zero_infinity: bool = ...,
+ ) -> Tensor: ...
+ def nll_loss(
+     input: Tensor,
+     target: Tensor,
+     weight: Optional[Tensor] = ...,
+     size_average: Optional[bool] = ...,
+     ignore_index: int = ...,
+     reduce: Optional[bool] = ...,
+     reduction: str = ...,
+ ) -> Tensor: ...
+ def poisson_nll_loss(
+     input: Tensor,
+     target: Tensor,
+     log_input: bool = ...,
+     full: bool = ...,
+     size_average: Optional[bool] = ...,
+     eps: float = ...,
+     reduce: Optional[bool] = ...,
+     reduction: str = ...,
+ ) -> Tensor: ...
+ def gaussian_nll_loss(
+     input: Tensor,
+     target: Tensor,
+     var: Tensor,
+     full: Optional[bool] = ...,
+     eps: Optional[float] = ...,
+     reduction: Optional[str] = ...,
+ ) -> Tensor: ...
+ def kl_div(
+     input: Tensor,
+     target: Tensor,
+     size_average: Optional[bool] = ...,
+     reduce: Optional[bool] = ...,
+     reduction: str = ...,
+     log_target: bool = ...,
+ ) -> Tensor: ...
+ def cross_entropy(
+     input: Tensor,
+     target: Tensor,
+     weight: Optional[Tensor] = ...,
+     size_average: Optional[bool] = ...,
+     ignore_index: int = ...,
+     reduce: Optional[bool] = ...,
+     reduction: str = ...,
+     label_smoothing: float = ...,
+ ) -> Tensor: ...
+ def binary_cross_entropy(
+     input: Tensor,
+     target: Tensor,
+     weight: Optional[Tensor] = ...,
+     size_average: Optional[bool] = ...,
+     reduce: Optional[bool] = ...,
+     reduction: str = ...,
+ ) -> Tensor: ...
+ def binary_cross_entropy_with_logits(
+     input: Tensor,
+     target: Tensor,
+     weight: Optional[Tensor] = ...,
+     size_average: Optional[bool] = ...,
+     reduce: Optional[bool] = ...,
+     reduction: str = ...,
+     pos_weight: Optional[Tensor] = ...,
+ ) -> Tensor: ...
+ def smooth_l1_loss(
+     input: Tensor,
+     target: Tensor,
+     size_average: Optional[bool] = ...,
+     reduce: Optional[bool] = ...,
+     reduction: str = ...,
+     beta: float = ...,
+ ) -> Tensor: ...
+ def huber_loss(
+     input: Tensor,
+     target: Tensor,
+     reduction: str = ...,
+     delta: float = ...,
+ ) -> Tensor: ...
+ def l1_loss(
+     input: Tensor,
+     target: Tensor,
+     size_average: Optional[bool] = ...,
+     reduce: Optional[bool] = ...,
+     reduction: str = ...,
+ ) -> Tensor: ...
+ def mse_loss(
+     input: Tensor,
+     target: Tensor,
+     size_average: Optional[bool] = ...,
+     reduce: Optional[bool] = ...,
+     reduction: str = ...,
+ ) -> Tensor: ...
+ def margin_ranking_loss(
+     input1: Tensor,
+     input2: Tensor,
+     target: Tensor,
+     margin: float = ...,
+     size_average: Optional[bool] = ...,
+     reduce: Optional[bool] = ...,
+     reduction: str = ...,
+ ) -> Tensor: ...
+ def hinge_embedding_loss(
+     input: Tensor,
+     target: Tensor,
+     margin: float = ...,
+     size_average: Optional[bool] = ...,
+     reduce: Optional[bool] = ...,
+     reduction: str = ...,
+ ) -> Tensor: ...
+ def multilabel_margin_loss(
+     input: Tensor,
+     target: Tensor,
+     size_average: Optional[bool] = ...,
+     reduce: Optional[bool] = ...,
+     reduction: str = ...,
+ ) -> Tensor: ...
+ def soft_margin_loss(
+     input: Tensor,
+     target: Tensor,
+     size_average: Optional[bool] = ...,
+     reduce: Optional[bool] = ...,
+     reduction: str = ...,
+ ) -> Tensor: ...
+ def multilabel_soft_margin_loss(
+     input: Tensor,
+     target: Tensor,
+     weight: Optional[Tensor] = ...,
+     size_average: Optional[bool] = ...,
+     reduce: Optional[bool] = ...,
+     reduction: str = ...,
+ ) -> Tensor: ...
+ def cosine_embedding_loss(
+     input1: Tensor,
+     input2: Tensor,
+     target: Tensor,
+     margin: float = ...,
+     size_average: Optional[bool] = ...,
+     reduce: Optional[bool] = ...,
+     reduction: str = ...,
+ ) -> Tensor: ...
+ def multi_margin_loss(
+     input: Tensor,
+     target: Tensor,
+     p: int = ...,
+     margin: float = ...,
+     weight: Optional[Tensor] = ...,
+     size_average: Optional[bool] = ...,
+     reduce: Optional[bool] = ...,
+     reduction: str = ...,
+ ) -> Tensor: ...
+ def upsample(
+     input: Any,
+     size: Optional[Any] = ...,
+     scale_factor: Optional[Any] = ...,
+     mode: str = ...,
+     align_corners: Optional[Any] = ...,
+ ): ...
+ def interpolate(
+     input: Any,
+     size: Optional[Any] = ...,
+     scale_factor: Optional[Any] = ...,
+     mode: str = ...,
+     align_corners: Optional[Any] = ...,
+     recompute_scale_factor: Optional[Any] = ...,
+     antialias: bool = ...,
+ ): ...
+ def upsample_nearest(
+     input: Any,
+     size: Optional[Any] = ...,
+     scale_factor: Optional[Any] = ...,
+ ): ...
+ def upsample_bilinear(
+     input: Any,
+     size: Optional[Any] = ...,
+     scale_factor: Optional[Any] = ...,
+ ): ...
+ def grid_sample(
+     input: Tensor,
+     grid: Tensor,
+     mode: str = ...,
+     padding_mode: str = ...,
+     align_corners: Optional[Any] = ...,
+ ) -> Tensor: ...
+ def affine_grid(
+     theta: Tensor,
+     size: List[int],
+     align_corners: Optional[Any] = ...,
+ ) -> Tensor: ...
+ def triplet_margin_loss(
+     anchor: Tensor,
+     positive: Tensor,
+     negative: Tensor,
+     margin: float = ...,
+     p: float = ...,
+     eps: float = ...,
+     swap: bool = ...,
+     size_average: Optional[bool] = ...,
+     reduce: Optional[bool] = ...,
+     reduction: str = ...,
+ ) -> Tensor: ...
+ def triplet_margin_with_distance_loss(
+     anchor: Tensor,
+     positive: Tensor,
+     negative: Tensor,
+     *,
+     distance_function: Optional[Callable[[Tensor, Tensor], Tensor]] = ...,
+     margin: float = ...,
+     swap: bool = ...,
+     reduction: str = ...,
+ ) -> Tensor: ...
+ def normalize(
+     input: Tensor,
+     p: float = ...,
+     dim: int = ...,
+     eps: float = ...,
+     out: Optional[Tensor] = ...,
+ ) -> Tensor: ...
+ def assert_int_or_pair(
+     arg: Any,
+     arg_name: Any,
+     message: Any,
+ ) -> None: ...
+ def unfold(
+     input: Tensor,
+     kernel_size: _size_any_t,
+     dilation: _size_any_t = ...,
+     padding: _size_any_t = ...,
+     stride: _size_any_t = ...,
+ ) -> Tensor: ...
+ def fold(
+     input: Tensor,
+     output_size: _size_any_t,
+     kernel_size: _size_any_t,
+     dilation: _size_any_t = ...,
+     padding: _size_any_t = ...,
+     stride: _size_any_t = ...,
+ ) -> Tensor: ...
+ def _canonical_mask(
+     mask: Optional[Tensor],
+     mask_name: str,
+     other_type: Optional[_dtype],
+     other_name: str,
+     target_type: _dtype,
+     check_other: bool = True,
+ ) -> Optional[Tensor]: ...
+ def _none_or_dtype(input: Optional[Tensor]) -> Optional[_dtype]: ...
+ def multi_head_attention_forward(
+     query: Tensor,
+     key: Tensor,
+     value: Tensor,
+     embed_dim_to_check: int,
+     num_heads: int,
+     in_proj_weight: Optional[Tensor],
+     in_proj_bias: Optional[Tensor],
+     bias_k: Optional[Tensor],
+     bias_v: Optional[Tensor],
+     add_zero_attn: bool,
+     dropout_p: float,
+     out_proj_weight: Tensor,
+     out_proj_bias: Optional[Tensor],
+     training: bool = True,
+     key_padding_mask: Optional[Tensor] = None,
+     need_weights: bool = True,
+     attn_mask: Optional[Tensor] = None,
+     use_separate_proj_weight: bool = False,
+     q_proj_weight: Optional[Tensor] = None,
+     k_proj_weight: Optional[Tensor] = None,
+     v_proj_weight: Optional[Tensor] = None,
+     static_k: Optional[Tensor] = None,
+     static_v: Optional[Tensor] = None,
+     average_attn_weights: bool = True,
+     is_causal: bool = False,
+ ) -> Tuple[Tensor, Optional[Tensor]]: ...
+
+ from .. import conv1d as conv1d
+ from .. import conv2d as conv2d
+ from .. import conv3d as conv3d
+ from .. import conv_transpose1d as conv_transpose1d
+ from .. import conv_transpose2d as conv_transpose2d
+ from .. import conv_transpose3d as conv_transpose3d
+ from .. import conv_tbc as conv_tbc
+ from .. import avg_pool1d as avg_pool1d
+ from .. import adaptive_avg_pool1d as adaptive_avg_pool1d
+ from .. import relu_ as relu_
+ from .. import selu_ as selu_
+ from .. import celu_ as celu_
+ from .. import prelu as prelu
+ from .. import rrelu_ as rrelu_
+ from .. import hardshrink as hardshrink
+ from .. import bilinear as bilinear
+ from .. import pixel_shuffle as pixel_shuffle
+ from .. import pixel_unshuffle as pixel_unshuffle
+ from .. import channel_shuffle as channel_shuffle
+ from .. import native_channel_shuffle as native_channel_shuffle
+ from .. import pairwise_distance as pairwise_distance
+ from .. import pdist as pdist
+ from .. import cosine_similarity as cosine_similarity
+ from .._C._nn import avg_pool2d as avg_pool2d
+ from .._C._nn import avg_pool3d as avg_pool3d
+ from .._C._nn import hardtanh_ as hardtanh_
+ from .._C._nn import elu_ as elu_
+ from .._C._nn import leaky_relu_ as leaky_relu_
+ from .._C._nn import gelu as gelu
+ from .._C._nn import softplus as softplus
+ from .._C._nn import softshrink as softshrink
+ from .._C._nn import linear as linear
+ from .._C._nn import pad as pad
+ from .._C._nn import one_hot as one_hot
+ from .._C._nn import scaled_dot_product_attention as scaled_dot_product_attention
+ from .._C._nn import log_sigmoid
+ logsigmoid = log_sigmoid
+
+ @overload
+ def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[False] = False) -> Tensor: ...
+ @overload
+ def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ...
+ @overload
+ def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size], *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ...
+ @overload
+ def adaptive_max_pool2d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[False] = False) -> Tensor: ...
+ @overload
+ def adaptive_max_pool2d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ...
+ @overload
+ def adaptive_max_pool2d(input: Tensor, output_size: Union[_int, _size], *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ...
+ @overload
+ def adaptive_max_pool3d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[False] = False) -> Tensor: ...
+ @overload
+ def adaptive_max_pool3d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ...
+ @overload
+ def adaptive_max_pool3d(input: Tensor, output_size: Union[_int, _size], *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ...
+ @overload
+ def fractional_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]] = None, output_ratio: Optional[_ratio_any_t] = None, return_indices: Literal[False] = False, _random_samples: Optional[Tensor] = None) -> Tensor: ...
+ @overload
+ def fractional_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]], output_ratio: Optional[_ratio_any_t], return_indices: Literal[True], /, _random_samples: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ...
+ @overload
+ def fractional_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]] = None, output_ratio: Optional[_ratio_any_t] = None, *, return_indices: Literal[True], _random_samples: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ...
+ @overload
+ def fractional_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]] = None, output_ratio: Optional[_ratio_any_t] = None, return_indices: Literal[False] = False, _random_samples: Optional[Tensor] = None) -> Tensor: ...
+ @overload
+ def fractional_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]], output_ratio: Optional[_ratio_any_t], return_indices: Literal[True], /, _random_samples: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ...
+ @overload
+ def fractional_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]] = None, output_ratio: Optional[_ratio_any_t] = None, *, return_indices: Literal[True], _random_samples: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ...
+ @overload
+ def max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, return_indices: Literal[False] = False) -> Tensor: ...
+ @overload
+ def max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]], padding: Union[_int, _size], dilation: Union[_int, _size], ceil_mode: bool, return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ...
+ @overload
+ def max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ...
+ @overload
+ def max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, return_indices: Literal[False] = False) -> Tensor: ...
+ @overload
+ def max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]], padding: Union[_int, _size], dilation: Union[_int, _size], ceil_mode: bool, return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ...
+ @overload
+ def max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ...
+ @overload
+ def max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, return_indices: Literal[False] = False) -> Tensor: ...
+ @overload
+ def max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]], padding: Union[_int, _size], dilation: Union[_int, _size], ceil_mode: bool, return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ...
+ @overload
+ def max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ...
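Note: the Literal overloads at the end of this stub encode that the return type of the pooling functions depends on return_indices. A minimal sketch of the runtime behavior they describe:

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 8, 8)

out = F.max_pool2d(x, kernel_size=2)                             # Tensor
out, idx = F.max_pool2d(x, kernel_size=2, return_indices=True)   # (Tensor, Tensor)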
venv/lib/python3.10/site-packages/torch/nn/grad.py ADDED
@@ -0,0 +1,189 @@
+ """Gradient interface."""
+
+ import torch
+ from .modules.utils import _single, _pair, _triple
+
+
+ def conv1d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):
+     r"""Compute the gradient of conv1d with respect to the input of the convolution.
+
+     This is same as the 1D transposed convolution operator under the hood but requires
+     the shape of the gradient w.r.t. input to be specified explicitly.
+
+     Args:
+         input_size : Shape of the input gradient tensor
+         weight: weight tensor (out_channels x in_channels/groups x kW)
+         grad_output : output gradient tensor (minibatch x out_channels x oW)
+         stride (int or tuple, optional): Stride of the convolution. Default: 1
+         padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
+         dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
+         groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
+
+     Examples::
+
+         >>> input = torch.randn(1, 1, 3, requires_grad=True)
+         >>> weight = torch.randn(1, 1, 1, requires_grad=True)
+         >>> output = F.conv1d(input, weight)
+         >>> grad_output = torch.randn(output.shape)
+         >>> grad_input = torch.autograd.grad(output, input, grad_output)
+         >>> F.grad.conv1d_input(input.shape, weight, grad_output)
+
+     """
+     input = grad_output.new_empty(1).expand(input_size)
+
+     return torch.ops.aten.convolution_backward(grad_output, input, weight, None,
+                                                _single(stride), _single(padding), _single(dilation),
+                                                False, [0], groups, (True, False, False))[0]
+
+
+ def conv1d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
+     r"""Compute the gradient of conv1d with respect to the weight of the convolution.
+
+     Args:
+         input: input tensor of shape (minibatch x in_channels x iW)
+         weight_size : Shape of the weight gradient tensor
+         grad_output : output gradient tensor (minibatch x out_channels x oW)
+         stride (int or tuple, optional): Stride of the convolution. Default: 1
+         padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
+         dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
+         groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
+
+     Examples::
+
+         >>> input = torch.randn(1, 1, 3, requires_grad=True)
+         >>> weight = torch.randn(1, 1, 1, requires_grad=True)
+         >>> output = F.conv1d(input, weight)
+         >>> grad_output = torch.randn(output.shape)
+         >>> # xdoctest: +SKIP
+         >>> grad_weight = torch.autograd.grad(output, filter, grad_output)
+         >>> F.grad.conv1d_weight(input, weight.shape, grad_output)
+
+     """
+     weight = grad_output.new_empty(1).expand(weight_size)
+
+     return torch.ops.aten.convolution_backward(grad_output, input, weight, None,
+                                                _single(stride), _single(padding), _single(dilation),
+                                                False, [0], groups, (False, True, False))[1]
+
+
+ def conv2d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):
+     r"""Compute the gradient of conv2d with respect to the input of the convolution.
+
+     This is same as the 2D transposed convolution operator under the hood but requires
+     the shape of the gradient w.r.t. input to be specified explicitly.
+
+     Args:
+         input_size : Shape of the input gradient tensor
+         weight: weight tensor (out_channels x in_channels/groups x kH x kW)
+         grad_output : output gradient tensor (minibatch x out_channels x oH x oW)
+         stride (int or tuple, optional): Stride of the convolution. Default: 1
+         padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
+         dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
+         groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
+
+     Examples::
+
+         >>> input = torch.randn(1, 1, 3, 3, requires_grad=True)
+         >>> weight = torch.randn(1, 1, 1, 2, requires_grad=True)
+         >>> output = F.conv2d(input, weight)
+         >>> grad_output = torch.randn(output.shape)
+         >>> grad_input = torch.autograd.grad(output, input, grad_output)
+         >>> F.grad.conv2d_input(input.shape, weight, grad_output)
+
+     """
+     input = grad_output.new_empty(1).expand(input_size)
+
+     return torch.ops.aten.convolution_backward(grad_output, input, weight, None,
+                                                _pair(stride), _pair(padding), _pair(dilation),
+                                                False, [0], groups, (True, False, False))[0]
+
+
+ def conv2d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
+     r"""Compute the gradient of conv2d with respect to the weight of the convolution.
+
+     Args:
+         input: input tensor of shape (minibatch x in_channels x iH x iW)
+         weight_size : Shape of the weight gradient tensor
+         grad_output : output gradient tensor (minibatch x out_channels x oH x oW)
+         stride (int or tuple, optional): Stride of the convolution. Default: 1
+         padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
+         dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
+         groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
+
+     Examples::
+
+         >>> input = torch.randn(1, 1, 3, 3, requires_grad=True)
+         >>> weight = torch.randn(1, 1, 1, 2, requires_grad=True)
+         >>> output = F.conv2d(input, weight)
+         >>> grad_output = torch.randn(output.shape)
+         >>> # xdoctest: +SKIP
+         >>> grad_weight = torch.autograd.grad(output, filter, grad_output)
+         >>> F.grad.conv2d_weight(input, weight.shape, grad_output)
+
+     """
+     weight = grad_output.new_empty(1).expand(weight_size)
+
+     return torch.ops.aten.convolution_backward(grad_output, input, weight, None,
+                                                _pair(stride), _pair(padding), _pair(dilation),
+                                                False, [0], groups, (False, True, False))[1]
+
+
+ def conv3d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):
+     r"""Compute the gradient of conv3d with respect to the input of the convolution.
+
+     This is same as the 3D transposed convolution operator under the hood but requires
+     the shape of the gradient w.r.t. input to be specified explicitly.
+
+     Args:
+         input_size : Shape of the input gradient tensor
+         weight: weights tensor (out_channels x in_channels/groups x kT x kH x kW)
+         grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)
+         stride (int or tuple, optional): Stride of the convolution. Default: 1
+         padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
+         dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
+         groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
+
+     Examples::
+
+         >>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True)
+         >>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True)
+         >>> output = F.conv3d(input, weight)
+         >>> grad_output = torch.randn(output.shape)
+         >>> grad_input = torch.autograd.grad(output, input, grad_output)
+         >>> F.grad.conv3d_input(input.shape, weight, grad_output)
+
+     """
+     input = grad_output.new_empty(1).expand(input_size)
+
+     return torch.ops.aten.convolution_backward(grad_output, input, weight, None,
+                                                _triple(stride), _triple(padding), _triple(dilation),
+                                                False, [0], groups, (True, False, False))[0]
+
+
+ def conv3d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
+     r"""Compute the gradient of conv3d with respect to the weight of the convolution.
+
+     Args:
+         input: input tensor of shape (minibatch x in_channels x iT x iH x iW)
+         weight_size : Shape of the weight gradient tensor
+         grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)
+         stride (int or tuple, optional): Stride of the convolution. Default: 1
+         padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
+         dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
+         groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
+
+     Examples::
+
+         >>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True)
+         >>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True)
+         >>> output = F.conv3d(input, weight)
+         >>> grad_output = torch.randn(output.shape)
+         >>> grad_weight = torch.autograd.grad(output, weight, grad_output)
+         >>> F.grad.conv3d_weight(input, weight.shape, grad_output)
+
+     """
+     weight = grad_output.new_empty(1).expand(weight_size)
+
+     return torch.ops.aten.convolution_backward(grad_output, input, weight, None,
+                                                _triple(stride), _triple(padding), _triple(dilation),
+                                                False, [0], groups, (False, True, False))[1]
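Note: the functions above compute convolution gradients directly from shapes and an upstream gradient. A minimal sketch combining the docstring examples and checking the results against autograd (variable names here are illustrative, not from the commit):

import torch
import torch.nn.functional as F
import torch.nn.grad as nn_grad

inp = torch.randn(1, 1, 5, 5, requires_grad=True)
weight = torch.randn(1, 1, 3, 3, requires_grad=True)
out = F.conv2d(inp, weight)
grad_out = torch.randn(out.shape)

# Gradient w.r.t. the input, given only its shape and the upstream gradient.
gi = nn_grad.conv2d_input(inp.shape, weight, grad_out)
# Gradient w.r.t. the weight, given only its shape.
gw = nn_grad.conv2d_weight(inp, weight.shape, grad_out)

# These should match what autograd computes for the same upstream gradient.
gi_ref, gw_ref = torch.autograd.grad(out, (inp, weight), grad_out)
assert torch.allclose(gi, gi_ref) and torch.allclose(gw, gw_ref)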
venv/lib/python3.10/site-packages/torch/nn/init.py ADDED
@@ -0,0 +1,626 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ """This file contains utilities for initializing neural network parameters."""
+ import math
+ import warnings
+
+ from torch import Tensor
+ import torch
+ from typing import Optional as _Optional
+
+ # These no_grad_* functions are necessary as wrappers around the parts of these
+ # functions that use `with torch.no_grad()`. The JIT doesn't support context
+ # managers, so these need to be implemented as builtins. Using these wrappers
+ # lets us keep those builtins small and re-usable.
+ def _no_grad_uniform_(tensor, a, b, generator=None):
+     with torch.no_grad():
+         return tensor.uniform_(a, b, generator=generator)
+
+
+ def _no_grad_normal_(tensor, mean, std, generator=None):
+     with torch.no_grad():
+         return tensor.normal_(mean, std, generator=generator)
+
+
+ def _no_grad_trunc_normal_(tensor, mean, std, a, b, generator=None):
+     # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
+     def norm_cdf(x):
+         # Computes standard normal cumulative distribution function
+         return (1. + math.erf(x / math.sqrt(2.))) / 2.
+
+     if (mean < a - 2 * std) or (mean > b + 2 * std):
+         warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
+                       "The distribution of values may be incorrect.",
+                       stacklevel=2)
+
+     with torch.no_grad():
+         # Values are generated by using a truncated uniform distribution and
+         # then using the inverse CDF for the normal distribution.
+         # Get upper and lower cdf values
+         l = norm_cdf((a - mean) / std)
+         u = norm_cdf((b - mean) / std)
+
+         # Uniformly fill tensor with values from [l, u], then translate to
+         # [2l-1, 2u-1].
+         tensor.uniform_(2 * l - 1, 2 * u - 1, generator=generator)
+
+         # Use inverse cdf transform for normal distribution to get truncated
+         # standard normal
+         tensor.erfinv_()
+
+         # Transform to proper mean, std
+         tensor.mul_(std * math.sqrt(2.))
+         tensor.add_(mean)
+
+         # Clamp to ensure it's in the proper range
+         tensor.clamp_(min=a, max=b)
+         return tensor
+
+
+ def _no_grad_fill_(tensor, val):
+     with torch.no_grad():
+         return tensor.fill_(val)
+
+
+ def _no_grad_zero_(tensor):
+     with torch.no_grad():
+         return tensor.zero_()
+
+
+ def calculate_gain(nonlinearity, param=None):
+     r"""Return the recommended gain value for the given nonlinearity function.
+
+     The values are as follows:
+
+     ================= ====================================================
+     nonlinearity      gain
+     ================= ====================================================
+     Linear / Identity :math:`1`
+     Conv{1,2,3}D      :math:`1`
+     Sigmoid           :math:`1`
+     Tanh              :math:`\frac{5}{3}`
+     ReLU              :math:`\sqrt{2}`
+     Leaky Relu        :math:`\sqrt{\frac{2}{1 + \text{negative\_slope}^2}}`
+     SELU              :math:`\frac{3}{4}`
+     ================= ====================================================
+
+     .. warning::
+         In order to implement `Self-Normalizing Neural Networks`_ ,
+         you should use ``nonlinearity='linear'`` instead of ``nonlinearity='selu'``.
+         This gives the initial weights a variance of ``1 / N``,
+         which is necessary to induce a stable fixed point in the forward pass.
+         In contrast, the default gain for ``SELU`` sacrifices the normalization
+         effect for more stable gradient flow in rectangular layers.
+
+     Args:
+         nonlinearity: the non-linear function (`nn.functional` name)
+         param: optional parameter for the non-linear function
+
+     Examples:
+         >>> gain = nn.init.calculate_gain('leaky_relu', 0.2)  # leaky_relu with negative_slope=0.2
+
+     .. _Self-Normalizing Neural Networks: https://papers.nips.cc/paper/2017/hash/5d44ee6f2c3f71b73125876103c8f6c4-Abstract.html
+     """
+     linear_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']
+     if nonlinearity in linear_fns or nonlinearity == 'sigmoid':
+         return 1
+     elif nonlinearity == 'tanh':
+         return 5.0 / 3
+     elif nonlinearity == 'relu':
+         return math.sqrt(2.0)
+     elif nonlinearity == 'leaky_relu':
+         if param is None:
+             negative_slope = 0.01
+         elif not isinstance(param, bool) and isinstance(param, int) or isinstance(param, float):
+             # True/False are instances of int, hence check above
+             negative_slope = param
+         else:
+             raise ValueError(f"negative_slope {param} not a valid number")
+         return math.sqrt(2.0 / (1 + negative_slope ** 2))
+     elif nonlinearity == 'selu':
+         return 3.0 / 4  # Value found empirically (https://github.com/pytorch/pytorch/pull/50664)
+     else:
+         raise ValueError(f"Unsupported nonlinearity {nonlinearity}")
+
+
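Editor's note (not part of the uploaded file): the gains returned above are exactly the closed-form values in the docstring table, as this small check illustrates.

import math
import torch.nn as nn

assert nn.init.calculate_gain('relu') == math.sqrt(2.0)
assert nn.init.calculate_gain('tanh') == 5.0 / 3
# For leaky_relu the gain depends on the negative slope passed as `param`.
assert nn.init.calculate_gain('leaky_relu', 0.2) == math.sqrt(2.0 / (1 + 0.2 ** 2))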
+ def uniform_(
+     tensor: Tensor,
+     a: float = 0.0,
+     b: float = 1.0,
+     generator: _Optional[torch.Generator] = None,
+ ) -> Tensor:
+     r"""Fill the input Tensor with values drawn from the uniform distribution.
+
+     :math:`\mathcal{U}(a, b)`.
+
+     Args:
+         tensor: an n-dimensional `torch.Tensor`
+         a: the lower bound of the uniform distribution
+         b: the upper bound of the uniform distribution
+         generator: the torch Generator to sample from (default: None)
+
+     Examples:
+         >>> w = torch.empty(3, 5)
+         >>> nn.init.uniform_(w)
+     """
+     if torch.overrides.has_torch_function_variadic(tensor):
+         return torch.overrides.handle_torch_function(
+             uniform_, (tensor,), tensor=tensor, a=a, b=b, generator=generator
+         )
+     return _no_grad_uniform_(tensor, a, b, generator)
+
+
+ def normal_(
+     tensor: Tensor,
+     mean: float = 0.0,
+     std: float = 1.0,
+     generator: _Optional[torch.Generator] = None,
+ ) -> Tensor:
+     r"""Fill the input Tensor with values drawn from the normal distribution.
+
+     :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.
+
+     Args:
+         tensor: an n-dimensional `torch.Tensor`
+         mean: the mean of the normal distribution
+         std: the standard deviation of the normal distribution
+         generator: the torch Generator to sample from (default: None)
+
+     Examples:
+         >>> w = torch.empty(3, 5)
+         >>> nn.init.normal_(w)
+     """
+     if torch.overrides.has_torch_function_variadic(tensor):
+         return torch.overrides.handle_torch_function(
+             normal_, (tensor,), tensor=tensor, mean=mean, std=std, generator=generator
+         )
+     return _no_grad_normal_(tensor, mean, std, generator)
+
+ def trunc_normal_(
+     tensor: Tensor,
+     mean: float = 0.,
+     std: float = 1.,
+     a: float = -2.,
+     b: float = 2.,
+     generator: _Optional[torch.Generator] = None
+ ) -> Tensor:
+     r"""Fill the input Tensor with values drawn from a truncated normal distribution.
+
+     The values are effectively drawn from the
+     normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
+     with values outside :math:`[a, b]` redrawn until they are within
+     the bounds. The method used for generating the random values works
+     best when :math:`a \leq \text{mean} \leq b`.
+
+     Args:
+         tensor: an n-dimensional `torch.Tensor`
+         mean: the mean of the normal distribution
+         std: the standard deviation of the normal distribution
+         a: the minimum cutoff value
+         b: the maximum cutoff value
+         generator: the torch Generator to sample from (default: None)
+
+     Examples:
+         >>> w = torch.empty(3, 5)
+         >>> nn.init.trunc_normal_(w)
+     """
+     return _no_grad_trunc_normal_(tensor, mean, std, a, b, generator=generator)
+
+
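Editor's note (not part of the uploaded file): each initializer in this file accepts an optional generator, which makes initialization reproducible without touching the global RNG. A small sketch using trunc_normal_:

import torch
import torch.nn as nn

# A seeded generator gives reproducible initialization, independent of the global RNG state.
g = torch.Generator().manual_seed(0)
w1 = nn.init.trunc_normal_(torch.empty(3, 5), std=0.02, generator=g)

g = torch.Generator().manual_seed(0)
w2 = nn.init.trunc_normal_(torch.empty(3, 5), std=0.02, generator=g)

assert torch.equal(w1, w2)
assert w1.abs().max() <= 2.0  # samples are clamped to the default cutoffs [a, b] = [-2, 2]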
+ def constant_(tensor: Tensor, val: float) -> Tensor:
+     r"""Fill the input Tensor with the value :math:`\text{val}`.
+
+     Args:
+         tensor: an n-dimensional `torch.Tensor`
+         val: the value to fill the tensor with
+
+     Examples:
+         >>> w = torch.empty(3, 5)
+         >>> nn.init.constant_(w, 0.3)
+     """
+     if torch.overrides.has_torch_function_variadic(tensor):
+         return torch.overrides.handle_torch_function(constant_, (tensor,), tensor=tensor, val=val)
+     return _no_grad_fill_(tensor, val)
+
+
+ def ones_(tensor: Tensor) -> Tensor:
+     r"""Fill the input Tensor with the scalar value `1`.
+
+     Args:
+         tensor: an n-dimensional `torch.Tensor`
+
+     Examples:
+         >>> w = torch.empty(3, 5)
+         >>> nn.init.ones_(w)
+     """
+     return _no_grad_fill_(tensor, 1.)
+
+
+ def zeros_(tensor: Tensor) -> Tensor:
+     r"""Fill the input Tensor with the scalar value `0`.
+
+     Args:
+         tensor: an n-dimensional `torch.Tensor`
+
+     Examples:
+         >>> w = torch.empty(3, 5)
+         >>> nn.init.zeros_(w)
+     """
+     return _no_grad_zero_(tensor)
+
+
+ def eye_(tensor):
+     r"""Fill the 2-dimensional input `Tensor` with the identity matrix.
+
+     Preserves the identity of the inputs in `Linear` layers, where as
+     many inputs are preserved as possible.
+
+     Args:
+         tensor: a 2-dimensional `torch.Tensor`
+
+     Examples:
+         >>> w = torch.empty(3, 5)
+         >>> nn.init.eye_(w)
+     """
+     if tensor.ndimension() != 2:
+         raise ValueError("Only tensors with 2 dimensions are supported")
+
+     with torch.no_grad():
+         torch.eye(*tensor.shape, out=tensor, requires_grad=tensor.requires_grad)
+     return tensor
+
+
+ def dirac_(tensor, groups=1):
+     r"""Fill the {3, 4, 5}-dimensional input `Tensor` with the Dirac delta function.
+
+     Preserves the identity of the inputs in `Convolutional`
+     layers, where as many input channels are preserved as possible. In case
+     of groups>1, each group of channels preserves identity
+
+     Args:
+         tensor: a {3, 4, 5}-dimensional `torch.Tensor`
+         groups (int, optional): number of groups in the conv layer (default: 1)
+     Examples:
+         >>> w = torch.empty(3, 16, 5, 5)
+         >>> nn.init.dirac_(w)
+         >>> w = torch.empty(3, 24, 5, 5)
+         >>> nn.init.dirac_(w, 3)
+     """
+     dimensions = tensor.ndimension()
+     if dimensions not in [3, 4, 5]:
+         raise ValueError("Only tensors with 3, 4, or 5 dimensions are supported")
+
+     sizes = tensor.size()
+
+     if sizes[0] % groups != 0:
+         raise ValueError('dim 0 must be divisible by groups')
+
+     out_chans_per_grp = sizes[0] // groups
+     min_dim = min(out_chans_per_grp, sizes[1])
+
+     with torch.no_grad():
+         tensor.zero_()
+
+         for g in range(groups):
+             for d in range(min_dim):
+                 if dimensions == 3:  # Temporal convolution
+                     tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2] = 1
+                 elif dimensions == 4:  # Spatial convolution
+                     tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2,
+                            tensor.size(3) // 2] = 1
+                 else:  # Volumetric convolution
+                     tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2,
+                            tensor.size(3) // 2, tensor.size(4) // 2] = 1
+     return tensor
+
+
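Editor's note (not part of the uploaded file): a sketch of what dirac_ buys you in practice; with a same-padded convolution it turns the layer into an identity map over its input channels.

import torch
import torch.nn as nn

# dirac_ puts a 1 at the centre tap of weight[d, d, :, :], so a same-padded
# conv simply copies each input channel to the matching output channel.
conv = nn.Conv2d(3, 3, kernel_size=3, padding=1, bias=False)
nn.init.dirac_(conv.weight)

x = torch.randn(1, 3, 8, 8)
assert torch.allclose(conv(x), x)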
+ def _calculate_fan_in_and_fan_out(tensor):
+     dimensions = tensor.dim()
+     if dimensions < 2:
+         raise ValueError("Fan in and fan out can not be computed for tensor with fewer than 2 dimensions")
+
+     num_input_fmaps = tensor.size(1)
+     num_output_fmaps = tensor.size(0)
+     receptive_field_size = 1
+     if tensor.dim() > 2:
+         # math.prod is not always available, accumulate the product manually
+         # we could use functools.reduce but that is not supported by TorchScript
+         for s in tensor.shape[2:]:
+             receptive_field_size *= s
+     fan_in = num_input_fmaps * receptive_field_size
+     fan_out = num_output_fmaps * receptive_field_size
+
+     return fan_in, fan_out
+
+
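Editor's note (not part of the uploaded file): for a conv weight the fan values are just the channel counts times the kernel's receptive field, as in this worked example; it calls the private helper above purely for illustration.

import torch
import torch.nn as nn

# For a Conv2d weight of shape (out_channels, in_channels, kH, kW) = (16, 8, 3, 3):
#   receptive_field_size = 3 * 3 = 9
#   fan_in  = in_channels  * 9 = 72
#   fan_out = out_channels * 9 = 144
w = torch.empty(16, 8, 3, 3)
fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(w)
assert (fan_in, fan_out) == (72, 144)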
+ def xavier_uniform_(
+     tensor: Tensor, gain: float = 1.0, generator: _Optional[torch.Generator] = None
+ ) -> Tensor:
+     r"""Fill the input `Tensor` with values using a Xavier uniform distribution.
+
+     The method is described in `Understanding the difficulty of training
+     deep feedforward neural networks` - Glorot, X. & Bengio, Y. (2010).
+     The resulting tensor will have values sampled from
+     :math:`\mathcal{U}(-a, a)` where
+
+     .. math::
+         a = \text{gain} \times \sqrt{\frac{6}{\text{fan\_in} + \text{fan\_out}}}
+
+     Also known as Glorot initialization.
+
+     Args:
+         tensor: an n-dimensional `torch.Tensor`
+         gain: an optional scaling factor
+         generator: the torch Generator to sample from (default: None)
+
+     Examples:
+         >>> w = torch.empty(3, 5)
+         >>> nn.init.xavier_uniform_(w, gain=nn.init.calculate_gain('relu'))
+     """
+     fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
+     std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
+     a = math.sqrt(3.0) * std  # Calculate uniform bounds from standard deviation
+
+     return _no_grad_uniform_(tensor, -a, a, generator)
+
+
+ def xavier_normal_(
+     tensor: Tensor,
+     gain: float = 1.0,
+     generator: _Optional[torch.Generator] = None,
+ ) -> Tensor:
+     r"""Fill the input `Tensor` with values using a Xavier normal distribution.
+
+     The method is described in `Understanding the difficulty of training deep feedforward
+     neural networks` - Glorot, X. & Bengio, Y. (2010). The resulting tensor
+     will have values sampled from :math:`\mathcal{N}(0, \text{std}^2)` where
+
+     .. math::
+         \text{std} = \text{gain} \times \sqrt{\frac{2}{\text{fan\_in} + \text{fan\_out}}}
+
+     Also known as Glorot initialization.
+
+     Args:
+         tensor: an n-dimensional `torch.Tensor`
+         gain: an optional scaling factor
+         generator: the torch Generator to sample from (default: None)
+
+     Examples:
+         >>> w = torch.empty(3, 5)
+         >>> nn.init.xavier_normal_(w)
+     """
+     fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
+     std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
+
+     return _no_grad_normal_(tensor, 0., std, generator)
+
+
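Editor's note (not part of the uploaded file): a rough empirical check that xavier_normal_ produces the standard deviation given by the formula above; the 5% tolerance only accounts for sampling noise on a 256x128 matrix.

import math
import torch
import torch.nn as nn

w = torch.empty(256, 128)
nn.init.xavier_normal_(w)

expected_std = math.sqrt(2.0 / (256 + 128))  # gain = 1, fan_in = 128, fan_out = 256
assert abs(w.std().item() - expected_std) / expected_std < 0.05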
+ def _calculate_correct_fan(tensor, mode):
+     mode = mode.lower()
+     valid_modes = ['fan_in', 'fan_out']
+     if mode not in valid_modes:
+         raise ValueError(f"Mode {mode} not supported, please use one of {valid_modes}")
+
+     fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
+     return fan_in if mode == 'fan_in' else fan_out
+
+
+ def kaiming_uniform_(
+     tensor: Tensor,
+     a: float = 0,
+     mode: str = "fan_in",
+     nonlinearity: str = "leaky_relu",
+     generator: _Optional[torch.Generator] = None,
+ ):
+     r"""Fill the input `Tensor` with values using a Kaiming uniform distribution.
+
+     The method is described in `Delving deep into rectifiers: Surpassing
+     human-level performance on ImageNet classification` - He, K. et al. (2015).
+     The resulting tensor will have values sampled from
+     :math:`\mathcal{U}(-\text{bound}, \text{bound})` where
+
+     .. math::
+         \text{bound} = \text{gain} \times \sqrt{\frac{3}{\text{fan\_mode}}}
+
+     Also known as He initialization.
+
+     Args:
+         tensor: an n-dimensional `torch.Tensor`
+         a: the negative slope of the rectifier used after this layer (only
+             used with ``'leaky_relu'``)
+         mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
+             preserves the magnitude of the variance of the weights in the
+             forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
+             backwards pass.
+         nonlinearity: the non-linear function (`nn.functional` name),
+             recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
+         generator: the torch Generator to sample from (default: None)
+
+     Examples:
+         >>> w = torch.empty(3, 5)
+         >>> nn.init.kaiming_uniform_(w, mode='fan_in', nonlinearity='relu')
+     """
+     if torch.overrides.has_torch_function_variadic(tensor):
+         return torch.overrides.handle_torch_function(
+             kaiming_uniform_,
+             (tensor,),
+             tensor=tensor,
+             a=a,
+             mode=mode,
+             nonlinearity=nonlinearity,
+             generator=generator)
+
+     if 0 in tensor.shape:
+         warnings.warn("Initializing zero-element tensors is a no-op")
+         return tensor
+     fan = _calculate_correct_fan(tensor, mode)
+     gain = calculate_gain(nonlinearity, a)
+     std = gain / math.sqrt(fan)
+     bound = math.sqrt(3.0) * std  # Calculate uniform bounds from standard deviation
+     with torch.no_grad():
+         return tensor.uniform_(-bound, bound, generator=generator)
+
+
+ def kaiming_normal_(
+     tensor: Tensor,
+     a: float = 0,
+     mode: str = "fan_in",
+     nonlinearity: str = "leaky_relu",
+     generator: _Optional[torch.Generator] = None,
+ ):
+     r"""Fill the input `Tensor` with values using a Kaiming normal distribution.
+
+     The method is described in `Delving deep into rectifiers: Surpassing
+     human-level performance on ImageNet classification` - He, K. et al. (2015).
+     The resulting tensor will have values sampled from
+     :math:`\mathcal{N}(0, \text{std}^2)` where
+
+     .. math::
+         \text{std} = \frac{\text{gain}}{\sqrt{\text{fan\_mode}}}
+
+     Also known as He initialization.
+
+     Args:
+         tensor: an n-dimensional `torch.Tensor`
+         a: the negative slope of the rectifier used after this layer (only
+             used with ``'leaky_relu'``)
+         mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
+             preserves the magnitude of the variance of the weights in the
+             forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
+             backwards pass.
+         nonlinearity: the non-linear function (`nn.functional` name),
+             recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
+         generator: the torch Generator to sample from (default: None)
+
+     Examples:
+         >>> w = torch.empty(3, 5)
+         >>> nn.init.kaiming_normal_(w, mode='fan_out', nonlinearity='relu')
+     """
+     if 0 in tensor.shape:
+         warnings.warn("Initializing zero-element tensors is a no-op")
+         return tensor
+     fan = _calculate_correct_fan(tensor, mode)
+     gain = calculate_gain(nonlinearity, a)
+     std = gain / math.sqrt(fan)
+     with torch.no_grad():
+         return tensor.normal_(0, std, generator=generator)
+
+
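Editor's note (not part of the uploaded file): a common usage sketch, applying He initialization in fan_out mode to conv layers that feed into ReLU and zeroing the biases.

import torch.nn as nn

model = nn.Sequential(
    nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(),
    nn.Conv2d(16, 32, 3, padding=1), nn.ReLU(),
)
for m in model.modules():
    if isinstance(m, nn.Conv2d):
        nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        if m.bias is not None:
            nn.init.zeros_(m.bias)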
+ def orthogonal_(
+     tensor,
+     gain=1,
+     generator: _Optional[torch.Generator] = None,
+ ):
+     r"""Fill the input `Tensor` with a (semi) orthogonal matrix.
+
+     Described in `Exact solutions to the nonlinear dynamics of learning in deep
+     linear neural networks` - Saxe, A. et al. (2013). The input tensor must have
+     at least 2 dimensions, and for tensors with more than 2 dimensions the
+     trailing dimensions are flattened.
+
+     Args:
+         tensor: an n-dimensional `torch.Tensor`, where :math:`n \geq 2`
+         gain: optional scaling factor
+         generator: the torch Generator to sample from (default: None)
+
+     Examples:
+         >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK)
+         >>> w = torch.empty(3, 5)
+         >>> nn.init.orthogonal_(w)
+     """
+     if tensor.ndimension() < 2:
+         raise ValueError("Only tensors with 2 or more dimensions are supported")
+
+     if tensor.numel() == 0:
+         # no-op
+         return tensor
+     rows = tensor.size(0)
+     cols = tensor.numel() // rows
+     flattened = tensor.new(rows, cols).normal_(0, 1, generator=generator)
+
+     if rows < cols:
+         flattened.t_()
+
+     # Compute the qr factorization
+     q, r = torch.linalg.qr(flattened)
+     # Make Q uniform according to https://arxiv.org/pdf/math-ph/0609050.pdf
+     d = torch.diag(r, 0)
+     ph = d.sign()
+     q *= ph
+
+     if rows < cols:
+         q.t_()
+
+     with torch.no_grad():
+         tensor.view_as(q).copy_(q)
+         tensor.mul_(gain)
+     return tensor
+
+
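Editor's note (not part of the uploaded file): when rows <= cols the result of orthogonal_ has orthonormal rows, which a quick check can confirm directly.

import torch
import torch.nn as nn

w = torch.empty(3, 5)
nn.init.orthogonal_(w)

# With rows <= cols the rows form an orthonormal set, so w @ w.T is the identity.
assert torch.allclose(w @ w.t(), torch.eye(3), atol=1e-5)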
+ def sparse_(
+     tensor,
+     sparsity,
+     std=0.01,
+     generator: _Optional[torch.Generator] = None,
+ ):
+     r"""Fill the 2D input `Tensor` as a sparse matrix.
+
+     The non-zero elements will be drawn from the normal distribution
+     :math:`\mathcal{N}(0, 0.01)`, as described in `Deep learning via
+     Hessian-free optimization` - Martens, J. (2010).
+
+     Args:
+         tensor: an n-dimensional `torch.Tensor`
+         sparsity: The fraction of elements in each column to be set to zero
+         std: the standard deviation of the normal distribution used to generate
+             the non-zero values
+         generator: the torch Generator to sample from (default: None)
+
+     Examples:
+         >>> w = torch.empty(3, 5)
+         >>> nn.init.sparse_(w, sparsity=0.1)
+     """
+     if tensor.ndimension() != 2:
+         raise ValueError("Only tensors with 2 dimensions are supported")
+
+     rows, cols = tensor.shape
+     num_zeros = int(math.ceil(sparsity * rows))
+
+     with torch.no_grad():
+         tensor.normal_(0, std, generator=generator)
+         for col_idx in range(cols):
+             row_indices = torch.randperm(rows)
+             zero_indices = row_indices[:num_zeros]
+             tensor[zero_indices, col_idx] = 0
+     return tensor
+
+
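Editor's note (not part of the uploaded file): sparse_ zeroes ceil(sparsity * rows) entries in each column at randomly chosen rows, which is easy to verify on a throwaway tensor.

import math
import torch
import torch.nn as nn

w = torch.empty(100, 20)
nn.init.sparse_(w, sparsity=0.1)

zeros_per_col = (w == 0).sum(dim=0)
assert torch.all(zeros_per_col >= math.ceil(0.1 * 100))  # at least 10 zeros in every column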
+ # for backward compatibility
+ def _make_deprecate(meth):
+     new_name = meth.__name__
+     old_name = new_name[:-1]
+
+     def deprecated_init(*args, **kwargs):
+         warnings.warn(f"nn.init.{old_name} is now deprecated in favor of nn.init.{new_name}.", stacklevel=2)
+         return meth(*args, **kwargs)
+
+     deprecated_init.__doc__ = fr"""
+     {old_name}(...)
+
+     .. warning::
+         This method is now deprecated in favor of :func:`torch.nn.init.{new_name}`.
+
+     See :func:`~torch.nn.init.{new_name}` for details."""
+     deprecated_init.__name__ = old_name
+     return deprecated_init
+
+
+ uniform = _make_deprecate(uniform_)
+ normal = _make_deprecate(normal_)
+ constant = _make_deprecate(constant_)
+ eye = _make_deprecate(eye_)
+ dirac = _make_deprecate(dirac_)
+ xavier_uniform = _make_deprecate(xavier_uniform_)
+ xavier_normal = _make_deprecate(xavier_normal_)
+ kaiming_uniform = _make_deprecate(kaiming_uniform_)
+ kaiming_normal = _make_deprecate(kaiming_normal_)
+ orthogonal = _make_deprecate(orthogonal_)
+ sparse = _make_deprecate(sparse_)
venv/lib/python3.10/site-packages/torch/nn/intrinsic/__init__.py ADDED
@@ -0,0 +1,35 @@
+ from torch.ao.nn.intrinsic import ConvBn1d
+ from torch.ao.nn.intrinsic import ConvBn2d
+ from torch.ao.nn.intrinsic import ConvBn3d
+ from torch.ao.nn.intrinsic import ConvBnReLU1d
+ from torch.ao.nn.intrinsic import ConvBnReLU2d
+ from torch.ao.nn.intrinsic import ConvBnReLU3d
+ from torch.ao.nn.intrinsic import ConvReLU1d
+ from torch.ao.nn.intrinsic import ConvReLU2d
+ from torch.ao.nn.intrinsic import ConvReLU3d
+ from torch.ao.nn.intrinsic import LinearReLU
+ from torch.ao.nn.intrinsic import BNReLU2d
+ from torch.ao.nn.intrinsic import BNReLU3d
+ from torch.ao.nn.intrinsic import LinearBn1d
+ from torch.ao.nn.intrinsic.modules.fused import _FusedModule  # noqa: F401
+
+ # Include the subpackages in case user imports from it directly
+ from . import modules  # noqa: F401
+ from . import qat  # noqa: F401
+ from . import quantized  # noqa: F401
+
+ __all__ = [
+     'ConvBn1d',
+     'ConvBn2d',
+     'ConvBn3d',
+     'ConvBnReLU1d',
+     'ConvBnReLU2d',
+     'ConvBnReLU3d',
+     'ConvReLU1d',
+     'ConvReLU2d',
+     'ConvReLU3d',
+     'LinearReLU',
+     'BNReLU2d',
+     'BNReLU3d',
+     'LinearBn1d',
+ ]
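Editor's note (not part of the uploaded file): this legacy torch.nn.intrinsic namespace only re-exports classes that now live in torch.ao.nn.intrinsic, so the two names refer to the same types.

import torch.nn.intrinsic as nni
import torch.ao.nn.intrinsic as ao_nni

assert nni.ConvReLU2d is ao_nni.ConvReLU2d
assert nni.LinearReLU is ao_nni.LinearReLU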
venv/lib/python3.10/site-packages/torch/nn/intrinsic/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (887 Bytes).
 
venv/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__init__.py ADDED
@@ -0,0 +1,31 @@
+ from .fused import _FusedModule  # noqa: F401
+ from .fused import BNReLU2d
+ from .fused import BNReLU3d
+ from .fused import ConvBn1d
+ from .fused import ConvBn2d
+ from .fused import ConvBn3d
+ from .fused import ConvBnReLU1d
+ from .fused import ConvBnReLU2d
+ from .fused import ConvBnReLU3d
+ from .fused import ConvReLU1d
+ from .fused import ConvReLU2d
+ from .fused import ConvReLU3d
+ from .fused import LinearBn1d
+ from .fused import LinearReLU
+
+
+ __all__ = [
+     'BNReLU2d',
+     'BNReLU3d',
+     'ConvBn1d',
+     'ConvBn2d',
+     'ConvBn3d',
+     'ConvBnReLU1d',
+     'ConvBnReLU2d',
+     'ConvBnReLU3d',
+     'ConvReLU1d',
+     'ConvReLU2d',
+     'ConvReLU3d',
+     'LinearBn1d',
+     'LinearReLU',
+ ]
venv/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (747 Bytes).
 
venv/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__pycache__/fused.cpython-310.pyc ADDED
Binary file (797 Bytes).
 
venv/lib/python3.10/site-packages/torch/nn/intrinsic/modules/fused.py ADDED
@@ -0,0 +1,30 @@
+ from torch.ao.nn.intrinsic import BNReLU2d
+ from torch.ao.nn.intrinsic import BNReLU3d
+ from torch.ao.nn.intrinsic import ConvBn1d
+ from torch.ao.nn.intrinsic import ConvBn2d
+ from torch.ao.nn.intrinsic import ConvBn3d
+ from torch.ao.nn.intrinsic import ConvBnReLU1d
+ from torch.ao.nn.intrinsic import ConvBnReLU2d
+ from torch.ao.nn.intrinsic import ConvBnReLU3d
+ from torch.ao.nn.intrinsic import ConvReLU1d
+ from torch.ao.nn.intrinsic import ConvReLU2d
+ from torch.ao.nn.intrinsic import ConvReLU3d
+ from torch.ao.nn.intrinsic import LinearBn1d
+ from torch.ao.nn.intrinsic import LinearReLU
+ from torch.ao.nn.intrinsic.modules.fused import _FusedModule  # noqa: F401
+
+ __all__ = [
+     'BNReLU2d',
+     'BNReLU3d',
+     'ConvBn1d',
+     'ConvBn2d',
+     'ConvBn3d',
+     'ConvBnReLU1d',
+     'ConvBnReLU2d',
+     'ConvBnReLU3d',
+     'ConvReLU1d',
+     'ConvReLU2d',
+     'ConvReLU3d',
+     'LinearBn1d',
+     'LinearReLU',
+ ]
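Editor's note (not part of the uploaded file): these fused module types are what eager-mode fusion produces. A minimal sketch, assuming the torch.ao.quantization.fuse_modules API and a model in eval mode:

import torch.nn as nn
import torch.nn.intrinsic as nni
from torch.ao.quantization import fuse_modules

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
model.eval()

# Fusing (conv, bn, relu) folds the BatchNorm into the conv weights and
# replaces the triple with a single ConvReLU2d intrinsic module.
fused = fuse_modules(model, [['0', '1', '2']])
assert isinstance(fused[0], nni.ConvReLU2d)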
venv/lib/python3.10/site-packages/torch/nn/intrinsic/qat/__init__.py ADDED
@@ -0,0 +1 @@
+ from .modules import *  # noqa: F403
venv/lib/python3.10/site-packages/torch/nn/intrinsic/qat/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (214 Bytes).
 
venv/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__init__.py ADDED
@@ -0,0 +1,31 @@
+ from .linear_relu import LinearReLU
+ from .linear_fused import LinearBn1d
+ from .conv_fused import (
+     ConvBn1d,
+     ConvBn2d,
+     ConvBn3d,
+     ConvBnReLU1d,
+     ConvBnReLU2d,
+     ConvBnReLU3d,
+     ConvReLU1d,
+     ConvReLU2d,
+     ConvReLU3d,
+     update_bn_stats,
+     freeze_bn_stats,
+ )
+
+ __all__ = [
+     "LinearReLU",
+     "LinearBn1d",
+     "ConvReLU1d",
+     "ConvReLU2d",
+     "ConvReLU3d",
+     "ConvBn1d",
+     "ConvBn2d",
+     "ConvBn3d",
+     "ConvBnReLU1d",
+     "ConvBnReLU2d",
+     "ConvBnReLU3d",
+     "update_bn_stats",
+     "freeze_bn_stats",
+ ]
venv/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (642 Bytes).
 
venv/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/conv_fused.cpython-310.pyc ADDED
Binary file (1.04 kB).
 
venv/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/linear_fused.cpython-310.pyc ADDED
Binary file (648 Bytes).
 
venv/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/linear_relu.cpython-310.pyc ADDED
Binary file (647 Bytes).
 
venv/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/conv_fused.py ADDED
@@ -0,0 +1,37 @@
+ # flake8: noqa: F401
+ r"""Intrinsic QAT Modules.
+
+ This file is in the process of migration to `torch/ao/nn/intrinsic/qat`, and
+ is kept here for compatibility while the migration process is ongoing.
+ If you are adding a new entry/functionality, please, add it to the
+ appropriate file under the `torch/ao/nn/intrinsic/qat/modules`,
+ while adding an import statement here.
+ """
+
+ __all__ = [
+     # Modules
+     'ConvBn1d',
+     'ConvBnReLU1d',
+     'ConvReLU1d',
+     'ConvBn2d',
+     'ConvBnReLU2d',
+     'ConvReLU2d',
+     'ConvBn3d',
+     'ConvBnReLU3d',
+     'ConvReLU3d',
+     # Utilities
+     'freeze_bn_stats',
+     'update_bn_stats',
+ ]
+
+ from torch.ao.nn.intrinsic.qat import ConvBn1d
+ from torch.ao.nn.intrinsic.qat import ConvBnReLU1d
+ from torch.ao.nn.intrinsic.qat import ConvReLU1d
+ from torch.ao.nn.intrinsic.qat import ConvBn2d
+ from torch.ao.nn.intrinsic.qat import ConvBnReLU2d
+ from torch.ao.nn.intrinsic.qat import ConvReLU2d
+ from torch.ao.nn.intrinsic.qat import ConvBn3d
+ from torch.ao.nn.intrinsic.qat import ConvBnReLU3d
+ from torch.ao.nn.intrinsic.qat import ConvReLU3d
+ from torch.ao.nn.intrinsic.qat import freeze_bn_stats
+ from torch.ao.nn.intrinsic.qat import update_bn_stats
venv/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/linear_fused.py ADDED
@@ -0,0 +1,15 @@
+ # flake8: noqa: F401
+ r"""Intrinsic QAT Modules.
+
+ This file is in the process of migration to `torch/ao/nn/intrinsic/qat`, and
+ is kept here for compatibility while the migration process is ongoing.
+ If you are adding a new entry/functionality, please, add it to the
+ appropriate file under the `torch/ao/nn/intrinsic/qat/modules`,
+ while adding an import statement here.
+ """
+
+ __all__ = [
+     'LinearBn1d',
+ ]
+
+ from torch.ao.nn.intrinsic.qat import LinearBn1d
venv/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/linear_relu.py ADDED
@@ -0,0 +1,15 @@
+ # flake8: noqa: F401
+ r"""Intrinsic QAT Modules.
+
+ This file is in the process of migration to `torch/ao/nn/intrinsic/qat`, and
+ is kept here for compatibility while the migration process is ongoing.
+ If you are adding a new entry/functionality, please, add it to the
+ appropriate file under the `torch/ao/nn/intrinsic/qat/modules`,
+ while adding an import statement here.
+ """
+
+ __all__ = [
+     'LinearReLU',
+ ]
+
+ from torch.ao.nn.intrinsic.qat import LinearReLU
venv/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/__init__.py ADDED
@@ -0,0 +1,13 @@
+ from .modules import *  # noqa: F403
+ # to ensure customers can use the module below
+ # without importing it directly
+ import torch.nn.intrinsic.quantized.dynamic
+
+ __all__ = [
+     'BNReLU2d',
+     'BNReLU3d',
+     'ConvReLU1d',
+     'ConvReLU2d',
+     'ConvReLU3d',
+     'LinearReLU',
+ ]
venv/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (369 Bytes).
 
venv/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/__init__.py ADDED
@@ -0,0 +1 @@
+ from .modules import *  # noqa: F403