applied-ai-018 committed
Commit 788fc92 · verified · 1 Parent(s): 10452d3

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. ckpts/universal/global_step120/zero/29.vocab_parallel_projection.weight/fp32.pt +3 -0
  2. ckpts/universal/global_step120/zero/5.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  3. venv/lib/python3.10/site-packages/torch/backends/__init__.py +70 -0
  4. venv/lib/python3.10/site-packages/torch/backends/cudnn/__init__.py +206 -0
  5. venv/lib/python3.10/site-packages/torch/backends/cudnn/__pycache__/__init__.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/torch/backends/cudnn/__pycache__/rnn.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/torch/backends/cudnn/rnn.py +62 -0
  8. venv/lib/python3.10/site-packages/torch/backends/mha/__init__.py +24 -0
  9. venv/lib/python3.10/site-packages/torch/backends/mha/__pycache__/__init__.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/torch/backends/mkl/__init__.py +56 -0
  11. venv/lib/python3.10/site-packages/torch/backends/mkl/__pycache__/__init__.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/torch/backends/mkldnn/__init__.py +97 -0
  13. venv/lib/python3.10/site-packages/torch/backends/mkldnn/__pycache__/__init__.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/torch/backends/mps/__init__.py +54 -0
  15. venv/lib/python3.10/site-packages/torch/backends/opt_einsum/__init__.py +110 -0
  16. venv/lib/python3.10/site-packages/torch/backends/opt_einsum/__pycache__/__init__.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/torch/backends/quantized/__init__.py +65 -0
  18. venv/lib/python3.10/site-packages/torch/backends/quantized/__pycache__/__init__.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/torch/backends/xnnpack/__init__.py +28 -0
  20. venv/lib/python3.10/site-packages/torch/backends/xnnpack/__pycache__/__init__.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/__init__.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/bernoulli.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/beta.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/binomial.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/categorical.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/cauchy.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/chi2.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/constraint_registry.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/constraints.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/continuous_bernoulli.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/dirichlet.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/distribution.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/exp_family.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/exponential.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/fishersnedecor.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/gamma.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/geometric.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/gumbel.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/half_cauchy.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/half_normal.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/independent.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/inverse_gamma.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/kl.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/kumaraswamy.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/laplace.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/lkj_cholesky.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/log_normal.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/logistic_normal.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/lowrank_multivariate_normal.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/torch/distributions/__pycache__/mixture_same_family.cpython-310.pyc +0 -0
ckpts/universal/global_step120/zero/29.vocab_parallel_projection.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0af90c04e09d9f170443072e085ef161689484cbde3891a53fcf34e71cdefa42
+ size 415237197
ckpts/universal/global_step120/zero/5.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3c6864d6b71e14e9528be3d7891c348b9312e7290005978f96fcc17c31157d98
+ size 33555627
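The two checkpoint entries above are Git LFS pointer files: three `key value` lines (spec version, sha256 oid, byte size) that stand in for the actual binary blobs. A minimal sketch of reading one; `parse_lfs_pointer` is a hypothetical helper, not anything shipped in this upload:

    def parse_lfs_pointer(text: str) -> dict:
        # Hypothetical helper: each pointer line is "key value".
        fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
        assert fields["version"].startswith("https://git-lfs.github.com/spec/v1")
        algo, digest = fields["oid"].split(":", 1)
        return {"algo": algo, "digest": digest, "size": int(fields["size"])}

    pointer = (
        "version https://git-lfs.github.com/spec/v1\n"
        "oid sha256:0af90c04e09d9f170443072e085ef161689484cbde3891a53fcf34e71cdefa42\n"
        "size 415237197\n"
    )
    print(parse_lfs_pointer(pointer))  # {'algo': 'sha256', 'digest': '0af9...', 'size': 415237197}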
venv/lib/python3.10/site-packages/torch/backends/__init__.py ADDED
@@ -0,0 +1,70 @@
+ import types
+ from contextlib import contextmanager
+
+ # The idea for this parameter is that we forbid bare assignment
+ # to torch.backends.<cudnn|mkldnn>.enabled and friends when running our
+ # test suite, where it's very easy to forget to undo the change
+ # later.
+ __allow_nonbracketed_mutation_flag = True
+
+
+ def disable_global_flags():
+     global __allow_nonbracketed_mutation_flag
+     __allow_nonbracketed_mutation_flag = False
+
+
+ def flags_frozen():
+     return not __allow_nonbracketed_mutation_flag
+
+
+ @contextmanager
+ def __allow_nonbracketed_mutation():
+     global __allow_nonbracketed_mutation_flag
+     old = __allow_nonbracketed_mutation_flag
+     __allow_nonbracketed_mutation_flag = True
+     try:
+         yield
+     finally:
+         __allow_nonbracketed_mutation_flag = old
+
+
+ class ContextProp:
+     def __init__(self, getter, setter):
+         self.getter = getter
+         self.setter = setter
+
+     def __get__(self, obj, objtype):
+         return self.getter()
+
+     def __set__(self, obj, val):
+         if not flags_frozen():
+             self.setter(val)
+         else:
+             raise RuntimeError(
+                 "not allowed to set %s flags "
+                 "after disable_global_flags; please use flags() context manager instead"
+                 % obj.__name__
+             )
+
+
+ class PropModule(types.ModuleType):
+     def __init__(self, m, name):
+         super().__init__(name)
+         self.m = m
+
+     def __getattr__(self, attr):
+         return self.m.__getattribute__(attr)
+
+
+ from torch.backends import (
+     cpu as cpu,
+     cuda as cuda,
+     cudnn as cudnn,
+     mha as mha,
+     mkl as mkl,
+     mkldnn as mkldnn,
+     mps as mps,
+     nnpack as nnpack,
+     openmp as openmp,
+     quantized as quantized,
+ )
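Not part of the diff: a minimal sketch of how the machinery above behaves at runtime, assuming a standard PyTorch install. `disable_global_flags()` flips the module-level flag, after which `ContextProp.__set__` refuses bare assignment on any backend module built from `PropModule`; there is no public unfreeze, so the per-backend `flags()` context managers are the sanctioned way to mutate state afterwards.

    import torch

    torch.backends.cudnn.enabled = True  # allowed while flags are mutable
    torch.backends.disable_global_flags()
    print(torch.backends.flags_frozen())  # True
    try:
        torch.backends.cudnn.enabled = False  # now rejected by ContextProp.__set__
    except RuntimeError as e:
        print(e)  # "not allowed to set ... flags after disable_global_flags; ..."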
venv/lib/python3.10/site-packages/torch/backends/cudnn/__init__.py ADDED
@@ -0,0 +1,206 @@
+ import os
+ import sys
+ import warnings
+ from contextlib import contextmanager
+ from typing import Optional
+
+ import torch
+ from torch.backends import __allow_nonbracketed_mutation, ContextProp, PropModule
+
+ try:
+     from torch._C import _cudnn
+ except ImportError:
+     _cudnn = None  # type: ignore[assignment]
+
+ # Write:
+ #
+ #   torch.backends.cudnn.enabled = False
+ #
+ # to globally disable CuDNN/MIOpen
+
+ __cudnn_version: Optional[int] = None
+
+ if _cudnn is not None:
+
+     def _init():
+         global __cudnn_version
+         if __cudnn_version is None:
+             __cudnn_version = _cudnn.getVersionInt()
+             runtime_version = _cudnn.getRuntimeVersion()
+             compile_version = _cudnn.getCompileVersion()
+             runtime_major, runtime_minor, _ = runtime_version
+             compile_major, compile_minor, _ = compile_version
+             # Different major versions are always incompatible
+             # Starting with cuDNN 7, minor versions are backwards-compatible
+             # Not sure about MIOpen (ROCm), so always do a strict check
+             if runtime_major != compile_major:
+                 cudnn_compatible = False
+             elif runtime_major < 7 or not _cudnn.is_cuda:
+                 cudnn_compatible = runtime_minor == compile_minor
+             else:
+                 cudnn_compatible = runtime_minor >= compile_minor
+             if not cudnn_compatible:
+                 if os.environ.get("PYTORCH_SKIP_CUDNN_COMPATIBILITY_CHECK", "0") == "1":
+                     return True
+                 base_error_msg = (
+                     f"cuDNN version incompatibility: "
+                     f"PyTorch was compiled against {compile_version} "
+                     f"but found runtime version {runtime_version}. "
+                     f"PyTorch already comes bundled with cuDNN. "
+                     f"One option to resolving this error is to ensure PyTorch "
+                     f"can find the bundled cuDNN. "
+                 )
+
+                 if "LD_LIBRARY_PATH" in os.environ:
+                     ld_library_path = os.environ.get("LD_LIBRARY_PATH", "")
+                     if any(
+                         substring in ld_library_path for substring in ["cuda", "cudnn"]
+                     ):
+                         raise RuntimeError(
+                             f"{base_error_msg}"
+                             f"Looks like your LD_LIBRARY_PATH contains incompatible version of cudnn. "
+                             f"Please either remove it from the path or install cudnn {compile_version}"
+                         )
+                     else:
+                         raise RuntimeError(
+                             f"{base_error_msg}"
+                             f"one possibility is that there is a "
+                             f"conflicting cuDNN in LD_LIBRARY_PATH."
+                         )
+                 else:
+                     raise RuntimeError(base_error_msg)
+
+         return True
+
+ else:
+
+     def _init():
+         return False
+
+
+ def version():
+     """Return the version of cuDNN."""
+     if not _init():
+         return None
+     return __cudnn_version
+
+
+ CUDNN_TENSOR_DTYPES = {
+     torch.half,
+     torch.float,
+     torch.double,
+ }
+
+
+ def is_available():
+     r"""Return a bool indicating if CUDNN is currently available."""
+     return torch._C._has_cudnn
+
+
+ def is_acceptable(tensor):
+     if not torch._C._get_cudnn_enabled():
+         return False
+     if tensor.device.type != "cuda" or tensor.dtype not in CUDNN_TENSOR_DTYPES:
+         return False
+     if not is_available():
+         warnings.warn(
+             "PyTorch was compiled without cuDNN/MIOpen support. To use cuDNN/MIOpen, rebuild "
+             "PyTorch making sure the library is visible to the build system."
+         )
+         return False
+     if not _init():
+         warnings.warn(
+             "cuDNN/MIOpen library not found. Check your {libpath}".format(
+                 libpath={"darwin": "DYLD_LIBRARY_PATH", "win32": "PATH"}.get(
+                     sys.platform, "LD_LIBRARY_PATH"
+                 )
+             )
+         )
+         return False
+     return True
+
+
+ def set_flags(
+     _enabled=None,
+     _benchmark=None,
+     _benchmark_limit=None,
+     _deterministic=None,
+     _allow_tf32=None,
+ ):
+     orig_flags = (
+         torch._C._get_cudnn_enabled(),
+         torch._C._get_cudnn_benchmark(),
+         None if not is_available() else torch._C._cuda_get_cudnn_benchmark_limit(),
+         torch._C._get_cudnn_deterministic(),
+         torch._C._get_cudnn_allow_tf32(),
+     )
+     if _enabled is not None:
+         torch._C._set_cudnn_enabled(_enabled)
+     if _benchmark is not None:
+         torch._C._set_cudnn_benchmark(_benchmark)
+     if _benchmark_limit is not None and is_available():
+         torch._C._cuda_set_cudnn_benchmark_limit(_benchmark_limit)
+     if _deterministic is not None:
+         torch._C._set_cudnn_deterministic(_deterministic)
+     if _allow_tf32 is not None:
+         torch._C._set_cudnn_allow_tf32(_allow_tf32)
+     return orig_flags
+
+
+ @contextmanager
+ def flags(
+     enabled=False,
+     benchmark=False,
+     benchmark_limit=10,
+     deterministic=False,
+     allow_tf32=True,
+ ):
+     with __allow_nonbracketed_mutation():
+         orig_flags = set_flags(
+             enabled, benchmark, benchmark_limit, deterministic, allow_tf32
+         )
+     try:
+         yield
+     finally:
+         # recover the previous values
+         with __allow_nonbracketed_mutation():
+             set_flags(*orig_flags)
+
+
+ # The magic here is to allow us to intercept code like this:
+ #
+ #   torch.backends.<cudnn|mkldnn>.enabled = True
+
+
+ class CudnnModule(PropModule):
+     def __init__(self, m, name):
+         super().__init__(m, name)
+
+     enabled = ContextProp(torch._C._get_cudnn_enabled, torch._C._set_cudnn_enabled)
+     deterministic = ContextProp(
+         torch._C._get_cudnn_deterministic, torch._C._set_cudnn_deterministic
+     )
+     benchmark = ContextProp(
+         torch._C._get_cudnn_benchmark, torch._C._set_cudnn_benchmark
+     )
+     benchmark_limit = None
+     if is_available():
+         benchmark_limit = ContextProp(
+             torch._C._cuda_get_cudnn_benchmark_limit,
+             torch._C._cuda_set_cudnn_benchmark_limit,
+         )
+     allow_tf32 = ContextProp(
+         torch._C._get_cudnn_allow_tf32, torch._C._set_cudnn_allow_tf32
+     )
+
+
+ # This is the sys.modules replacement trick, see
+ # https://stackoverflow.com/questions/2447353/getattr-on-a-module/7668273#7668273
+ sys.modules[__name__] = CudnnModule(sys.modules[__name__], __name__)
+
+ # Add type annotation for the replaced module
+ enabled: bool
+ deterministic: bool
+ benchmark: bool
+ allow_tf32: bool
+ benchmark_limit: int
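Not part of the diff: a usage sketch of the `flags()` context manager defined above, assuming a CUDA-enabled PyTorch build (printed values are illustrative). `set_flags()` snapshots the previous values and the `finally` block restores them on exit:

    import torch

    print(torch.backends.cudnn.is_available())  # compiled-with-cuDNN check
    print(torch.backends.cudnn.version())       # e.g. 8902, or None without cuDNN

    with torch.backends.cudnn.flags(enabled=True, benchmark=True, deterministic=False):
        pass  # autotuner active only inside this block; prior flags restored on exit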
venv/lib/python3.10/site-packages/torch/backends/cudnn/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (4.79 kB).
 
venv/lib/python3.10/site-packages/torch/backends/cudnn/__pycache__/rnn.cpython-310.pyc ADDED
Binary file (1.81 kB).
 
venv/lib/python3.10/site-packages/torch/backends/cudnn/rnn.py ADDED
@@ -0,0 +1,62 @@
+ import torch.cuda
+
+ try:
+     from torch._C import _cudnn
+ except ImportError:
+     # Uses of all the functions below should be guarded by torch.backends.cudnn.is_available(),
+     # so it's safe to not emit any checks here.
+     _cudnn = None  # type: ignore[assignment]
+
+
+ def get_cudnn_mode(mode):
+     if mode == "RNN_RELU":
+         return int(_cudnn.RNNMode.rnn_relu)
+     elif mode == "RNN_TANH":
+         return int(_cudnn.RNNMode.rnn_tanh)
+     elif mode == "LSTM":
+         return int(_cudnn.RNNMode.lstm)
+     elif mode == "GRU":
+         return int(_cudnn.RNNMode.gru)
+     else:
+         raise Exception(f"Unknown mode: {mode}")
+
+
+ # NB: We don't actually need this class anymore (in fact, we could serialize the
+ # dropout state for even better reproducibility), but it is kept for backwards
+ # compatibility for old models.
+ class Unserializable:
+     def __init__(self, inner):
+         self.inner = inner
+
+     def get(self):
+         return self.inner
+
+     def __getstate__(self):
+         # Note: can't return {}, because python2 won't call __setstate__
+         # if the value evaluates to False
+         return "<unserializable>"
+
+     def __setstate__(self, state):
+         self.inner = None
+
+
+ def init_dropout_state(dropout, train, dropout_seed, dropout_state):
+     dropout_desc_name = "desc_" + str(torch.cuda.current_device())
+     dropout_p = dropout if train else 0
+     if (dropout_desc_name not in dropout_state) or (
+         dropout_state[dropout_desc_name].get() is None
+     ):
+         if dropout_p == 0:
+             dropout_state[dropout_desc_name] = Unserializable(None)
+         else:
+             dropout_state[dropout_desc_name] = Unserializable(
+                 torch._cudnn_init_dropout_state(  # type: ignore[call-arg]
+                     dropout_p,
+                     train,
+                     dropout_seed,
+                     self_ty=torch.uint8,
+                     device=torch.device("cuda"),
+                 )
+             )
+     dropout_ts = dropout_state[dropout_desc_name].get()
+     return dropout_ts
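Not part of the diff: a small sketch of `get_cudnn_mode`, which maps the mode strings used by `nn.RNN`/`nn.LSTM`/`nn.GRU` onto the internal `_cudnn.RNNMode` enum. It assumes a CUDA build; without one, `_cudnn` is None and the call fails:

    from torch.backends.cudnn import rnn

    print(rnn.get_cudnn_mode("LSTM"))  # enum value as int, e.g. 2
    try:
        rnn.get_cudnn_mode("BILSTM")
    except Exception as e:
        print(e)  # "Unknown mode: BILSTM"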
venv/lib/python3.10/site-packages/torch/backends/mha/__init__.py ADDED
@@ -0,0 +1,24 @@
+ # Config options to enable/disable C++ kernel for nn.functional.MHA
+ # and nn.TransformerEncoder
+ import torch
+
+ _is_fastpath_enabled: bool = True
+
+
+ def get_fastpath_enabled() -> bool:
+     """Returns whether fast path for TransformerEncoder and MultiHeadAttention
+     is enabled, or ``True`` if jit is scripting.
+
+     ..note:
+         The fastpath might not be run even if ``get_fastpath_enabled`` returns
+         ``True`` unless all conditions on inputs are met.
+     """
+     if not torch.jit.is_scripting():
+         return _is_fastpath_enabled
+     return True
+
+
+ def set_fastpath_enabled(value: bool) -> None:
+     """Sets whether fast path is enabled"""
+     global _is_fastpath_enabled
+     _is_fastpath_enabled = value
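Not part of the diff: toggling the flag above, assuming a standard PyTorch install. Note the scripting escape hatch: under `torch.jit.is_scripting()` the getter always reports `True`:

    import torch

    torch.backends.mha.set_fastpath_enabled(False)   # force the non-fused path
    assert torch.backends.mha.get_fastpath_enabled() is False
    torch.backends.mha.set_fastpath_enabled(True)    # restore the default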
venv/lib/python3.10/site-packages/torch/backends/mha/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (898 Bytes).
 
venv/lib/python3.10/site-packages/torch/backends/mkl/__init__.py ADDED
@@ -0,0 +1,56 @@
+ import torch
+
+
+ def is_available():
+     r"""Return whether PyTorch is built with MKL support."""
+     return torch._C.has_mkl
+
+
+ VERBOSE_OFF = 0
+ VERBOSE_ON = 1
+
+
+ class verbose:
+     """
+     On-demand oneMKL verbosing functionality.
+
+     To make it easier to debug performance issues, oneMKL can dump verbose
+     messages containing execution information like duration while executing
+     the kernel. The verbosing functionality can be invoked via an environment
+     variable named `MKL_VERBOSE`. However, this methodology dumps messages in
+     all steps. Those are a large amount of verbose messages. Moreover, for
+     investigating the performance issues, generally taking verbose messages
+     for one single iteration is enough. This on-demand verbosing functionality
+     makes it possible to control scope for verbose message dumping. In the
+     following example, verbose messages will be dumped out for the second
+     inference only.
+
+     .. highlight:: python
+     .. code-block:: python
+
+         import torch
+         model(data)
+         with torch.backends.mkl.verbose(torch.backends.mkl.VERBOSE_ON):
+             model(data)
+
+     Args:
+         level: Verbose level
+             - ``VERBOSE_OFF``: Disable verbosing
+             - ``VERBOSE_ON``: Enable verbosing
+     """
+
+     def __init__(self, enable):
+         self.enable = enable
+
+     def __enter__(self):
+         if self.enable == VERBOSE_OFF:
+             return
+         st = torch._C._verbose.mkl_set_verbose(self.enable)
+         assert (
+             st
+         ), "Failed to set MKL into verbose mode. Please consider to disable this verbose scope."
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         torch._C._verbose.mkl_set_verbose(VERBOSE_OFF)
+         return False
venv/lib/python3.10/site-packages/torch/backends/mkl/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.31 kB).
 
venv/lib/python3.10/site-packages/torch/backends/mkldnn/__init__.py ADDED
@@ -0,0 +1,97 @@
+ import sys
+ from contextlib import contextmanager
+
+ from typing import TYPE_CHECKING
+
+ import torch
+ from torch.backends import __allow_nonbracketed_mutation, ContextProp, PropModule
+
+
+ def is_available():
+     r"""Return whether PyTorch is built with MKL-DNN support."""
+     return torch._C._has_mkldnn
+
+
+ VERBOSE_OFF = 0
+ VERBOSE_ON = 1
+ VERBOSE_ON_CREATION = 2
+
+
+ class verbose:
+     """
+     On-demand oneDNN (former MKL-DNN) verbosing functionality.
+
+     To make it easier to debug performance issues, oneDNN can dump verbose
+     messages containing information like kernel size, input data size and
+     execution duration while executing the kernel. The verbosing functionality
+     can be invoked via an environment variable named `DNNL_VERBOSE`. However,
+     this methodology dumps messages in all steps. Those are a large amount of
+     verbose messages. Moreover, for investigating the performance issues,
+     generally taking verbose messages for one single iteration is enough.
+     This on-demand verbosing functionality makes it possible to control scope
+     for verbose message dumping. In the following example, verbose messages
+     will be dumped out for the second inference only.
+
+     .. highlight:: python
+     .. code-block:: python
+
+         import torch
+         model(data)
+         with torch.backends.mkldnn.verbose(torch.backends.mkldnn.VERBOSE_ON):
+             model(data)
+
+     Args:
+         level: Verbose level
+             - ``VERBOSE_OFF``: Disable verbosing
+             - ``VERBOSE_ON``: Enable verbosing
+             - ``VERBOSE_ON_CREATION``: Enable verbosing, including oneDNN kernel creation
+     """
+
+     def __init__(self, level):
+         self.level = level
+
+     def __enter__(self):
+         if self.level == VERBOSE_OFF:
+             return
+         st = torch._C._verbose.mkldnn_set_verbose(self.level)
+         assert (
+             st
+         ), "Failed to set MKLDNN into verbose mode. Please consider to disable this verbose scope."
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         torch._C._verbose.mkldnn_set_verbose(VERBOSE_OFF)
+         return False
+
+
+ def set_flags(_enabled):
+     orig_flags = (torch._C._get_mkldnn_enabled(),)
+     torch._C._set_mkldnn_enabled(_enabled)
+     return orig_flags
+
+
+ @contextmanager
+ def flags(enabled=False):
+     with __allow_nonbracketed_mutation():
+         orig_flags = set_flags(enabled)
+     try:
+         yield
+     finally:
+         with __allow_nonbracketed_mutation():
+             set_flags(orig_flags[0])
+
+
+ class MkldnnModule(PropModule):
+     def __init__(self, m, name):
+         super().__init__(m, name)
+
+     enabled = ContextProp(torch._C._get_mkldnn_enabled, torch._C._set_mkldnn_enabled)
+
+
+ if TYPE_CHECKING:
+     enabled: ContextProp
+
+
+ # Cool stuff from torch/backends/cudnn/__init__.py and
+ # https://stackoverflow.com/questions/2447353/getattr-on-a-module/7668273#7668273
+ sys.modules[__name__] = MkldnnModule(sys.modules[__name__], __name__)
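Not part of the diff: a sketch of the `flags()` context manager above (the docstring already demonstrates `verbose`), assuming a standard PyTorch install. The previous enabled state is snapshotted by `set_flags()` and restored on exit:

    import torch

    print(torch.backends.mkldnn.is_available())  # built with MKL-DNN/oneDNN?
    with torch.backends.mkldnn.flags(enabled=False):
        pass  # ops in this block avoid the oneDNN path; prior state restored after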
venv/lib/python3.10/site-packages/torch/backends/mkldnn/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.69 kB).
 
venv/lib/python3.10/site-packages/torch/backends/mps/__init__.py ADDED
@@ -0,0 +1,54 @@
+ from functools import lru_cache as _lru_cache
+
+ from typing import Optional
+
+ import torch
+ from ...library import Library as _Library
+
+ __all__ = ["is_built", "is_available", "is_macos13_or_newer", "is_macos_or_newer"]
+
+
+ def is_built() -> bool:
+     r"""Return whether PyTorch is built with MPS support.
+
+     Note that this doesn't necessarily mean MPS is available; just that
+     if this PyTorch binary were run on a machine with working MPS drivers
+     and devices, we would be able to use it.
+     """
+     return torch._C._has_mps
+
+
+ @_lru_cache
+ def is_available() -> bool:
+     r"""Return a bool indicating if MPS is currently available."""
+     return torch._C._mps_is_available()
+
+
+ @_lru_cache
+ def is_macos_or_newer(major: int, minor: int) -> bool:
+     r"""Return a bool indicating whether MPS is running on given MacOS or newer."""
+     return torch._C._mps_is_on_macos_or_newer(major, minor)
+
+
+ @_lru_cache
+ def is_macos13_or_newer(minor: int = 0) -> bool:
+     r"""Return a bool indicating whether MPS is running on MacOS 13 or newer."""
+     return torch._C._mps_is_on_macos_or_newer(13, minor)
+
+
+ _lib: Optional[_Library] = None
+
+
+ def _init():
+     r"""Register prims as implementation of var_mean and group_norm."""
+     global _lib
+     if is_built() is False or _lib is not None:
+         return
+     from ..._decomp.decompositions import (
+         native_group_norm_backward as _native_group_norm_backward,
+     )
+     from ..._refs import native_group_norm as _native_group_norm
+
+     _lib = _Library("aten", "IMPL")
+     _lib.impl("native_group_norm", _native_group_norm, "MPS")
+     _lib.impl("native_group_norm_backward", _native_group_norm_backward, "MPS")
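Not part of the diff: the usual availability check for the module above, assuming any standard PyTorch build (on non-macOS machines both checks simply return False):

    import torch

    if torch.backends.mps.is_built() and torch.backends.mps.is_available():
        x = torch.ones(3, device="mps")   # Apple-GPU tensor
    else:
        x = torch.ones(3)                 # CPU fallback
    print(x.device)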
venv/lib/python3.10/site-packages/torch/backends/opt_einsum/__init__.py ADDED
@@ -0,0 +1,110 @@
+ import sys
+ import warnings
+ from contextlib import contextmanager
+ from functools import lru_cache as _lru_cache
+ from typing import Any
+
+ from torch.backends import __allow_nonbracketed_mutation, ContextProp, PropModule
+
+ try:
+     import opt_einsum as _opt_einsum  # type: ignore[import]
+ except ImportError:
+     _opt_einsum = None
+
+
+ @_lru_cache
+ def is_available() -> bool:
+     r"""Return a bool indicating if opt_einsum is currently available."""
+     return _opt_einsum is not None
+
+
+ def get_opt_einsum() -> Any:
+     r"""Return the opt_einsum package if opt_einsum is currently available, else None."""
+     return _opt_einsum
+
+
+ def _set_enabled(_enabled: bool) -> None:
+     if not is_available() and _enabled:
+         raise ValueError(
+             f"opt_einsum is not available, so setting `enabled` to {_enabled} will not reap "
+             "the benefits of calculating an optimal path for einsum. torch.einsum will "
+             "fall back to contracting from left to right. To enable this optimal path "
+             "calculation, please install opt-einsum."
+         )
+     global enabled
+     enabled = _enabled
+
+
+ def _get_enabled() -> bool:
+     return enabled
+
+
+ def _set_strategy(_strategy: str) -> None:
+     if not is_available():
+         raise ValueError(
+             f"opt_einsum is not available, so setting `strategy` to {_strategy} will not be meaningful. "
+             "torch.einsum will bypass path calculation and simply contract from left to right. "
+             "Please install opt_einsum or unset `strategy`."
+         )
+     if not enabled:
+         raise ValueError(
+             f"opt_einsum is not enabled, so setting a `strategy` to {_strategy} will not be meaningful. "
+             "torch.einsum will bypass path calculation and simply contract from left to right. "
+             "Please set `enabled` to `True` as well or unset `strategy`."
+         )
+     if _strategy not in ["auto", "greedy", "optimal"]:
+         raise ValueError(
+             f"`strategy` must be one of the following: [auto, greedy, optimal] but is {_strategy}"
+         )
+     global strategy
+     strategy = _strategy
+
+
+ def _get_strategy() -> str:
+     return strategy
+
+
+ def set_flags(_enabled=None, _strategy=None):
+     orig_flags = (enabled, None if not is_available() else strategy)
+     if _enabled is not None:
+         _set_enabled(_enabled)
+     if _strategy is not None:
+         _set_strategy(_strategy)
+     return orig_flags
+
+
+ @contextmanager
+ def flags(enabled=None, strategy=None):
+     with __allow_nonbracketed_mutation():
+         orig_flags = set_flags(enabled, strategy)
+     try:
+         yield
+     finally:
+         # recover the previous values
+         with __allow_nonbracketed_mutation():
+             set_flags(*orig_flags)
+
+
+ # The magic here is to allow us to intercept code like this:
+ #
+ #   torch.backends.opt_einsum.enabled = True
+
+
+ class OptEinsumModule(PropModule):
+     def __init__(self, m, name):
+         super().__init__(m, name)
+
+     global enabled
+     enabled = ContextProp(_get_enabled, _set_enabled)
+     global strategy
+     strategy = None
+     if is_available():
+         strategy = ContextProp(_get_strategy, _set_strategy)
+
+
+ # This is the sys.modules replacement trick, see
+ # https://stackoverflow.com/questions/2447353/getattr-on-a-module/7668273#7668273
+ sys.modules[__name__] = OptEinsumModule(sys.modules[__name__], __name__)
+
+ enabled = True if is_available() else False
+ strategy = "auto" if is_available() else None
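Not part of the diff: a sketch of the opt_einsum flags, assuming the optional `opt-einsum` package may or may not be installed (which is exactly what `is_available()` reports):

    import torch

    a, b, c = (torch.randn(8, 8) for _ in range(3))
    if torch.backends.opt_einsum.is_available():
        # strategy must be "auto", "greedy" or "optimal" per _set_strategy above
        with torch.backends.opt_einsum.flags(enabled=True, strategy="greedy"):
            out = torch.einsum("ij,jk,kl->il", a, b, c)
    else:
        out = torch.einsum("ij,jk,kl->il", a, b, c)  # left-to-right contraction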
venv/lib/python3.10/site-packages/torch/backends/opt_einsum/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.47 kB).
 
venv/lib/python3.10/site-packages/torch/backends/quantized/__init__.py ADDED
@@ -0,0 +1,65 @@
+ import sys
+ import types
+ from typing import List
+
+ import torch
+
+
+ # This function should correspond to the enums present in c10/core/QEngine.h
+ def _get_qengine_id(qengine: str) -> int:
+     if qengine == "none" or qengine == "" or qengine is None:
+         ret = 0
+     elif qengine == "fbgemm":
+         ret = 1
+     elif qengine == "qnnpack":
+         ret = 2
+     elif qengine == "onednn":
+         ret = 3
+     elif qengine == "x86":
+         ret = 4
+     else:
+         ret = -1
+         raise RuntimeError(f"{qengine} is not a valid value for quantized engine")
+     return ret
+
+
+ # This function should correspond to the enums present in c10/core/QEngine.h
+ def _get_qengine_str(qengine: int) -> str:
+     all_engines = {0: "none", 1: "fbgemm", 2: "qnnpack", 3: "onednn", 4: "x86"}
+     return all_engines.get(qengine, "*undefined")
+
+
+ class _QEngineProp:
+     def __get__(self, obj, objtype) -> str:
+         return _get_qengine_str(torch._C._get_qengine())
+
+     def __set__(self, obj, val: str) -> None:
+         torch._C._set_qengine(_get_qengine_id(val))
+
+
+ class _SupportedQEnginesProp:
+     def __get__(self, obj, objtype) -> List[str]:
+         qengines = torch._C._supported_qengines()
+         return [_get_qengine_str(qe) for qe in qengines]
+
+     def __set__(self, obj, val) -> None:
+         raise RuntimeError("Assignment not supported")
+
+
+ class QuantizedEngine(types.ModuleType):
+     def __init__(self, m, name):
+         super().__init__(name)
+         self.m = m
+
+     def __getattr__(self, attr):
+         return self.m.__getattribute__(attr)
+
+     engine = _QEngineProp()
+     supported_engines = _SupportedQEnginesProp()
+
+
+ # This is the sys.modules replacement trick, see
+ # https://stackoverflow.com/questions/2447353/getattr-on-a-module/7668273#7668273
+ sys.modules[__name__] = QuantizedEngine(sys.modules[__name__], __name__)
+ engine: str
+ supported_engines: List[str]
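Not part of the diff: a sketch of the two module properties above, assuming a standard PyTorch install; which engines appear is platform-dependent:

    import torch

    engines = torch.backends.quantized.supported_engines
    print(engines)                                 # e.g. ['none', 'fbgemm', 'x86']
    torch.backends.quantized.engine = engines[-1]  # routed through _QEngineProp.__set__
    print(torch.backends.quantized.engine)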
venv/lib/python3.10/site-packages/torch/backends/quantized/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.83 kB).
 
venv/lib/python3.10/site-packages/torch/backends/xnnpack/__init__.py ADDED
@@ -0,0 +1,28 @@
+ import sys
+ import types
+
+ import torch
+
+
+ class _XNNPACKEnabled:
+     def __get__(self, obj, objtype):
+         return torch._C._is_xnnpack_enabled()
+
+     def __set__(self, obj, val):
+         raise RuntimeError("Assignment not supported")
+
+
+ class XNNPACKEngine(types.ModuleType):
+     def __init__(self, m, name):
+         super().__init__(name)
+         self.m = m
+
+     def __getattr__(self, attr):
+         return self.m.__getattribute__(attr)
+
+     enabled = _XNNPACKEnabled()
+
+
+ # This is the sys.modules replacement trick, see
+ # https://stackoverflow.com/questions/2447353/getattr-on-a-module/7668273#7668273
+ sys.modules[__name__] = XNNPACKEngine(sys.modules[__name__], __name__)
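Not part of the diff: the property above is read-only, so a sketch only needs a few lines (assuming a standard PyTorch install):

    import torch

    print(torch.backends.xnnpack.enabled)  # reflects the build-time XNNPACK flag
    try:
        torch.backends.xnnpack.enabled = False
    except RuntimeError:
        pass  # _XNNPACKEnabled.__set__ always raises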
venv/lib/python3.10/site-packages/torch/backends/xnnpack/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.33 kB).
 
venv/lib/python3.10/site-packages/torch/distributions/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (5.98 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/bernoulli.cpython-310.pyc ADDED
Binary file (4.77 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/beta.cpython-310.pyc ADDED
Binary file (3.86 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/binomial.cpython-310.pyc ADDED
Binary file (5.43 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/categorical.cpython-310.pyc ADDED
Binary file (5.98 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/cauchy.cpython-310.pyc ADDED
Binary file (3.5 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/chi2.cpython-310.pyc ADDED
Binary file (1.54 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/constraint_registry.cpython-310.pyc ADDED
Binary file (9.95 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/constraints.cpython-310.pyc ADDED
Binary file (21.7 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/continuous_bernoulli.cpython-310.pyc ADDED
Binary file (8.16 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/dirichlet.cpython-310.pyc ADDED
Binary file (4.58 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/distribution.cpython-310.pyc ADDED
Binary file (12.3 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/exp_family.cpython-310.pyc ADDED
Binary file (2.97 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/exponential.cpython-310.pyc ADDED
Binary file (3.5 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/fishersnedecor.cpython-310.pyc ADDED
Binary file (3.52 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/gamma.cpython-310.pyc ADDED
Binary file (3.97 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/geometric.cpython-310.pyc ADDED
Binary file (4.66 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/gumbel.cpython-310.pyc ADDED
Binary file (3.12 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/half_cauchy.cpython-310.pyc ADDED
Binary file (3.19 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/half_normal.cpython-310.pyc ADDED
Binary file (3.1 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/independent.cpython-310.pyc ADDED
Binary file (4.88 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/inverse_gamma.cpython-310.pyc ADDED
Binary file (3 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/kl.cpython-310.pyc ADDED
Binary file (27 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/kumaraswamy.cpython-310.pyc ADDED
Binary file (3.45 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/laplace.cpython-310.pyc ADDED
Binary file (3.66 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/lkj_cholesky.cpython-310.pyc ADDED
Binary file (4.78 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/log_normal.cpython-310.pyc ADDED
Binary file (2.71 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/logistic_normal.cpython-310.pyc ADDED
Binary file (2.41 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/lowrank_multivariate_normal.cpython-310.pyc ADDED
Binary file (8.24 kB).

venv/lib/python3.10/site-packages/torch/distributions/__pycache__/mixture_same_family.cpython-310.pyc ADDED
Binary file (7.23 kB).