applied-ai-018 commited on
Commit
3afcb91
·
verified ·
1 Parent(s): 46d9881

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. ckpts/universal/global_step120/zero/16.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  2. ckpts/universal/global_step120/zero/16.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step120/zero/4.attention.dense.weight/fp32.pt +3 -0
  4. venv/lib/python3.10/site-packages/torch/_logging/__init__.py +16 -0
  5. venv/lib/python3.10/site-packages/torch/_logging/__pycache__/__init__.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/torch/_logging/__pycache__/_internal.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/torch/_logging/__pycache__/_registrations.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/torch/_logging/__pycache__/structured.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/torch/_logging/_internal.py +1085 -0
  10. venv/lib/python3.10/site-packages/torch/_logging/_registrations.py +134 -0
  11. venv/lib/python3.10/site-packages/torch/_logging/structured.py +37 -0
  12. venv/lib/python3.10/site-packages/torch/_numpy/__init__.py +30 -0
  13. venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/_binary_ufuncs_impl.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/_normalizations.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/_reductions_impl.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/_unary_ufuncs_impl.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/_util.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/linalg.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/random.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/torch/_numpy/_binary_ufuncs_impl.py +86 -0
  21. venv/lib/python3.10/site-packages/torch/_numpy/_casting_dicts.py +881 -0
  22. venv/lib/python3.10/site-packages/torch/_numpy/_dtypes.py +434 -0
  23. venv/lib/python3.10/site-packages/torch/_numpy/_dtypes_impl.py +216 -0
  24. venv/lib/python3.10/site-packages/torch/_numpy/_funcs.py +75 -0
  25. venv/lib/python3.10/site-packages/torch/_numpy/_funcs_impl.py +2053 -0
  26. venv/lib/python3.10/site-packages/torch/_numpy/_getlimits.py +15 -0
  27. venv/lib/python3.10/site-packages/torch/_numpy/_ndarray.py +591 -0
  28. venv/lib/python3.10/site-packages/torch/_numpy/_normalizations.py +258 -0
  29. venv/lib/python3.10/site-packages/torch/_numpy/_reductions_impl.py +456 -0
  30. venv/lib/python3.10/site-packages/torch/_numpy/_ufuncs.py +334 -0
  31. venv/lib/python3.10/site-packages/torch/_numpy/_unary_ufuncs_impl.py +73 -0
  32. venv/lib/python3.10/site-packages/torch/_numpy/_util.py +261 -0
  33. venv/lib/python3.10/site-packages/torch/_numpy/fft.py +130 -0
  34. venv/lib/python3.10/site-packages/torch/_numpy/linalg.py +239 -0
  35. venv/lib/python3.10/site-packages/torch/_numpy/random.py +191 -0
  36. venv/lib/python3.10/site-packages/torch/nn/backends/__init__.py +0 -0
  37. venv/lib/python3.10/site-packages/torch/nn/backends/__pycache__/__init__.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/torch/nn/backends/__pycache__/thnn.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/torch/nn/backends/thnn.py +4 -0
  40. venv/lib/python3.10/site-packages/torch/nn/parallel/__init__.py +14 -0
  41. venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/__init__.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/_functions.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/comm.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/data_parallel.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/distributed.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/parallel_apply.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/replicate.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/scatter_gather.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/torch/nn/parallel/_functions.py +126 -0
  50. venv/lib/python3.10/site-packages/torch/nn/parallel/comm.py +236 -0
ckpts/universal/global_step120/zero/16.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fb85a7743d2d09da9ec7f26db1767aaaeef955d4077692bd576c4ecd86c262da
3
+ size 33555533
ckpts/universal/global_step120/zero/16.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d3fc099776710bd3754d0c00898989896ebfccd8bfc8166db8f399c0b25a1582
3
+ size 33555533
ckpts/universal/global_step120/zero/4.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:68d950b662024ae4be69b37e2feeb330719bc7a596c4452c5a3eeab689f31ba4
3
+ size 16778317
venv/lib/python3.10/site-packages/torch/_logging/__init__.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Top level logging module for torch logging
2
+ # Design doc: https://docs.google.com/document/d/1ZRfTWKa8eaPq1AxaiHrq4ASTPouzzlPiuquSBEJYwS8/edit#
3
+ # Simple setup for onboarding (see above doc for more detail):
4
+ # 1. register any top-level log qualified name for your module in torch._logging._registrations (see there for examples)
5
+ # 2. register any artifacts (<artifact_name> below) in torch._logging._registrations
6
+ # a. call getArtifactLogger(__name__, <artifact_name>) at your logging site instead of the standard logger to log your artifact
7
+ import torch._logging._registrations
8
+ from ._internal import (
9
+ _init_logs,
10
+ DEFAULT_LOGGING,
11
+ getArtifactLogger,
12
+ LazyString,
13
+ set_logs,
14
+ trace_structured,
15
+ warning_once,
16
+ )
venv/lib/python3.10/site-packages/torch/_logging/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (423 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/_logging/__pycache__/_internal.cpython-310.pyc ADDED
Binary file (31.5 kB). View file
 
venv/lib/python3.10/site-packages/torch/_logging/__pycache__/_registrations.cpython-310.pyc ADDED
Binary file (4.24 kB). View file
 
venv/lib/python3.10/site-packages/torch/_logging/__pycache__/structured.cpython-310.pyc ADDED
Binary file (1.15 kB). View file
 
venv/lib/python3.10/site-packages/torch/_logging/_internal.py ADDED
@@ -0,0 +1,1085 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+ import hashlib
3
+ import itertools
4
+ import json
5
+ import logging
6
+ import os
7
+ import os.path
8
+ import re
9
+ import tempfile
10
+ from dataclasses import dataclass, field
11
+ from importlib import __import__
12
+ from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
13
+ from weakref import WeakSet
14
+
15
+ log = logging.getLogger(__name__)
16
+
17
+ # This is a synthetic logger which doesn't correspond to an actual logger,
18
+ # but handles all of our "tracing" logging, which is structured and doesn't go
19
+ # to stderr but always goes to a dedicated log file. We don't put these
20
+ # loggers in the classic module hierarchy, because we don't want a suppression
21
+ # of logs to also cause a trace to get suppressed (traces typically are not
22
+ # collected, unless we are in prod, in which case they always are collected.)
23
+ #
24
+ # TODO: Maybe we should allow for some sub-hierarchy so you can control which
25
+ # traces you want to collect, for performance reasons.
26
+ #
27
+ # See https://docs.google.com/document/d/1CX_hJ0PNy9f3R1y8TJrfkSeLkvGjjjLU84BSXgS2AZ8/edit
28
+ trace_log = logging.getLogger("torch.__trace")
29
+
30
+ DEFAULT_LOG_LEVEL = logging.WARNING
31
+ LOG_ENV_VAR = "TORCH_LOGS"
32
+ LOG_OUT_ENV_VAR = "TORCH_LOGS_OUT"
33
+ LOG_FORMAT_ENV_VAR = "TORCH_LOGS_FORMAT"
34
+ TRACE_ENV_VAR = "TORCH_TRACE"
35
+
36
+
37
+ @dataclass
38
+ class LogRegistry:
39
+ # shorthand name to log qualified name
40
+ # Note: this only contains loggers registered
41
+ # from register_log
42
+ # e.g. "dynamo" -> "torch._dynamo"
43
+ log_alias_to_log_qnames: Dict[str, List[str]] = field(default_factory=dict)
44
+
45
+ # artifact logger qualified names,
46
+ # this is populated lazily, as calls to getArtifactLogger
47
+ # currently formatted as <module>.__<artifact_name>
48
+ # e.g. "torch._dynamo.convert_frame.__guards"
49
+ artifact_log_qnames: Set[str] = field(default_factory=set)
50
+
51
+ # child logs of registered logs if specified via open
52
+ # registration by the user (ie placing "torch._dynamo.output_graph" in the env var)
53
+ # these need to be tracked so their levels can be reset properly
54
+ # e.g. "torch._dynamo.output_graph"
55
+ child_log_qnames: Set[str] = field(default_factory=set)
56
+
57
+ # artifact names, populated by register_artifact
58
+ # e.g. "guards"
59
+ artifact_names: Set[str] = field(default_factory=set)
60
+
61
+ # Artifacts that should be visible by default in the error message
62
+ visible_artifacts: Set[str] = field(default_factory=set)
63
+
64
+ # A short description of each artifact
65
+ artifact_descriptions: Dict[str, str] = field(default_factory=dict)
66
+
67
+ # artifacts which are not displayed unless explicitly named in the
68
+ # settings. Ex. output_code is NOT displayed even if the inductor
69
+ # log level is set to DEBUG. It must be explicitly named in the settings
70
+ off_by_default_artifact_names: Set[str] = field(default_factory=set)
71
+
72
+ # logging format string for artifacts
73
+ artifact_log_formatters: Dict[str, logging.Formatter] = field(default_factory=dict)
74
+
75
+ def is_artifact(self, name):
76
+ return name in self.artifact_names
77
+
78
+ def is_log(self, alias):
79
+ return alias in self.log_alias_to_log_qnames
80
+
81
+ # register a log with an alias
82
+ def register_log(self, alias, log_qnames: Union[str, List[str]]):
83
+ if isinstance(log_qnames, str):
84
+ log_qnames = [log_qnames]
85
+ self.log_alias_to_log_qnames[alias] = log_qnames
86
+
87
+ # register an artifact name
88
+ def register_artifact_name(
89
+ self, name, description, visible, off_by_default, log_format
90
+ ):
91
+ self.artifact_names.add(name)
92
+ if visible:
93
+ self.visible_artifacts.add(name)
94
+ self.artifact_descriptions[name] = description
95
+
96
+ # if off by default, don't enable it
97
+ # when log_name's log_level is set to DEBUG
98
+ if off_by_default:
99
+ self.off_by_default_artifact_names.add(name)
100
+
101
+ if log_format is not None:
102
+ self.artifact_log_formatters[name] = logging.Formatter(log_format)
103
+
104
+ # register the qualified name of an artifact log
105
+ # this is needed to know which logs need to be reset
106
+ # whenever the log_state is changed
107
+ def register_artifact_log(self, artifact_log_qname):
108
+ self.artifact_log_qnames.add(artifact_log_qname)
109
+
110
+ def register_child_log(self, log_qname):
111
+ self.child_log_qnames.add(log_qname)
112
+
113
+ # flattens all the qnames together (TODO: consider memoizing?)
114
+ def get_log_qnames(self) -> Set[str]:
115
+ return {
116
+ qname
117
+ for qnames in self.log_alias_to_log_qnames.values()
118
+ for qname in qnames
119
+ }
120
+
121
+ def get_artifact_log_qnames(self):
122
+ return set(self.artifact_log_qnames)
123
+
124
+ def get_child_log_qnames(self):
125
+ return set(self.child_log_qnames)
126
+
127
+ def is_off_by_default(self, artifact_qname):
128
+ return artifact_qname in self.off_by_default_artifact_names
129
+
130
+
131
+ @dataclass
132
+ class LogState:
133
+ # qualified log names -> currently set log level
134
+ log_qname_to_level: Dict[str, str] = field(default_factory=dict)
135
+
136
+ # the set of currently enabled artifacts
137
+ artifact_names: Set[str] = field(default_factory=set)
138
+
139
+ def enable_artifact(self, artifact_name):
140
+ self.artifact_names.add(artifact_name)
141
+
142
+ def is_artifact_enabled(self, name):
143
+ return name in self.artifact_names
144
+
145
+ def enable_log(self, log_qnames, log_level):
146
+ if isinstance(log_qnames, str):
147
+ log_qnames = [log_qnames]
148
+ for log_qname in log_qnames:
149
+ self.log_qname_to_level[log_qname] = log_level
150
+
151
+ def get_log_level_pairs(self):
152
+ """Returns all qualified module names for which the user requested
153
+ explicit logging settings.
154
+
155
+ .. warning:
156
+
157
+ This function used to return all loggers, regardless of whether
158
+ or not the user specified them or not; it now only returns logs
159
+ which were explicitly mentioned by the user (and torch, which
160
+ always is implicitly requested when we initialize our logging
161
+ subsystem.)
162
+ """
163
+ return self.log_qname_to_level.items()
164
+
165
+ def clear(self):
166
+ self.log_qname_to_level.clear()
167
+ self.artifact_names.clear()
168
+
169
+
170
+ log_registry = LogRegistry()
171
+ log_state = LogState()
172
+
173
+ # sample usage: torch._logging.set_logs(**torch._logging.DEFAULT_LOGGING)
174
+ DEFAULT_LOGGING = {
175
+ "dynamo": logging.DEBUG,
176
+ "aot": logging.DEBUG,
177
+ "inductor": logging.DEBUG,
178
+ "ddp_graphs": True,
179
+ "graph_breaks": True,
180
+ "guards": True,
181
+ "recompiles": True,
182
+ "dynamic": logging.INFO,
183
+ }
184
+
185
+
186
+ def set_logs(
187
+ *,
188
+ all: Optional[int] = None,
189
+ dynamo: Optional[int] = None,
190
+ aot: Optional[int] = None,
191
+ autograd: Optional[int] = None,
192
+ dynamic: Optional[int] = None,
193
+ inductor: Optional[int] = None,
194
+ distributed: Optional[int] = None,
195
+ dist_c10d: Optional[int] = None,
196
+ dist_ddp: Optional[int] = None,
197
+ dist_fsdp: Optional[int] = None,
198
+ onnx: Optional[int] = None,
199
+ bytecode: bool = False,
200
+ aot_graphs: bool = False,
201
+ aot_joint_graph: bool = False,
202
+ ddp_graphs: bool = False,
203
+ graph: bool = False,
204
+ graph_code: bool = False,
205
+ graph_breaks: bool = False,
206
+ graph_sizes: bool = False,
207
+ guards: bool = False,
208
+ recompiles: bool = False,
209
+ recompiles_verbose: bool = False,
210
+ trace_source: bool = False,
211
+ trace_call: bool = False,
212
+ output_code: bool = False,
213
+ schedule: bool = False,
214
+ perf_hints: bool = False,
215
+ post_grad_graphs: bool = False,
216
+ onnx_diagnostics: bool = False,
217
+ fusion: bool = False,
218
+ overlap: bool = False,
219
+ export: Optional[int] = None,
220
+ modules: Optional[Dict[str, Union[int, bool]]] = None,
221
+ cudagraphs: bool = False,
222
+ sym_node: bool = False,
223
+ ):
224
+ """
225
+ Sets the log level for individual components and toggles individual log
226
+ artifact types.
227
+
228
+ .. warning:: This feature is a prototype and may have compatibility
229
+ breaking changes in the future.
230
+
231
+ .. note:: The ``TORCH_LOGS`` environment variable has complete precedence
232
+ over this function, so if it was set, this function does nothing.
233
+
234
+ A component is a set of related features in PyTorch. All of the log
235
+ messages emitted from a given component have their own log levels. If the
236
+ log level of a particular message has priority greater than or equal to its
237
+ component's log level setting, it is emitted. Otherwise, it is suppressed.
238
+ This allows you to, for instance, silence large groups of log messages that
239
+ are not relevant to you and increase verbosity of logs for components that
240
+ are relevant. The expected log level values, ordered from highest to lowest
241
+ priority, are:
242
+
243
+ * ``logging.CRITICAL``
244
+ * ``logging.ERROR``
245
+ * ``logging.WARNING``
246
+ * ``logging.INFO``
247
+ * ``logging.DEBUG``
248
+ * ``logging.NOTSET``
249
+
250
+ See documentation for the Python ``logging`` module for more information on
251
+ log levels: `<https://docs.python.org/3/library/logging.html#logging-levels>`_
252
+
253
+ An artifact is a particular type of log message. Each artifact is assigned
254
+ to a parent component. A component can emit many different kinds of
255
+ artifacts. In general, an artifact is emitted if either its corresponding
256
+ setting in the argument list below is turned on or if its parent component
257
+ is set to a log level less than or equal to the log level of the artifact.
258
+
259
+ Keyword args:
260
+ all (:class:`Optional[int]`):
261
+ The default log level for all components. Default: ``logging.WARN``
262
+
263
+ dynamo (:class:`Optional[int]`):
264
+ The log level for the TorchDynamo component. Default: ``logging.WARN``
265
+
266
+ aot (:class:`Optional[int]`):
267
+ The log level for the AOTAutograd component. Default: ``logging.WARN``
268
+
269
+ autograd (:class:`Optional[int]`):
270
+ The log level for autograd. Default: ``logging.WARN``
271
+
272
+ inductor (:class:`Optional[int]`):
273
+ The log level for the TorchInductor component. Default: ``logging.WARN``
274
+
275
+ dynamic (:class:`Optional[int]`):
276
+ The log level for dynamic shapes. Default: ``logging.WARN``
277
+
278
+ distributed (:class:`Optional[int]`):
279
+ Whether to log c10d communication operations and other debug info from PyTorch Distributed components.
280
+ Default: ``logging.WARN``
281
+
282
+ dist_c10d (:class:`Optional[int]`):
283
+ Whether to log c10d communication operations related debug info in PyTorch Distributed components.
284
+ Default: ``logging.WARN``
285
+
286
+ dist_ddp (:class:`Optional[int]`):
287
+ Whether to log debug info related to ``DistributedDataParallel``(DDP) from PyTorch Distributed components.
288
+ Default: ``logging.WARN``
289
+
290
+ dist_fsdp (:class:`Optional[int]`):
291
+ Whether to log debug info related to ``FullyShardedDataParallel``(FSDP) in PyTorch Distributed components.
292
+ Default: ``logging.WARN``
293
+
294
+ onnx (:class:`Optional[int]`):
295
+ The log level for the ONNX exporter component. Default: ``logging.WARN``
296
+
297
+ bytecode (:class:`bool`):
298
+ Whether to emit the original and generated bytecode from TorchDynamo.
299
+ Default: ``False``
300
+
301
+ aot_graphs (:class:`bool`):
302
+ Whether to emit the graphs generated by AOTAutograd. Default: ``False``
303
+
304
+ aot_joint_graph (:class:`bool`):
305
+ Whether to emit the joint forward-backward graph generated by AOTAutograd. Default: ``False``
306
+
307
+ inductor (:class:`Optional[int]`):
308
+ Whether to log information from inductor cudagraphs. Default: ``logging.WARN``
309
+
310
+ ddp_graphs (:class:`bool`):
311
+ Whether to emit graphs generated by DDPOptimizer. Default: ``False``
312
+
313
+ graph (:class:`bool`):
314
+ Whether to emit the graph captured by TorchDynamo in tabular format.
315
+ Default: ``False``
316
+
317
+ graph_code (:class:`bool`):
318
+ Whether to emit the python source of the graph captured by TorchDynamo.
319
+ Default: ``False``
320
+
321
+ graph_breaks (:class:`bool`):
322
+ Whether to emit the graph breaks encountered by TorchDynamo.
323
+ Default: ``False``
324
+
325
+ graph_sizes (:class:`bool`):
326
+ Whether to emit tensor sizes of the graph captured by TorchDynamo.
327
+ Default: ``False``
328
+
329
+ guards (:class:`bool`):
330
+ Whether to emit the guards generated by TorchDynamo for each compiled
331
+ function. Default: ``False``
332
+
333
+ recompiles (:class:`bool`):
334
+ Whether to emit a guard failure reason and message every time
335
+ TorchDynamo recompiles a function. Default: ``False``
336
+
337
+ recompiles_verbose (:class:`bool`):
338
+ Whether to emit all guard failure reasons when TorchDynamo recompiles
339
+ a function, even those that are not actually run. Default: ``False``
340
+
341
+ trace_source (:class:`bool`):
342
+ Whether to emit when TorchDynamo begins tracing a new line. Default: ``False``
343
+
344
+ trace_call (:class:`bool`):
345
+ Whether to emit detailed line location when TorchDynamo creates an FX node
346
+ corresponding to function call. Python 3.11+ only. Default: ``False``
347
+
348
+ output_code (:class:`bool`):
349
+ Whether to emit the TorchInductor output code. Default: ``False``
350
+
351
+ schedule (:class:`bool`):
352
+ Whether to emit the TorchInductor schedule. Default: ``False``
353
+
354
+ perf_hints (:class:`bool`):
355
+ Whether to emit the TorchInductor perf hints. Default: ``False``
356
+
357
+ post_grad_graphs (:class:`bool`):
358
+ Whether to emit the graphs generated by after post grad passes. Default: ``False``
359
+
360
+ onnx_diagnostics (:class:`bool`):
361
+ Whether to emit the ONNX exporter diagnostics in logging. Default: ``False``
362
+
363
+ fusion (:class:`bool`):
364
+ Whether to emit detailed Inductor fusion decisions. Default: ``False``
365
+
366
+ overlap (:class:`bool`):
367
+ Whether to emit detailed Inductor compute/comm overlap decisions. Default: ``False``
368
+
369
+ sym_node (:class:`bool`):
370
+ Whether to emit debug info for various SymNode opterations. Default: ``False``
371
+
372
+ export (:class:`Optional[int]`):
373
+ The log level for export. Default: ``logging.WARN``
374
+
375
+ modules (dict):
376
+ This argument provides an alternate way to specify the above log
377
+ component and artifact settings, in the format of a keyword args
378
+ dictionary given as a single argument. There are two cases
379
+ where this is useful (1) if a new log component or artifact has
380
+ been registered but a keyword argument for it has not been added
381
+ to this function and (2) if the log level for an unregistered module
382
+ needs to be set. This can be done by providing the fully-qualified module
383
+ name as the key, with the log level as the value. Default: ``None``
384
+
385
+
386
+ Example::
387
+
388
+ >>> # xdoctest: +SKIP
389
+ >>> import logging
390
+
391
+ # The following changes the "dynamo" component to emit DEBUG-level
392
+ # logs, and to emit "graph_code" artifacts.
393
+
394
+ >>> torch._logging.set_logs(dynamo=logging.DEBUG, graph_code=True)
395
+
396
+ # The following enables the logs for a different module
397
+
398
+ >>> torch._logging.set_logs(modules={"unregistered.module.name": logging.DEBUG})
399
+ """
400
+ # ignore if env var is set
401
+ if LOG_ENV_VAR in os.environ:
402
+ log.warning(
403
+ "Using TORCH_LOGS environment variable for log settings, ignoring call to set_logs"
404
+ )
405
+ return
406
+
407
+ log_state.clear()
408
+
409
+ modules = modules or {}
410
+
411
+ def _set_logs(**kwargs):
412
+ for alias, val in itertools.chain(kwargs.items(), modules.items()): # type: ignore[union-attr]
413
+ if val is None:
414
+ continue
415
+
416
+ if log_registry.is_artifact(alias):
417
+ if not isinstance(val, bool):
418
+ raise ValueError(
419
+ f"Expected bool to enable artifact {alias}, received {val}"
420
+ )
421
+
422
+ if val:
423
+ log_state.enable_artifact(alias)
424
+ elif log_registry.is_log(alias) or alias in log_registry.child_log_qnames:
425
+ if val not in logging._levelToName:
426
+ raise ValueError(
427
+ f"Unrecognized log level for log {alias}: {val}, valid level values "
428
+ f"are: {','.join([str(k) for k in logging._levelToName.keys()])}"
429
+ )
430
+
431
+ log_state.enable_log(
432
+ log_registry.log_alias_to_log_qnames.get(alias, alias), val
433
+ )
434
+ else:
435
+ raise ValueError(
436
+ f"Unrecognized log or artifact name passed to set_logs: {alias}"
437
+ )
438
+
439
+ _init_logs()
440
+
441
+ _set_logs(
442
+ torch=all,
443
+ dynamo=dynamo,
444
+ aot=aot,
445
+ autograd=autograd,
446
+ inductor=inductor,
447
+ dynamic=dynamic,
448
+ bytecode=bytecode,
449
+ aot_graphs=aot_graphs,
450
+ aot_joint_graph=aot_joint_graph,
451
+ ddp_graphs=ddp_graphs,
452
+ distributed=distributed,
453
+ dist_c10d=dist_c10d,
454
+ dist_ddp=dist_ddp,
455
+ dist_fsdp=dist_fsdp,
456
+ graph=graph,
457
+ graph_code=graph_code,
458
+ graph_breaks=graph_breaks,
459
+ graph_sizes=graph_sizes,
460
+ guards=guards,
461
+ recompiles=recompiles,
462
+ recompiles_verbose=recompiles_verbose,
463
+ trace_source=trace_source,
464
+ trace_call=trace_call,
465
+ output_code=output_code,
466
+ schedule=schedule,
467
+ perf_hints=perf_hints,
468
+ post_grad_graphs=post_grad_graphs,
469
+ onnx=onnx,
470
+ onnx_diagnostics=onnx_diagnostics,
471
+ fusion=fusion,
472
+ overlap=overlap,
473
+ sym_node=sym_node,
474
+ export=export,
475
+ cudagraphs=cudagraphs,
476
+ )
477
+
478
+
479
+ def get_loggers():
480
+ """
481
+ Returns: a list of all registered loggers
482
+ """
483
+ return [logging.getLogger(qname) for qname in log_registry.get_log_qnames()]
484
+
485
+
486
+ def register_log(setting_name, log_name):
487
+ """
488
+ Enables a log to be controlled by the env var and user API with the setting_name
489
+ Args:
490
+ setting_name: the shorthand name used in the env var and user API
491
+ log_name: the log name that the setting_name is associated with
492
+ """
493
+ log_registry.register_log(setting_name, log_name)
494
+
495
+
496
+ def register_artifact(
497
+ setting_name, description, visible=False, off_by_default=False, log_format=None
498
+ ):
499
+ """
500
+ Enables an artifact to be controlled by the env var and user API with name
501
+ Args:
502
+ setting_name: the shorthand name used in the env var and user API
503
+ description: A description of what this outputs
504
+ visible: Whether it gets suggested to users by default
505
+ off_by_default: whether this artifact should be logged when the ancestor loggers
506
+ are enabled at level DEBUG
507
+ """
508
+ log_registry.register_artifact_name(
509
+ setting_name, description, visible, off_by_default, log_format
510
+ )
511
+
512
+
513
+ def getArtifactLogger(module_qname, artifact_name):
514
+ if artifact_name not in log_registry.artifact_names:
515
+ raise ValueError(
516
+ f"Artifact name: {repr(artifact_name)} not registered,"
517
+ f"please call register_artifact({repr(artifact_name)}) in torch._logging.registrations."
518
+ )
519
+ qname = module_qname + f".__{artifact_name}"
520
+ log = logging.getLogger(qname)
521
+ log.artifact_name = artifact_name # type: ignore[attr-defined]
522
+ log_registry.register_artifact_log(qname)
523
+ configure_artifact_log(log)
524
+ return log
525
+
526
+
527
+ INCR_VERBOSITY_CHAR = "+"
528
+ DECR_VERBOSITY_CHAR = "-"
529
+ VERBOSITY_REGEX = (
530
+ "("
531
+ + "|".join([re.escape(INCR_VERBOSITY_CHAR), re.escape(DECR_VERBOSITY_CHAR)])
532
+ + "?)"
533
+ )
534
+
535
+
536
+ def configure_artifact_log(log):
537
+ # If the artifact is off by default, then it should only be logged when explicitly
538
+ # enabled; set propagate to False so that this artifact is not propagated
539
+ # to its ancestor logger
540
+ if log_registry.is_off_by_default(log.artifact_name):
541
+ log.propagate = False
542
+
543
+ # enable artifact logging when explicitly enabled
544
+ if log_state.is_artifact_enabled(log.artifact_name):
545
+ log.setLevel(logging.DEBUG)
546
+ log.propagate = True
547
+
548
+
549
+ # match a comma separated list of loggable names (whitespace allowed after commas)
550
+ def _gen_settings_regex():
551
+ return re.compile(r"((\+|-)?[\w\.]+,\s*)*(\+|-)?[\w\.]+?")
552
+
553
+
554
+ def _validate_settings(settings):
555
+ return re.fullmatch(_gen_settings_regex(), settings) is not None
556
+
557
+
558
+ def help_message(verbose=False):
559
+ def pad_to(s, length=30):
560
+ assert len(s) <= length
561
+ return s + " " * (length - len(s))
562
+
563
+ if verbose:
564
+ printed_artifacts = log_registry.artifact_names
565
+ else:
566
+ printed_artifacts = log_registry.visible_artifacts
567
+
568
+ if verbose:
569
+ heading = "All registered names"
570
+ else:
571
+ heading = "Visible registered names (use TORCH_LOGS='+help' for full list)"
572
+ lines = (
573
+ ["all"]
574
+ + sorted(log_registry.log_alias_to_log_qnames.keys())
575
+ + sorted(
576
+ [
577
+ f"{pad_to(name)}\t{log_registry.artifact_descriptions[name]}"
578
+ for name in printed_artifacts
579
+ ]
580
+ )
581
+ )
582
+ setting_info = " " + "\n ".join(lines)
583
+ examples = """
584
+ Examples:
585
+ TORCH_LOGS="+dynamo,aot" will set the log level of TorchDynamo to
586
+ logging.DEBUG and AOT to logging.INFO
587
+
588
+ TORCH_LOGS="-dynamo,+inductor" will set the log level of TorchDynamo to
589
+ logging.ERROR and TorchInductor to logging.DEBUG
590
+
591
+ TORCH_LOGS="aot_graphs" will enable the aot_graphs artifact
592
+
593
+ TORCH_LOGS="+dynamo,schedule" will enable set the log level of TorchDynamo
594
+ to logging.DEBUG and enable the schedule artifact
595
+
596
+ TORCH_LOGS="+some.random.module,schedule" will set the log level of
597
+ some.random.module to logging.DEBUG and enable the schedule artifact
598
+
599
+ TORCH_LOGS_FORMAT="%(levelname)s: %(message)s" or any provided format
600
+ string will set the output format
601
+ Valid keys are "levelname", "message", "pathname", "levelno", "lineno",
602
+ "filename" and "name".
603
+
604
+ TORCH_LOGS_OUT=/tmp/output.txt will output the logs to /tmp/output.txt as
605
+ well. This is useful when the output is long.
606
+ """ # flake8: noqa: B950
607
+ msg = f"""
608
+ TORCH_LOGS Info
609
+ {examples}
610
+
611
+ {heading}
612
+ {setting_info}
613
+ """
614
+ return msg
615
+
616
+
617
+ def _invalid_settings_err_msg(settings, verbose=False):
618
+ valid_settings = ", ".join(
619
+ ["all"]
620
+ + list(log_registry.log_alias_to_log_qnames.keys())
621
+ + list(log_registry.artifact_names)
622
+ )
623
+ msg = f"""
624
+ Invalid log settings: {settings}, must be a comma separated list of fully
625
+ qualified module names, registered log names or registered artifact names.
626
+ For more info on various settings, try TORCH_LOGS="help"
627
+ Valid settings:
628
+ {valid_settings}
629
+ """
630
+ return msg
631
+
632
+
633
+ @functools.lru_cache
634
+ def _parse_log_settings(settings):
635
+ if settings == "":
636
+ return dict()
637
+
638
+ if settings == "help":
639
+ raise ValueError(help_message(verbose=False))
640
+ elif settings == "+help":
641
+ raise ValueError(help_message(verbose=True))
642
+ if not _validate_settings(settings):
643
+ raise ValueError(_invalid_settings_err_msg(settings))
644
+
645
+ settings = re.sub(r"\s+", "", settings)
646
+ log_names = settings.split(",")
647
+
648
+ def get_name_level_pair(name):
649
+ clean_name = name.replace(INCR_VERBOSITY_CHAR, "")
650
+ clean_name = clean_name.replace(DECR_VERBOSITY_CHAR, "")
651
+
652
+ if name[0] == INCR_VERBOSITY_CHAR:
653
+ level = logging.DEBUG
654
+ elif name[0] == DECR_VERBOSITY_CHAR:
655
+ level = logging.ERROR
656
+ else:
657
+ level = logging.INFO
658
+
659
+ return clean_name, level
660
+
661
+ log_state = LogState()
662
+
663
+ for name in log_names:
664
+ name, level = get_name_level_pair(name)
665
+
666
+ if name == "all":
667
+ name = "torch"
668
+
669
+ if log_registry.is_log(name):
670
+ assert level is not None
671
+ log_qnames = log_registry.log_alias_to_log_qnames[name]
672
+ log_state.enable_log(log_qnames, level)
673
+ elif log_registry.is_artifact(name):
674
+ log_state.enable_artifact(name)
675
+ elif _is_valid_module(name):
676
+ if not _has_registered_parent(name):
677
+ log_registry.register_log(name, name)
678
+ else:
679
+ log_registry.register_child_log(name)
680
+ log_state.enable_log(name, level)
681
+ else:
682
+ raise ValueError(_invalid_settings_err_msg(settings))
683
+
684
+ return log_state
685
+
686
+
687
+ def _is_valid_module(qname):
688
+ try:
689
+ __import__(qname)
690
+ return True
691
+ except ImportError:
692
+ return False
693
+
694
+
695
+ def _update_log_state_from_env():
696
+ global log_state
697
+ log_setting = os.environ.get(LOG_ENV_VAR, None)
698
+ if log_setting is not None:
699
+ log_state = _parse_log_settings(log_setting)
700
+
701
+
702
+ def _has_registered_parent(log_qname):
703
+ cur_log = logging.getLogger(log_qname)
704
+
705
+ registered_log_qnames = log_registry.get_log_qnames()
706
+
707
+ while cur_log.parent:
708
+ if cur_log.name in registered_log_qnames:
709
+ return True
710
+ cur_log = cur_log.parent
711
+
712
+ return False
713
+
714
+
715
+ # apply custom formats to artifacts when necessary
716
+ class TorchLogsFormatter(logging.Formatter):
717
+ def __init__(self, *, trace: bool = False):
718
+ super().__init__()
719
+ self._is_trace = trace
720
+
721
+ def format(self, record):
722
+ artifact_name = getattr(logging.getLogger(record.name), "artifact_name", None)
723
+ if artifact_name is not None:
724
+ artifact_formatter = log_registry.artifact_log_formatters.get(
725
+ artifact_name, None
726
+ )
727
+ if artifact_formatter is not None:
728
+ return artifact_formatter.format(record)
729
+
730
+ record.message = record.getMessage()
731
+ record.asctime = self.formatTime(record, "%m%d %H:%M:%S")
732
+
733
+ # exception handling - copied from logging.Formatter.format
734
+ s = record.message
735
+ if record.exc_info:
736
+ # Cache the traceback text to avoid converting it multiple times
737
+ # (it's constant anyway)
738
+ if not record.exc_text:
739
+ record.exc_text = self.formatException(record.exc_info)
740
+ if record.exc_text:
741
+ if s[-1:] != "\n":
742
+ s = s + "\n"
743
+ s = s + record.exc_text
744
+ if record.stack_info:
745
+ if s[-1:] != "\n":
746
+ s = s + "\n"
747
+ s = s + self.formatStack(record.stack_info)
748
+
749
+ record.rankprefix = ""
750
+ if not self._is_trace and dist.is_available() and dist.is_initialized():
751
+ record.rankprefix = f"[rank{dist.get_rank()}]:"
752
+
753
+ record.traceid = ""
754
+ if (
755
+ not self._is_trace
756
+ and (trace_id := torch._guards.CompileContext.current_trace_id())
757
+ is not None
758
+ ):
759
+ record.traceid = f" [{trace_id}]"
760
+
761
+ glog_level_to_abbr = {
762
+ "DEBUG": "V", # V is for VERBOSE in glog
763
+ "INFO": "I",
764
+ "WARNING": "W",
765
+ "ERROR": "E",
766
+ "CRITICAL": "C",
767
+ }
768
+
769
+ shortlevel = glog_level_to_abbr.get(record.levelname, record.levelname)
770
+
771
+ record.artifactprefix = ""
772
+ if artifact_name is not None:
773
+ record.artifactprefix = f" [__{artifact_name}]"
774
+
775
+ prefix = (
776
+ f"{record.rankprefix}{shortlevel}{record.asctime}.{int(record.msecs*1000):06d} {record.thread} "
777
+ f"{os.path.relpath(record.pathname, os.path.dirname(os.path.dirname(torch.__file__)))}:"
778
+ f"{record.lineno}]{record.traceid}{record.artifactprefix}"
779
+ )
780
+ if self._is_trace:
781
+ assert s == ""
782
+ r = f"{prefix} {json.dumps(record.metadata)}"
783
+ if record.payload is not None:
784
+ r += "".join(f"\n\t{l}" for l in record.payload.split("\n"))
785
+ return r
786
+ else:
787
+ lines = s.split("\n")
788
+ return "\n".join(f"{prefix} {l}" for l in lines)
789
+
790
+
791
+ def _default_formatter():
792
+ fmt = os.environ.get(LOG_FORMAT_ENV_VAR, None)
793
+ if fmt is None:
794
+ return TorchLogsFormatter()
795
+ else:
796
+ if fmt in ("short", "basic"):
797
+ fmt = logging.BASIC_FORMAT
798
+ return logging.Formatter(fmt)
799
+
800
+
801
+ DEFAULT_FORMATTER = _default_formatter()
802
+
803
+
804
+ def _setup_handlers(create_handler_fn, log):
805
+ debug_handler = _track_handler(create_handler_fn())
806
+ debug_handler.setFormatter(DEFAULT_FORMATTER)
807
+ debug_handler.setLevel(logging.DEBUG)
808
+ log.addHandler(debug_handler)
809
+
810
+
811
+ handlers = WeakSet() # type: ignore[var-annotated]
812
+
813
+
814
+ # mark handlers that we've created
815
+ # so we don't modify user handlers
816
+ def _track_handler(handler):
817
+ handlers.add(handler)
818
+ return handler
819
+
820
+
821
+ def _is_torch_handler(handler):
822
+ return handler in handlers
823
+
824
+
825
+ # clears all torch handlers on specified loggers
826
+ def _clear_handlers(log):
827
+ to_remove = [handler for handler in log.handlers if _is_torch_handler(handler)]
828
+ for handler in to_remove:
829
+ log.removeHandler(handler)
830
+
831
+
832
+ def _reset_logs():
833
+ # reset all registered logs
834
+ for log_qname in log_registry.get_log_qnames():
835
+ log = logging.getLogger(log_qname)
836
+ log.setLevel(logging.WARNING)
837
+ log.propagate = False
838
+ _clear_handlers(log)
839
+
840
+ # reset all artifact and child logs
841
+ for artifact_log_qname in itertools.chain(
842
+ log_registry.get_artifact_log_qnames(), log_registry.get_child_log_qnames()
843
+ ):
844
+ log = logging.getLogger(artifact_log_qname)
845
+ log.setLevel(logging.NOTSET)
846
+ log.propagate = True
847
+
848
+ trace_log.propagate = False
849
+ _clear_handlers(trace_log)
850
+
851
+
852
+ def _get_log_state():
853
+ return log_state
854
+
855
+
856
+ def _set_log_state(state):
857
+ global log_state
858
+ log_state = state
859
+
860
+
861
+ def _init_logs(log_file_name=None):
862
+ _reset_logs()
863
+ _update_log_state_from_env()
864
+
865
+ out = os.environ.get(LOG_OUT_ENV_VAR, None)
866
+ if out is not None:
867
+ log_file_name = out
868
+
869
+ # First, reset all known (registered) loggers to NOTSET, so that they
870
+ # respect their parent log level
871
+ for log_qname in log_registry.get_log_qnames():
872
+ # But not the top level torch level: this defaults to WARNING so
873
+ # that our log messages don't leak to the lower levels
874
+ if log_qname == "torch":
875
+ continue
876
+ log = logging.getLogger(log_qname)
877
+ log.setLevel(logging.NOTSET)
878
+
879
+ # Now, for all loggers which the user requested to have non-standard
880
+ # logging behavior, modify their log levels
881
+ for log_qname, level in log_state.get_log_level_pairs():
882
+ log = logging.getLogger(log_qname)
883
+ log.setLevel(level)
884
+
885
+ # Finally, setup handlers for all registered loggers
886
+ for log_qname in log_registry.get_log_qnames():
887
+ log = logging.getLogger(log_qname)
888
+ _setup_handlers(
889
+ logging.StreamHandler,
890
+ log,
891
+ )
892
+
893
+ if log_file_name is not None:
894
+ _setup_handlers(
895
+ lambda: logging.FileHandler(log_file_name),
896
+ log,
897
+ )
898
+
899
+ # configure artifact loggers, note: this must happen last
900
+ # since the levels of ancestor loggers are taken into account
901
+ for artifact_log_qname in log_registry.get_artifact_log_qnames():
902
+ log = logging.getLogger(artifact_log_qname)
903
+ configure_artifact_log(log)
904
+
905
+ # Setup handler for the special trace_log, with different default
906
+ # configuration
907
+ trace_dir_name = os.environ.get(TRACE_ENV_VAR, None)
908
+ # This handler may remove itself if trace_dir_name is None and we are not
909
+ # actually in an FB environment. This allows us to defer actually
910
+ # initializing it until we actually need to log anything. This is
911
+ # important because JK initializes a C++ singleton, which will pork our
912
+ # process if we subsequently fork.
913
+ handler = LazyTraceHandler(trace_dir_name)
914
+ # This log is ALWAYS at debug level. We will additionally test if there
915
+ # are any handlers before deciding to actually call logging on this. Do
916
+ # not manually call
917
+ trace_log.setLevel(logging.DEBUG)
918
+ trace_log_handler = _track_handler(handler)
919
+ trace_log_handler.setFormatter(TorchLogsFormatter(trace=True))
920
+ trace_log.addHandler(trace_log_handler)
921
+
922
+
923
+ class LazyTraceHandler(logging.StreamHandler):
924
+ """Like FileHandler, but the file is allocated lazily only upon the first log message"""
925
+
926
+ def __init__(self, root_dir: Optional[str]):
927
+ # This is implemented in the same way that delay is implemented on
928
+ # FileHandler
929
+ self.root_dir = root_dir
930
+ logging.Handler.__init__(self)
931
+ self.stream = None
932
+ self._builtin_open = open
933
+
934
+ # cloned from FileHandler in cpython
935
+ def close(self):
936
+ self.acquire()
937
+ try:
938
+ try:
939
+ if self.stream:
940
+ try:
941
+ self.flush()
942
+ finally:
943
+ stream = self.stream
944
+ self.stream = None
945
+ if hasattr(stream, "close"):
946
+ stream.close()
947
+ finally:
948
+ # Issue #19523: call unconditionally to
949
+ # prevent a handler leak when delay is set
950
+ # Also see Issue #42378: we also rely on
951
+ # self._closed being set to True there
952
+ logging.StreamHandler.close(self)
953
+ finally:
954
+ self.release()
955
+
956
+ def emit(self, record):
957
+ if self.stream is None:
958
+ ok = False
959
+ if self.root_dir is None:
960
+ TRACE_LOG_DIR = "/logs"
961
+ open_func = self._builtin_open
962
+
963
+ import torch.version as torch_version
964
+
965
+ if hasattr(torch_version, "git_version"):
966
+ log.info("LazyTraceHandler: disabled because not fbcode")
967
+ elif not torch._utils_internal.justknobs_check("pytorch/trace:enable"):
968
+ log.info(
969
+ "LazyTraceHandler: disabled because justknobs_check('pytorch/trace:enable') returned False"
970
+ )
971
+ elif not os.path.exists(TRACE_LOG_DIR):
972
+ log.info(
973
+ "LazyTraceHandler: disabled because %s does not exist",
974
+ TRACE_LOG_DIR,
975
+ )
976
+ elif not os.access(TRACE_LOG_DIR, os.W_OK):
977
+ log.info(
978
+ "LazyTraceHandler: disabled because %s is not writeable",
979
+ TRACE_LOG_DIR,
980
+ )
981
+ else:
982
+ self.root_dir = TRACE_LOG_DIR
983
+
984
+ if self.root_dir is not None:
985
+ os.makedirs(self.root_dir, exist_ok=True)
986
+ ranksuffix = ""
987
+ if dist.is_available() and dist.is_initialized():
988
+ ranksuffix = f"rank_{dist.get_rank()}_"
989
+ self.stream = tempfile.NamedTemporaryFile(
990
+ mode="w+",
991
+ suffix=".log",
992
+ prefix=f"dedicated_log_torch_trace_{ranksuffix}",
993
+ dir=self.root_dir,
994
+ delete=False,
995
+ )
996
+ log.info("LazyTraceHandler: logging to %s", self.stream.name)
997
+ else:
998
+ # We go poof, remove and no-op
999
+ trace_log.removeHandler(self)
1000
+ return
1001
+ if self.stream:
1002
+ super().emit(record)
1003
+
1004
+
1005
+ @functools.lru_cache(None)
1006
+ def warning_once(logger_obj, *args, **kwargs):
1007
+ """
1008
+ This function is similar to `logger.warning()`, but will emit the warning with the same message only once
1009
+ Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the cache.
1010
+ The assumption here is that all warning messages are unique across the code. If they aren't then need to switch to
1011
+ another type of cache that includes the caller frame information in the hashing function.
1012
+ """
1013
+ logger_obj.warning(*args, **kwargs)
1014
+
1015
+
1016
+ class LazyString:
1017
+ def __init__(self, func, *args, **kwargs):
1018
+ self.func = func
1019
+ self.args = args
1020
+ self.kwargs = kwargs
1021
+
1022
+ def __str__(self):
1023
+ return self.func(*self.args, **self.kwargs)
1024
+
1025
+
1026
+ def trace_structured(
1027
+ name: str,
1028
+ # NB: metadata expected to be dict so adding more info is forward compatible
1029
+ # Tuple[str, int] is a special case for string interning
1030
+ metadata_fn: Callable[[], Union[Dict[str, Any], Tuple[str, int]]] = dict,
1031
+ *,
1032
+ payload_fn: Callable[[], Optional[Union[str, object]]] = lambda: None,
1033
+ suppress_context: bool = False,
1034
+ ):
1035
+ """
1036
+ metadata is an arbitrary JSON compatible struct, but it's expected to not be
1037
+ too long (e.g., less than 1MB)
1038
+
1039
+ payload is an arbitrary string, which can be arbitrarily long (but expected to have
1040
+ newlines so no lines are too long)
1041
+ """
1042
+ assert "name" not in ["rank", "frame_id", "frame_compile_id", "attempt"]
1043
+ assert callable(
1044
+ metadata_fn
1045
+ ), f"metadata_fn should be callable, but got {type(metadata_fn)}"
1046
+ assert callable(
1047
+ payload_fn
1048
+ ), f"payload_fn should be callable, but got {type(payload_fn)}"
1049
+ # trace_log never propagates and is ALWAYS DEBUG, so also check that there
1050
+ # are handlers instead of checking the log level
1051
+ if trace_log.handlers:
1052
+ record: Dict[str, object] = {}
1053
+ record[name] = metadata_fn()
1054
+ if not suppress_context:
1055
+ # TODO: Actually, the rank probably should just be emitted once at
1056
+ # the top, and not repeatedly spammed in all the logs, since it
1057
+ # never changes and we assume no interleaving
1058
+ if dist.is_available() and dist.is_initialized():
1059
+ record["rank"] = dist.get_rank()
1060
+ if (
1061
+ trace_id := torch._guards.CompileContext.current_trace_id()
1062
+ ) is not None:
1063
+ record["frame_id"] = trace_id.compile_id.frame_id
1064
+ record["frame_compile_id"] = trace_id.compile_id.frame_compile_id
1065
+ record["attempt"] = trace_id.attempt
1066
+ payload = payload_fn()
1067
+ if payload is not None:
1068
+ if not isinstance(payload, str):
1069
+ if isinstance(payload, list):
1070
+ # special case to look better
1071
+ payload = "[\n" + ",\n".join(json.dumps(i) for i in payload) + "\n]"
1072
+ else:
1073
+ # force newlines so we are unlikely to overflow line limit
1074
+ payload = json.dumps(payload, indent=0)
1075
+ h = hashlib.md5()
1076
+ h.update(payload.encode("utf-8"))
1077
+ record["has_payload"] = h.hexdigest()
1078
+ trace_log.debug(
1079
+ "", extra={"metadata": record, "payload": payload}, stacklevel=2
1080
+ )
1081
+
1082
+
1083
+ import torch._guards
1084
+ import torch._utils_internal
1085
+ import torch.distributed as dist
venv/lib/python3.10/site-packages/torch/_logging/_registrations.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa: B950
2
+ from ._internal import register_artifact, register_log
3
+
4
+ DYNAMIC = ["torch.fx.experimental.symbolic_shapes", "torch.fx.experimental.sym_node"]
5
+ DISTRIBUTED = [
6
+ "torch.distributed",
7
+ "torch._dynamo.backends.distributed",
8
+ "torch.nn.parallel.distributed",
9
+ ]
10
+
11
+ register_log("dynamo", ["torch._dynamo", *DYNAMIC])
12
+ register_log("aot", ["torch._functorch.aot_autograd", "torch._functorch._aot_autograd"])
13
+ register_log("autograd", "torch.autograd")
14
+ register_log("inductor", ["torch._inductor", "torch._inductor.cudagraph_trees"])
15
+
16
+ register_artifact(
17
+ "cudagraphs",
18
+ "Logs information from wrapping inductor generated code with cudagraphs.",
19
+ )
20
+
21
+ register_log("dynamic", DYNAMIC)
22
+ register_log("torch", "torch")
23
+ register_log("distributed", DISTRIBUTED)
24
+ register_log(
25
+ "dist_c10d", ["torch.distributed.distributed_c10d", "torch.distributed.rendezvous"]
26
+ )
27
+ register_log(
28
+ "dist_ddp", ["torch.nn.parallel.distributed", "torch._dynamo.backends.distributed"]
29
+ )
30
+ register_log("dist_fsdp", ["torch.distributed.fsdp"])
31
+ register_log("onnx", "torch.onnx")
32
+ register_log("export", ["torch._dynamo", "torch.export", *DYNAMIC])
33
+
34
+ register_artifact(
35
+ "guards",
36
+ "This prints the guards for every compiled Dynamo frame. It does not tell you where the guards come from.",
37
+ visible=True,
38
+ )
39
+ register_artifact("verbose_guards", "", off_by_default=True)
40
+ register_artifact(
41
+ "bytecode",
42
+ "Prints the original and modified bytecode from Dynamo. Mostly useful if you're debugging our bytecode generation in Dynamo.",
43
+ off_by_default=True,
44
+ )
45
+ register_artifact(
46
+ "graph",
47
+ "Prints the dynamo traced graph (prior to AOTDispatch) in a table. If you prefer python code use `graph_code` instead. ",
48
+ )
49
+ register_artifact("graph_code", "Like `graph`, but gives you the Python code instead.")
50
+ register_artifact(
51
+ "graph_sizes", "Prints the sizes of all FX nodes in the dynamo graph."
52
+ )
53
+ register_artifact(
54
+ "trace_source",
55
+ "As we execute bytecode, prints the file name / line number we are processing and the actual source code. Useful with `bytecode`",
56
+ )
57
+ register_artifact(
58
+ "trace_call",
59
+ "Like trace_source, but it will give you the per-expression blow-by-blow if your Python is recent enough.",
60
+ )
61
+ register_artifact(
62
+ "aot_graphs",
63
+ "Prints the FX forward and backward graph generated by AOTDispatch, after partitioning. Useful to understand what's being given to Inductor",
64
+ visible=True,
65
+ )
66
+ register_artifact(
67
+ "aot_joint_graph",
68
+ "Print FX joint graph from AOTAutograd, prior to partitioning. Useful for debugging partitioning",
69
+ )
70
+ register_artifact(
71
+ "post_grad_graphs",
72
+ "Prints the FX graph generated by post grad passes. Useful to understand what's being given to Inductor after post grad passes",
73
+ )
74
+ register_artifact(
75
+ "compiled_autograd",
76
+ "Prints various logs in compiled_autograd, including but not limited to the graphs. Useful for debugging compiled_autograd.",
77
+ visible=True,
78
+ )
79
+ register_artifact(
80
+ "ddp_graphs",
81
+ "Only relevant for compiling DDP. DDP splits into multiple graphs to trigger comms early. This will print each individual graph here.",
82
+ )
83
+ register_artifact(
84
+ "recompiles",
85
+ "Prints the reason why we recompiled a graph. Very, very useful.",
86
+ visible=True,
87
+ )
88
+ register_artifact(
89
+ "recompiles_verbose",
90
+ "Prints all guard checks that fail during a recompilation. "
91
+ "At runtime, Dynamo will stop at the first failed check for each failing guard. "
92
+ "So not all logged failing checks are actually ran by Dynamo.",
93
+ visible=True,
94
+ off_by_default=True,
95
+ )
96
+ register_artifact(
97
+ "graph_breaks",
98
+ "Prints whenever Dynamo decides that it needs to graph break (i.e. create a new graph). Useful for debugging why torch.compile has poor performance",
99
+ visible=True,
100
+ )
101
+ register_artifact(
102
+ "not_implemented",
103
+ "Prints log messages whenever we return NotImplemented in a multi-dispatch, letting you trace through each object we attempted to dispatch to",
104
+ )
105
+ register_artifact(
106
+ "output_code",
107
+ "Prints the code that Inductor generates (either Triton or C++)",
108
+ off_by_default=True,
109
+ visible=True,
110
+ )
111
+ register_artifact(
112
+ "schedule",
113
+ "Inductor scheduler information. Useful if working on Inductor fusion algo",
114
+ off_by_default=True,
115
+ )
116
+ register_artifact("perf_hints", "", off_by_default=True)
117
+ register_artifact("onnx_diagnostics", "", off_by_default=True)
118
+ register_artifact(
119
+ "fusion",
120
+ "Detailed Inductor fusion decisions. More detailed than 'schedule'",
121
+ off_by_default=True,
122
+ )
123
+ register_artifact(
124
+ "overlap",
125
+ "Detailed Inductor compute/comm overlap decisions",
126
+ off_by_default=True,
127
+ )
128
+ register_artifact(
129
+ "sym_node",
130
+ "Logs extra info for various SymNode operations",
131
+ off_by_default=True,
132
+ )
133
+
134
+ register_artifact("custom_format_test_artifact", "Testing only", log_format="")
venv/lib/python3.10/site-packages/torch/_logging/structured.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Utilities for converting data types into structured JSON for dumping.
3
+ """
4
+
5
+ import traceback
6
+ from typing import Dict, Sequence
7
+
8
+ import torch._logging._internal
9
+
10
+
11
+ INTERN_TABLE: Dict[str, int] = {}
12
+
13
+
14
+ def intern_string(s: str) -> int:
15
+ r = INTERN_TABLE.get(s, None)
16
+ if r is None:
17
+ r = len(INTERN_TABLE)
18
+ INTERN_TABLE[s] = r
19
+ torch._logging._internal.trace_structured(
20
+ "str", lambda: (s, r), suppress_context=True
21
+ )
22
+ return r
23
+
24
+
25
+ def from_traceback(tb: Sequence[traceback.FrameSummary]) -> object:
26
+ r = []
27
+ for frame in tb:
28
+ # dict naming convention here coincides with
29
+ # python/combined_traceback.cpp
30
+ r.append(
31
+ {
32
+ "line": frame.lineno,
33
+ "name": frame.name,
34
+ "filename": intern_string(frame.filename),
35
+ }
36
+ )
37
+ return r
venv/lib/python3.10/site-packages/torch/_numpy/__init__.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ from . import fft, linalg, random
4
+ from ._dtypes import * # noqa: F403
5
+ from ._funcs import * # noqa: F403
6
+ from ._getlimits import finfo, iinfo
7
+ from ._ndarray import (
8
+ array,
9
+ asarray,
10
+ ascontiguousarray,
11
+ can_cast,
12
+ from_dlpack,
13
+ ndarray,
14
+ newaxis,
15
+ result_type,
16
+ )
17
+ from ._ufuncs import * # noqa: F403
18
+ from ._util import AxisError, UFuncTypeError
19
+
20
+ # from . import testing
21
+
22
+ alltrue = all
23
+ sometrue = any
24
+
25
+ inf = float("inf")
26
+ nan = float("nan")
27
+ from math import pi, e # isort: skip
28
+
29
+ False_ = False
30
+ True_ = True
venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/_binary_ufuncs_impl.cpython-310.pyc ADDED
Binary file (1.78 kB). View file
 
venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/_normalizations.cpython-310.pyc ADDED
Binary file (6.69 kB). View file
 
venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/_reductions_impl.cpython-310.pyc ADDED
Binary file (7.95 kB). View file
 
venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/_unary_ufuncs_impl.cpython-310.pyc ADDED
Binary file (1.52 kB). View file
 
venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/_util.cpython-310.pyc ADDED
Binary file (7.34 kB). View file
 
venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/linalg.cpython-310.pyc ADDED
Binary file (5.6 kB). View file
 
venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/random.cpython-310.pyc ADDED
Binary file (4.36 kB). View file
 
venv/lib/python3.10/site-packages/torch/_numpy/_binary_ufuncs_impl.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ """Export torch work functions for binary ufuncs, rename/tweak to match numpy.
4
+ This listing is further exported to public symbols in the `torch._numpy/_ufuncs.py` module.
5
+ """
6
+
7
+ import torch
8
+
9
+ from torch import ( # noqa: F401
10
+ add, # noqa: F401
11
+ arctan2, # noqa: F401
12
+ bitwise_and, # noqa: F401
13
+ bitwise_left_shift as left_shift, # noqa: F401
14
+ bitwise_or, # noqa: F401
15
+ bitwise_right_shift as right_shift, # noqa: F401
16
+ bitwise_xor, # noqa: F401
17
+ copysign, # noqa: F401
18
+ divide, # noqa: F401
19
+ eq as equal, # noqa: F401
20
+ float_power, # noqa: F401
21
+ floor_divide, # noqa: F401
22
+ fmax, # noqa: F401
23
+ fmin, # noqa: F401
24
+ fmod, # noqa: F401
25
+ gcd, # noqa: F401
26
+ greater, # noqa: F401
27
+ greater_equal, # noqa: F401
28
+ heaviside, # noqa: F401
29
+ hypot, # noqa: F401
30
+ lcm, # noqa: F401
31
+ ldexp, # noqa: F401
32
+ less, # noqa: F401
33
+ less_equal, # noqa: F401
34
+ logaddexp, # noqa: F401
35
+ logaddexp2, # noqa: F401
36
+ logical_and, # noqa: F401
37
+ logical_or, # noqa: F401
38
+ logical_xor, # noqa: F401
39
+ maximum, # noqa: F401
40
+ minimum, # noqa: F401
41
+ multiply, # noqa: F401
42
+ nextafter, # noqa: F401
43
+ not_equal, # noqa: F401
44
+ pow as power, # noqa: F401
45
+ remainder, # noqa: F401
46
+ remainder as mod, # noqa: F401
47
+ subtract, # noqa: F401
48
+ true_divide, # noqa: F401
49
+ )
50
+
51
+ from . import _dtypes_impl, _util
52
+
53
+
54
+ # work around torch limitations w.r.t. numpy
55
+ def matmul(x, y):
56
+ # work around:
57
+ # - RuntimeError: expected scalar type Int but found Double
58
+ # - RuntimeError: "addmm_impl_cpu_" not implemented for 'Bool'
59
+ # - RuntimeError: "addmm_impl_cpu_" not implemented for 'Half'
60
+ dtype = _dtypes_impl.result_type_impl(x, y)
61
+ is_bool = dtype == torch.bool
62
+ is_half = (x.dtype == torch.float16 or y.dtype == torch.float16) and (
63
+ x.is_cpu or y.is_cpu
64
+ )
65
+
66
+ work_dtype = dtype
67
+ if is_bool:
68
+ work_dtype = torch.uint8
69
+ if is_half:
70
+ work_dtype = torch.float32
71
+
72
+ x = _util.cast_if_needed(x, work_dtype)
73
+ y = _util.cast_if_needed(y, work_dtype)
74
+
75
+ result = torch.matmul(x, y)
76
+
77
+ if work_dtype != dtype:
78
+ result = result.to(dtype)
79
+
80
+ return result
81
+
82
+
83
+ # a stub implementation of divmod, should be improved after
84
+ # https://github.com/pytorch/pytorch/issues/90820 is fixed in pytorch
85
+ def divmod(x, y):
86
+ return x // y, x % y
venv/lib/python3.10/site-packages/torch/_numpy/_casting_dicts.py ADDED
@@ -0,0 +1,881 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import torch
4
+
5
+ # These two dicts are autogenerated with autogen/gen_dtypes.py,
6
+ # using numpy version 1.23.5.
7
+
8
+ _can_cast_dict = {
9
+ "no": {
10
+ torch.float16: {
11
+ torch.float16: True,
12
+ torch.float32: False,
13
+ torch.float64: False,
14
+ torch.complex64: False,
15
+ torch.complex128: False,
16
+ torch.uint8: False,
17
+ torch.int8: False,
18
+ torch.int16: False,
19
+ torch.int32: False,
20
+ torch.int64: False,
21
+ torch.bool: False,
22
+ },
23
+ torch.float32: {
24
+ torch.float16: False,
25
+ torch.float32: True,
26
+ torch.float64: False,
27
+ torch.complex64: False,
28
+ torch.complex128: False,
29
+ torch.uint8: False,
30
+ torch.int8: False,
31
+ torch.int16: False,
32
+ torch.int32: False,
33
+ torch.int64: False,
34
+ torch.bool: False,
35
+ },
36
+ torch.float64: {
37
+ torch.float16: False,
38
+ torch.float32: False,
39
+ torch.float64: True,
40
+ torch.complex64: False,
41
+ torch.complex128: False,
42
+ torch.uint8: False,
43
+ torch.int8: False,
44
+ torch.int16: False,
45
+ torch.int32: False,
46
+ torch.int64: False,
47
+ torch.bool: False,
48
+ },
49
+ torch.complex64: {
50
+ torch.float16: False,
51
+ torch.float32: False,
52
+ torch.float64: False,
53
+ torch.complex64: True,
54
+ torch.complex128: False,
55
+ torch.uint8: False,
56
+ torch.int8: False,
57
+ torch.int16: False,
58
+ torch.int32: False,
59
+ torch.int64: False,
60
+ torch.bool: False,
61
+ },
62
+ torch.complex128: {
63
+ torch.float16: False,
64
+ torch.float32: False,
65
+ torch.float64: False,
66
+ torch.complex64: False,
67
+ torch.complex128: True,
68
+ torch.uint8: False,
69
+ torch.int8: False,
70
+ torch.int16: False,
71
+ torch.int32: False,
72
+ torch.int64: False,
73
+ torch.bool: False,
74
+ },
75
+ torch.uint8: {
76
+ torch.float16: False,
77
+ torch.float32: False,
78
+ torch.float64: False,
79
+ torch.complex64: False,
80
+ torch.complex128: False,
81
+ torch.uint8: True,
82
+ torch.int8: False,
83
+ torch.int16: False,
84
+ torch.int32: False,
85
+ torch.int64: False,
86
+ torch.bool: False,
87
+ },
88
+ torch.int8: {
89
+ torch.float16: False,
90
+ torch.float32: False,
91
+ torch.float64: False,
92
+ torch.complex64: False,
93
+ torch.complex128: False,
94
+ torch.uint8: False,
95
+ torch.int8: True,
96
+ torch.int16: False,
97
+ torch.int32: False,
98
+ torch.int64: False,
99
+ torch.bool: False,
100
+ },
101
+ torch.int16: {
102
+ torch.float16: False,
103
+ torch.float32: False,
104
+ torch.float64: False,
105
+ torch.complex64: False,
106
+ torch.complex128: False,
107
+ torch.uint8: False,
108
+ torch.int8: False,
109
+ torch.int16: True,
110
+ torch.int32: False,
111
+ torch.int64: False,
112
+ torch.bool: False,
113
+ },
114
+ torch.int32: {
115
+ torch.float16: False,
116
+ torch.float32: False,
117
+ torch.float64: False,
118
+ torch.complex64: False,
119
+ torch.complex128: False,
120
+ torch.uint8: False,
121
+ torch.int8: False,
122
+ torch.int16: False,
123
+ torch.int32: True,
124
+ torch.int64: False,
125
+ torch.bool: False,
126
+ },
127
+ torch.int64: {
128
+ torch.float16: False,
129
+ torch.float32: False,
130
+ torch.float64: False,
131
+ torch.complex64: False,
132
+ torch.complex128: False,
133
+ torch.uint8: False,
134
+ torch.int8: False,
135
+ torch.int16: False,
136
+ torch.int32: False,
137
+ torch.int64: True,
138
+ torch.bool: False,
139
+ },
140
+ torch.bool: {
141
+ torch.float16: False,
142
+ torch.float32: False,
143
+ torch.float64: False,
144
+ torch.complex64: False,
145
+ torch.complex128: False,
146
+ torch.uint8: False,
147
+ torch.int8: False,
148
+ torch.int16: False,
149
+ torch.int32: False,
150
+ torch.int64: False,
151
+ torch.bool: True,
152
+ },
153
+ },
154
+ "equiv": {
155
+ torch.float16: {
156
+ torch.float16: True,
157
+ torch.float32: False,
158
+ torch.float64: False,
159
+ torch.complex64: False,
160
+ torch.complex128: False,
161
+ torch.uint8: False,
162
+ torch.int8: False,
163
+ torch.int16: False,
164
+ torch.int32: False,
165
+ torch.int64: False,
166
+ torch.bool: False,
167
+ },
168
+ torch.float32: {
169
+ torch.float16: False,
170
+ torch.float32: True,
171
+ torch.float64: False,
172
+ torch.complex64: False,
173
+ torch.complex128: False,
174
+ torch.uint8: False,
175
+ torch.int8: False,
176
+ torch.int16: False,
177
+ torch.int32: False,
178
+ torch.int64: False,
179
+ torch.bool: False,
180
+ },
181
+ torch.float64: {
182
+ torch.float16: False,
183
+ torch.float32: False,
184
+ torch.float64: True,
185
+ torch.complex64: False,
186
+ torch.complex128: False,
187
+ torch.uint8: False,
188
+ torch.int8: False,
189
+ torch.int16: False,
190
+ torch.int32: False,
191
+ torch.int64: False,
192
+ torch.bool: False,
193
+ },
194
+ torch.complex64: {
195
+ torch.float16: False,
196
+ torch.float32: False,
197
+ torch.float64: False,
198
+ torch.complex64: True,
199
+ torch.complex128: False,
200
+ torch.uint8: False,
201
+ torch.int8: False,
202
+ torch.int16: False,
203
+ torch.int32: False,
204
+ torch.int64: False,
205
+ torch.bool: False,
206
+ },
207
+ torch.complex128: {
208
+ torch.float16: False,
209
+ torch.float32: False,
210
+ torch.float64: False,
211
+ torch.complex64: False,
212
+ torch.complex128: True,
213
+ torch.uint8: False,
214
+ torch.int8: False,
215
+ torch.int16: False,
216
+ torch.int32: False,
217
+ torch.int64: False,
218
+ torch.bool: False,
219
+ },
220
+ torch.uint8: {
221
+ torch.float16: False,
222
+ torch.float32: False,
223
+ torch.float64: False,
224
+ torch.complex64: False,
225
+ torch.complex128: False,
226
+ torch.uint8: True,
227
+ torch.int8: False,
228
+ torch.int16: False,
229
+ torch.int32: False,
230
+ torch.int64: False,
231
+ torch.bool: False,
232
+ },
233
+ torch.int8: {
234
+ torch.float16: False,
235
+ torch.float32: False,
236
+ torch.float64: False,
237
+ torch.complex64: False,
238
+ torch.complex128: False,
239
+ torch.uint8: False,
240
+ torch.int8: True,
241
+ torch.int16: False,
242
+ torch.int32: False,
243
+ torch.int64: False,
244
+ torch.bool: False,
245
+ },
246
+ torch.int16: {
247
+ torch.float16: False,
248
+ torch.float32: False,
249
+ torch.float64: False,
250
+ torch.complex64: False,
251
+ torch.complex128: False,
252
+ torch.uint8: False,
253
+ torch.int8: False,
254
+ torch.int16: True,
255
+ torch.int32: False,
256
+ torch.int64: False,
257
+ torch.bool: False,
258
+ },
259
+ torch.int32: {
260
+ torch.float16: False,
261
+ torch.float32: False,
262
+ torch.float64: False,
263
+ torch.complex64: False,
264
+ torch.complex128: False,
265
+ torch.uint8: False,
266
+ torch.int8: False,
267
+ torch.int16: False,
268
+ torch.int32: True,
269
+ torch.int64: False,
270
+ torch.bool: False,
271
+ },
272
+ torch.int64: {
273
+ torch.float16: False,
274
+ torch.float32: False,
275
+ torch.float64: False,
276
+ torch.complex64: False,
277
+ torch.complex128: False,
278
+ torch.uint8: False,
279
+ torch.int8: False,
280
+ torch.int16: False,
281
+ torch.int32: False,
282
+ torch.int64: True,
283
+ torch.bool: False,
284
+ },
285
+ torch.bool: {
286
+ torch.float16: False,
287
+ torch.float32: False,
288
+ torch.float64: False,
289
+ torch.complex64: False,
290
+ torch.complex128: False,
291
+ torch.uint8: False,
292
+ torch.int8: False,
293
+ torch.int16: False,
294
+ torch.int32: False,
295
+ torch.int64: False,
296
+ torch.bool: True,
297
+ },
298
+ },
299
+ "safe": {
300
+ torch.float16: {
301
+ torch.float16: True,
302
+ torch.float32: True,
303
+ torch.float64: True,
304
+ torch.complex64: True,
305
+ torch.complex128: True,
306
+ torch.uint8: False,
307
+ torch.int8: False,
308
+ torch.int16: False,
309
+ torch.int32: False,
310
+ torch.int64: False,
311
+ torch.bool: False,
312
+ },
313
+ torch.float32: {
314
+ torch.float16: False,
315
+ torch.float32: True,
316
+ torch.float64: True,
317
+ torch.complex64: True,
318
+ torch.complex128: True,
319
+ torch.uint8: False,
320
+ torch.int8: False,
321
+ torch.int16: False,
322
+ torch.int32: False,
323
+ torch.int64: False,
324
+ torch.bool: False,
325
+ },
326
+ torch.float64: {
327
+ torch.float16: False,
328
+ torch.float32: False,
329
+ torch.float64: True,
330
+ torch.complex64: False,
331
+ torch.complex128: True,
332
+ torch.uint8: False,
333
+ torch.int8: False,
334
+ torch.int16: False,
335
+ torch.int32: False,
336
+ torch.int64: False,
337
+ torch.bool: False,
338
+ },
339
+ torch.complex64: {
340
+ torch.float16: False,
341
+ torch.float32: False,
342
+ torch.float64: False,
343
+ torch.complex64: True,
344
+ torch.complex128: True,
345
+ torch.uint8: False,
346
+ torch.int8: False,
347
+ torch.int16: False,
348
+ torch.int32: False,
349
+ torch.int64: False,
350
+ torch.bool: False,
351
+ },
352
+ torch.complex128: {
353
+ torch.float16: False,
354
+ torch.float32: False,
355
+ torch.float64: False,
356
+ torch.complex64: False,
357
+ torch.complex128: True,
358
+ torch.uint8: False,
359
+ torch.int8: False,
360
+ torch.int16: False,
361
+ torch.int32: False,
362
+ torch.int64: False,
363
+ torch.bool: False,
364
+ },
365
+ torch.uint8: {
366
+ torch.float16: True,
367
+ torch.float32: True,
368
+ torch.float64: True,
369
+ torch.complex64: True,
370
+ torch.complex128: True,
371
+ torch.uint8: True,
372
+ torch.int8: False,
373
+ torch.int16: True,
374
+ torch.int32: True,
375
+ torch.int64: True,
376
+ torch.bool: False,
377
+ },
378
+ torch.int8: {
379
+ torch.float16: True,
380
+ torch.float32: True,
381
+ torch.float64: True,
382
+ torch.complex64: True,
383
+ torch.complex128: True,
384
+ torch.uint8: False,
385
+ torch.int8: True,
386
+ torch.int16: True,
387
+ torch.int32: True,
388
+ torch.int64: True,
389
+ torch.bool: False,
390
+ },
391
+ torch.int16: {
392
+ torch.float16: False,
393
+ torch.float32: True,
394
+ torch.float64: True,
395
+ torch.complex64: True,
396
+ torch.complex128: True,
397
+ torch.uint8: False,
398
+ torch.int8: False,
399
+ torch.int16: True,
400
+ torch.int32: True,
401
+ torch.int64: True,
402
+ torch.bool: False,
403
+ },
404
+ torch.int32: {
405
+ torch.float16: False,
406
+ torch.float32: False,
407
+ torch.float64: True,
408
+ torch.complex64: False,
409
+ torch.complex128: True,
410
+ torch.uint8: False,
411
+ torch.int8: False,
412
+ torch.int16: False,
413
+ torch.int32: True,
414
+ torch.int64: True,
415
+ torch.bool: False,
416
+ },
417
+ torch.int64: {
418
+ torch.float16: False,
419
+ torch.float32: False,
420
+ torch.float64: True,
421
+ torch.complex64: False,
422
+ torch.complex128: True,
423
+ torch.uint8: False,
424
+ torch.int8: False,
425
+ torch.int16: False,
426
+ torch.int32: False,
427
+ torch.int64: True,
428
+ torch.bool: False,
429
+ },
430
+ torch.bool: {
431
+ torch.float16: True,
432
+ torch.float32: True,
433
+ torch.float64: True,
434
+ torch.complex64: True,
435
+ torch.complex128: True,
436
+ torch.uint8: True,
437
+ torch.int8: True,
438
+ torch.int16: True,
439
+ torch.int32: True,
440
+ torch.int64: True,
441
+ torch.bool: True,
442
+ },
443
+ },
444
+ "same_kind": {
445
+ torch.float16: {
446
+ torch.float16: True,
447
+ torch.float32: True,
448
+ torch.float64: True,
449
+ torch.complex64: True,
450
+ torch.complex128: True,
451
+ torch.uint8: False,
452
+ torch.int8: False,
453
+ torch.int16: False,
454
+ torch.int32: False,
455
+ torch.int64: False,
456
+ torch.bool: False,
457
+ },
458
+ torch.float32: {
459
+ torch.float16: True,
460
+ torch.float32: True,
461
+ torch.float64: True,
462
+ torch.complex64: True,
463
+ torch.complex128: True,
464
+ torch.uint8: False,
465
+ torch.int8: False,
466
+ torch.int16: False,
467
+ torch.int32: False,
468
+ torch.int64: False,
469
+ torch.bool: False,
470
+ },
471
+ torch.float64: {
472
+ torch.float16: True,
473
+ torch.float32: True,
474
+ torch.float64: True,
475
+ torch.complex64: True,
476
+ torch.complex128: True,
477
+ torch.uint8: False,
478
+ torch.int8: False,
479
+ torch.int16: False,
480
+ torch.int32: False,
481
+ torch.int64: False,
482
+ torch.bool: False,
483
+ },
484
+ torch.complex64: {
485
+ torch.float16: False,
486
+ torch.float32: False,
487
+ torch.float64: False,
488
+ torch.complex64: True,
489
+ torch.complex128: True,
490
+ torch.uint8: False,
491
+ torch.int8: False,
492
+ torch.int16: False,
493
+ torch.int32: False,
494
+ torch.int64: False,
495
+ torch.bool: False,
496
+ },
497
+ torch.complex128: {
498
+ torch.float16: False,
499
+ torch.float32: False,
500
+ torch.float64: False,
501
+ torch.complex64: True,
502
+ torch.complex128: True,
503
+ torch.uint8: False,
504
+ torch.int8: False,
505
+ torch.int16: False,
506
+ torch.int32: False,
507
+ torch.int64: False,
508
+ torch.bool: False,
509
+ },
510
+ torch.uint8: {
511
+ torch.float16: True,
512
+ torch.float32: True,
513
+ torch.float64: True,
514
+ torch.complex64: True,
515
+ torch.complex128: True,
516
+ torch.uint8: True,
517
+ torch.int8: True,
518
+ torch.int16: True,
519
+ torch.int32: True,
520
+ torch.int64: True,
521
+ torch.bool: False,
522
+ },
523
+ torch.int8: {
524
+ torch.float16: True,
525
+ torch.float32: True,
526
+ torch.float64: True,
527
+ torch.complex64: True,
528
+ torch.complex128: True,
529
+ torch.uint8: False,
530
+ torch.int8: True,
531
+ torch.int16: True,
532
+ torch.int32: True,
533
+ torch.int64: True,
534
+ torch.bool: False,
535
+ },
536
+ torch.int16: {
537
+ torch.float16: True,
538
+ torch.float32: True,
539
+ torch.float64: True,
540
+ torch.complex64: True,
541
+ torch.complex128: True,
542
+ torch.uint8: False,
543
+ torch.int8: True,
544
+ torch.int16: True,
545
+ torch.int32: True,
546
+ torch.int64: True,
547
+ torch.bool: False,
548
+ },
549
+ torch.int32: {
550
+ torch.float16: True,
551
+ torch.float32: True,
552
+ torch.float64: True,
553
+ torch.complex64: True,
554
+ torch.complex128: True,
555
+ torch.uint8: False,
556
+ torch.int8: True,
557
+ torch.int16: True,
558
+ torch.int32: True,
559
+ torch.int64: True,
560
+ torch.bool: False,
561
+ },
562
+ torch.int64: {
563
+ torch.float16: True,
564
+ torch.float32: True,
565
+ torch.float64: True,
566
+ torch.complex64: True,
567
+ torch.complex128: True,
568
+ torch.uint8: False,
569
+ torch.int8: True,
570
+ torch.int16: True,
571
+ torch.int32: True,
572
+ torch.int64: True,
573
+ torch.bool: False,
574
+ },
575
+ torch.bool: {
576
+ torch.float16: True,
577
+ torch.float32: True,
578
+ torch.float64: True,
579
+ torch.complex64: True,
580
+ torch.complex128: True,
581
+ torch.uint8: True,
582
+ torch.int8: True,
583
+ torch.int16: True,
584
+ torch.int32: True,
585
+ torch.int64: True,
586
+ torch.bool: True,
587
+ },
588
+ },
589
+ "unsafe": {
590
+ torch.float16: {
591
+ torch.float16: True,
592
+ torch.float32: True,
593
+ torch.float64: True,
594
+ torch.complex64: True,
595
+ torch.complex128: True,
596
+ torch.uint8: True,
597
+ torch.int8: True,
598
+ torch.int16: True,
599
+ torch.int32: True,
600
+ torch.int64: True,
601
+ torch.bool: True,
602
+ },
603
+ torch.float32: {
604
+ torch.float16: True,
605
+ torch.float32: True,
606
+ torch.float64: True,
607
+ torch.complex64: True,
608
+ torch.complex128: True,
609
+ torch.uint8: True,
610
+ torch.int8: True,
611
+ torch.int16: True,
612
+ torch.int32: True,
613
+ torch.int64: True,
614
+ torch.bool: True,
615
+ },
616
+ torch.float64: {
617
+ torch.float16: True,
618
+ torch.float32: True,
619
+ torch.float64: True,
620
+ torch.complex64: True,
621
+ torch.complex128: True,
622
+ torch.uint8: True,
623
+ torch.int8: True,
624
+ torch.int16: True,
625
+ torch.int32: True,
626
+ torch.int64: True,
627
+ torch.bool: True,
628
+ },
629
+ torch.complex64: {
630
+ torch.float16: True,
631
+ torch.float32: True,
632
+ torch.float64: True,
633
+ torch.complex64: True,
634
+ torch.complex128: True,
635
+ torch.uint8: True,
636
+ torch.int8: True,
637
+ torch.int16: True,
638
+ torch.int32: True,
639
+ torch.int64: True,
640
+ torch.bool: True,
641
+ },
642
+ torch.complex128: {
643
+ torch.float16: True,
644
+ torch.float32: True,
645
+ torch.float64: True,
646
+ torch.complex64: True,
647
+ torch.complex128: True,
648
+ torch.uint8: True,
649
+ torch.int8: True,
650
+ torch.int16: True,
651
+ torch.int32: True,
652
+ torch.int64: True,
653
+ torch.bool: True,
654
+ },
655
+ torch.uint8: {
656
+ torch.float16: True,
657
+ torch.float32: True,
658
+ torch.float64: True,
659
+ torch.complex64: True,
660
+ torch.complex128: True,
661
+ torch.uint8: True,
662
+ torch.int8: True,
663
+ torch.int16: True,
664
+ torch.int32: True,
665
+ torch.int64: True,
666
+ torch.bool: True,
667
+ },
668
+ torch.int8: {
669
+ torch.float16: True,
670
+ torch.float32: True,
671
+ torch.float64: True,
672
+ torch.complex64: True,
673
+ torch.complex128: True,
674
+ torch.uint8: True,
675
+ torch.int8: True,
676
+ torch.int16: True,
677
+ torch.int32: True,
678
+ torch.int64: True,
679
+ torch.bool: True,
680
+ },
681
+ torch.int16: {
682
+ torch.float16: True,
683
+ torch.float32: True,
684
+ torch.float64: True,
685
+ torch.complex64: True,
686
+ torch.complex128: True,
687
+ torch.uint8: True,
688
+ torch.int8: True,
689
+ torch.int16: True,
690
+ torch.int32: True,
691
+ torch.int64: True,
692
+ torch.bool: True,
693
+ },
694
+ torch.int32: {
695
+ torch.float16: True,
696
+ torch.float32: True,
697
+ torch.float64: True,
698
+ torch.complex64: True,
699
+ torch.complex128: True,
700
+ torch.uint8: True,
701
+ torch.int8: True,
702
+ torch.int16: True,
703
+ torch.int32: True,
704
+ torch.int64: True,
705
+ torch.bool: True,
706
+ },
707
+ torch.int64: {
708
+ torch.float16: True,
709
+ torch.float32: True,
710
+ torch.float64: True,
711
+ torch.complex64: True,
712
+ torch.complex128: True,
713
+ torch.uint8: True,
714
+ torch.int8: True,
715
+ torch.int16: True,
716
+ torch.int32: True,
717
+ torch.int64: True,
718
+ torch.bool: True,
719
+ },
720
+ torch.bool: {
721
+ torch.float16: True,
722
+ torch.float32: True,
723
+ torch.float64: True,
724
+ torch.complex64: True,
725
+ torch.complex128: True,
726
+ torch.uint8: True,
727
+ torch.int8: True,
728
+ torch.int16: True,
729
+ torch.int32: True,
730
+ torch.int64: True,
731
+ torch.bool: True,
732
+ },
733
+ },
734
+ }
735
+
736
+
737
+ _result_type_dict = {
738
+ torch.float16: {
739
+ torch.float16: torch.float16,
740
+ torch.float32: torch.float32,
741
+ torch.float64: torch.float64,
742
+ torch.complex64: torch.complex64,
743
+ torch.complex128: torch.complex128,
744
+ torch.uint8: torch.float16,
745
+ torch.int8: torch.float16,
746
+ torch.int16: torch.float32,
747
+ torch.int32: torch.float64,
748
+ torch.int64: torch.float64,
749
+ torch.bool: torch.float16,
750
+ },
751
+ torch.float32: {
752
+ torch.float16: torch.float32,
753
+ torch.float32: torch.float32,
754
+ torch.float64: torch.float64,
755
+ torch.complex64: torch.complex64,
756
+ torch.complex128: torch.complex128,
757
+ torch.uint8: torch.float32,
758
+ torch.int8: torch.float32,
759
+ torch.int16: torch.float32,
760
+ torch.int32: torch.float64,
761
+ torch.int64: torch.float64,
762
+ torch.bool: torch.float32,
763
+ },
764
+ torch.float64: {
765
+ torch.float16: torch.float64,
766
+ torch.float32: torch.float64,
767
+ torch.float64: torch.float64,
768
+ torch.complex64: torch.complex128,
769
+ torch.complex128: torch.complex128,
770
+ torch.uint8: torch.float64,
771
+ torch.int8: torch.float64,
772
+ torch.int16: torch.float64,
773
+ torch.int32: torch.float64,
774
+ torch.int64: torch.float64,
775
+ torch.bool: torch.float64,
776
+ },
777
+ torch.complex64: {
778
+ torch.float16: torch.complex64,
779
+ torch.float32: torch.complex64,
780
+ torch.float64: torch.complex128,
781
+ torch.complex64: torch.complex64,
782
+ torch.complex128: torch.complex128,
783
+ torch.uint8: torch.complex64,
784
+ torch.int8: torch.complex64,
785
+ torch.int16: torch.complex64,
786
+ torch.int32: torch.complex128,
787
+ torch.int64: torch.complex128,
788
+ torch.bool: torch.complex64,
789
+ },
790
+ torch.complex128: {
791
+ torch.float16: torch.complex128,
792
+ torch.float32: torch.complex128,
793
+ torch.float64: torch.complex128,
794
+ torch.complex64: torch.complex128,
795
+ torch.complex128: torch.complex128,
796
+ torch.uint8: torch.complex128,
797
+ torch.int8: torch.complex128,
798
+ torch.int16: torch.complex128,
799
+ torch.int32: torch.complex128,
800
+ torch.int64: torch.complex128,
801
+ torch.bool: torch.complex128,
802
+ },
803
+ torch.uint8: {
804
+ torch.float16: torch.float16,
805
+ torch.float32: torch.float32,
806
+ torch.float64: torch.float64,
807
+ torch.complex64: torch.complex64,
808
+ torch.complex128: torch.complex128,
809
+ torch.uint8: torch.uint8,
810
+ torch.int8: torch.int16,
811
+ torch.int16: torch.int16,
812
+ torch.int32: torch.int32,
813
+ torch.int64: torch.int64,
814
+ torch.bool: torch.uint8,
815
+ },
816
+ torch.int8: {
817
+ torch.float16: torch.float16,
818
+ torch.float32: torch.float32,
819
+ torch.float64: torch.float64,
820
+ torch.complex64: torch.complex64,
821
+ torch.complex128: torch.complex128,
822
+ torch.uint8: torch.int16,
823
+ torch.int8: torch.int8,
824
+ torch.int16: torch.int16,
825
+ torch.int32: torch.int32,
826
+ torch.int64: torch.int64,
827
+ torch.bool: torch.int8,
828
+ },
829
+ torch.int16: {
830
+ torch.float16: torch.float32,
831
+ torch.float32: torch.float32,
832
+ torch.float64: torch.float64,
833
+ torch.complex64: torch.complex64,
834
+ torch.complex128: torch.complex128,
835
+ torch.uint8: torch.int16,
836
+ torch.int8: torch.int16,
837
+ torch.int16: torch.int16,
838
+ torch.int32: torch.int32,
839
+ torch.int64: torch.int64,
840
+ torch.bool: torch.int16,
841
+ },
842
+ torch.int32: {
843
+ torch.float16: torch.float64,
844
+ torch.float32: torch.float64,
845
+ torch.float64: torch.float64,
846
+ torch.complex64: torch.complex128,
847
+ torch.complex128: torch.complex128,
848
+ torch.uint8: torch.int32,
849
+ torch.int8: torch.int32,
850
+ torch.int16: torch.int32,
851
+ torch.int32: torch.int32,
852
+ torch.int64: torch.int64,
853
+ torch.bool: torch.int32,
854
+ },
855
+ torch.int64: {
856
+ torch.float16: torch.float64,
857
+ torch.float32: torch.float64,
858
+ torch.float64: torch.float64,
859
+ torch.complex64: torch.complex128,
860
+ torch.complex128: torch.complex128,
861
+ torch.uint8: torch.int64,
862
+ torch.int8: torch.int64,
863
+ torch.int16: torch.int64,
864
+ torch.int32: torch.int64,
865
+ torch.int64: torch.int64,
866
+ torch.bool: torch.int64,
867
+ },
868
+ torch.bool: {
869
+ torch.float16: torch.float16,
870
+ torch.float32: torch.float32,
871
+ torch.float64: torch.float64,
872
+ torch.complex64: torch.complex64,
873
+ torch.complex128: torch.complex128,
874
+ torch.uint8: torch.uint8,
875
+ torch.int8: torch.int8,
876
+ torch.int16: torch.int16,
877
+ torch.int32: torch.int32,
878
+ torch.int64: torch.int64,
879
+ torch.bool: torch.bool,
880
+ },
881
+ }
venv/lib/python3.10/site-packages/torch/_numpy/_dtypes.py ADDED
@@ -0,0 +1,434 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ """ Define analogs of numpy dtypes supported by pytorch.
4
+ Define the scalar types and supported dtypes and numpy <--> torch dtype mappings.
5
+ """
6
+ import builtins
7
+
8
+ import torch
9
+
10
+ from . import _dtypes_impl
11
+
12
+
13
+ # ### Scalar types ###
14
+
15
+
16
+ class generic:
17
+ name = "generic"
18
+
19
+ def __new__(cls, value):
20
+ # NumPy scalars are modelled as 0-D arrays
21
+ # so a call to np.float32(4) produces a 0-D array.
22
+
23
+ from ._ndarray import asarray, ndarray
24
+
25
+ if isinstance(value, str) and value in ["inf", "nan"]:
26
+ value = {"inf": torch.inf, "nan": torch.nan}[value]
27
+
28
+ if isinstance(value, ndarray):
29
+ return value.astype(cls)
30
+ else:
31
+ return asarray(value, dtype=cls)
32
+
33
+
34
+ ##################
35
+ # abstract types #
36
+ ##################
37
+
38
+
39
+ class number(generic):
40
+ name = "number"
41
+
42
+
43
+ class integer(number):
44
+ name = "integer"
45
+
46
+
47
+ class inexact(number):
48
+ name = "inexact"
49
+
50
+
51
+ class signedinteger(integer):
52
+ name = "signedinteger"
53
+
54
+
55
+ class unsignedinteger(integer):
56
+ name = "unsignedinteger"
57
+
58
+
59
+ class floating(inexact):
60
+ name = "floating"
61
+
62
+
63
+ class complexfloating(inexact):
64
+ name = "complexfloating"
65
+
66
+
67
+ _abstract_dtypes = [
68
+ "generic",
69
+ "number",
70
+ "integer",
71
+ "signedinteger",
72
+ "unsignedinteger",
73
+ "inexact",
74
+ "floating",
75
+ "complexfloating",
76
+ ]
77
+
78
+ # ##### concrete types
79
+
80
+ # signed integers
81
+
82
+
83
+ class int8(signedinteger):
84
+ name = "int8"
85
+ typecode = "b"
86
+ torch_dtype = torch.int8
87
+
88
+
89
+ class int16(signedinteger):
90
+ name = "int16"
91
+ typecode = "h"
92
+ torch_dtype = torch.int16
93
+
94
+
95
+ class int32(signedinteger):
96
+ name = "int32"
97
+ typecode = "i"
98
+ torch_dtype = torch.int32
99
+
100
+
101
+ class int64(signedinteger):
102
+ name = "int64"
103
+ typecode = "l"
104
+ torch_dtype = torch.int64
105
+
106
+
107
+ # unsigned integers
108
+
109
+
110
+ class uint8(unsignedinteger):
111
+ name = "uint8"
112
+ typecode = "B"
113
+ torch_dtype = torch.uint8
114
+
115
+
116
+ # floating point
117
+
118
+
119
+ class float16(floating):
120
+ name = "float16"
121
+ typecode = "e"
122
+ torch_dtype = torch.float16
123
+
124
+
125
+ class float32(floating):
126
+ name = "float32"
127
+ typecode = "f"
128
+ torch_dtype = torch.float32
129
+
130
+
131
+ class float64(floating):
132
+ name = "float64"
133
+ typecode = "d"
134
+ torch_dtype = torch.float64
135
+
136
+
137
+ class complex64(complexfloating):
138
+ name = "complex64"
139
+ typecode = "F"
140
+ torch_dtype = torch.complex64
141
+
142
+
143
+ class complex128(complexfloating):
144
+ name = "complex128"
145
+ typecode = "D"
146
+ torch_dtype = torch.complex128
147
+
148
+
149
+ class bool_(generic):
150
+ name = "bool_"
151
+ typecode = "?"
152
+ torch_dtype = torch.bool
153
+
154
+
155
+ # name aliases
156
+ _name_aliases = {
157
+ "intp": int64,
158
+ "int_": int64,
159
+ "intc": int32,
160
+ "byte": int8,
161
+ "short": int16,
162
+ "longlong": int64, # XXX: is this correct?
163
+ "ubyte": uint8,
164
+ "half": float16,
165
+ "single": float32,
166
+ "double": float64,
167
+ "float_": float64,
168
+ "csingle": complex64,
169
+ "singlecomplex": complex64,
170
+ "cdouble": complex128,
171
+ "cfloat": complex128,
172
+ "complex_": complex128,
173
+ }
174
+ # We register float_ = float32 and so on
175
+ for name, obj in _name_aliases.items():
176
+ vars()[name] = obj
177
+
178
+
179
+ # Replicate this NumPy-defined way of grouping scalar types,
180
+ # cf tests/core/test_scalar_methods.py
181
+ sctypes = {
182
+ "int": [int8, int16, int32, int64],
183
+ "uint": [uint8],
184
+ "float": [float16, float32, float64],
185
+ "complex": [complex64, complex128],
186
+ "others": [bool_],
187
+ }
188
+
189
+
190
+ # Support mappings/functions
191
+
192
+ _names = {st.name: st for cat in sctypes for st in sctypes[cat]}
193
+ _typecodes = {st.typecode: st for cat in sctypes for st in sctypes[cat]}
194
+ _torch_dtypes = {st.torch_dtype: st for cat in sctypes for st in sctypes[cat]}
195
+
196
+
197
+ _aliases = {
198
+ "u1": uint8,
199
+ "i1": int8,
200
+ "i2": int16,
201
+ "i4": int32,
202
+ "i8": int64,
203
+ "b": int8, # XXX: srsly?
204
+ "f2": float16,
205
+ "f4": float32,
206
+ "f8": float64,
207
+ "c8": complex64,
208
+ "c16": complex128,
209
+ # numpy-specific trailing underscore
210
+ "bool_": bool_,
211
+ }
212
+
213
+
214
+ _python_types = {
215
+ int: int64,
216
+ float: float64,
217
+ complex: complex128,
218
+ builtins.bool: bool_,
219
+ # also allow stringified names of python types
220
+ int.__name__: int64,
221
+ float.__name__: float64,
222
+ complex.__name__: complex128,
223
+ builtins.bool.__name__: bool_,
224
+ }
225
+
226
+
227
+ def sctype_from_string(s):
228
+ """Normalize a string value: a type 'name' or a typecode or a width alias."""
229
+ if s in _names:
230
+ return _names[s]
231
+ if s in _name_aliases.keys():
232
+ return _name_aliases[s]
233
+ if s in _typecodes:
234
+ return _typecodes[s]
235
+ if s in _aliases:
236
+ return _aliases[s]
237
+ if s in _python_types:
238
+ return _python_types[s]
239
+ raise TypeError(f"data type {s!r} not understood")
240
+
241
+
242
+ def sctype_from_torch_dtype(torch_dtype):
243
+ return _torch_dtypes[torch_dtype]
244
+
245
+
246
+ # ### DTypes. ###
247
+
248
+
249
+ def dtype(arg):
250
+ if arg is None:
251
+ arg = _dtypes_impl.default_dtypes().float_dtype
252
+ return DType(arg)
253
+
254
+
255
+ class DType:
256
+ def __init__(self, arg):
257
+ # a pytorch object?
258
+ if isinstance(arg, torch.dtype):
259
+ sctype = _torch_dtypes[arg]
260
+ elif isinstance(arg, torch.Tensor):
261
+ sctype = _torch_dtypes[arg.dtype]
262
+ # a scalar type?
263
+ elif issubclass_(arg, generic):
264
+ sctype = arg
265
+ # a dtype already?
266
+ elif isinstance(arg, DType):
267
+ sctype = arg._scalar_type
268
+ # a has a right attribute?
269
+ elif hasattr(arg, "dtype"):
270
+ sctype = arg.dtype._scalar_type
271
+ else:
272
+ sctype = sctype_from_string(arg)
273
+ self._scalar_type = sctype
274
+
275
+ @property
276
+ def name(self):
277
+ return self._scalar_type.name
278
+
279
+ @property
280
+ def type(self):
281
+ return self._scalar_type
282
+
283
+ @property
284
+ def kind(self):
285
+ # https://numpy.org/doc/stable/reference/generated/numpy.dtype.kind.html
286
+ return _torch_dtypes[self.torch_dtype].name[0]
287
+
288
+ @property
289
+ def typecode(self):
290
+ return self._scalar_type.typecode
291
+
292
+ def __eq__(self, other):
293
+ if isinstance(other, DType):
294
+ return self._scalar_type == other._scalar_type
295
+ try:
296
+ other_instance = DType(other)
297
+ except TypeError:
298
+ return False
299
+ return self._scalar_type == other_instance._scalar_type
300
+
301
+ @property
302
+ def torch_dtype(self):
303
+ return self._scalar_type.torch_dtype
304
+
305
+ def __hash__(self):
306
+ return hash(self._scalar_type.name)
307
+
308
+ def __repr__(self):
309
+ return f'dtype("{self.name}")'
310
+
311
+ __str__ = __repr__
312
+
313
+ @property
314
+ def itemsize(self):
315
+ elem = self.type(1)
316
+ return elem.tensor.element_size()
317
+
318
+ def __getstate__(self):
319
+ return self._scalar_type
320
+
321
+ def __setstate__(self, value):
322
+ self._scalar_type = value
323
+
324
+
325
+ typecodes = {
326
+ "All": "efdFDBbhil?",
327
+ "AllFloat": "efdFD",
328
+ "AllInteger": "Bbhil",
329
+ "Integer": "bhil",
330
+ "UnsignedInteger": "B",
331
+ "Float": "efd",
332
+ "Complex": "FD",
333
+ }
334
+
335
+
336
+ # ### Defaults and dtype discovery
337
+
338
+
339
+ def set_default_dtype(fp_dtype="numpy", int_dtype="numpy"):
340
+ """Set the (global) defaults for fp, complex, and int dtypes.
341
+
342
+ The complex dtype is inferred from the float (fp) dtype. It has
343
+ a width at least twice the width of the float dtype,
344
+ i.e., it's complex128 for float64 and complex64 for float32.
345
+
346
+ Parameters
347
+ ----------
348
+ fp_dtype
349
+ Allowed values are "numpy", "pytorch" or dtype_like things which
350
+ can be converted into a DType instance.
351
+ Default is "numpy" (i.e. float64).
352
+ int_dtype
353
+ Allowed values are "numpy", "pytorch" or dtype_like things which
354
+ can be converted into a DType instance.
355
+ Default is "numpy" (i.e. int64).
356
+
357
+ Returns
358
+ -------
359
+ The old default dtype state: a namedtuple with attributes ``float_dtype``,
360
+ ``complex_dtypes`` and ``int_dtype``. These attributes store *pytorch*
361
+ dtypes.
362
+
363
+ Notes
364
+ ------------
365
+ This functions has a side effect: it sets the global state with the provided dtypes.
366
+
367
+ The complex dtype has bit width of at least twice the width of the float
368
+ dtype, i.e. it's complex128 for float64 and complex64 for float32.
369
+
370
+ """
371
+ if fp_dtype not in ["numpy", "pytorch"]:
372
+ fp_dtype = dtype(fp_dtype).torch_dtype
373
+ if int_dtype not in ["numpy", "pytorch"]:
374
+ int_dtype = dtype(int_dtype).torch_dtype
375
+
376
+ if fp_dtype == "numpy":
377
+ float_dtype = torch.float64
378
+ elif fp_dtype == "pytorch":
379
+ float_dtype = torch.float32
380
+ else:
381
+ float_dtype = fp_dtype
382
+
383
+ complex_dtype = {
384
+ torch.float64: torch.complex128,
385
+ torch.float32: torch.complex64,
386
+ torch.float16: torch.complex64,
387
+ }[float_dtype]
388
+
389
+ if int_dtype in ["numpy", "pytorch"]:
390
+ int_dtype = torch.int64
391
+ else:
392
+ int_dtype = int_dtype
393
+
394
+ new_defaults = _dtypes_impl.DefaultDTypes(
395
+ float_dtype=float_dtype, complex_dtype=complex_dtype, int_dtype=int_dtype
396
+ )
397
+
398
+ # set the new global state and return the old state
399
+ old_defaults = _dtypes_impl.default_dtypes
400
+ _dtypes_impl._default_dtypes = new_defaults
401
+ return old_defaults
402
+
403
+
404
+ def issubclass_(arg, klass):
405
+ try:
406
+ return issubclass(arg, klass)
407
+ except TypeError:
408
+ return False
409
+
410
+
411
+ def issubdtype(arg1, arg2):
412
+ # cf https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numerictypes.py#L356-L420
413
+
414
+ # We also accept strings even if NumPy doesn't as dtypes are serialized as their
415
+ # string representation in dynamo's graph
416
+ def str_to_abstract(t):
417
+ if isinstance(t, str) and t in _abstract_dtypes:
418
+ return globals()[t]
419
+ return t
420
+
421
+ arg1 = str_to_abstract(arg1)
422
+ arg2 = str_to_abstract(arg2)
423
+
424
+ if not issubclass_(arg1, generic):
425
+ arg1 = dtype(arg1).type
426
+ if not issubclass_(arg2, generic):
427
+ arg2 = dtype(arg2).type
428
+ return issubclass(arg1, arg2)
429
+
430
+
431
+ __all__ = ["dtype", "DType", "typecodes", "issubdtype", "set_default_dtype", "sctypes"]
432
+ __all__ += list(_names.keys()) # noqa: PLE0605
433
+ __all__ += list(_name_aliases.keys()) # noqa: PLE0605
434
+ __all__ += _abstract_dtypes # noqa: PLE0605
venv/lib/python3.10/site-packages/torch/_numpy/_dtypes_impl.py ADDED
@@ -0,0 +1,216 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ """Dtypes/scalar type implementaions with torch dtypes.
4
+
5
+ Here `dtype` is always a torch.dtype, this module knows nothing about
6
+ scalar types, wrapper dtypes or anything like that. PyTorch only.
7
+ """
8
+ from collections import namedtuple
9
+
10
+ import torch
11
+
12
+ # defaults : mimic NumPy, allow user control
13
+ DefaultDTypes = namedtuple(
14
+ "DefaultDTypes", ["float_dtype", "complex_dtype", "int_dtype"]
15
+ )
16
+
17
+ # a global state
18
+ # We set it the first time we call default_dtypes() to avoid importing
19
+ # torch._dynamo.config and create a circular reference
20
+ _default_dtypes = None
21
+
22
+
23
def default_dtypes():
    """Return the global DefaultDTypes triple, initializing it lazily.

    The import of torch._dynamo.config is deferred to the first call to avoid
    a circular import at module load time (see the note on ``_default_dtypes``).
    The config entries are dtype *names* ("float64", ...), resolved here via
    getattr on torch and sanity-checked to be real torch dtypes.
    """
    global _default_dtypes
    if _default_dtypes is None:
        import torch._dynamo.config as config

        _default_dtypes = DefaultDTypes(
            float_dtype=getattr(torch, config.numpy_default_float),
            complex_dtype=getattr(torch, config.numpy_default_complex),
            int_dtype=getattr(torch, config.numpy_default_int),
        )
        assert isinstance(_default_dtypes.float_dtype, torch.dtype)
        assert isinstance(_default_dtypes.complex_dtype, torch.dtype)
        assert isinstance(_default_dtypes.int_dtype, torch.dtype)
    return _default_dtypes
37
+
38
+
39
def get_default_dtype_for(dtype):
    """Default scalar type given sctype category."""
    # bool is its own category and is never promoted
    if dtype == torch.bool:
        return dtype
    defaults = default_dtypes()
    if dtype.is_complex:
        return defaults.complex_dtype
    elif dtype.is_floating_point:
        return defaults.float_dtype
    else:
        # anything left must be (some) integer
        return defaults.int_dtype
49
+
50
+
51
+ from . import _casting_dicts as _cd
52
+
53
+
54
def can_cast_impl(from_torch_dtype, to_torch_dtype, casting):
    """Look up whether ``from``->``to`` is allowed under the given casting mode.

    Pure table lookup into the precomputed _casting_dicts; raises KeyError for
    an unknown casting mode or dtype.
    """
    return _cd._can_cast_dict[casting][from_torch_dtype][to_torch_dtype]
56
+
57
+
58
def result_type_impl(*tensors):
    """Fold the torch dtypes of *tensors* pairwise through the promotion table.

    NB: operates on torch dtypes; a single input is returned unchanged.
    """
    promoted = tensors[0].dtype
    for tensor in tensors[1:]:
        promoted = _cd._result_type_dict[promoted][tensor.dtype]
    return promoted
68
+
69
+
70
def python_type_for_torch(dtyp):
    """Get a python scalar type a torch dtype"""
    # bool first: it is neither floating point nor complex, and must not
    # fall through to the integer default
    if dtyp == torch.bool:
        return bool
    if dtyp.is_floating_point:
        return float
    if dtyp.is_complex:
        return complex
    return int
81
+
82
+
83
+ # ### NEP 50 helpers ###
84
+
85
+ _SCALAR_TYPES = (int, bool, float, complex)
86
+
87
+ _SCALAR_AND_SYMBOLIC_TYPES = (
88
+ *_SCALAR_TYPES,
89
+ torch.SymInt,
90
+ torch.SymFloat,
91
+ torch.SymBool,
92
+ )
93
+
94
+ _NEP50_FUNCS_TENSOR_ONLY = (
95
+ "minimum",
96
+ "maximum",
97
+ "logaddexp",
98
+ "logaddexp2",
99
+ "lcm",
100
+ "gcd",
101
+ "hypot",
102
+ "heaviside",
103
+ "fmod",
104
+ "fmin",
105
+ "fmax",
106
+ "copysign",
107
+ "arctan2",
108
+ )
109
+
110
+
111
+ def is_scalar(x):
112
+ return isinstance(x, _SCALAR_TYPES)
113
+
114
+
115
+ def is_scalar_or_symbolic(x):
116
+ return isinstance(x, _SCALAR_AND_SYMBOLIC_TYPES)
117
+
118
+
119
+ def _dtype_for_scalar(py_type):
120
+ return {
121
+ bool: torch.bool,
122
+ torch.SymBool: torch.bool,
123
+ int: torch.int64,
124
+ torch.SymInt: torch.int64,
125
+ float: torch.float64,
126
+ torch.SymFloat: torch.float64,
127
+ complex: torch.complex128,
128
+ }[py_type]
129
+
130
+
131
+ def _dtype_for_scalar_or_tensor(x):
132
+ return x.dtype if isinstance(x, torch.Tensor) else _dtype_for_scalar(type(x))
133
+
134
+
135
+ def is_float_or_fp_tensor(x):
136
+ return _dtype_for_scalar_or_tensor(x).is_floating_point
137
+
138
+
139
+ def is_complex_or_complex_tensor(x):
140
+ return _dtype_for_scalar_or_tensor(x).is_complex
141
+
142
+
143
+ def _category(dtype):
144
+ return {
145
+ torch.bool: 0,
146
+ torch.SymBool: 0,
147
+ # int
148
+ torch.uint8: 1,
149
+ torch.int8: 1,
150
+ torch.int16: 1,
151
+ torch.int32: 1,
152
+ torch.int64: 1,
153
+ torch.SymInt: 1,
154
+ # float
155
+ torch.float16: 2,
156
+ torch.float32: 2,
157
+ torch.float64: 2,
158
+ torch.SymFloat: 2,
159
+ # complex
160
+ torch.complex64: 3,
161
+ torch.complex128: 3,
162
+ }[dtype]
163
+
164
+
165
def nep50_to_tensors(x1, x2, handle_weaks, function_name):
    """If either of inputs is a python scalar, type-promote with NEP 50.

    "Weak" operands are non-tensors (python scalars / symbolic scalars).
    When ``handle_weaks`` is False, or both operands are weak, both are simply
    converted to tensors with the module's default dtypes. Otherwise the weak
    operand is promoted against the tensor operand per NEP 50 rules.
    Returns the pair in the original (x1, x2) order.
    """

    def to_tensor(scalar, dtype=None):
        # With no explicit dtype, map the scalar's python type to a torch
        # dtype and then to the configured default for that category.
        if dtype is None:
            dtype = _dtype_for_scalar(type(scalar))
            dtype = get_default_dtype_for(dtype)
        return torch.as_tensor(scalar, dtype=dtype)

    x1_is_weak = not isinstance(x1, torch.Tensor)
    x2_is_weak = not isinstance(x2, torch.Tensor)
    if not handle_weaks or (x1_is_weak and x2_is_weak):
        x1 = to_tensor(x1) if x1_is_weak else x1
        x2 = to_tensor(x2) if x2_is_weak else x2
        return x1, x2

    # scalar <op> tensor: NEP 50
    assert x1_is_weak != x2_is_weak

    weak, not_weak = (x1, x2) if x1_is_weak else (x2, x1)

    # find the dtype for the weak's type
    weak_dtype = _dtype_for_scalar(type(weak))

    cat_weak = _category(weak_dtype)
    cat_not_weak = _category(not_weak.dtype)

    # if the tensor's category is at least as high, it wins: keep its dtype;
    # dt=None means "let to_tensor pick the default for the scalar's category"
    dt = not_weak.dtype if cat_weak <= cat_not_weak else None

    # special-case complex + float32
    if weak_dtype.is_complex and not_weak.dtype == torch.float32:
        dt = torch.complex64

    # detect overflows: in PyTorch, uint8(-1) wraps around to 255,
    # while NEP50 mandates an exception.
    #
    # Note that we only check if each element of the binop overflows,
    # not the result. Consider, e.g. `uint8(100) + 200`. Operands are OK
    # in uint8, but the result overflows and wrap around 255.
    # Numpy emits a RuntimeWarning, PyTorch does not, and we do not either.
    if cat_weak == 1 and cat_not_weak == 1:
        # integers
        iinfo = torch.iinfo(not_weak.dtype)
        if not (iinfo.min <= weak <= iinfo.max):
            raise OverflowError(
                f"Python integer {weak} out of bounds for {not_weak.dtype}"
            )
    if weak_dtype != dt or function_name in _NEP50_FUNCS_TENSOR_ONLY:
        # finally, can make `weak` into a 0D tensor, if both parameters are required to be tensor.
        weak = to_tensor(weak, dt)

    return (weak, not_weak) if x1_is_weak else (not_weak, weak)
venv/lib/python3.10/site-packages/torch/_numpy/_funcs.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import inspect
4
+ import itertools
5
+
6
+ from . import _funcs_impl, _reductions_impl
7
+ from ._normalizations import normalizer
8
+
9
+ # _funcs_impl.py contains functions which mimic NumPy's eponymous equivalents,
10
+ # and consume/return PyTorch tensors/dtypes.
11
+ # They are also type annotated.
12
+ # Pull these functions from _funcs_impl and decorate them with @normalizer, which
13
+ # - Converts any input `np.ndarray`, `torch._numpy.ndarray`, list of lists, Python scalars, etc into a `torch.Tensor`.
14
+ # - Maps NumPy dtypes to PyTorch dtypes
15
+ # - If the input to the `axis` kwarg is an ndarray, it maps it into a tuple
16
+ # - Implements the semantics for the `out=` arg
17
+ # - Wraps back the outputs into `torch._numpy.ndarrays`
18
+
19
+
20
+ def _public_functions(mod):
21
+ def is_public_function(f):
22
+ return inspect.isfunction(f) and not f.__name__.startswith("_")
23
+
24
+ return inspect.getmembers(mod, is_public_function)
25
+
26
+
27
+ # We fill in __all__ in the loop below
28
+ __all__ = []
29
+
30
+ # decorate implementer functions with argument normalizers and export to the top namespace
31
+ for name, func in itertools.chain(
32
+ _public_functions(_funcs_impl), _public_functions(_reductions_impl)
33
+ ):
34
+ if name in ["percentile", "quantile", "median"]:
35
+ decorated = normalizer(func, promote_scalar_result=True)
36
+ elif name == "einsum":
37
+ # normalized manually
38
+ decorated = func
39
+ else:
40
+ decorated = normalizer(func)
41
+
42
+ decorated.__qualname__ = name
43
+ decorated.__name__ = name
44
+ vars()[name] = decorated
45
+ __all__.append(name)
46
+
47
+
48
+ """
49
+ Vendored objects from numpy.lib.index_tricks
50
+ """
51
+
52
+
53
class IndexExpression:
    """
    Turn ``obj[...]`` syntax into index tuples (vendored from numpy).

    Written by Konrad Hinsen <[email protected]>
    last revision: 1999-7-23

    Cosmetic changes by T. Oliphant 2001
    """

    def __init__(self, maketuple):
        # when True, wrap non-tuple indices into a 1-tuple
        self.maketuple = maketuple

    def __getitem__(self, item):
        needs_wrapping = self.maketuple and not isinstance(item, tuple)
        return (item,) if needs_wrapping else item
69
+
70
+
71
+ index_exp = IndexExpression(maketuple=True)
72
+ s_ = IndexExpression(maketuple=False)
73
+
74
+
75
+ __all__ += ["index_exp", "s_"]
venv/lib/python3.10/site-packages/torch/_numpy/_funcs_impl.py ADDED
@@ -0,0 +1,2053 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ """A thin pytorch / numpy compat layer.
4
+
5
+ Things imported from here have numpy-compatible signatures but operate on
6
+ pytorch tensors.
7
+ """
8
+ # Contents of this module ends up in the main namespace via _funcs.py
9
+ # where type annotations are used in conjunction with the @normalizer decorator.
10
+ from __future__ import annotations
11
+
12
+ import builtins
13
+ import itertools
14
+ import operator
15
+ from typing import Optional, Sequence
16
+
17
+ import torch
18
+
19
+ from . import _dtypes_impl, _util
20
+ from ._normalizations import (
21
+ ArrayLike,
22
+ ArrayLikeOrScalar,
23
+ CastingModes,
24
+ DTypeLike,
25
+ NDArray,
26
+ NotImplementedType,
27
+ OutArray,
28
+ )
29
+
30
+
31
+ def copy(
32
+ a: ArrayLike, order: NotImplementedType = "K", subok: NotImplementedType = False
33
+ ):
34
+ return a.clone()
35
+
36
+
37
+ def copyto(
38
+ dst: NDArray,
39
+ src: ArrayLike,
40
+ casting: Optional[CastingModes] = "same_kind",
41
+ where: NotImplementedType = None,
42
+ ):
43
+ (src,) = _util.typecast_tensors((src,), dst.dtype, casting=casting)
44
+ dst.copy_(src)
45
+
46
+
47
def atleast_1d(*arys: ArrayLike):
    """View each input with at least one dimension; a list for several inputs."""
    res = torch.atleast_1d(*arys)
    # torch returns a tuple for multiple inputs; numpy returns a list
    return list(res) if isinstance(res, tuple) else res
53
+
54
+
55
def atleast_2d(*arys: ArrayLike):
    """View each input with at least two dimensions; a list for several inputs."""
    res = torch.atleast_2d(*arys)
    # torch returns a tuple for multiple inputs; numpy returns a list
    return list(res) if isinstance(res, tuple) else res
61
+
62
+
63
def atleast_3d(*arys: ArrayLike):
    """View each input with at least three dimensions; a list for several inputs."""
    res = torch.atleast_3d(*arys)
    # torch returns a tuple for multiple inputs; numpy returns a list
    return list(res) if isinstance(res, tuple) else res
69
+
70
+
71
+ def _concat_check(tup, dtype, out):
72
+ if tup == ():
73
+ raise ValueError("need at least one array to concatenate")
74
+
75
+ """Check inputs in concatenate et al."""
76
+ if out is not None and dtype is not None:
77
+ # mimic numpy
78
+ raise TypeError(
79
+ "concatenate() only takes `out` or `dtype` as an "
80
+ "argument, but both were provided."
81
+ )
82
+
83
+
84
+ def _concat_cast_helper(tensors, out=None, dtype=None, casting="same_kind"):
85
+ """Figure out dtypes, cast if necessary."""
86
+
87
+ if out is not None or dtype is not None:
88
+ # figure out the type of the inputs and outputs
89
+ out_dtype = out.dtype.torch_dtype if dtype is None else dtype
90
+ else:
91
+ out_dtype = _dtypes_impl.result_type_impl(*tensors)
92
+
93
+ # cast input arrays if necessary; do not broadcast them agains `out`
94
+ tensors = _util.typecast_tensors(tensors, out_dtype, casting)
95
+
96
+ return tensors
97
+
98
+
99
+ def _concatenate(
100
+ tensors, axis=0, out=None, dtype=None, casting: Optional[CastingModes] = "same_kind"
101
+ ):
102
+ # pure torch implementation, used below and in cov/corrcoef below
103
+ tensors, axis = _util.axis_none_flatten(*tensors, axis=axis)
104
+ tensors = _concat_cast_helper(tensors, out, dtype, casting)
105
+ return torch.cat(tensors, axis)
106
+
107
+
108
+ def concatenate(
109
+ ar_tuple: Sequence[ArrayLike],
110
+ axis=0,
111
+ out: Optional[OutArray] = None,
112
+ dtype: Optional[DTypeLike] = None,
113
+ casting: Optional[CastingModes] = "same_kind",
114
+ ):
115
+ _concat_check(ar_tuple, dtype, out=out)
116
+ result = _concatenate(ar_tuple, axis=axis, out=out, dtype=dtype, casting=casting)
117
+ return result
118
+
119
+
120
+ def vstack(
121
+ tup: Sequence[ArrayLike],
122
+ *,
123
+ dtype: Optional[DTypeLike] = None,
124
+ casting: Optional[CastingModes] = "same_kind",
125
+ ):
126
+ _concat_check(tup, dtype, out=None)
127
+ tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting)
128
+ return torch.vstack(tensors)
129
+
130
+
131
+ row_stack = vstack
132
+
133
+
134
+ def hstack(
135
+ tup: Sequence[ArrayLike],
136
+ *,
137
+ dtype: Optional[DTypeLike] = None,
138
+ casting: Optional[CastingModes] = "same_kind",
139
+ ):
140
+ _concat_check(tup, dtype, out=None)
141
+ tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting)
142
+ return torch.hstack(tensors)
143
+
144
+
145
+ def dstack(
146
+ tup: Sequence[ArrayLike],
147
+ *,
148
+ dtype: Optional[DTypeLike] = None,
149
+ casting: Optional[CastingModes] = "same_kind",
150
+ ):
151
+ # XXX: in numpy 1.24 dstack does not have dtype and casting keywords
152
+ # but {h,v}stack do. Hence add them here for consistency.
153
+ _concat_check(tup, dtype, out=None)
154
+ tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting)
155
+ return torch.dstack(tensors)
156
+
157
+
158
+ def column_stack(
159
+ tup: Sequence[ArrayLike],
160
+ *,
161
+ dtype: Optional[DTypeLike] = None,
162
+ casting: Optional[CastingModes] = "same_kind",
163
+ ):
164
+ # XXX: in numpy 1.24 column_stack does not have dtype and casting keywords
165
+ # but row_stack does. (because row_stack is an alias for vstack, really).
166
+ # Hence add these keywords here for consistency.
167
+ _concat_check(tup, dtype, out=None)
168
+ tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting)
169
+ return torch.column_stack(tensors)
170
+
171
+
172
+ def stack(
173
+ arrays: Sequence[ArrayLike],
174
+ axis=0,
175
+ out: Optional[OutArray] = None,
176
+ *,
177
+ dtype: Optional[DTypeLike] = None,
178
+ casting: Optional[CastingModes] = "same_kind",
179
+ ):
180
+ _concat_check(arrays, dtype, out=out)
181
+
182
+ tensors = _concat_cast_helper(arrays, dtype=dtype, casting=casting)
183
+ result_ndim = tensors[0].ndim + 1
184
+ axis = _util.normalize_axis_index(axis, result_ndim)
185
+ return torch.stack(tensors, axis=axis)
186
+
187
+
188
+ def append(arr: ArrayLike, values: ArrayLike, axis=None):
189
+ if axis is None:
190
+ if arr.ndim != 1:
191
+ arr = arr.flatten()
192
+ values = values.flatten()
193
+ axis = arr.ndim - 1
194
+ return _concatenate((arr, values), axis=axis)
195
+
196
+
197
+ # ### split ###
198
+
199
+
200
+ def _split_helper(tensor, indices_or_sections, axis, strict=False):
201
+ if isinstance(indices_or_sections, int):
202
+ return _split_helper_int(tensor, indices_or_sections, axis, strict)
203
+ elif isinstance(indices_or_sections, (list, tuple)):
204
+ # NB: drop split=..., it only applies to split_helper_int
205
+ return _split_helper_list(tensor, list(indices_or_sections), axis)
206
+ else:
207
+ raise TypeError("split_helper: ", type(indices_or_sections))
208
+
209
+
210
+ def _split_helper_int(tensor, indices_or_sections, axis, strict=False):
211
+ if not isinstance(indices_or_sections, int):
212
+ raise NotImplementedError("split: indices_or_sections")
213
+
214
+ axis = _util.normalize_axis_index(axis, tensor.ndim)
215
+
216
+ # numpy: l%n chunks of size (l//n + 1), the rest are sized l//n
217
+ l, n = tensor.shape[axis], indices_or_sections
218
+
219
+ if n <= 0:
220
+ raise ValueError()
221
+
222
+ if l % n == 0:
223
+ num, sz = n, l // n
224
+ lst = [sz] * num
225
+ else:
226
+ if strict:
227
+ raise ValueError("array split does not result in an equal division")
228
+
229
+ num, sz = l % n, l // n + 1
230
+ lst = [sz] * num
231
+
232
+ lst += [sz - 1] * (n - num)
233
+
234
+ return torch.split(tensor, lst, axis)
235
+
236
+
237
+ def _split_helper_list(tensor, indices_or_sections, axis):
238
+ if not isinstance(indices_or_sections, list):
239
+ raise NotImplementedError("split: indices_or_sections: list")
240
+ # numpy expects indices, while torch expects lengths of sections
241
+ # also, numpy appends zero-size arrays for indices above the shape[axis]
242
+ lst = [x for x in indices_or_sections if x <= tensor.shape[axis]]
243
+ num_extra = len(indices_or_sections) - len(lst)
244
+
245
+ lst.append(tensor.shape[axis])
246
+ lst = [
247
+ lst[0],
248
+ ] + [a - b for a, b in zip(lst[1:], lst[:-1])]
249
+ lst += [0] * num_extra
250
+
251
+ return torch.split(tensor, lst, axis)
252
+
253
+
254
+ def array_split(ary: ArrayLike, indices_or_sections, axis=0):
255
+ return _split_helper(ary, indices_or_sections, axis)
256
+
257
+
258
+ def split(ary: ArrayLike, indices_or_sections, axis=0):
259
+ return _split_helper(ary, indices_or_sections, axis, strict=True)
260
+
261
+
262
+ def hsplit(ary: ArrayLike, indices_or_sections):
263
+ if ary.ndim == 0:
264
+ raise ValueError("hsplit only works on arrays of 1 or more dimensions")
265
+ axis = 1 if ary.ndim > 1 else 0
266
+ return _split_helper(ary, indices_or_sections, axis, strict=True)
267
+
268
+
269
+ def vsplit(ary: ArrayLike, indices_or_sections):
270
+ if ary.ndim < 2:
271
+ raise ValueError("vsplit only works on arrays of 2 or more dimensions")
272
+ return _split_helper(ary, indices_or_sections, 0, strict=True)
273
+
274
+
275
+ def dsplit(ary: ArrayLike, indices_or_sections):
276
+ if ary.ndim < 3:
277
+ raise ValueError("dsplit only works on arrays of 3 or more dimensions")
278
+ return _split_helper(ary, indices_or_sections, 2, strict=True)
279
+
280
+
281
+ def kron(a: ArrayLike, b: ArrayLike):
282
+ return torch.kron(a, b)
283
+
284
+
285
+ def vander(x: ArrayLike, N=None, increasing=False):
286
+ return torch.vander(x, N, increasing)
287
+
288
+
289
+ # ### linspace, geomspace, logspace and arange ###
290
+
291
+
292
+ def linspace(
293
+ start: ArrayLike,
294
+ stop: ArrayLike,
295
+ num=50,
296
+ endpoint=True,
297
+ retstep=False,
298
+ dtype: Optional[DTypeLike] = None,
299
+ axis=0,
300
+ ):
301
+ if axis != 0 or retstep or not endpoint:
302
+ raise NotImplementedError
303
+ if dtype is None:
304
+ dtype = _dtypes_impl.default_dtypes().float_dtype
305
+ # XXX: raises TypeError if start or stop are not scalars
306
+ return torch.linspace(start, stop, num, dtype=dtype)
307
+
308
+
309
+ def geomspace(
310
+ start: ArrayLike,
311
+ stop: ArrayLike,
312
+ num=50,
313
+ endpoint=True,
314
+ dtype: Optional[DTypeLike] = None,
315
+ axis=0,
316
+ ):
317
+ if axis != 0 or not endpoint:
318
+ raise NotImplementedError
319
+ base = torch.pow(stop / start, 1.0 / (num - 1))
320
+ logbase = torch.log(base)
321
+ return torch.logspace(
322
+ torch.log(start) / logbase,
323
+ torch.log(stop) / logbase,
324
+ num,
325
+ base=base,
326
+ )
327
+
328
+
329
+ def logspace(
330
+ start,
331
+ stop,
332
+ num=50,
333
+ endpoint=True,
334
+ base=10.0,
335
+ dtype: Optional[DTypeLike] = None,
336
+ axis=0,
337
+ ):
338
+ if axis != 0 or not endpoint:
339
+ raise NotImplementedError
340
+ return torch.logspace(start, stop, num, base=base, dtype=dtype)
341
+
342
+
343
+ def arange(
344
+ start: Optional[ArrayLikeOrScalar] = None,
345
+ stop: Optional[ArrayLikeOrScalar] = None,
346
+ step: Optional[ArrayLikeOrScalar] = 1,
347
+ dtype: Optional[DTypeLike] = None,
348
+ *,
349
+ like: NotImplementedType = None,
350
+ ):
351
+ if step == 0:
352
+ raise ZeroDivisionError
353
+ if stop is None and start is None:
354
+ raise TypeError
355
+ if stop is None:
356
+ # XXX: this breaks if start is passed as a kwarg:
357
+ # arange(start=4) should raise (no stop) but doesn't
358
+ start, stop = 0, start
359
+ if start is None:
360
+ start = 0
361
+
362
+ # the dtype of the result
363
+ if dtype is None:
364
+ dtype = (
365
+ _dtypes_impl.default_dtypes().float_dtype
366
+ if any(_dtypes_impl.is_float_or_fp_tensor(x) for x in (start, stop, step))
367
+ else _dtypes_impl.default_dtypes().int_dtype
368
+ )
369
+ work_dtype = torch.float64 if dtype.is_complex else dtype
370
+
371
+ # RuntimeError: "lt_cpu" not implemented for 'ComplexFloat'. Fall back to eager.
372
+ if any(_dtypes_impl.is_complex_or_complex_tensor(x) for x in (start, stop, step)):
373
+ raise NotImplementedError
374
+
375
+ if (step > 0 and start > stop) or (step < 0 and start < stop):
376
+ # empty range
377
+ return torch.empty(0, dtype=dtype)
378
+
379
+ result = torch.arange(start, stop, step, dtype=work_dtype)
380
+ result = _util.cast_if_needed(result, dtype)
381
+ return result
382
+
383
+
384
+ # ### zeros/ones/empty/full ###
385
+
386
+
387
+ def empty(
388
+ shape,
389
+ dtype: Optional[DTypeLike] = None,
390
+ order: NotImplementedType = "C",
391
+ *,
392
+ like: NotImplementedType = None,
393
+ ):
394
+ if dtype is None:
395
+ dtype = _dtypes_impl.default_dtypes().float_dtype
396
+ return torch.empty(shape, dtype=dtype)
397
+
398
+
399
+ # NB: *_like functions deliberately deviate from numpy: it has subok=True
400
+ # as the default; we set subok=False and raise on anything else.
401
+
402
+
403
+ def empty_like(
404
+ prototype: ArrayLike,
405
+ dtype: Optional[DTypeLike] = None,
406
+ order: NotImplementedType = "K",
407
+ subok: NotImplementedType = False,
408
+ shape=None,
409
+ ):
410
+ result = torch.empty_like(prototype, dtype=dtype)
411
+ if shape is not None:
412
+ result = result.reshape(shape)
413
+ return result
414
+
415
+
416
def full(
    shape,
    fill_value: ArrayLike,
    dtype: Optional[DTypeLike] = None,
    order: NotImplementedType = "C",
    *,
    like: NotImplementedType = None,
):
    """Create a tensor of `shape` filled with `fill_value`.

    The dtype defaults to the dtype of `fill_value` (a tensor after
    normalization). A scalar `shape` is promoted to a 1-tuple; the previous
    separate `isinstance(shape, int)` pre-check was redundant with that wrap
    and has been removed.
    """
    if dtype is None:
        dtype = fill_value.dtype
    if not isinstance(shape, (tuple, list)):
        shape = (shape,)
    return torch.full(shape, fill_value, dtype=dtype)
431
+
432
+
433
+ def full_like(
434
+ a: ArrayLike,
435
+ fill_value,
436
+ dtype: Optional[DTypeLike] = None,
437
+ order: NotImplementedType = "K",
438
+ subok: NotImplementedType = False,
439
+ shape=None,
440
+ ):
441
+ # XXX: fill_value broadcasts
442
+ result = torch.full_like(a, fill_value, dtype=dtype)
443
+ if shape is not None:
444
+ result = result.reshape(shape)
445
+ return result
446
+
447
+
448
+ def ones(
449
+ shape,
450
+ dtype: Optional[DTypeLike] = None,
451
+ order: NotImplementedType = "C",
452
+ *,
453
+ like: NotImplementedType = None,
454
+ ):
455
+ if dtype is None:
456
+ dtype = _dtypes_impl.default_dtypes().float_dtype
457
+ return torch.ones(shape, dtype=dtype)
458
+
459
+
460
+ def ones_like(
461
+ a: ArrayLike,
462
+ dtype: Optional[DTypeLike] = None,
463
+ order: NotImplementedType = "K",
464
+ subok: NotImplementedType = False,
465
+ shape=None,
466
+ ):
467
+ result = torch.ones_like(a, dtype=dtype)
468
+ if shape is not None:
469
+ result = result.reshape(shape)
470
+ return result
471
+
472
+
473
+ def zeros(
474
+ shape,
475
+ dtype: Optional[DTypeLike] = None,
476
+ order: NotImplementedType = "C",
477
+ *,
478
+ like: NotImplementedType = None,
479
+ ):
480
+ if dtype is None:
481
+ dtype = _dtypes_impl.default_dtypes().float_dtype
482
+ return torch.zeros(shape, dtype=dtype)
483
+
484
+
485
+ def zeros_like(
486
+ a: ArrayLike,
487
+ dtype: Optional[DTypeLike] = None,
488
+ order: NotImplementedType = "K",
489
+ subok: NotImplementedType = False,
490
+ shape=None,
491
+ ):
492
+ result = torch.zeros_like(a, dtype=dtype)
493
+ if shape is not None:
494
+ result = result.reshape(shape)
495
+ return result
496
+
497
+
498
+ # ### cov & corrcoef ###
499
+
500
+
501
+ def _xy_helper_corrcoef(x_tensor, y_tensor=None, rowvar=True):
502
+ """Prepare inputs for cov and corrcoef."""
503
+
504
+ # https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/function_base.py#L2636
505
+ if y_tensor is not None:
506
+ # make sure x and y are at least 2D
507
+ ndim_extra = 2 - x_tensor.ndim
508
+ if ndim_extra > 0:
509
+ x_tensor = x_tensor.view((1,) * ndim_extra + x_tensor.shape)
510
+ if not rowvar and x_tensor.shape[0] != 1:
511
+ x_tensor = x_tensor.mT
512
+ x_tensor = x_tensor.clone()
513
+
514
+ ndim_extra = 2 - y_tensor.ndim
515
+ if ndim_extra > 0:
516
+ y_tensor = y_tensor.view((1,) * ndim_extra + y_tensor.shape)
517
+ if not rowvar and y_tensor.shape[0] != 1:
518
+ y_tensor = y_tensor.mT
519
+ y_tensor = y_tensor.clone()
520
+
521
+ x_tensor = _concatenate((x_tensor, y_tensor), axis=0)
522
+
523
+ return x_tensor
524
+
525
+
526
+ def corrcoef(
527
+ x: ArrayLike,
528
+ y: Optional[ArrayLike] = None,
529
+ rowvar=True,
530
+ bias=None,
531
+ ddof=None,
532
+ *,
533
+ dtype: Optional[DTypeLike] = None,
534
+ ):
535
+ if bias is not None or ddof is not None:
536
+ # deprecated in NumPy
537
+ raise NotImplementedError
538
+ xy_tensor = _xy_helper_corrcoef(x, y, rowvar)
539
+
540
+ is_half = (xy_tensor.dtype == torch.float16) and xy_tensor.is_cpu
541
+ if is_half:
542
+ # work around torch's "addmm_impl_cpu_" not implemented for 'Half'"
543
+ dtype = torch.float32
544
+
545
+ xy_tensor = _util.cast_if_needed(xy_tensor, dtype)
546
+ result = torch.corrcoef(xy_tensor)
547
+
548
+ if is_half:
549
+ result = result.to(torch.float16)
550
+
551
+ return result
552
+
553
+
554
+ def cov(
555
+ m: ArrayLike,
556
+ y: Optional[ArrayLike] = None,
557
+ rowvar=True,
558
+ bias=False,
559
+ ddof=None,
560
+ fweights: Optional[ArrayLike] = None,
561
+ aweights: Optional[ArrayLike] = None,
562
+ *,
563
+ dtype: Optional[DTypeLike] = None,
564
+ ):
565
+ m = _xy_helper_corrcoef(m, y, rowvar)
566
+
567
+ if ddof is None:
568
+ ddof = 1 if bias == 0 else 0
569
+
570
+ is_half = (m.dtype == torch.float16) and m.is_cpu
571
+ if is_half:
572
+ # work around torch's "addmm_impl_cpu_" not implemented for 'Half'"
573
+ dtype = torch.float32
574
+
575
+ m = _util.cast_if_needed(m, dtype)
576
+ result = torch.cov(m, correction=ddof, aweights=aweights, fweights=fweights)
577
+
578
+ if is_half:
579
+ result = result.to(torch.float16)
580
+
581
+ return result
582
+
583
+
584
+ def _conv_corr_impl(a, v, mode):
585
+ dt = _dtypes_impl.result_type_impl(a, v)
586
+ a = _util.cast_if_needed(a, dt)
587
+ v = _util.cast_if_needed(v, dt)
588
+
589
+ padding = v.shape[0] - 1 if mode == "full" else mode
590
+
591
+ if padding == "same" and v.shape[0] % 2 == 0:
592
+ # UserWarning: Using padding='same' with even kernel lengths and odd
593
+ # dilation may require a zero-padded copy of the input be created
594
+ # (Triggered internally at pytorch/aten/src/ATen/native/Convolution.cpp:1010.)
595
+ raise NotImplementedError("mode='same' and even-length weights")
596
+
597
+ # NumPy only accepts 1D arrays; PyTorch requires 2D inputs and 3D weights
598
+ aa = a[None, :]
599
+ vv = v[None, None, :]
600
+
601
+ result = torch.nn.functional.conv1d(aa, vv, padding=padding)
602
+
603
+ # torch returns a 2D result, numpy returns a 1D array
604
+ return result[0, :]
605
+
606
+
607
+ def convolve(a: ArrayLike, v: ArrayLike, mode="full"):
608
+ # NumPy: if v is longer than a, the arrays are swapped before computation
609
+ if a.shape[0] < v.shape[0]:
610
+ a, v = v, a
611
+
612
+ # flip the weights since numpy does and torch does not
613
+ v = torch.flip(v, (0,))
614
+
615
+ return _conv_corr_impl(a, v, mode)
616
+
617
+
618
+ def correlate(a: ArrayLike, v: ArrayLike, mode="valid"):
619
+ v = torch.conj_physical(v)
620
+ return _conv_corr_impl(a, v, mode)
621
+
622
+
623
+ # ### logic & element selection ###
624
+
625
+
626
+ def bincount(x: ArrayLike, /, weights: Optional[ArrayLike] = None, minlength=0):
627
+ if x.numel() == 0:
628
+ # edge case allowed by numpy
629
+ x = x.new_empty(0, dtype=int)
630
+
631
+ int_dtype = _dtypes_impl.default_dtypes().int_dtype
632
+ (x,) = _util.typecast_tensors((x,), int_dtype, casting="safe")
633
+
634
+ return torch.bincount(x, weights, minlength)
635
+
636
+
637
+ def where(
638
+ condition: ArrayLike,
639
+ x: Optional[ArrayLikeOrScalar] = None,
640
+ y: Optional[ArrayLikeOrScalar] = None,
641
+ /,
642
+ ):
643
+ if (x is None) != (y is None):
644
+ raise ValueError("either both or neither of x and y should be given")
645
+
646
+ if condition.dtype != torch.bool:
647
+ condition = condition.to(torch.bool)
648
+
649
+ if x is None and y is None:
650
+ result = torch.where(condition)
651
+ else:
652
+ result = torch.where(condition, x, y)
653
+ return result
654
+
655
+
656
+ # ###### module-level queries of object properties
657
+
658
+
659
+ def ndim(a: ArrayLike):
660
+ return a.ndim
661
+
662
+
663
+ def shape(a: ArrayLike):
664
+ return tuple(a.shape)
665
+
666
+
667
+ def size(a: ArrayLike, axis=None):
668
+ if axis is None:
669
+ return a.numel()
670
+ else:
671
+ return a.shape[axis]
672
+
673
+
674
+ # ###### shape manipulations and indexing
675
+
676
+
677
+ def expand_dims(a: ArrayLike, axis):
678
+ shape = _util.expand_shape(a.shape, axis)
679
+ return a.view(shape) # never copies
680
+
681
+
682
+ def flip(m: ArrayLike, axis=None):
683
+ # XXX: semantic difference: np.flip returns a view, torch.flip copies
684
+ if axis is None:
685
+ axis = tuple(range(m.ndim))
686
+ else:
687
+ axis = _util.normalize_axis_tuple(axis, m.ndim)
688
+ return torch.flip(m, axis)
689
+
690
+
691
+ def flipud(m: ArrayLike):
692
+ return torch.flipud(m)
693
+
694
+
695
+ def fliplr(m: ArrayLike):
696
+ return torch.fliplr(m)
697
+
698
+
699
+ def rot90(m: ArrayLike, k=1, axes=(0, 1)):
700
+ axes = _util.normalize_axis_tuple(axes, m.ndim)
701
+ return torch.rot90(m, k, axes)
702
+
703
+
704
+ # ### broadcasting and indices ###
705
+
706
+
707
+ def broadcast_to(array: ArrayLike, shape, subok: NotImplementedType = False):
708
+ return torch.broadcast_to(array, size=shape)
709
+
710
+
711
+ # This is a function from tuples to tuples, so we just reuse it
712
+ from torch import broadcast_shapes
713
+
714
+
715
+ def broadcast_arrays(*args: ArrayLike, subok: NotImplementedType = False):
716
+ return torch.broadcast_tensors(*args)
717
+
718
+
719
+ def meshgrid(*xi: ArrayLike, copy=True, sparse=False, indexing="xy"):
720
+ ndim = len(xi)
721
+
722
+ if indexing not in ["xy", "ij"]:
723
+ raise ValueError("Valid values for `indexing` are 'xy' and 'ij'.")
724
+
725
+ s0 = (1,) * ndim
726
+ output = [x.reshape(s0[:i] + (-1,) + s0[i + 1 :]) for i, x in enumerate(xi)]
727
+
728
+ if indexing == "xy" and ndim > 1:
729
+ # switch first and second axis
730
+ output[0] = output[0].reshape((1, -1) + s0[2:])
731
+ output[1] = output[1].reshape((-1, 1) + s0[2:])
732
+
733
+ if not sparse:
734
+ # Return the full N-D matrix (not only the 1-D vector)
735
+ output = torch.broadcast_tensors(*output)
736
+
737
+ if copy:
738
+ output = [x.clone() for x in output]
739
+
740
+ return list(output) # match numpy, return a list
741
+
742
+
743
+ def indices(dimensions, dtype: Optional[DTypeLike] = int, sparse=False):
744
+ # https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numeric.py#L1691-L1791
745
+ dimensions = tuple(dimensions)
746
+ N = len(dimensions)
747
+ shape = (1,) * N
748
+ if sparse:
749
+ res = tuple()
750
+ else:
751
+ res = torch.empty((N,) + dimensions, dtype=dtype)
752
+ for i, dim in enumerate(dimensions):
753
+ idx = torch.arange(dim, dtype=dtype).reshape(
754
+ shape[:i] + (dim,) + shape[i + 1 :]
755
+ )
756
+ if sparse:
757
+ res = res + (idx,)
758
+ else:
759
+ res[i] = idx
760
+ return res
761
+
762
+
763
+ # ### tri*-something ###
764
+
765
+
766
+ def tril(m: ArrayLike, k=0):
767
+ return torch.tril(m, k)
768
+
769
+
770
+ def triu(m: ArrayLike, k=0):
771
+ return torch.triu(m, k)
772
+
773
+
774
+ def tril_indices(n, k=0, m=None):
775
+ if m is None:
776
+ m = n
777
+ return torch.tril_indices(n, m, offset=k)
778
+
779
+
780
+ def triu_indices(n, k=0, m=None):
781
+ if m is None:
782
+ m = n
783
+ return torch.triu_indices(n, m, offset=k)
784
+
785
+
786
+ def tril_indices_from(arr: ArrayLike, k=0):
787
+ if arr.ndim != 2:
788
+ raise ValueError("input array must be 2-d")
789
+ # Return a tensor rather than a tuple to avoid a graphbreak
790
+ return torch.tril_indices(arr.shape[0], arr.shape[1], offset=k)
791
+
792
+
793
+ def triu_indices_from(arr: ArrayLike, k=0):
794
+ if arr.ndim != 2:
795
+ raise ValueError("input array must be 2-d")
796
+ # Return a tensor rather than a tuple to avoid a graphbreak
797
+ return torch.triu_indices(arr.shape[0], arr.shape[1], offset=k)
798
+
799
+
800
+ def tri(
801
+ N,
802
+ M=None,
803
+ k=0,
804
+ dtype: Optional[DTypeLike] = None,
805
+ *,
806
+ like: NotImplementedType = None,
807
+ ):
808
+ if M is None:
809
+ M = N
810
+ tensor = torch.ones((N, M), dtype=dtype)
811
+ return torch.tril(tensor, diagonal=k)
812
+
813
+
814
+ # ### equality, equivalence, allclose ###
815
+
816
+
817
+ def isclose(a: ArrayLike, b: ArrayLike, rtol=1.0e-5, atol=1.0e-8, equal_nan=False):
818
+ dtype = _dtypes_impl.result_type_impl(a, b)
819
+ a = _util.cast_if_needed(a, dtype)
820
+ b = _util.cast_if_needed(b, dtype)
821
+ return torch.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
822
+
823
+
824
+ def allclose(a: ArrayLike, b: ArrayLike, rtol=1e-05, atol=1e-08, equal_nan=False):
825
+ dtype = _dtypes_impl.result_type_impl(a, b)
826
+ a = _util.cast_if_needed(a, dtype)
827
+ b = _util.cast_if_needed(b, dtype)
828
+ return torch.allclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
829
+
830
+
831
+ def _tensor_equal(a1, a2, equal_nan=False):
832
+ # Implementation of array_equal/array_equiv.
833
+ if a1.shape != a2.shape:
834
+ return False
835
+ cond = a1 == a2
836
+ if equal_nan:
837
+ cond = cond | (torch.isnan(a1) & torch.isnan(a2))
838
+ return cond.all().item()
839
+
840
+
841
+ def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan=False):
842
+ return _tensor_equal(a1, a2, equal_nan=equal_nan)
843
+
844
+
845
+ def array_equiv(a1: ArrayLike, a2: ArrayLike):
846
+ # *almost* the same as array_equal: _equiv tries to broadcast, _equal does not
847
+ try:
848
+ a1_t, a2_t = torch.broadcast_tensors(a1, a2)
849
+ except RuntimeError:
850
+ # failed to broadcast => not equivalent
851
+ return False
852
+ return _tensor_equal(a1_t, a2_t)
853
+
854
+
855
+ def nan_to_num(
856
+ x: ArrayLike, copy: NotImplementedType = True, nan=0.0, posinf=None, neginf=None
857
+ ):
858
+ # work around RuntimeError: "nan_to_num" not implemented for 'ComplexDouble'
859
+ if x.is_complex():
860
+ re = torch.nan_to_num(x.real, nan=nan, posinf=posinf, neginf=neginf)
861
+ im = torch.nan_to_num(x.imag, nan=nan, posinf=posinf, neginf=neginf)
862
+ return re + 1j * im
863
+ else:
864
+ return torch.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf)
865
+
866
+
867
+ # ### put/take_along_axis ###
868
+
869
+
870
+ def take(
871
+ a: ArrayLike,
872
+ indices: ArrayLike,
873
+ axis=None,
874
+ out: Optional[OutArray] = None,
875
+ mode: NotImplementedType = "raise",
876
+ ):
877
+ (a,), axis = _util.axis_none_flatten(a, axis=axis)
878
+ axis = _util.normalize_axis_index(axis, a.ndim)
879
+ idx = (slice(None),) * axis + (indices, ...)
880
+ result = a[idx]
881
+ return result
882
+
883
+
884
+ def take_along_axis(arr: ArrayLike, indices: ArrayLike, axis):
885
+ (arr,), axis = _util.axis_none_flatten(arr, axis=axis)
886
+ axis = _util.normalize_axis_index(axis, arr.ndim)
887
+ return torch.take_along_dim(arr, indices, axis)
888
+
889
+
890
+ def put(
891
+ a: NDArray,
892
+ indices: ArrayLike,
893
+ values: ArrayLike,
894
+ mode: NotImplementedType = "raise",
895
+ ):
896
+ v = values.type(a.dtype)
897
+ # If indices is larger than v, expand v to at least the size of indices. Any
898
+ # unnecessary trailing elements are then trimmed.
899
+ if indices.numel() > v.numel():
900
+ ratio = (indices.numel() + v.numel() - 1) // v.numel()
901
+ v = v.unsqueeze(0).expand((ratio,) + v.shape)
902
+ # Trim unnecessary elements, regardless if v was expanded or not. Note
903
+ # np.put() trims v to match indices by default too.
904
+ if indices.numel() < v.numel():
905
+ v = v.flatten()
906
+ v = v[: indices.numel()]
907
+ a.put_(indices, v)
908
+ return None
909
+
910
+
911
+ def put_along_axis(arr: ArrayLike, indices: ArrayLike, values: ArrayLike, axis):
912
+ (arr,), axis = _util.axis_none_flatten(arr, axis=axis)
913
+ axis = _util.normalize_axis_index(axis, arr.ndim)
914
+
915
+ indices, values = torch.broadcast_tensors(indices, values)
916
+ values = _util.cast_if_needed(values, arr.dtype)
917
+ result = torch.scatter(arr, axis, indices, values)
918
+ arr.copy_(result.reshape(arr.shape))
919
+ return None
920
+
921
+
922
+ def choose(
923
+ a: ArrayLike,
924
+ choices: Sequence[ArrayLike],
925
+ out: Optional[OutArray] = None,
926
+ mode: NotImplementedType = "raise",
927
+ ):
928
+ # First, broadcast elements of `choices`
929
+ choices = torch.stack(torch.broadcast_tensors(*choices))
930
+
931
+ # Use an analog of `gather(choices, 0, a)` which broadcasts `choices` vs `a`:
932
+ # (taken from https://github.com/pytorch/pytorch/issues/9407#issuecomment-1427907939)
933
+ idx_list = [
934
+ torch.arange(dim).view((1,) * i + (dim,) + (1,) * (choices.ndim - i - 1))
935
+ for i, dim in enumerate(choices.shape)
936
+ ]
937
+
938
+ idx_list[0] = a
939
+ return choices[idx_list].squeeze(0)
940
+
941
+
942
+ # ### unique et al ###
943
+
944
+
945
+ def unique(
946
+ ar: ArrayLike,
947
+ return_index: NotImplementedType = False,
948
+ return_inverse=False,
949
+ return_counts=False,
950
+ axis=None,
951
+ *,
952
+ equal_nan: NotImplementedType = True,
953
+ ):
954
+ (ar,), axis = _util.axis_none_flatten(ar, axis=axis)
955
+ axis = _util.normalize_axis_index(axis, ar.ndim)
956
+
957
+ result = torch.unique(
958
+ ar, return_inverse=return_inverse, return_counts=return_counts, dim=axis
959
+ )
960
+
961
+ return result
962
+
963
+
964
+ def nonzero(a: ArrayLike):
965
+ return torch.nonzero(a, as_tuple=True)
966
+
967
+
968
+ def argwhere(a: ArrayLike):
969
+ return torch.argwhere(a)
970
+
971
+
972
+ def flatnonzero(a: ArrayLike):
973
+ return torch.flatten(a).nonzero(as_tuple=True)[0]
974
+
975
+
976
+ def clip(
977
+ a: ArrayLike,
978
+ min: Optional[ArrayLike] = None,
979
+ max: Optional[ArrayLike] = None,
980
+ out: Optional[OutArray] = None,
981
+ ):
982
+ return torch.clamp(a, min, max)
983
+
984
+
985
+ def repeat(a: ArrayLike, repeats: ArrayLikeOrScalar, axis=None):
986
+ return torch.repeat_interleave(a, repeats, axis)
987
+
988
+
989
+ def tile(A: ArrayLike, reps):
990
+ if isinstance(reps, int):
991
+ reps = (reps,)
992
+ return torch.tile(A, reps)
993
+
994
+
995
+ def resize(a: ArrayLike, new_shape=None):
996
+ # implementation vendored from
997
+ # https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/fromnumeric.py#L1420-L1497
998
+ if new_shape is None:
999
+ return a
1000
+
1001
+ if isinstance(new_shape, int):
1002
+ new_shape = (new_shape,)
1003
+
1004
+ a = a.flatten()
1005
+
1006
+ new_size = 1
1007
+ for dim_length in new_shape:
1008
+ new_size *= dim_length
1009
+ if dim_length < 0:
1010
+ raise ValueError("all elements of `new_shape` must be non-negative")
1011
+
1012
+ if a.numel() == 0 or new_size == 0:
1013
+ # First case must zero fill. The second would have repeats == 0.
1014
+ return torch.zeros(new_shape, dtype=a.dtype)
1015
+
1016
+ repeats = -(-new_size // a.numel()) # ceil division
1017
+ a = concatenate((a,) * repeats)[:new_size]
1018
+
1019
+ return reshape(a, new_shape)
1020
+
1021
+
1022
+ # ### diag et al ###
1023
+
1024
+
1025
+ def diagonal(a: ArrayLike, offset=0, axis1=0, axis2=1):
1026
+ axis1 = _util.normalize_axis_index(axis1, a.ndim)
1027
+ axis2 = _util.normalize_axis_index(axis2, a.ndim)
1028
+ return torch.diagonal(a, offset, axis1, axis2)
1029
+
1030
+
1031
+ def trace(
1032
+ a: ArrayLike,
1033
+ offset=0,
1034
+ axis1=0,
1035
+ axis2=1,
1036
+ dtype: Optional[DTypeLike] = None,
1037
+ out: Optional[OutArray] = None,
1038
+ ):
1039
+ result = torch.diagonal(a, offset, dim1=axis1, dim2=axis2).sum(-1, dtype=dtype)
1040
+ return result
1041
+
1042
+
1043
+ def eye(
1044
+ N,
1045
+ M=None,
1046
+ k=0,
1047
+ dtype: Optional[DTypeLike] = None,
1048
+ order: NotImplementedType = "C",
1049
+ *,
1050
+ like: NotImplementedType = None,
1051
+ ):
1052
+ if dtype is None:
1053
+ dtype = _dtypes_impl.default_dtypes().float_dtype
1054
+ if M is None:
1055
+ M = N
1056
+ z = torch.zeros(N, M, dtype=dtype)
1057
+ z.diagonal(k).fill_(1)
1058
+ return z
1059
+
1060
+
1061
+ def identity(n, dtype: Optional[DTypeLike] = None, *, like: NotImplementedType = None):
1062
+ return torch.eye(n, dtype=dtype)
1063
+
1064
+
1065
+ def diag(v: ArrayLike, k=0):
1066
+ return torch.diag(v, k)
1067
+
1068
+
1069
+ def diagflat(v: ArrayLike, k=0):
1070
+ return torch.diagflat(v, k)
1071
+
1072
+
1073
+ def diag_indices(n, ndim=2):
1074
+ idx = torch.arange(n)
1075
+ return (idx,) * ndim
1076
+
1077
+
1078
+ def diag_indices_from(arr: ArrayLike):
1079
+ if not arr.ndim >= 2:
1080
+ raise ValueError("input array must be at least 2-d")
1081
+ # For more than d=2, the strided formula is only valid for arrays with
1082
+ # all dimensions equal, so we check first.
1083
+ s = arr.shape
1084
+ if s[1:] != s[:-1]:
1085
+ raise ValueError("All dimensions of input must be of equal length")
1086
+ return diag_indices(s[0], arr.ndim)
1087
+
1088
+
1089
+ def fill_diagonal(a: ArrayLike, val: ArrayLike, wrap=False):
1090
+ if a.ndim < 2:
1091
+ raise ValueError("array must be at least 2-d")
1092
+ if val.numel() == 0 and not wrap:
1093
+ a.fill_diagonal_(val)
1094
+ return a
1095
+
1096
+ if val.ndim == 0:
1097
+ val = val.unsqueeze(0)
1098
+
1099
+ # torch.Tensor.fill_diagonal_ only accepts scalars
1100
+ # If the size of val is too large, then val is trimmed
1101
+ if a.ndim == 2:
1102
+ tall = a.shape[0] > a.shape[1]
1103
+ # wrap does nothing for wide matrices...
1104
+ if not wrap or not tall:
1105
+ # Never wraps
1106
+ diag = a.diagonal()
1107
+ diag.copy_(val[: diag.numel()])
1108
+ else:
1109
+ # wraps and tall... leaving one empty line between diagonals?!
1110
+ max_, min_ = a.shape
1111
+ idx = torch.arange(max_ - max_ // (min_ + 1))
1112
+ mod = idx % min_
1113
+ div = idx // min_
1114
+ a[(div * (min_ + 1) + mod, mod)] = val[: idx.numel()]
1115
+ else:
1116
+ idx = diag_indices_from(a)
1117
+ # a.shape = (n, n, ..., n)
1118
+ a[idx] = val[: a.shape[0]]
1119
+
1120
+ return a
1121
+
1122
+
1123
+ def vdot(a: ArrayLike, b: ArrayLike, /):
1124
+ # 1. torch only accepts 1D arrays, numpy flattens
1125
+ # 2. torch requires matching dtype, while numpy casts (?)
1126
+ t_a, t_b = torch.atleast_1d(a, b)
1127
+ if t_a.ndim > 1:
1128
+ t_a = t_a.flatten()
1129
+ if t_b.ndim > 1:
1130
+ t_b = t_b.flatten()
1131
+
1132
+ dtype = _dtypes_impl.result_type_impl(t_a, t_b)
1133
+ is_half = dtype == torch.float16 and (t_a.is_cpu or t_b.is_cpu)
1134
+ is_bool = dtype == torch.bool
1135
+
1136
+ # work around torch's "dot" not implemented for 'Half', 'Bool'
1137
+ if is_half:
1138
+ dtype = torch.float32
1139
+ elif is_bool:
1140
+ dtype = torch.uint8
1141
+
1142
+ t_a = _util.cast_if_needed(t_a, dtype)
1143
+ t_b = _util.cast_if_needed(t_b, dtype)
1144
+
1145
+ result = torch.vdot(t_a, t_b)
1146
+
1147
+ if is_half:
1148
+ result = result.to(torch.float16)
1149
+ elif is_bool:
1150
+ result = result.to(torch.bool)
1151
+
1152
+ return result
1153
+
1154
+
1155
+ def tensordot(a: ArrayLike, b: ArrayLike, axes=2):
1156
+ if isinstance(axes, (list, tuple)):
1157
+ axes = [[ax] if isinstance(ax, int) else ax for ax in axes]
1158
+
1159
+ target_dtype = _dtypes_impl.result_type_impl(a, b)
1160
+ a = _util.cast_if_needed(a, target_dtype)
1161
+ b = _util.cast_if_needed(b, target_dtype)
1162
+
1163
+ return torch.tensordot(a, b, dims=axes)
1164
+
1165
+
1166
+ def dot(a: ArrayLike, b: ArrayLike, out: Optional[OutArray] = None):
1167
+ dtype = _dtypes_impl.result_type_impl(a, b)
1168
+ is_bool = dtype == torch.bool
1169
+ if is_bool:
1170
+ dtype = torch.uint8
1171
+
1172
+ a = _util.cast_if_needed(a, dtype)
1173
+ b = _util.cast_if_needed(b, dtype)
1174
+
1175
+ if a.ndim == 0 or b.ndim == 0:
1176
+ result = a * b
1177
+ else:
1178
+ result = torch.matmul(a, b)
1179
+
1180
+ if is_bool:
1181
+ result = result.to(torch.bool)
1182
+
1183
+ return result
1184
+
1185
+
1186
+ def inner(a: ArrayLike, b: ArrayLike, /):
1187
+ dtype = _dtypes_impl.result_type_impl(a, b)
1188
+ is_half = dtype == torch.float16 and (a.is_cpu or b.is_cpu)
1189
+ is_bool = dtype == torch.bool
1190
+
1191
+ if is_half:
1192
+ # work around torch's "addmm_impl_cpu_" not implemented for 'Half'"
1193
+ dtype = torch.float32
1194
+ elif is_bool:
1195
+ dtype = torch.uint8
1196
+
1197
+ a = _util.cast_if_needed(a, dtype)
1198
+ b = _util.cast_if_needed(b, dtype)
1199
+
1200
+ result = torch.inner(a, b)
1201
+
1202
+ if is_half:
1203
+ result = result.to(torch.float16)
1204
+ elif is_bool:
1205
+ result = result.to(torch.bool)
1206
+ return result
1207
+
1208
+
1209
+ def outer(a: ArrayLike, b: ArrayLike, out: Optional[OutArray] = None):
1210
+ return torch.outer(a, b)
1211
+
1212
+
1213
+ def cross(a: ArrayLike, b: ArrayLike, axisa=-1, axisb=-1, axisc=-1, axis=None):
1214
+ # implementation vendored from
1215
+ # https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numeric.py#L1486-L1685
1216
+ if axis is not None:
1217
+ axisa, axisb, axisc = (axis,) * 3
1218
+
1219
+ # Check axisa and axisb are within bounds
1220
+ axisa = _util.normalize_axis_index(axisa, a.ndim)
1221
+ axisb = _util.normalize_axis_index(axisb, b.ndim)
1222
+
1223
+ # Move working axis to the end of the shape
1224
+ a = torch.moveaxis(a, axisa, -1)
1225
+ b = torch.moveaxis(b, axisb, -1)
1226
+ msg = "incompatible dimensions for cross product\n(dimension must be 2 or 3)"
1227
+ if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):
1228
+ raise ValueError(msg)
1229
+
1230
+ # Create the output array
1231
+ shape = broadcast_shapes(a[..., 0].shape, b[..., 0].shape)
1232
+ if a.shape[-1] == 3 or b.shape[-1] == 3:
1233
+ shape += (3,)
1234
+ # Check axisc is within bounds
1235
+ axisc = _util.normalize_axis_index(axisc, len(shape))
1236
+ dtype = _dtypes_impl.result_type_impl(a, b)
1237
+ cp = torch.empty(shape, dtype=dtype)
1238
+
1239
+ # recast arrays as dtype
1240
+ a = _util.cast_if_needed(a, dtype)
1241
+ b = _util.cast_if_needed(b, dtype)
1242
+
1243
+ # create local aliases for readability
1244
+ a0 = a[..., 0]
1245
+ a1 = a[..., 1]
1246
+ if a.shape[-1] == 3:
1247
+ a2 = a[..., 2]
1248
+ b0 = b[..., 0]
1249
+ b1 = b[..., 1]
1250
+ if b.shape[-1] == 3:
1251
+ b2 = b[..., 2]
1252
+ if cp.ndim != 0 and cp.shape[-1] == 3:
1253
+ cp0 = cp[..., 0]
1254
+ cp1 = cp[..., 1]
1255
+ cp2 = cp[..., 2]
1256
+
1257
+ if a.shape[-1] == 2:
1258
+ if b.shape[-1] == 2:
1259
+ # a0 * b1 - a1 * b0
1260
+ cp[...] = a0 * b1 - a1 * b0
1261
+ return cp
1262
+ else:
1263
+ assert b.shape[-1] == 3
1264
+ # cp0 = a1 * b2 - 0 (a2 = 0)
1265
+ # cp1 = 0 - a0 * b2 (a2 = 0)
1266
+ # cp2 = a0 * b1 - a1 * b0
1267
+ cp0[...] = a1 * b2
1268
+ cp1[...] = -a0 * b2
1269
+ cp2[...] = a0 * b1 - a1 * b0
1270
+ else:
1271
+ assert a.shape[-1] == 3
1272
+ if b.shape[-1] == 3:
1273
+ cp0[...] = a1 * b2 - a2 * b1
1274
+ cp1[...] = a2 * b0 - a0 * b2
1275
+ cp2[...] = a0 * b1 - a1 * b0
1276
+ else:
1277
+ assert b.shape[-1] == 2
1278
+ cp0[...] = -a2 * b1
1279
+ cp1[...] = a2 * b0
1280
+ cp2[...] = a0 * b1 - a1 * b0
1281
+
1282
+ return torch.moveaxis(cp, -1, axisc)
1283
+
1284
+
1285
+ def einsum(*operands, out=None, dtype=None, order="K", casting="safe", optimize=False):
1286
+ # Have to manually normalize *operands and **kwargs, following the NumPy signature
1287
+ # We have a local import to avoid poluting the global space, as it will be then
1288
+ # exported in funcs.py
1289
+ from ._ndarray import ndarray
1290
+ from ._normalizations import (
1291
+ maybe_copy_to,
1292
+ normalize_array_like,
1293
+ normalize_casting,
1294
+ normalize_dtype,
1295
+ wrap_tensors,
1296
+ )
1297
+
1298
+ dtype = normalize_dtype(dtype)
1299
+ casting = normalize_casting(casting)
1300
+ if out is not None and not isinstance(out, ndarray):
1301
+ raise TypeError("'out' must be an array")
1302
+ if order != "K":
1303
+ raise NotImplementedError("'order' parameter is not supported.")
1304
+
1305
+ # parse arrays and normalize them
1306
+ sublist_format = not isinstance(operands[0], str)
1307
+ if sublist_format:
1308
+ # op, str, op, str ... [sublistout] format: normalize every other argument
1309
+
1310
+ # - if sublistout is not given, the length of operands is even, and we pick
1311
+ # odd-numbered elements, which are arrays.
1312
+ # - if sublistout is given, the length of operands is odd, we peel off
1313
+ # the last one, and pick odd-numbered elements, which are arrays.
1314
+ # Without [:-1], we would have picked sublistout, too.
1315
+ array_operands = operands[:-1][::2]
1316
+ else:
1317
+ # ("ij->", arrays) format
1318
+ subscripts, array_operands = operands[0], operands[1:]
1319
+
1320
+ tensors = [normalize_array_like(op) for op in array_operands]
1321
+ target_dtype = _dtypes_impl.result_type_impl(*tensors) if dtype is None else dtype
1322
+
1323
+ # work around 'bmm' not implemented for 'Half' etc
1324
+ is_half = target_dtype == torch.float16 and all(t.is_cpu for t in tensors)
1325
+ if is_half:
1326
+ target_dtype = torch.float32
1327
+
1328
+ is_short_int = target_dtype in [torch.uint8, torch.int8, torch.int16, torch.int32]
1329
+ if is_short_int:
1330
+ target_dtype = torch.int64
1331
+
1332
+ tensors = _util.typecast_tensors(tensors, target_dtype, casting)
1333
+
1334
+ from torch.backends import opt_einsum
1335
+
1336
+ try:
1337
+ # set the global state to handle the optimize=... argument, restore on exit
1338
+ if opt_einsum.is_available():
1339
+ old_strategy = torch.backends.opt_einsum.strategy
1340
+ old_enabled = torch.backends.opt_einsum.enabled
1341
+
1342
+ # torch.einsum calls opt_einsum.contract_path, which runs into
1343
+ # https://github.com/dgasmith/opt_einsum/issues/219
1344
+ # for strategy={True, False}
1345
+ if optimize is True:
1346
+ optimize = "auto"
1347
+ elif optimize is False:
1348
+ torch.backends.opt_einsum.enabled = False
1349
+
1350
+ torch.backends.opt_einsum.strategy = optimize
1351
+
1352
+ if sublist_format:
1353
+ # recombine operands
1354
+ sublists = operands[1::2]
1355
+ has_sublistout = len(operands) % 2 == 1
1356
+ if has_sublistout:
1357
+ sublistout = operands[-1]
1358
+ operands = list(itertools.chain.from_iterable(zip(tensors, sublists)))
1359
+ if has_sublistout:
1360
+ operands.append(sublistout)
1361
+
1362
+ result = torch.einsum(*operands)
1363
+ else:
1364
+ result = torch.einsum(subscripts, *tensors)
1365
+
1366
+ finally:
1367
+ if opt_einsum.is_available():
1368
+ torch.backends.opt_einsum.strategy = old_strategy
1369
+ torch.backends.opt_einsum.enabled = old_enabled
1370
+
1371
+ result = maybe_copy_to(out, result)
1372
+ return wrap_tensors(result)
1373
+
1374
+
1375
+ # ### sort and partition ###
1376
+
1377
+
1378
+ def _sort_helper(tensor, axis, kind, order):
1379
+ if tensor.dtype.is_complex:
1380
+ raise NotImplementedError(f"sorting {tensor.dtype} is not supported")
1381
+ (tensor,), axis = _util.axis_none_flatten(tensor, axis=axis)
1382
+ axis = _util.normalize_axis_index(axis, tensor.ndim)
1383
+
1384
+ stable = kind == "stable"
1385
+
1386
+ return tensor, axis, stable
1387
+
1388
+
1389
+ def sort(a: ArrayLike, axis=-1, kind=None, order: NotImplementedType = None):
1390
+ # `order` keyword arg is only relevant for structured dtypes; so not supported here.
1391
+ a, axis, stable = _sort_helper(a, axis, kind, order)
1392
+ result = torch.sort(a, dim=axis, stable=stable)
1393
+ return result.values
1394
+
1395
+
1396
+ def argsort(a: ArrayLike, axis=-1, kind=None, order: NotImplementedType = None):
1397
+ a, axis, stable = _sort_helper(a, axis, kind, order)
1398
+ return torch.argsort(a, dim=axis, stable=stable)
1399
+
1400
+
1401
+ def searchsorted(
1402
+ a: ArrayLike, v: ArrayLike, side="left", sorter: Optional[ArrayLike] = None
1403
+ ):
1404
+ if a.dtype.is_complex:
1405
+ raise NotImplementedError(f"searchsorted with dtype={a.dtype}")
1406
+
1407
+ return torch.searchsorted(a, v, side=side, sorter=sorter)
1408
+
1409
+
1410
+ # ### swap/move/roll axis ###
1411
+
1412
+
1413
+ def moveaxis(a: ArrayLike, source, destination):
1414
+ source = _util.normalize_axis_tuple(source, a.ndim, "source")
1415
+ destination = _util.normalize_axis_tuple(destination, a.ndim, "destination")
1416
+ return torch.moveaxis(a, source, destination)
1417
+
1418
+
1419
+ def swapaxes(a: ArrayLike, axis1, axis2):
1420
+ axis1 = _util.normalize_axis_index(axis1, a.ndim)
1421
+ axis2 = _util.normalize_axis_index(axis2, a.ndim)
1422
+ return torch.swapaxes(a, axis1, axis2)
1423
+
1424
+
1425
+ def rollaxis(a: ArrayLike, axis, start=0):
1426
+ # Straight vendor from:
1427
+ # https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numeric.py#L1259
1428
+ #
1429
+ # Also note this function in NumPy is mostly retained for backwards compat
1430
+ # (https://stackoverflow.com/questions/29891583/reason-why-numpy-rollaxis-is-so-confusing)
1431
+ # so let's not touch it unless hard pressed.
1432
+ n = a.ndim
1433
+ axis = _util.normalize_axis_index(axis, n)
1434
+ if start < 0:
1435
+ start += n
1436
+ msg = "'%s' arg requires %d <= %s < %d, but %d was passed in"
1437
+ if not (0 <= start < n + 1):
1438
+ raise _util.AxisError(msg % ("start", -n, "start", n + 1, start))
1439
+ if axis < start:
1440
+ # it's been removed
1441
+ start -= 1
1442
+ if axis == start:
1443
+ # numpy returns a view, here we try returning the tensor itself
1444
+ # return tensor[...]
1445
+ return a
1446
+ axes = list(range(0, n))
1447
+ axes.remove(axis)
1448
+ axes.insert(start, axis)
1449
+ return a.view(axes)
1450
+
1451
+
1452
+ def roll(a: ArrayLike, shift, axis=None):
1453
+ if axis is not None:
1454
+ axis = _util.normalize_axis_tuple(axis, a.ndim, allow_duplicate=True)
1455
+ if not isinstance(shift, tuple):
1456
+ shift = (shift,) * len(axis)
1457
+ return torch.roll(a, shift, axis)
1458
+
1459
+
1460
+ # ### shape manipulations ###
1461
+
1462
+
1463
+ def squeeze(a: ArrayLike, axis=None):
1464
+ if axis == ():
1465
+ result = a
1466
+ elif axis is None:
1467
+ result = a.squeeze()
1468
+ else:
1469
+ if isinstance(axis, tuple):
1470
+ result = a
1471
+ for ax in axis:
1472
+ result = a.squeeze(ax)
1473
+ else:
1474
+ result = a.squeeze(axis)
1475
+ return result
1476
+
1477
+
1478
+ def reshape(a: ArrayLike, newshape, order: NotImplementedType = "C"):
1479
+ # if sh = (1, 2, 3), numpy allows both .reshape(sh) and .reshape(*sh)
1480
+ newshape = newshape[0] if len(newshape) == 1 else newshape
1481
+ return a.reshape(newshape)
1482
+
1483
+
1484
+ # NB: cannot use torch.reshape(a, newshape) above, because of
1485
+ # (Pdb) torch.reshape(torch.as_tensor([1]), 1)
1486
+ # *** TypeError: reshape(): argument 'shape' (position 2) must be tuple of SymInts, not int
1487
+
1488
+
1489
+ def transpose(a: ArrayLike, axes=None):
1490
+ # numpy allows both .transpose(sh) and .transpose(*sh)
1491
+ # also older code uses axes being a list
1492
+ if axes in [(), None, (None,)]:
1493
+ axes = tuple(reversed(range(a.ndim)))
1494
+ elif len(axes) == 1:
1495
+ axes = axes[0]
1496
+ return a.permute(axes)
1497
+
1498
+
1499
+ def ravel(a: ArrayLike, order: NotImplementedType = "C"):
1500
+ return torch.flatten(a)
1501
+
1502
+
1503
+ def diff(
1504
+ a: ArrayLike,
1505
+ n=1,
1506
+ axis=-1,
1507
+ prepend: Optional[ArrayLike] = None,
1508
+ append: Optional[ArrayLike] = None,
1509
+ ):
1510
+ axis = _util.normalize_axis_index(axis, a.ndim)
1511
+
1512
+ if n < 0:
1513
+ raise ValueError(f"order must be non-negative but got {n}")
1514
+
1515
+ if n == 0:
1516
+ # match numpy and return the input immediately
1517
+ return a
1518
+
1519
+ if prepend is not None:
1520
+ shape = list(a.shape)
1521
+ shape[axis] = prepend.shape[axis] if prepend.ndim > 0 else 1
1522
+ prepend = torch.broadcast_to(prepend, shape)
1523
+
1524
+ if append is not None:
1525
+ shape = list(a.shape)
1526
+ shape[axis] = append.shape[axis] if append.ndim > 0 else 1
1527
+ append = torch.broadcast_to(append, shape)
1528
+
1529
+ return torch.diff(a, n, axis=axis, prepend=prepend, append=append)
1530
+
1531
+
1532
+ # ### math functions ###
1533
+
1534
+
1535
+ def angle(z: ArrayLike, deg=False):
1536
+ result = torch.angle(z)
1537
+ if deg:
1538
+ result = result * (180 / torch.pi)
1539
+ return result
1540
+
1541
+
1542
+ def sinc(x: ArrayLike):
1543
+ return torch.sinc(x)
1544
+
1545
+
1546
+ # NB: have to normalize *varargs manually
1547
+ def gradient(f: ArrayLike, *varargs, axis=None, edge_order=1):
1548
+ N = f.ndim # number of dimensions
1549
+
1550
+ varargs = _util.ndarrays_to_tensors(varargs)
1551
+
1552
+ if axis is None:
1553
+ axes = tuple(range(N))
1554
+ else:
1555
+ axes = _util.normalize_axis_tuple(axis, N)
1556
+
1557
+ len_axes = len(axes)
1558
+ n = len(varargs)
1559
+ if n == 0:
1560
+ # no spacing argument - use 1 in all axes
1561
+ dx = [1.0] * len_axes
1562
+ elif n == 1 and (_dtypes_impl.is_scalar(varargs[0]) or varargs[0].ndim == 0):
1563
+ # single scalar or 0D tensor for all axes (np.ndim(varargs[0]) == 0)
1564
+ dx = varargs * len_axes
1565
+ elif n == len_axes:
1566
+ # scalar or 1d array for each axis
1567
+ dx = list(varargs)
1568
+ for i, distances in enumerate(dx):
1569
+ distances = torch.as_tensor(distances)
1570
+ if distances.ndim == 0:
1571
+ continue
1572
+ elif distances.ndim != 1:
1573
+ raise ValueError("distances must be either scalars or 1d")
1574
+ if len(distances) != f.shape[axes[i]]:
1575
+ raise ValueError(
1576
+ "when 1d, distances must match "
1577
+ "the length of the corresponding dimension"
1578
+ )
1579
+ if not (distances.dtype.is_floating_point or distances.dtype.is_complex):
1580
+ distances = distances.double()
1581
+
1582
+ diffx = torch.diff(distances)
1583
+ # if distances are constant reduce to the scalar case
1584
+ # since it brings a consistent speedup
1585
+ if (diffx == diffx[0]).all():
1586
+ diffx = diffx[0]
1587
+ dx[i] = diffx
1588
+ else:
1589
+ raise TypeError("invalid number of arguments")
1590
+
1591
+ if edge_order > 2:
1592
+ raise ValueError("'edge_order' greater than 2 not supported")
1593
+
1594
+ # use central differences on interior and one-sided differences on the
1595
+ # endpoints. This preserves second order-accuracy over the full domain.
1596
+
1597
+ outvals = []
1598
+
1599
+ # create slice objects --- initially all are [:, :, ..., :]
1600
+ slice1 = [slice(None)] * N
1601
+ slice2 = [slice(None)] * N
1602
+ slice3 = [slice(None)] * N
1603
+ slice4 = [slice(None)] * N
1604
+
1605
+ otype = f.dtype
1606
+ if _dtypes_impl.python_type_for_torch(otype) in (int, bool):
1607
+ # Convert to floating point.
1608
+ # First check if f is a numpy integer type; if so, convert f to float64
1609
+ # to avoid modular arithmetic when computing the changes in f.
1610
+ f = f.double()
1611
+ otype = torch.float64
1612
+
1613
+ for axis, ax_dx in zip(axes, dx):
1614
+ if f.shape[axis] < edge_order + 1:
1615
+ raise ValueError(
1616
+ "Shape of array too small to calculate a numerical gradient, "
1617
+ "at least (edge_order + 1) elements are required."
1618
+ )
1619
+ # result allocation
1620
+ out = torch.empty_like(f, dtype=otype)
1621
+
1622
+ # spacing for the current axis (NB: np.ndim(ax_dx) == 0)
1623
+ uniform_spacing = _dtypes_impl.is_scalar(ax_dx) or ax_dx.ndim == 0
1624
+
1625
+ # Numerical differentiation: 2nd order interior
1626
+ slice1[axis] = slice(1, -1)
1627
+ slice2[axis] = slice(None, -2)
1628
+ slice3[axis] = slice(1, -1)
1629
+ slice4[axis] = slice(2, None)
1630
+
1631
+ if uniform_spacing:
1632
+ out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2.0 * ax_dx)
1633
+ else:
1634
+ dx1 = ax_dx[0:-1]
1635
+ dx2 = ax_dx[1:]
1636
+ a = -(dx2) / (dx1 * (dx1 + dx2))
1637
+ b = (dx2 - dx1) / (dx1 * dx2)
1638
+ c = dx1 / (dx2 * (dx1 + dx2))
1639
+ # fix the shape for broadcasting
1640
+ shape = [1] * N
1641
+ shape[axis] = -1
1642
+ a = a.reshape(shape)
1643
+ b = b.reshape(shape)
1644
+ c = c.reshape(shape)
1645
+ # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:]
1646
+ out[tuple(slice1)] = (
1647
+ a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]
1648
+ )
1649
+
1650
+ # Numerical differentiation: 1st order edges
1651
+ if edge_order == 1:
1652
+ slice1[axis] = 0
1653
+ slice2[axis] = 1
1654
+ slice3[axis] = 0
1655
+ dx_0 = ax_dx if uniform_spacing else ax_dx[0]
1656
+ # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0])
1657
+ out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_0
1658
+
1659
+ slice1[axis] = -1
1660
+ slice2[axis] = -1
1661
+ slice3[axis] = -2
1662
+ dx_n = ax_dx if uniform_spacing else ax_dx[-1]
1663
+ # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2])
1664
+ out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_n
1665
+
1666
+ # Numerical differentiation: 2nd order edges
1667
+ else:
1668
+ slice1[axis] = 0
1669
+ slice2[axis] = 0
1670
+ slice3[axis] = 1
1671
+ slice4[axis] = 2
1672
+ if uniform_spacing:
1673
+ a = -1.5 / ax_dx
1674
+ b = 2.0 / ax_dx
1675
+ c = -0.5 / ax_dx
1676
+ else:
1677
+ dx1 = ax_dx[0]
1678
+ dx2 = ax_dx[1]
1679
+ a = -(2.0 * dx1 + dx2) / (dx1 * (dx1 + dx2))
1680
+ b = (dx1 + dx2) / (dx1 * dx2)
1681
+ c = -dx1 / (dx2 * (dx1 + dx2))
1682
+ # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2]
1683
+ out[tuple(slice1)] = (
1684
+ a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]
1685
+ )
1686
+
1687
+ slice1[axis] = -1
1688
+ slice2[axis] = -3
1689
+ slice3[axis] = -2
1690
+ slice4[axis] = -1
1691
+ if uniform_spacing:
1692
+ a = 0.5 / ax_dx
1693
+ b = -2.0 / ax_dx
1694
+ c = 1.5 / ax_dx
1695
+ else:
1696
+ dx1 = ax_dx[-2]
1697
+ dx2 = ax_dx[-1]
1698
+ a = (dx2) / (dx1 * (dx1 + dx2))
1699
+ b = -(dx2 + dx1) / (dx1 * dx2)
1700
+ c = (2.0 * dx2 + dx1) / (dx2 * (dx1 + dx2))
1701
+ # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1]
1702
+ out[tuple(slice1)] = (
1703
+ a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]
1704
+ )
1705
+
1706
+ outvals.append(out)
1707
+
1708
+ # reset the slice object in this dimension to ":"
1709
+ slice1[axis] = slice(None)
1710
+ slice2[axis] = slice(None)
1711
+ slice3[axis] = slice(None)
1712
+ slice4[axis] = slice(None)
1713
+
1714
+ if len_axes == 1:
1715
+ return outvals[0]
1716
+ else:
1717
+ return outvals
1718
+
1719
+
1720
+ # ### Type/shape etc queries ###
1721
+
1722
+
1723
def round(a: ArrayLike, decimals=0, out: Optional[OutArray] = None):
    """Round `a` to `decimals` places; integer/bool inputs pass through unchanged."""
    if a.is_floating_point():
        return torch.round(a, decimals=decimals)
    if a.is_complex():
        # torch.round has no complex kernel ("round_cpu" not implemented for
        # 'ComplexFloat'), so round the real and imaginary parts separately.
        rounded_real = torch.round(a.real, decimals=decimals)
        rounded_imag = torch.round(a.imag, decimals=decimals)
        return torch.complex(rounded_real, rounded_imag)
    # Integer/bool dtypes: rounding is a no-op (and torch.round lacks a kernel
    # for them anyway: "round_cpu" not implemented for 'int').
    return a


# numpy spelling variants
around = round
round_ = round
1740
+
1741
+
1742
def real_if_close(a: ArrayLike, tol=100):
    """Return `a.real` when every imaginary part is negligible, else `a` unchanged."""
    if not torch.is_complex(a):
        return a
    # Undocumented in numpy: tol <= 1 is an *absolute* tolerance, while
    # tol > 1 is a relative tolerance in units of the dtype's epsilon.
    # https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/type_check.py#L577
    threshold = tol * torch.finfo(a.dtype).eps if tol > 1 else tol
    if torch.abs(a.imag).lt(threshold).all():
        return a.real
    return a
1753
+
1754
+
1755
def real(a: ArrayLike):
    """Real part of `a` (the input itself for non-complex tensors)."""
    return torch.real(a)


def imag(a: ArrayLike):
    """Imaginary part of `a`; real inputs yield a fresh all-zeros tensor."""
    if not a.is_complex():
        return torch.zeros_like(a)
    return a.imag
1763
+
1764
+
1765
def iscomplex(x: ArrayLike):
    """Elementwise: True where the imaginary part is nonzero."""
    if not torch.is_complex(x):
        return torch.zeros_like(x, dtype=torch.bool)
    return x.imag != 0


def isreal(x: ArrayLike):
    """Elementwise complement of `iscomplex`."""
    if not torch.is_complex(x):
        return torch.ones_like(x, dtype=torch.bool)
    return x.imag == 0


def iscomplexobj(x: ArrayLike):
    """Dtype-level check (unlike the elementwise `iscomplex`)."""
    return torch.is_complex(x)


def isrealobj(x: ArrayLike):
    """Dtype-level complement of `iscomplexobj`."""
    return not torch.is_complex(x)


def isneginf(x: ArrayLike, out: Optional[OutArray] = None):
    """Elementwise: True where `x` is -inf."""
    return torch.isneginf(x)


def isposinf(x: ArrayLike, out: Optional[OutArray] = None):
    """Elementwise: True where `x` is +inf."""
    return torch.isposinf(x)
1791
+
1792
+
1793
+ def i0(x: ArrayLike):
1794
+ return torch.special.i0(x)
1795
+
1796
+
1797
+ def isscalar(a):
1798
+ # We need to use normalize_array_like, but we don't want to export it in funcs.py
1799
+ from ._normalizations import normalize_array_like
1800
+
1801
+ try:
1802
+ t = normalize_array_like(a)
1803
+ return t.numel() == 1
1804
+ except Exception:
1805
+ return False
1806
+
1807
+
1808
+ # ### Filter windows ###
1809
+
1810
+
1811
def hamming(M):
    """Symmetric (periodic=False) Hamming window of length M."""
    float_dtype = _dtypes_impl.default_dtypes().float_dtype
    return torch.hamming_window(M, periodic=False, dtype=float_dtype)


def hanning(M):
    """Symmetric Hann window of length M."""
    float_dtype = _dtypes_impl.default_dtypes().float_dtype
    return torch.hann_window(M, periodic=False, dtype=float_dtype)


def kaiser(M, beta):
    """Symmetric Kaiser window of length M with shape parameter `beta`."""
    float_dtype = _dtypes_impl.default_dtypes().float_dtype
    return torch.kaiser_window(M, beta=beta, periodic=False, dtype=float_dtype)


def blackman(M):
    """Symmetric Blackman window of length M."""
    float_dtype = _dtypes_impl.default_dtypes().float_dtype
    return torch.blackman_window(M, periodic=False, dtype=float_dtype)


def bartlett(M):
    """Symmetric Bartlett (triangular) window of length M."""
    float_dtype = _dtypes_impl.default_dtypes().float_dtype
    return torch.bartlett_window(M, periodic=False, dtype=float_dtype)
1834
+
1835
+
1836
+ # ### Dtype routines ###
1837
+
1838
+ # vendored from https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/type_check.py#L666
1839
+
1840
+
1841
# vendored from https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/type_check.py#L666

# Row 0: real floating dtypes, row 1: their complex counterparts, indexed by
# precision level. float16 has no complex counterpart, hence the None.
array_type = [
    [torch.float16, torch.float32, torch.float64],
    [None, torch.complex64, torch.complex128],
]
# Precision level for each supported inexact dtype.
array_precision = {
    torch.float16: 0,
    torch.float32: 1,
    torch.float64: 2,
    torch.complex64: 1,
    torch.complex128: 2,
}


def common_type(*tensors: ArrayLike):
    """Return the scalar type (always inexact) common to all input tensors."""
    is_complex = False
    precision = 0
    for a in tensors:
        t = a.dtype
        if iscomplexobj(a):
            is_complex = True
        if not (t.is_floating_point or t.is_complex):
            # integers promote to double, mirroring array_precision[np.double]
            p = 2
        else:
            p = array_precision.get(t, None)
            if p is None:
                raise TypeError("can't get common type for non-numeric array")
        precision = builtins.max(precision, p)
    return array_type[1 if is_complex else 0][precision]
1872
+
1873
+
1874
+ # ### histograms ###
1875
+
1876
+
1877
def histogram(
    a: ArrayLike,
    bins: ArrayLike = 10,
    range=None,
    normed=None,
    weights: Optional[ArrayLike] = None,
    density=None,
):
    """numpy.histogram implemented on top of torch.histogram.

    Integer inputs are computed in float64 and the results cast back, so
    that the returned counts/edges dtypes match numpy's behavior.

    Raises:
        ValueError: if the deprecated `normed` argument is given.
        NotImplementedError: for complex weights.
    """
    if normed is not None:
        raise ValueError("normed argument is deprecated, use density= instead")

    if weights is not None and weights.dtype.is_complex:
        raise NotImplementedError("complex weights histogram.")

    # Remember which inputs were integral so we can cast the results back below.
    is_a_int = not (a.dtype.is_floating_point or a.dtype.is_complex)
    is_w_int = weights is None or not weights.dtype.is_floating_point
    if is_a_int:
        # torch.histogram requires a floating-point input tensor.
        a = a.double()

    if weights is not None:
        # torch.histogram requires weights to share the input's dtype.
        weights = _util.cast_if_needed(weights, a.dtype)

    if isinstance(bins, torch.Tensor):
        if bins.ndim == 0:
            # bins was a single int
            bins = operator.index(bins)
        else:
            # bin edges must also share the input's dtype
            bins = _util.cast_if_needed(bins, a.dtype)

    if range is None:
        h, b = torch.histogram(a, bins, weight=weights, density=bool(density))
    else:
        h, b = torch.histogram(
            a, bins, range=range, weight=weights, density=bool(density)
        )

    # Cast results back: counts are integral unless density or float weights
    # were requested; edges follow the (original) input dtype.
    if not density and is_w_int:
        h = h.long()
    if is_a_int:
        b = b.long()

    return h, b
1919
+
1920
+
1921
def histogram2d(
    x,
    y,
    bins=10,
    range: Optional[ArrayLike] = None,
    normed=None,
    weights: Optional[ArrayLike] = None,
    density=None,
):
    """numpy.histogram2d: a thin wrapper delegating to histogramdd.

    Returns (hist, xedges, yedges).
    """
    # vendored from https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/twodim_base.py#L655-L821
    if len(x) != len(y):
        raise ValueError("x and y must have the same length.")

    # N == 1: bins is a scalar; N == 2: one spec per dimension.
    try:
        N = len(bins)
    except TypeError:
        N = 1

    if N != 1 and N != 2:
        # bins is a single 1D array of edges: share it between both dimensions.
        bins = [bins, bins]

    h, e = histogramdd((x, y), bins, range, normed, weights, density)

    return h, e[0], e[1]
1945
+
1946
+
1947
def histogramdd(
    sample,
    bins=10,
    range: Optional[ArrayLike] = None,
    normed=None,
    weights: Optional[ArrayLike] = None,
    density=None,
):
    """numpy.histogramdd on top of torch.histogramdd.

    `sample` is normalized manually (not via the decorator) because its
    interpretation differs between a list of per-dimension sequences
    (transposed to sample-major) and a 2D array (used as-is).
    """
    # have to normalize manually because `sample` interpretation differs
    # for a list of lists and a 2D array
    if normed is not None:
        raise ValueError("normed argument is deprecated, use density= instead")

    from ._normalizations import normalize_array_like, normalize_seq_array_like

    if isinstance(sample, (list, tuple)):
        # list/tuple of D sequences -> transpose to an (N, D) sample array
        sample = normalize_array_like(sample).T
    else:
        sample = normalize_array_like(sample)

    sample = torch.atleast_2d(sample)

    if not (sample.dtype.is_floating_point or sample.dtype.is_complex):
        # torch.histogramdd requires a floating-point sample
        sample = sample.double()

    # bins is either an int, or a sequence of ints or a sequence of arrays
    bins_is_array = not (
        isinstance(bins, int) or builtins.all(isinstance(b, int) for b in bins)
    )
    if bins_is_array:
        bins = normalize_seq_array_like(bins)
        # remember the original edge dtypes so the returned edges match numpy
        bins_dtypes = [b.dtype for b in bins]
        bins = [_util.cast_if_needed(b, sample.dtype) for b in bins]

    if range is not None:
        # torch wants a flat [min0, max0, min1, max1, ...] list
        range = range.flatten().tolist()

    if weights is not None:
        # range=... is required : interleave min and max values per dimension
        mm = sample.aminmax(dim=0)
        range = torch.cat(mm).reshape(2, -1).T.flatten()
        range = tuple(range.tolist())
        weights = _util.cast_if_needed(weights, sample.dtype)
        w_kwd = {"weight": weights}
    else:
        w_kwd = {}

    h, b = torch.histogramdd(sample, bins, range, density=bool(density), **w_kwd)

    if bins_is_array:
        # cast edges back to the dtypes the caller supplied
        b = [_util.cast_if_needed(bb, dtyp) for bb, dtyp in zip(b, bins_dtypes)]

    return h, b
2000
+
2001
+
2002
+ # ### odds and ends
2003
+
2004
+
2005
def min_scalar_type(a: ArrayLike, /):
    """Return the smallest DType that can hold the scalar value of `a`.

    For arrays with more than one element the dtype is returned unmodified,
    matching numpy. Mirrors
    https://github.com/numpy/numpy/blob/maintenance/1.24.x/numpy/core/src/multiarray/convert_datatype.c#L1288
    """
    from ._dtypes import DType

    if a.numel() > 1:
        # numpy docs: "For non-scalar array a, returns the vector’s dtype unmodified."
        return DType(a.dtype)

    if a.dtype == torch.bool:
        dtype = torch.bool

    elif a.dtype.is_complex:
        fi = torch.finfo(torch.float32)
        fits_in_single = a.dtype == torch.complex64 or (
            fi.min <= a.real <= fi.max and fi.min <= a.imag <= fi.max
        )
        dtype = torch.complex64 if fits_in_single else torch.complex128

    elif a.dtype.is_floating_point:
        if not torch.isfinite(a):
            # BUG FIX: inf/-inf/nan failed every finfo range check below and
            # left `dtype` unbound (NameError). Non-finite values are
            # representable in every float dtype, so numpy reports float16.
            dtype = torch.float16
        else:
            for dt in [torch.float16, torch.float32, torch.float64]:
                fi = torch.finfo(dt)
                if fi.min <= a <= fi.max:
                    dtype = dt
                    break
    else:
        # must be integer
        for dt in [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]:
            # Prefer unsigned int where possible, as numpy does.
            ii = torch.iinfo(dt)
            if ii.min <= a <= ii.max:
                dtype = dt
                break

    return DType(dtype)
2040
+
2041
+
2042
def pad(array: ArrayLike, pad_width: ArrayLike, mode="constant", **kwargs):
    """numpy.pad, constant mode only."""
    if mode != "constant":
        raise NotImplementedError
    # torch.nn.functional.pad wants a plain python scalar as the fill value,
    # so coerce through the python type matching the array's dtype.
    scalar_type = _dtypes_impl.python_type_for_torch(array.dtype)
    fill = scalar_type(kwargs.get("constant_values", 0))

    # numpy lists (before, after) pairs from the first axis outwards; torch
    # expects the pairs for the *last* axis first, flattened.
    widths = torch.broadcast_to(pad_width, (array.ndim, 2))
    widths = torch.flip(widths, (0,)).flatten()

    return torch.nn.functional.pad(array, tuple(widths), value=fill)
venv/lib/python3.10/site-packages/torch/_numpy/_getlimits.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import torch
4
+
5
+ from . import _dtypes
6
+
7
+
8
def finfo(dtyp):
    """Machine limits for the floating-point dtype `dtyp` (any numpy-style spelling)."""
    return torch.finfo(_dtypes.dtype(dtyp).torch_dtype)


def iinfo(dtyp):
    """Machine limits for the integer dtype `dtyp` (any numpy-style spelling)."""
    return torch.iinfo(_dtypes.dtype(dtyp).torch_dtype)
venv/lib/python3.10/site-packages/torch/_numpy/_ndarray.py ADDED
@@ -0,0 +1,591 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ from __future__ import annotations
4
+
5
+ import builtins
6
+ import math
7
+ import operator
8
+ from typing import Sequence
9
+
10
+ import torch
11
+
12
+ from . import _dtypes, _dtypes_impl, _funcs, _ufuncs, _util
13
+ from ._normalizations import (
14
+ ArrayLike,
15
+ normalize_array_like,
16
+ normalizer,
17
+ NotImplementedType,
18
+ )
19
+
20
+ newaxis = None
21
+
22
# Canonical numpy ndarray.flags names.
FLAGS = [
    "C_CONTIGUOUS",
    "F_CONTIGUOUS",
    "OWNDATA",
    "WRITEABLE",
    "ALIGNED",
    "WRITEBACKIFCOPY",
    "FNC",
    "FORC",
    "BEHAVED",
    "CARRAY",
    "FARRAY",
]

# Single/double-letter aliases numpy accepts for the names above.
SHORTHAND_TO_FLAGS = {
    "C": "C_CONTIGUOUS",
    "F": "F_CONTIGUOUS",
    "O": "OWNDATA",
    "W": "WRITEABLE",
    "A": "ALIGNED",
    "X": "WRITEBACKIFCOPY",
    "B": "BEHAVED",
    "CA": "CARRAY",
    "FA": "FARRAY",
}


class Flags:
    """Read-only stand-in for numpy's ndarray.flags object.

    Only the flags present in the dict passed to the constructor are
    available; asking for any other known flag raises NotImplementedError.
    """

    def __init__(self, flag_to_value: dict):
        assert all(k in FLAGS for k in flag_to_value.keys())  # sanity check
        self._flag_to_value = flag_to_value

    def __getattr__(self, attr: str):
        # numpy exposes lowercase attribute access (flags.c_contiguous);
        # route it through __getitem__ on the uppercase key.
        if attr.islower() and attr.upper() in FLAGS:
            return self[attr.upper()]
        else:
            raise AttributeError(f"No flag attribute '{attr}'")

    def __getitem__(self, key):
        if key in SHORTHAND_TO_FLAGS.keys():
            key = SHORTHAND_TO_FLAGS[key]
        if key in FLAGS:
            try:
                return self._flag_to_value[key]
            except KeyError as e:
                # a known flag that this wrapper does not track
                raise NotImplementedError(f"{key=}") from e
        else:
            raise KeyError(f"No flag key '{key}'")

    def __setattr__(self, attr, value):
        # Lowercase flag names route to item assignment (which rejects them);
        # everything else (e.g. _flag_to_value in __init__) is set normally.
        if attr.islower() and attr.upper() in FLAGS:
            self[attr.upper()] = value
        else:
            super().__setattr__(attr, value)

    def __setitem__(self, key, value):
        if key in FLAGS or key in SHORTHAND_TO_FLAGS.keys():
            raise NotImplementedError("Modifying flags is not implemented")
        else:
            raise KeyError(f"No flag key '{key}'")
82
+
83
+
84
def create_method(fn, name=None):
    """Wrap `fn` so it presents itself as an ndarray method called `name`.

    Falls back to `fn.__name__` when `name` is not given.
    """
    method_name = name or fn.__name__

    def wrapper(*args, **kwargs):
        return fn(*args, **kwargs)

    wrapper.__name__ = method_name
    wrapper.__qualname__ = f"ndarray.{method_name}"
    return wrapper
93
+
94
+
95
+ # Map ndarray.name_method -> np.name_func
96
+ # If name_func == None, it means that name_method == name_func
97
# Map ndarray.name_method -> np.name_func.
# If name_func == None, it means that name_method == name_func.
methods = {
    "clip": None,
    "nonzero": None,
    "repeat": None,
    "round": None,
    "squeeze": None,
    "swapaxes": None,
    "ravel": None,
    # linalg
    "diagonal": None,
    "dot": None,
    "trace": None,
    # sorting
    "argsort": None,
    "searchsorted": None,
    # reductions
    "argmax": None,
    "argmin": None,
    "any": None,
    "all": None,
    "max": None,
    "min": None,
    "ptp": None,
    "sum": None,
    "prod": None,
    "mean": None,
    "var": None,
    "std": None,
    # scans
    "cumsum": None,
    "cumprod": None,
    # advanced indexing
    "take": None,
    "choose": None,
}

# Unary/comparison dunders: __abs__ -> np.absolute, etc.
dunder = {
    "abs": "absolute",
    "invert": None,
    "pos": "positive",
    "neg": "negative",
    "gt": "greater",
    "lt": "less",
    "ge": "greater_equal",
    "le": "less_equal",
}

# dunder methods with right-looking and in-place variants
# (each entry generates __x__, __rx__ and __ix__ on ndarray).
ri_dunder = {
    "add": None,
    "sub": "subtract",
    "mul": "multiply",
    "truediv": "divide",
    "floordiv": "floor_divide",
    "pow": "power",
    "mod": "remainder",
    "and": "bitwise_and",
    "or": "bitwise_or",
    "xor": "bitwise_xor",
    "lshift": "left_shift",
    "rshift": "right_shift",
    "matmul": None,
}
160
+
161
+
162
+ def _upcast_int_indices(index):
163
+ if isinstance(index, torch.Tensor):
164
+ if index.dtype in (torch.int8, torch.int16, torch.int32, torch.uint8):
165
+ return index.to(torch.int64)
166
+ elif isinstance(index, tuple):
167
+ return tuple(_upcast_int_indices(i) for i in index)
168
+ return index
169
+
170
+
171
+ # Used to indicate that a parameter is unspecified (as opposed to explicitly
172
+ # `None`)
173
# Sentinel used to indicate that a parameter is unspecified (as opposed to
# explicitly `None`, which can be a legitimate argument value).
class _Unspecified:
    pass


# The single shared sentinel instance; compare with `is`.
_Unspecified.unspecified = _Unspecified()
178
+
179
+ ###############################################################
180
+ # ndarray class #
181
+ ###############################################################
182
+
183
+
184
class ndarray:
    """A numpy.ndarray work-alike backed by a single torch.Tensor.

    The only state is ``self.tensor``; properties and methods delegate to it
    or to the np-like functions in `_funcs`/`_ufuncs`. Most methods are
    generated at class-creation time from the tables above.
    """

    def __init__(self, t=None):
        if t is None:
            self.tensor = torch.Tensor()
        elif isinstance(t, torch.Tensor):
            self.tensor = t
        else:
            raise ValueError(
                "ndarray constructor is not recommended; prefer"
                "either array(...) or zeros/empty(...)"
            )

    # Register NumPy functions as methods (runs in the class body, so the
    # generated functions land in the class namespace via vars()).
    for method, name in methods.items():
        fn = getattr(_funcs, name or method)
        vars()[method] = create_method(fn, method)

    # Regular methods but coming from ufuncs
    conj = create_method(_ufuncs.conjugate, "conj")
    conjugate = create_method(_ufuncs.conjugate)

    for method, name in dunder.items():
        fn = getattr(_ufuncs, name or method)
        method = f"__{method}__"
        vars()[method] = create_method(fn, method)

    # Binary dunders get plain, reflected (__r*__) and in-place (__i*__)
    # variants; `fn=fn` default-binds the current ufunc in each lambda.
    for method, name in ri_dunder.items():
        fn = getattr(_ufuncs, name or method)
        plain = f"__{method}__"
        vars()[plain] = create_method(fn, plain)
        rvar = f"__r{method}__"
        vars()[rvar] = create_method(lambda self, other, fn=fn: fn(other, self), rvar)
        ivar = f"__i{method}__"
        vars()[ivar] = create_method(
            lambda self, other, fn=fn: fn(self, other, out=self), ivar
        )

    # There's no __idivmod__
    __divmod__ = create_method(_ufuncs.divmod, "__divmod__")
    __rdivmod__ = create_method(
        lambda self, other: _ufuncs.divmod(other, self), "__rdivmod__"
    )

    # prevent loop variables leaking into the ndarray class namespace
    del ivar, rvar, name, plain, fn, method

    @property
    def shape(self):
        return tuple(self.tensor.shape)

    @property
    def size(self):
        return self.tensor.numel()

    @property
    def ndim(self):
        return self.tensor.ndim

    @property
    def dtype(self):
        return _dtypes.dtype(self.tensor.dtype)

    @property
    def strides(self):
        # numpy strides are in bytes; torch strides are in elements.
        elsize = self.tensor.element_size()
        return tuple(stride * elsize for stride in self.tensor.stride())

    @property
    def itemsize(self):
        return self.tensor.element_size()

    @property
    def flags(self):
        # Note contiguous in torch is assumed C-style
        return Flags(
            {
                "C_CONTIGUOUS": self.tensor.is_contiguous(),
                "F_CONTIGUOUS": self.T.tensor.is_contiguous(),
                "OWNDATA": self.tensor._base is None,
                "WRITEABLE": True,  # pytorch does not have readonly tensors
            }
        )

    @property
    def data(self):
        return self.tensor.data_ptr()

    @property
    def nbytes(self):
        return self.tensor.storage().nbytes()

    @property
    def T(self):
        return self.transpose()

    @property
    def real(self):
        return _funcs.real(self)

    @real.setter
    def real(self, value):
        self.tensor.real = asarray(value).tensor

    @property
    def imag(self):
        return _funcs.imag(self)

    @imag.setter
    def imag(self, value):
        self.tensor.imag = asarray(value).tensor

    # ctors
    def astype(self, dtype, order="K", casting="unsafe", subok=True, copy=True):
        """Cast to `dtype`; only the numpy-default argument values are supported."""
        if order != "K":
            raise NotImplementedError(f"astype(..., order={order} is not implemented.")
        if casting != "unsafe":
            raise NotImplementedError(
                f"astype(..., casting={casting} is not implemented."
            )
        if not subok:
            raise NotImplementedError(f"astype(..., subok={subok} is not implemented.")
        if not copy:
            raise NotImplementedError(f"astype(..., copy={copy} is not implemented.")
        torch_dtype = _dtypes.dtype(dtype).torch_dtype
        t = self.tensor.to(torch_dtype)
        return ndarray(t)

    @normalizer
    def copy(self: ArrayLike, order: NotImplementedType = "C"):
        return self.clone()

    @normalizer
    def flatten(self: ArrayLike, order: NotImplementedType = "C"):
        return torch.flatten(self)

    def resize(self, *new_shape, refcheck=False):
        # NB: differs from np.resize: fills with zeros instead of making repeated copies of input.
        if refcheck:
            raise NotImplementedError(
                f"resize(..., refcheck={refcheck} is not implemented."
            )
        if new_shape in [(), (None,)]:
            return

        # support both x.resize((2, 2)) and x.resize(2, 2)
        if len(new_shape) == 1:
            new_shape = new_shape[0]
        if isinstance(new_shape, int):
            new_shape = (new_shape,)

        if builtins.any(x < 0 for x in new_shape):
            raise ValueError("all elements of `new_shape` must be non-negative")

        new_numel, old_numel = math.prod(new_shape), self.tensor.numel()

        self.tensor.resize_(new_shape)

        if new_numel >= old_numel:
            # zero-fill new elements
            assert self.tensor.is_contiguous()
            b = self.tensor.flatten()  # does not copy
            b[old_numel:].zero_()

    def view(self, dtype=_Unspecified.unspecified, type=_Unspecified.unspecified):
        """Reinterpret the underlying buffer as `dtype` (torch.Tensor.view)."""
        if dtype is _Unspecified.unspecified:
            dtype = self.dtype
        if type is not _Unspecified.unspecified:
            raise NotImplementedError(f"view(..., type={type} is not implemented.")
        torch_dtype = _dtypes.dtype(dtype).torch_dtype
        tview = self.tensor.view(torch_dtype)
        return ndarray(tview)

    @normalizer
    def fill(self, value: ArrayLike):
        # Both Pytorch and NumPy accept 0D arrays/tensors and scalars, and
        # error out on D > 0 arrays
        self.tensor.fill_(value)

    def tolist(self):
        return self.tensor.tolist()

    def __iter__(self):
        return (ndarray(x) for x in self.tensor.__iter__())

    def __str__(self):
        return (
            str(self.tensor)
            .replace("tensor", "torch.ndarray")
            .replace("dtype=torch.", "dtype=")
        )

    __repr__ = create_method(__str__)

    def __eq__(self, other):
        # numpy semantics: elementwise comparison, never raising.
        try:
            return _ufuncs.equal(self, other)
        except (RuntimeError, TypeError):
            # Failed to convert other to array: definitely not equal.
            falsy = torch.full(self.shape, fill_value=False, dtype=bool)
            return asarray(falsy)

    def __ne__(self, other):
        return ~(self == other)

    def __index__(self):
        try:
            return operator.index(self.tensor.item())
        except Exception as exc:
            raise TypeError(
                "only integer scalar arrays can be converted to a scalar index"
            ) from exc

    def __bool__(self):
        return bool(self.tensor)

    def __int__(self):
        return int(self.tensor)

    def __float__(self):
        return float(self.tensor)

    def __complex__(self):
        return complex(self.tensor)

    def is_integer(self):
        # float-like "is this an integral value" check on 0-d arrays;
        # non-scalars (item() raises) report False.
        try:
            v = self.tensor.item()
            result = int(v) == v
        except Exception:
            result = False
        return result

    def __len__(self):
        return self.tensor.shape[0]

    def __contains__(self, x):
        return self.tensor.__contains__(x)

    def transpose(self, *axes):
        # np.transpose(arr, axis=None) but arr.transpose(*axes)
        return _funcs.transpose(self, axes)

    def reshape(self, *shape, order="C"):
        # arr.reshape(shape) and arr.reshape(*shape)
        return _funcs.reshape(self, shape, order=order)

    def sort(self, axis=-1, kind=None, order=None):
        # ndarray.sort works in-place
        _funcs.copyto(self, _funcs.sort(self, axis, kind, order))

    def item(self, *args):
        # Mimic NumPy's implementation with three special cases (no arguments,
        # a flat index and a multi-index):
        # https://github.com/numpy/numpy/blob/main/numpy/core/src/multiarray/methods.c#L702
        if args == ():
            return self.tensor.item()
        elif len(args) == 1:
            # int argument
            return self.ravel()[args[0]]
        else:
            return self.__getitem__(args)

    def __getitem__(self, index):
        tensor = self.tensor

        # torch does not support negative-step slices; emulate them by
        # flipping the tensor along that axis and using the positive step.
        def neg_step(i, s):
            if not (isinstance(s, slice) and s.step is not None and s.step < 0):
                return s

            nonlocal tensor
            tensor = torch.flip(tensor, (i,))

            # Account for the fact that a slice includes the start but not the end
            assert isinstance(s.start, int) or s.start is None
            assert isinstance(s.stop, int) or s.stop is None
            start = s.stop + 1 if s.stop else None
            stop = s.start + 1 if s.start else None

            return slice(start, stop, -s.step)

        if isinstance(index, Sequence):
            index = type(index)(neg_step(i, s) for i, s in enumerate(index))
        else:
            index = neg_step(0, index)
        index = _util.ndarrays_to_tensors(index)
        index = _upcast_int_indices(index)
        return ndarray(tensor.__getitem__(index))

    def __setitem__(self, index, value):
        index = _util.ndarrays_to_tensors(index)
        index = _upcast_int_indices(index)

        if not _dtypes_impl.is_scalar(value):
            value = normalize_array_like(value)
            value = _util.cast_if_needed(value, self.tensor.dtype)

        return self.tensor.__setitem__(index, value)

    take = _funcs.take
    put = _funcs.put

    def __dlpack__(self, *, stream=None):
        return self.tensor.__dlpack__(stream=stream)

    def __dlpack_device__(self):
        return self.tensor.__dlpack_device__()
490
+
491
+
492
def _tolist(obj):
    """Recursively convert tensors into lists."""
    converted = []
    for elem in obj:
        if isinstance(elem, (list, tuple)):
            elem = _tolist(elem)
        converted.append(elem.tensor.tolist() if isinstance(elem, ndarray) else elem)
    return converted
503
+
504
+
505
+ # This is the ideally the only place which talks to ndarray directly.
506
+ # The rest goes through asarray (preferred) or array.
507
+
508
+
509
def array(obj, dtype=None, *, copy=True, order="K", subok=False, ndmin=0, like=None):
    """numpy.array: coerce `obj` into an ndarray backed by a torch.Tensor.

    Only the default `order`/`subok`/`like` values are supported; everything
    else funnels through `_util._coerce_to_tensor`.
    """
    if subok is not False:
        raise NotImplementedError("'subok' parameter is not supported.")
    if like is not None:
        raise NotImplementedError("'like' parameter is not supported.")
    if order != "K":
        raise NotImplementedError()

    # a happy path: no copy requested and nothing to change -> return as-is
    if (
        isinstance(obj, ndarray)
        and copy is False
        and dtype is None
        and ndmin <= obj.ndim
    ):
        return obj

    if isinstance(obj, (list, tuple)):
        # FIXME and they have the same dtype, device, etc
        if obj and all(isinstance(x, torch.Tensor) for x in obj):
            # list of arrays: *under torch.Dynamo* these are FakeTensors
            obj = torch.stack(obj)
        else:
            # XXX: remove tolist
            # lists of ndarrays: [1, [2, 3], ndarray(4)] convert to lists of lists
            obj = _tolist(obj)

    # is obj an ndarray already?
    if isinstance(obj, ndarray):
        obj = obj.tensor

    # is a specific dtype requested?
    torch_dtype = None
    if dtype is not None:
        torch_dtype = _dtypes.dtype(dtype).torch_dtype

    tensor = _util._coerce_to_tensor(obj, torch_dtype, copy, ndmin)
    return ndarray(tensor)
547
+
548
+
549
def asarray(a, dtype=None, order="K", *, like=None):
    """array(...) that avoids a copy when `a` is already a matching ndarray."""
    return array(a, dtype=dtype, order=order, like=like, copy=False, ndmin=0)


def ascontiguousarray(a, dtype=None, *, like=None):
    """Like asarray, but guarantee the backing tensor is (C-)contiguous."""
    arr = asarray(a, dtype=dtype, like=like)
    if not arr.tensor.is_contiguous():
        arr.tensor = arr.tensor.contiguous()
    return arr


def from_dlpack(x, /):
    """Wrap a DLPack-exporting object into an ndarray (zero copy)."""
    t = torch.from_dlpack(x)
    return ndarray(t)
563
+
564
+
565
def _extract_dtype(entry):
    """Coerce `entry` (a dtype-like or an array-like) to a _dtypes.dtype."""
    try:
        return _dtypes.dtype(entry)
    except Exception:
        # not dtype-like: fall back to the dtype of the coerced array
        return asarray(entry).dtype


def can_cast(from_, to, casting="safe"):
    """numpy.can_cast over dtypes or array-likes."""
    src = _extract_dtype(from_)
    dst = _extract_dtype(to)
    return _dtypes_impl.can_cast_impl(src.torch_dtype, dst.torch_dtype, casting)
578
+
579
+
580
def result_type(*arrays_and_dtypes):
    """numpy.result_type over a mix of array-likes and dtype specifiers."""
    tensors = []
    for entry in arrays_and_dtypes:
        try:
            t = asarray(entry).tensor
        except (RuntimeError, ValueError, TypeError):
            # Not array-like: treat as a dtype specifier and materialize a
            # minimal tensor of that dtype for promotion purposes.
            dty = _dtypes.dtype(entry)
            t = torch.empty(1, dtype=dty.torch_dtype)
        tensors.append(t)

    return _dtypes.dtype(_dtypes_impl.result_type_impl(*tensors))
venv/lib/python3.10/site-packages/torch/_numpy/_normalizations.py ADDED
@@ -0,0 +1,258 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ """ "Normalize" arguments: convert array_likes to tensors, dtypes to torch dtypes and so on.
4
+ """
5
+ from __future__ import annotations
6
+
7
+ import functools
8
+ import inspect
9
+ import operator
10
+ import typing
11
+
12
+ import torch
13
+
14
+ from . import _dtypes, _dtypes_impl, _util
15
+
16
# Synthetic annotation markers.  These TypeVars carry no typing semantics here:
# the `normalizers` table below matches parameter annotations *by string name*
# to pick a converter (this works because of `from __future__ import annotations`).
ArrayLike = typing.TypeVar("ArrayLike")
Scalar = typing.Union[int, float, complex, bool]
ArrayLikeOrScalar = typing.Union[ArrayLike, Scalar]

DTypeLike = typing.TypeVar("DTypeLike")
AxisLike = typing.TypeVar("AxisLike")
NDArray = typing.TypeVar("NDArray")
CastingModes = typing.TypeVar("CastingModes")
KeepDims = typing.TypeVar("KeepDims")

# OutArray is to annotate the out= array argument.
#
# This one is special in several respects:
# First, it needs to be an NDArray, and we need to preserve the `result is out`
# semantics.  Therefore, we cannot just extract the Tensor from the out array.
# So we never pass the out array to implementer functions and handle it in the
# `normalizer` below.
# Second, the out= argument can be either keyword or positional argument, and
# as a positional arg, it can be anywhere in the signature.
# To handle all this, we define a special `OutArray` annotation and dispatch on it.
#
OutArray = typing.TypeVar("OutArray")

try:
    from typing import NotImplementedType
except ImportError:
    # Python < 3.10: fall back to a marker TypeVar with the same name.
    NotImplementedType = typing.TypeVar("NotImplementedType")
43
+
44
+
45
def normalize_array_like(x, parm=None):
    """Coerce an array-like `x` into the torch.Tensor backing it."""
    from ._ndarray import asarray

    tensor = asarray(x).tensor
    return tensor
49
+
50
+
51
def normalize_array_like_or_scalar(x, parm=None):
    """Leave python scalars / symbolic values as-is; tensor-ify everything else."""
    return x if _dtypes_impl.is_scalar_or_symbolic(x) else normalize_array_like(x, parm)
55
+
56
+
57
def normalize_optional_array_like_or_scalar(x, parm=None):
    """Like normalize_array_like_or_scalar, but None passes through unchanged."""
    return None if x is None else normalize_array_like_or_scalar(x, parm)
61
+
62
+
63
def normalize_optional_array_like(x, parm=None):
    """Tensor-ify `x` unless it is None.

    This explicit normalizer exists because annotation dispatch is by string,
    so Optional[ArrayLike] does not automatically hit normalize_array_like.
    """
    if x is None:
        return None
    return normalize_array_like(x, parm)
67
+
68
+
69
def normalize_seq_array_like(x, parm=None):
    """Convert a sequence of array-likes into a tuple of tensors.

    Fix: forward `parm` to the element-level normalizer so this helper stays
    consistent with every sibling normalizer (the previous code dropped it).
    Behavior is unchanged today since normalize_array_like ignores `parm`.
    """
    return tuple(normalize_array_like(value, parm) for value in x)
71
+
72
+
73
def normalize_dtype(dtype, parm=None):
    """Map a numpy-ish dtype spec to a torch.dtype; None passes through."""
    # cf _decorators.dtype_to_torch
    if dtype is None:
        return None
    return _dtypes.dtype(dtype).torch_dtype
80
+
81
+
82
def normalize_not_implemented(arg, parm):
    """Refuse non-default values for unsupported parameters; the arg is dropped."""
    if arg == parm.default:
        return None
    raise NotImplementedError(f"'{parm.name}' parameter is not supported.")
85
+
86
+
87
def normalize_axis_like(arg, parm=None):
    """Allow 0-d integer ndarrays as axis arguments by converting them to int."""
    from ._ndarray import ndarray

    return operator.index(arg) if isinstance(arg, ndarray) else arg
93
+
94
+
95
def normalize_ndarray(arg, parm=None):
    """Extract the backing tensor from an ndarray argument; None passes through."""
    if arg is None:
        return None

    from ._ndarray import ndarray

    if isinstance(arg, ndarray):
        return arg.tensor
    raise TypeError(f"'{parm.name}' must be an array")
105
+
106
+
107
def normalize_outarray(arg, parm=None):
    """Validate an out= argument, returning the ndarray itself, not its tensor.

    Unlike normalize_ndarray we keep the wrapper so `result is out` can hold.
    """
    if arg is None:
        return None

    from ._ndarray import ndarray

    # Dynamo may hand us a raw torch.Tensor as the out= argument; wrap it first.
    if isinstance(arg, torch.Tensor):
        arg = ndarray(arg)

    if not isinstance(arg, ndarray):
        raise TypeError(f"'{parm.name}' must be an array")
    return arg
121
+
122
+
123
def normalize_casting(arg, parm=None):
    """Validate a numpy casting-mode string and return it unchanged."""
    valid_modes = ("no", "equiv", "safe", "same_kind", "unsafe")
    if arg in valid_modes:
        return arg
    raise ValueError(
        f"casting must be one of 'no', 'equiv', 'safe', 'same_kind', or 'unsafe' (got '{arg}')"
    )
129
+
130
+
131
# Dispatch table: the *string form* of a parameter annotation -> converter.
# `maybe_normalize` looks annotations up here by exact spelling, so the keys
# must match how the annotations are written in the wrapped signatures.
normalizers = {
    "ArrayLike": normalize_array_like,
    "ArrayLikeOrScalar": normalize_array_like_or_scalar,
    "Optional[ArrayLike]": normalize_optional_array_like,
    "Sequence[ArrayLike]": normalize_seq_array_like,
    "Optional[ArrayLikeOrScalar]": normalize_optional_array_like_or_scalar,
    "Optional[NDArray]": normalize_ndarray,
    "Optional[OutArray]": normalize_outarray,
    "NDArray": normalize_ndarray,
    "Optional[DTypeLike]": normalize_dtype,
    "AxisLike": normalize_axis_like,
    "NotImplementedType": normalize_not_implemented,
    "Optional[CastingModes]": normalize_casting,
}
145
+
146
+
147
def maybe_normalize(arg, parm):
    """Normalize `arg` if a normalizer is registered for its annotation."""
    normalizer = normalizers.get(parm.annotation, None)
    if normalizer is None:
        return arg
    return normalizer(arg, parm)
151
+
152
+
153
+ # ### Return value helpers ###
154
+
155
+
156
def maybe_copy_to(out, result, promote_scalar_result=False):
    """Copy `result` into `out` (an ndarray or None) and return the destination.

    With out=None the result is returned unchanged.  Tuple/list results are
    copied element-wise into the matching tuple of out arrays.
    """
    if out is None:
        return result

    if isinstance(result, torch.Tensor):
        if result.shape != out.shape:
            # A one-element result may be squeezed into a 0-d out array.
            fits = result.numel() == 1 and out.ndim == 0
            if promote_scalar_result and fits:
                result = result.squeeze()
            else:
                raise ValueError(
                    f"Bad size of the out array: out.shape = {out.shape}"
                    f" while result.shape = {result.shape}."
                )
        out.tensor.copy_(result)
        return out

    if isinstance(result, (tuple, list)):
        copied = (maybe_copy_to(o, r, promote_scalar_result) for o, r in zip(out, result))
        return type(result)(copied)

    raise AssertionError()  # We should never hit this path
178
+
179
+
180
def wrap_tensors(result):
    """Recursively wrap bare tensors (possibly inside tuples/lists) as ndarrays."""
    from ._ndarray import ndarray

    if isinstance(result, torch.Tensor):
        return ndarray(result)
    if isinstance(result, (tuple, list)):
        return type(result)(wrap_tensors(item) for item in result)
    return result
188
+
189
+
190
def array_or_scalar(values, py_type=float, return_scalar=False):
    """Return a python scalar (via `py_type`) or wrap `values` as an ndarray."""
    if return_scalar:
        return py_type(values.item())

    from ._ndarray import ndarray

    return ndarray(values)
197
+
198
+
199
+ # ### The main decorator to normalize arguments / postprocess the output ###
200
+
201
+
202
def normalizer(_func=None, *, promote_scalar_result=False):
    """Decorator: normalize annotated arguments, then post-process the result.

    Converts array-likes/dtypes per the annotation dispatch table, applies the
    keepdims fixup, copies into an out= array when given, and wraps returned
    tensors into ndarrays.  Usable both bare (@normalizer) and parameterized
    (@normalizer(promote_scalar_result=True)).
    """

    def normalizer_inner(func):
        @functools.wraps(func)
        def wrapped(*args, **kwds):
            sig = inspect.signature(func)
            params = sig.parameters
            first_param = next(iter(params.values()))

            # NumPy's API does not have positional args before variadic positional args
            if first_param.kind == inspect.Parameter.VAR_POSITIONAL:
                args = [maybe_normalize(arg, first_param) for arg in args]
            else:
                # NB: extra unknown arguments: pass through, will raise in func(*args) below
                args = (
                    tuple(
                        maybe_normalize(arg, parm)
                        for arg, parm in zip(args, params.values())
                    )
                    + args[len(params.values()) :]
                )

            # Unknown keyword names pass through untouched; func will reject them.
            kwds = {
                name: maybe_normalize(arg, params[name]) if name in params else arg
                for name, arg in kwds.items()
            }

            result = func(*args, **kwds)

            # keepdims
            bound_args = None
            if "keepdims" in params and params["keepdims"].annotation == "KeepDims":
                # keepdims can be in any position so we need sig.bind
                bound_args = sig.bind(*args, **kwds).arguments
                if bound_args.get("keepdims", False):
                    # In this case the first arg is the initial tensor and
                    # the second arg is (optionally) the axis
                    tensor = args[0]
                    axis = bound_args.get("axis")
                    result = _util.apply_keepdims(result, axis, tensor.ndim)

            # out
            if "out" in params:
                # out can be in any position so we need sig.bind
                if bound_args is None:
                    bound_args = sig.bind(*args, **kwds).arguments
                out = bound_args.get("out")
                result = maybe_copy_to(out, result, promote_scalar_result)
            result = wrap_tensors(result)

            return result

        return wrapped

    # Support both decorator spellings: @normalizer and @normalizer(...).
    if _func is None:
        return normalizer_inner
    else:
        return normalizer_inner(_func)
venv/lib/python3.10/site-packages/torch/_numpy/_reductions_impl.py ADDED
@@ -0,0 +1,456 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ """ Implementation of reduction operations, to be wrapped into arrays, dtypes etc
4
+ in the 'public' layer.
5
+
6
+ Anything here only deals with torch objects, e.g. "dtype" is a torch.dtype instance etc
7
+ """
8
+ from __future__ import annotations
9
+
10
+ import functools
11
+ from typing import Optional
12
+
13
+ import torch
14
+
15
+ from . import _dtypes_impl, _util
16
+ from ._normalizations import (
17
+ ArrayLike,
18
+ AxisLike,
19
+ DTypeLike,
20
+ KeepDims,
21
+ NotImplementedType,
22
+ OutArray,
23
+ )
24
+
25
+
26
+ def _deco_axis_expand(func):
27
+ """
28
+ Generically handle axis arguments in reductions.
29
+ axis is *always* the 2nd arg in the function so no need to have a look at its signature
30
+ """
31
+
32
+ @functools.wraps(func)
33
+ def wrapped(a, axis=None, *args, **kwds):
34
+ if axis is not None:
35
+ axis = _util.normalize_axis_tuple(axis, a.ndim)
36
+
37
+ if axis == ():
38
+ # So we insert a length-one axis and run the reduction along it.
39
+ # We cannot return a.clone() as this would sidestep the checks inside the function
40
+ newshape = _util.expand_shape(a.shape, axis=0)
41
+ a = a.reshape(newshape)
42
+ axis = (0,)
43
+
44
+ return func(a, axis, *args, **kwds)
45
+
46
+ return wrapped
47
+
48
+
49
+ def _atleast_float(dtype, other_dtype):
50
+ """Return a dtype that is real or complex floating-point.
51
+
52
+ For inputs that are boolean or integer dtypes, this returns the default
53
+ float dtype; inputs that are complex get converted to the default complex
54
+ dtype; real floating-point dtypes (`float*`) get passed through unchanged
55
+ """
56
+ if dtype is None:
57
+ dtype = other_dtype
58
+ if not (dtype.is_floating_point or dtype.is_complex):
59
+ return _dtypes_impl.default_dtypes().float_dtype
60
+ return dtype
61
+
62
+
63
@_deco_axis_expand
def count_nonzero(a: ArrayLike, axis: AxisLike = None, *, keepdims: KeepDims = False):
    """Count nonzero elements, optionally along `axis` (keepdims via normalizer)."""
    return a.count_nonzero(axis)
66
+
67
+
68
@_deco_axis_expand
def argmax(
    a: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    *,
    keepdims: KeepDims = False,
):
    """Index of the maximum along `axis` (flattened input when axis is None)."""
    if a.is_complex():
        raise NotImplementedError(f"argmax with dtype={a.dtype}.")

    axis = _util.allow_only_single_axis(axis)

    # "argmax_cpu" is not implemented for Bool; compute on uint8 instead.
    if a.dtype == torch.bool:
        a = a.to(torch.uint8)

    return torch.argmax(a, axis)
86
+
87
+
88
@_deco_axis_expand
def argmin(
    a: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    *,
    keepdims: KeepDims = False,
):
    """Index of the minimum along `axis` (flattened input when axis is None)."""
    if a.is_complex():
        raise NotImplementedError(f"argmin with dtype={a.dtype}.")

    axis = _util.allow_only_single_axis(axis)

    # "argmin_cpu" is not implemented for Bool; compute on uint8 instead.
    if a.dtype == torch.bool:
        a = a.to(torch.uint8)

    return torch.argmin(a, axis)
106
+
107
+
108
@_deco_axis_expand
def any(
    a: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    keepdims: KeepDims = False,
    *,
    where: NotImplementedType = None,
):
    """True if any element evaluates true, globally or per-axis."""
    axis = _util.allow_only_single_axis(axis)
    if axis is None:
        return torch.any(a)
    return torch.any(a, dim=axis)
120
+
121
+
122
@_deco_axis_expand
def all(
    a: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    keepdims: KeepDims = False,
    *,
    where: NotImplementedType = None,
):
    """True if every element evaluates true, globally or per-axis."""
    axis = _util.allow_only_single_axis(axis)
    if axis is None:
        return torch.all(a)
    return torch.all(a, dim=axis)
134
+
135
+
136
@_deco_axis_expand
def amax(
    a: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    keepdims: KeepDims = False,
    initial: NotImplementedType = None,
    where: NotImplementedType = None,
):
    """Maximum along the given axis; complex inputs are not supported."""
    if a.is_complex():
        raise NotImplementedError(f"amax with dtype={a.dtype}")
    return a.amax(axis)
149
+
150
+
151
# numpy spells this reduction both ways; keep `max` as an alias of `amax`.
max = amax
152
+
153
+
154
@_deco_axis_expand
def amin(
    a: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    keepdims: KeepDims = False,
    initial: NotImplementedType = None,
    where: NotImplementedType = None,
):
    """Minimum along the given axis; complex inputs are not supported."""
    if a.is_complex():
        raise NotImplementedError(f"amin with dtype={a.dtype}")
    return a.amin(axis)
167
+
168
+
169
# numpy spells this reduction both ways; keep `min` as an alias of `amin`.
min = amin
170
+
171
+
172
@_deco_axis_expand
def ptp(
    a: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    keepdims: KeepDims = False,
):
    """Peak-to-peak range (max - min) along `axis`."""
    return a.amax(axis) - a.amin(axis)
180
+
181
+
182
@_deco_axis_expand
def sum(
    a: ArrayLike,
    axis: AxisLike = None,
    dtype: Optional[DTypeLike] = None,
    out: Optional[OutArray] = None,
    keepdims: KeepDims = False,
    initial: NotImplementedType = None,
    where: NotImplementedType = None,
):
    """Sum along `axis`; bool accumulates in the default int dtype (numpy rule)."""
    assert dtype is None or isinstance(dtype, torch.dtype)

    if dtype == torch.bool:
        dtype = _dtypes_impl.default_dtypes().int_dtype

    if axis is None:
        return a.sum(dtype=dtype)
    return a.sum(dtype=dtype, dim=axis)
199
+
200
+
201
@_deco_axis_expand
def prod(
    a: ArrayLike,
    axis: AxisLike = None,
    dtype: Optional[DTypeLike] = None,
    out: Optional[OutArray] = None,
    keepdims: KeepDims = False,
    initial: NotImplementedType = None,
    where: NotImplementedType = None,
):
    """Product along `axis`; bool accumulates in the default int dtype."""
    axis = _util.allow_only_single_axis(axis)

    if dtype == torch.bool:
        dtype = _dtypes_impl.default_dtypes().int_dtype

    if axis is None:
        return a.prod(dtype=dtype)
    return a.prod(dtype=dtype, dim=axis)
218
+
219
+
220
# numpy alias: np.product is np.prod.
product = prod
221
+
222
+
223
@_deco_axis_expand
def mean(
    a: ArrayLike,
    axis: AxisLike = None,
    dtype: Optional[DTypeLike] = None,
    out: Optional[OutArray] = None,
    keepdims: KeepDims = False,
    *,
    where: NotImplementedType = None,
):
    """Arithmetic mean along `axis`, computed in at least a floating dtype."""
    dtype = _atleast_float(dtype, a.dtype)
    if axis is None:
        return a.mean(dtype=dtype)
    return a.mean(dtype=dtype, dim=axis)
239
+
240
+
241
@_deco_axis_expand
def std(
    a: ArrayLike,
    axis: AxisLike = None,
    dtype: Optional[DTypeLike] = None,
    out: Optional[OutArray] = None,
    ddof=0,
    keepdims: KeepDims = False,
    *,
    where: NotImplementedType = None,
):
    """Standard deviation along `axis` with `ddof` delta degrees of freedom.

    The computation runs in a floating dtype; the result is cast back to the
    originally requested dtype (which may be integral), matching numpy.
    """
    requested_dtype = dtype
    work_dtype = _atleast_float(dtype, a.dtype)
    result = _util.cast_if_needed(a, work_dtype).std(dim=axis, correction=ddof)
    return _util.cast_if_needed(result, requested_dtype)
257
+
258
+
259
@_deco_axis_expand
def var(
    a: ArrayLike,
    axis: AxisLike = None,
    dtype: Optional[DTypeLike] = None,
    out: Optional[OutArray] = None,
    ddof=0,
    keepdims: KeepDims = False,
    *,
    where: NotImplementedType = None,
):
    """Variance along `axis` with `ddof` delta degrees of freedom.

    The computation runs in a floating dtype; the result is cast back to the
    originally requested dtype, matching numpy.
    """
    requested_dtype = dtype
    work_dtype = _atleast_float(dtype, a.dtype)
    result = _util.cast_if_needed(a, work_dtype).var(dim=axis, correction=ddof)
    return _util.cast_if_needed(result, requested_dtype)
275
+
276
+
277
+ # cumsum / cumprod are almost reductions:
278
+ # 1. no keepdims
279
+ # 2. axis=None flattens
280
+
281
+
282
def cumsum(
    a: ArrayLike,
    axis: AxisLike = None,
    dtype: Optional[DTypeLike] = None,
    out: Optional[OutArray] = None,
):
    """Cumulative sum; axis=None operates on the flattened array (numpy rule).

    Almost a reduction, but with no keepdims handling.
    """
    if dtype == torch.bool:
        dtype = _dtypes_impl.default_dtypes().int_dtype
    elif dtype is None:
        dtype = a.dtype

    (a,), axis = _util.axis_none_flatten(a, axis=axis)
    axis = _util.normalize_axis_index(axis, a.ndim)

    return a.cumsum(axis=axis, dtype=dtype)
297
+
298
+
299
def cumprod(
    a: ArrayLike,
    axis: AxisLike = None,
    dtype: Optional[DTypeLike] = None,
    out: Optional[OutArray] = None,
):
    """Cumulative product; axis=None operates on the flattened array."""
    if dtype == torch.bool:
        dtype = _dtypes_impl.default_dtypes().int_dtype
    elif dtype is None:
        dtype = a.dtype

    (a,), axis = _util.axis_none_flatten(a, axis=axis)
    axis = _util.normalize_axis_index(axis, a.ndim)

    return a.cumprod(axis=axis, dtype=dtype)
314
+
315
+
316
# numpy alias: np.cumproduct is np.cumprod.
cumproduct = cumprod
317
+
318
+
319
def average(
    a: ArrayLike,
    axis=None,
    weights: ArrayLike = None,
    returned=False,
    *,
    keepdims=False,
):
    """Weighted average along `axis` (numpy.average).

    With `returned` true, also return the sum of weights, broadcast to the
    result's shape.  keepdims is handled here rather than by the normalizer
    because of the variadic return.
    """
    if weights is None:
        # Unweighted: plain mean; the "weight sum" is the number of averaged elements.
        result = mean(a, axis=axis)
        wsum = torch.as_tensor(a.numel() / result.numel(), dtype=result.dtype)
    else:
        if not a.dtype.is_floating_point:
            a = a.double()

        # axis & weights: 1D weights may be laid along a single axis of `a`.
        if a.shape != weights.shape:
            if axis is None:
                raise TypeError(
                    "Axis must be specified when shapes of a and weights differ."
                )
            if weights.ndim != 1:
                raise TypeError(
                    "1D weights expected when shapes of a and weights differ."
                )
            if weights.shape[0] != a.shape[axis]:
                raise ValueError(
                    "Length of weights not compatible with specified axis."
                )

            # setup weight to broadcast along axis
            weights = torch.broadcast_to(weights, (a.ndim - 1) * (1,) + weights.shape)
            weights = weights.swapaxes(-1, axis)

        # do the work
        result_dtype = _dtypes_impl.result_type_impl(a, weights)
        numerator = sum(a * weights, axis, dtype=result_dtype)
        wsum = sum(weights, axis, dtype=result_dtype)
        result = numerator / wsum

    # We process keepdims manually because the decorator does not deal with variadic returns
    if keepdims:
        result = _util.apply_keepdims(result, axis, a.ndim)

    if returned:
        if wsum.shape != result.shape:
            wsum = torch.broadcast_to(wsum, result.shape).clone()
        return result, wsum
    else:
        return result
369
+
370
+
371
+ # Not using deco_axis_expand as it assumes that axis is the second arg
372
# Not using deco_axis_expand as it assumes that axis is the second arg
def quantile(
    a: ArrayLike,
    q: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    overwrite_input=False,
    method="linear",
    keepdims: KeepDims = False,
    *,
    interpolation: NotImplementedType = None,
):
    """numpy.quantile implemented on top of torch.quantile.

    Only a single axis (or axis=None = flatten) is supported; integer and
    float16 inputs are promoted since torch.quantile requires float32/float64.
    """
    if overwrite_input:
        # raise NotImplementedError("overwrite_input in quantile not implemented.")
        # NumPy documents that `overwrite_input` MAY modify inputs:
        # https://numpy.org/doc/stable/reference/generated/numpy.percentile.html#numpy-percentile
        # Here we choose to work out-of-place because why not.
        pass

    if not a.dtype.is_floating_point:
        dtype = _dtypes_impl.default_dtypes().float_dtype
        a = a.to(dtype)

    # edge case: torch.quantile only supports float32 and float64
    if a.dtype == torch.float16:
        a = a.to(torch.float32)

    if axis is None:
        a = a.flatten()
        q = q.flatten()
        axis = (0,)
    else:
        axis = _util.normalize_axis_tuple(axis, a.ndim)

    # FIXME(Mario) Doesn't np.quantile accept a tuple?
    # torch.quantile does accept a number. If we don't want to implement the tuple behaviour
    # (it's deffo low prio) change `normalize_axis_tuple` into a normalize_axis index above.
    axis = _util.allow_only_single_axis(axis)

    # q must match a's computation dtype for torch.quantile.
    q = _util.cast_if_needed(q, a.dtype)

    return torch.quantile(a, q, axis=axis, interpolation=method)
413
+
414
+
415
def percentile(
    a: ArrayLike,
    q: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    overwrite_input=False,
    method="linear",
    keepdims: KeepDims = False,
    *,
    interpolation: NotImplementedType = None,
):
    """numpy.percentile: quantile with `q` given in percent (0..100)."""
    # np.percentile(float_tensor, 30): q arrives as int64, and int / 100.0
    # would yield float32 -- promote q to the default float dtype first.
    if _dtypes_impl.python_type_for_torch(q.dtype) == int:
        q = q.to(_dtypes_impl.default_dtypes().float_dtype)

    return quantile(
        a,
        q / 100.0,
        axis=axis,
        overwrite_input=overwrite_input,
        method=method,
        keepdims=keepdims,
        interpolation=interpolation,
    )
440
+
441
+
442
def median(
    a: ArrayLike,
    axis=None,
    out: Optional[OutArray] = None,
    overwrite_input=False,
    keepdims: KeepDims = False,
):
    """Median along `axis`, implemented as the 0.5 quantile."""
    half = torch.as_tensor(0.5)
    return quantile(
        a,
        half,
        axis=axis,
        overwrite_input=overwrite_input,
        out=out,
        keepdims=keepdims,
    )
venv/lib/python3.10/site-packages/torch/_numpy/_ufuncs.py ADDED
@@ -0,0 +1,334 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Optional
6
+
7
+ import torch
8
+
9
+ from . import _binary_ufuncs_impl, _dtypes_impl, _unary_ufuncs_impl, _util
10
+ from ._normalizations import (
11
+ ArrayLike,
12
+ ArrayLikeOrScalar,
13
+ CastingModes,
14
+ DTypeLike,
15
+ normalizer,
16
+ NotImplementedType,
17
+ OutArray,
18
+ )
19
+
20
+
21
+ def _ufunc_postprocess(result, out, casting):
22
+ if out is not None:
23
+ result = _util.typecast_tensor(result, out.dtype.torch_dtype, casting)
24
+ result = torch.broadcast_to(result, out.shape)
25
+ return result
26
+
27
+
28
+ # ############# Binary ufuncs ######################
29
+
30
# Names of the generic binary ufuncs exported from _binary_ufuncs_impl.
# matmul/divmod/ldexp are excluded: they get bespoke wrappers below.
_binary = [
    name
    for name in dir(_binary_ufuncs_impl)
    if not name.startswith("_") and name not in ["torch", "matmul", "divmod", "ldexp"]
]
35
+
36
+
37
# Binary ufuncs that follow NEP 50 ("weak" python-scalar) promotion rules
# when one operand is a python scalar; consulted by deco_binary_ufunc.
NEP50_FUNCS = (
    "add",
    "subtract",
    "multiply",
    "floor_divide",
    "true_divide",
    "divide",
    "remainder",
    "bitwise_and",
    "bitwise_or",
    "bitwise_xor",
    "bitwise_left_shift",
    "bitwise_right_shift",
    "hypot",
    "arctan2",
    "logaddexp",
    "logaddexp2",
    "heaviside",
    "copysign",
    "fmax",
    "minimum",
    "fmin",
    "maximum",
    "fmod",
    "gcd",
    "lcm",
    "pow",
)
65
+
66
+
67
def deco_binary_ufunc(torch_func):
    """Common infra for binary ufuncs.

    Normalize arguments, sort out type casting, broadcasting and delegate to
    the pytorch functions for the actual work.
    """

    @normalizer
    def wrapped(
        x1: ArrayLikeOrScalar,
        x2: ArrayLikeOrScalar,
        /,
        out: Optional[OutArray] = None,
        *,
        where: NotImplementedType = True,
        casting: Optional[CastingModes] = "same_kind",
        order: NotImplementedType = "K",
        dtype: Optional[DTypeLike] = None,
        subok: NotImplementedType = False,
        signature: NotImplementedType = None,
        extobj: NotImplementedType = None,
    ):
        # Three promotion paths: explicit dtype wins; two tensors use the
        # standard result-type rules; a scalar operand goes through NEP 50.
        if dtype is not None:

            def cast(x, dtype):
                if isinstance(x, torch.Tensor):
                    return _util.typecast_tensor(x, dtype, casting)
                else:
                    return torch.as_tensor(x, dtype=dtype)

            x1 = cast(x1, dtype)
            x2 = cast(x2, dtype)
        elif isinstance(x1, torch.Tensor) and isinstance(x2, torch.Tensor):
            dtype = _dtypes_impl.result_type_impl(x1, x2)
            x1, x2 = _util.typecast_tensors((x1, x2), dtype, casting)
        else:
            # At least one python scalar: NEP 50 "weak scalar" promotion.
            x1, x2 = _dtypes_impl.nep50_to_tensors(
                x1, x2, torch_func.__name__ in NEP50_FUNCS, torch_func.__name__
            )

        result = torch_func(x1, x2)

        return _ufunc_postprocess(result, out, casting)

    # Make the wrapper masquerade as the wrapped torch function.
    wrapped.__qualname__ = torch_func.__name__
    wrapped.__name__ = torch_func.__name__

    return wrapped
115
+
116
+
117
+ # matmul's signature is _slightly_ different from other ufuncs:
118
+ # - no where=...
119
+ # - additional axis=..., axes=...
120
+ # - no NEP50 scalars in or out
121
# matmul's signature is _slightly_ different from other ufuncs:
# - no where=...
# - additional axis=..., axes=...
# - no NEP50 scalars in or out
@normalizer
def matmul(
    x1: ArrayLike,
    x2: ArrayLike,
    /,
    out: Optional[OutArray] = None,
    *,
    casting: Optional[CastingModes] = "same_kind",
    order: NotImplementedType = "K",
    dtype: Optional[DTypeLike] = None,
    subok: NotImplementedType = False,
    signature: NotImplementedType = None,
    extobj: NotImplementedType = None,
    axes: NotImplementedType = None,
    axis: NotImplementedType = None,
):
    """Matrix product of two array-likes, delegating to the torch matmul impl."""
    if dtype is None:
        dtype = _dtypes_impl.result_type_impl(x1, x2)
    # Both operands are cast to the common (or requested) dtype before the op.
    x1, x2 = _util.typecast_tensors((x1, x2), dtype, casting)

    result = _binary_ufuncs_impl.matmul(x1, x2)

    result = _ufunc_postprocess(result, out, casting)
    return result
145
+
146
+
147
+ # ldexp casting is special : the dtype of the result == dtype of the 1st arg
148
# ldexp casting is special : the dtype of the result == dtype of the 1st arg
@normalizer
def ldexp(
    x1: ArrayLikeOrScalar,
    x2: ArrayLikeOrScalar,
    /,
    out: Optional[OutArray] = None,
    *,
    where: NotImplementedType = True,
    casting: Optional[CastingModes] = "same_kind",
    order: NotImplementedType = "K",
    dtype: Optional[DTypeLike] = None,
    subok: NotImplementedType = False,
    signature: NotImplementedType = None,
    extobj: NotImplementedType = None,
):
    """Compute x1 * 2**x2; the result dtype follows x1, and x2 must be integer."""
    if dtype is not None:
        if isinstance(x1, torch.Tensor):
            x1 = _util.typecast_tensor(x1, dtype, casting)
        else:
            x1 = torch.as_tensor(x1, dtype=dtype)
    else:
        if not isinstance(x1, torch.Tensor):
            # A python-scalar mantissa: tensor-ify and promote ints to float.
            x1 = torch.as_tensor(x1)
            x1 = _util.cast_int_to_float(x1)

    x2 = torch.as_tensor(x2)
    # the second arg must be integer
    if _dtypes_impl._category(x2.dtype) != 1:
        raise ValueError("ldexp 2nd arg must be integer")

    result = _binary_ufuncs_impl.ldexp(x1, x2)

    if x1.dtype == torch.float16:
        # torch.ldexp(f16, int) -> f32, undo it
        result = result.to(torch.float16)

    return _ufunc_postprocess(result, out, casting)
185
+
186
+
187
+ # nin=2, nout=2
188
# nin=2, nout=2
@normalizer
def divmod(
    x1: ArrayLike,
    x2: ArrayLike,
    out1: Optional[OutArray] = None,
    out2: Optional[OutArray] = None,
    /,
    out: tuple[Optional[OutArray], Optional[OutArray]] = (None, None),
    *,
    where: NotImplementedType = True,
    casting: Optional[CastingModes] = "same_kind",
    order: NotImplementedType = "K",
    dtype: Optional[DTypeLike] = None,
    subok: NotImplementedType = False,
    signature: NotImplementedType = None,
    extobj: NotImplementedType = None,
):
    """Elementwise (quotient, remainder) pair, numpy.divmod style.

    Out arrays may be given either positionally (out1, out2) or via the
    out=(o1, o2) keyword tuple -- but not both at once.
    """
    # make sure we either have no out arrays at all, or there is either
    # out1, out2, or out=tuple, but not both
    num_outs = sum(x is not None for x in [out1, out2])
    if num_outs == 1:
        raise ValueError("both out1 and out2 need to be provided")
    elif num_outs == 2:
        o1, o2 = out
        if o1 is not None or o2 is not None:
            raise TypeError(
                "cannot specify 'out' as both a positional and keyword argument"
            )
    else:
        # No positional outs: unpack the keyword tuple (possibly (None, None)).
        out1, out2 = out

    if dtype is None:
        dtype = _dtypes_impl.result_type_impl(x1, x2)
    x1, x2 = _util.typecast_tensors((x1, x2), dtype, casting)

    quot, rem = _binary_ufuncs_impl.divmod(x1, x2)

    quot = _ufunc_postprocess(quot, out1, casting)
    rem = _ufunc_postprocess(rem, out2, casting)
    return quot, rem
228
+
229
+
230
+ #
231
+ # Attach ufuncs to this module, for a further export to the public namespace in __init__.py
232
+ #
233
# Attach the generic binary ufuncs to this module, for a further export to
# the public namespace in __init__.py.
for name in _binary:
    ufunc = getattr(_binary_ufuncs_impl, name)
    vars()[name] = deco_binary_ufunc(ufunc)
236
+
237
+
238
def modf(x, /, *args, **kwds):
    """Split x into (fractional, integral) parts, numpy.modf style."""
    whole, frac = divmod(x, 1, *args, **kwds)
    return frac, whole
241
+
242
+
243
# Re-export the specially-wrapped ufuncs alongside the generic binary ones.
_binary = _binary + ["divmod", "modf", "matmul", "ldexp"]
244
+
245
+
246
+ # ############# Unary ufuncs ######################
247
+
248
+
249
# Names of the unary ufuncs exported from _unary_ufuncs_impl.
_unary = [
    name
    for name in dir(_unary_ufuncs_impl)
    if not name.startswith("_") and name != "torch"
]
254
+
255
+
256
+ # these are ufunc(int) -> float
257
# these are ufunc(int) -> float: integer inputs are promoted to float first.
_fp_unary = [
    "arccos",
    "arccosh",
    "arcsin",
    "arcsinh",
    "arctan",
    "arctanh",
    "cbrt",
    "cos",
    "cosh",
    "deg2rad",
    "degrees",
    "exp",
    "exp2",
    "expm1",
    "log",
    "log10",
    "log1p",
    "log2",
    "rad2deg",
    "radians",
    "reciprocal",
    "sin",
    "sinh",
    "sqrt",
    "square",
    "tan",
    "tanh",
    "trunc",
]
287
+
288
+
289
def deco_unary_ufunc(torch_func):
    """Common infra for unary ufuncs.

    Normalize arguments, sort out type casting, and delegate to the pytorch
    function for the actual work.
    """

    @normalizer
    def wrapped(
        x: ArrayLike,
        /,
        out: Optional[OutArray] = None,
        *,
        where=True,
        casting: Optional[CastingModes] = "same_kind",
        order="K",
        dtype: Optional[DTypeLike] = None,
        subok: NotImplementedType = False,
        signature=None,
        extobj=None,
    ):
        if dtype is not None:
            x = _util.typecast_tensor(x, dtype, casting)

        # ufuncs like sin/exp map integer inputs to floats.
        if torch_func.__name__ in _fp_unary:
            x = _util.cast_int_to_float(x)

        return _ufunc_postprocess(torch_func(x), out, casting)

    # Make the wrapper masquerade as the wrapped torch function.
    wrapped.__name__ = torch_func.__name__
    wrapped.__qualname__ = torch_func.__name__

    return wrapped
324
+
325
+
326
+ #
327
+ # Attach ufuncs to this module, for a further export to the public namespace in __init__.py
328
+ #
329
# Attach the unary ufuncs to this module, for a further export to the public
# namespace in __init__.py.
for name in _unary:
    ufunc = getattr(_unary_ufuncs_impl, name)
    vars()[name] = deco_unary_ufunc(ufunc)


# Full public ufunc surface of this module.
__all__ = _binary + _unary  # noqa: PLE0605
venv/lib/python3.10/site-packages/torch/_numpy/_unary_ufuncs_impl.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ """Export torch work functions for unary ufuncs, rename/tweak to match numpy.
4
+ This listing is further exported to public symbols in the `_numpy/_ufuncs.py` module.
5
+ """
6
+
7
+ import torch
8
+
9
+ from torch import ( # noqa: F401
10
+ absolute as fabs, # noqa: F401
11
+ arccos, # noqa: F401
12
+ arccosh, # noqa: F401
13
+ arcsin, # noqa: F401
14
+ arcsinh, # noqa: F401
15
+ arctan, # noqa: F401
16
+ arctanh, # noqa: F401
17
+ bitwise_not, # noqa: F401
18
+ bitwise_not as invert, # noqa: F401
19
+ ceil, # noqa: F401
20
+ conj_physical as conjugate, # noqa: F401
21
+ cos, # noqa: F401
22
+ cosh, # noqa: F401
23
+ deg2rad, # noqa: F401
24
+ deg2rad as radians, # noqa: F401
25
+ exp, # noqa: F401
26
+ exp2, # noqa: F401
27
+ expm1, # noqa: F401
28
+ floor, # noqa: F401
29
+ isfinite, # noqa: F401
30
+ isinf, # noqa: F401
31
+ isnan, # noqa: F401
32
+ log, # noqa: F401
33
+ log10, # noqa: F401
34
+ log1p, # noqa: F401
35
+ log2, # noqa: F401
36
+ logical_not, # noqa: F401
37
+ negative, # noqa: F401
38
+ rad2deg, # noqa: F401
39
+ rad2deg as degrees, # noqa: F401
40
+ reciprocal, # noqa: F401
41
+ round as fix, # noqa: F401
42
+ round as rint, # noqa: F401
43
+ sign, # noqa: F401
44
+ signbit, # noqa: F401
45
+ sin, # noqa: F401
46
+ sinh, # noqa: F401
47
+ sqrt, # noqa: F401
48
+ square, # noqa: F401
49
+ tan, # noqa: F401
50
+ tanh, # noqa: F401
51
+ trunc, # noqa: F401
52
+ )
53
+
54
+
55
+ # special cases: torch does not export these names
56
+ def cbrt(x):
57
+ return torch.pow(x, 1 / 3)
58
+
59
+
60
+ def positive(x):
61
+ return +x
62
+
63
+
64
+ def absolute(x):
65
+ # work around torch.absolute not impl for bools
66
+ if x.dtype == torch.bool:
67
+ return x
68
+ return torch.absolute(x)
69
+
70
+
71
+ # TODO set __name__ and __qualname__
72
+ abs = absolute
73
+ conj = conjugate
venv/lib/python3.10/site-packages/torch/_numpy/_util.py ADDED
@@ -0,0 +1,261 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ """Assorted utilities, which do not need anything other then torch and stdlib.
4
+ """
5
+
6
+ import operator
7
+
8
+ import torch
9
+
10
+ from . import _dtypes_impl
11
+
12
+
13
+ # https://github.com/numpy/numpy/blob/v1.23.0/numpy/distutils/misc_util.py#L497-L504
14
+ def is_sequence(seq):
15
+ if isinstance(seq, str):
16
+ return False
17
+ try:
18
+ len(seq)
19
+ except Exception:
20
+ return False
21
+ return True
22
+
23
+
24
+ class AxisError(ValueError, IndexError):
25
+ pass
26
+
27
+
28
+ class UFuncTypeError(TypeError, RuntimeError):
29
+ pass
30
+
31
+
32
+ def cast_if_needed(tensor, dtype):
33
+ # NB: no casting if dtype=None
34
+ if dtype is not None and tensor.dtype != dtype:
35
+ tensor = tensor.to(dtype)
36
+ return tensor
37
+
38
+
39
+ def cast_int_to_float(x):
40
+ # cast integers and bools to the default float dtype
41
+ if _dtypes_impl._category(x.dtype) < 2:
42
+ x = x.to(_dtypes_impl.default_dtypes().float_dtype)
43
+ return x
44
+
45
+
46
+ # a replica of the version in ./numpy/numpy/core/src/multiarray/common.h
47
+ def normalize_axis_index(ax, ndim, argname=None):
48
+ if not (-ndim <= ax < ndim):
49
+ raise AxisError(f"axis {ax} is out of bounds for array of dimension {ndim}")
50
+ if ax < 0:
51
+ ax += ndim
52
+ return ax
53
+
54
+
55
+ # from https://github.com/numpy/numpy/blob/main/numpy/core/numeric.py#L1378
56
+ def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False):
57
+ """
58
+ Normalizes an axis argument into a tuple of non-negative integer axes.
59
+
60
+ This handles shorthands such as ``1`` and converts them to ``(1,)``,
61
+ as well as performing the handling of negative indices covered by
62
+ `normalize_axis_index`.
63
+
64
+ By default, this forbids axes from being specified multiple times.
65
+ Used internally by multi-axis-checking logic.
66
+
67
+ Parameters
68
+ ----------
69
+ axis : int, iterable of int
70
+ The un-normalized index or indices of the axis.
71
+ ndim : int
72
+ The number of dimensions of the array that `axis` should be normalized
73
+ against.
74
+ argname : str, optional
75
+ A prefix to put before the error message, typically the name of the
76
+ argument.
77
+ allow_duplicate : bool, optional
78
+ If False, the default, disallow an axis from being specified twice.
79
+
80
+ Returns
81
+ -------
82
+ normalized_axes : tuple of int
83
+ The normalized axis index, such that `0 <= normalized_axis < ndim`
84
+ """
85
+ # Optimization to speed-up the most common cases.
86
+ if type(axis) not in (tuple, list):
87
+ try:
88
+ axis = [operator.index(axis)]
89
+ except TypeError:
90
+ pass
91
+ # Going via an iterator directly is slower than via list comprehension.
92
+ axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis])
93
+ if not allow_duplicate and len(set(axis)) != len(axis):
94
+ if argname:
95
+ raise ValueError(f"repeated axis in `{argname}` argument")
96
+ else:
97
+ raise ValueError("repeated axis")
98
+ return axis
99
+
100
+
101
+ def allow_only_single_axis(axis):
102
+ if axis is None:
103
+ return axis
104
+ if len(axis) != 1:
105
+ raise NotImplementedError("does not handle tuple axis")
106
+ return axis[0]
107
+
108
+
109
+ def expand_shape(arr_shape, axis):
110
+ # taken from numpy 1.23.x, expand_dims function
111
+ if type(axis) not in (list, tuple):
112
+ axis = (axis,)
113
+ out_ndim = len(axis) + len(arr_shape)
114
+ axis = normalize_axis_tuple(axis, out_ndim)
115
+ shape_it = iter(arr_shape)
116
+ shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)]
117
+ return shape
118
+
119
+
120
+ def apply_keepdims(tensor, axis, ndim):
121
+ if axis is None:
122
+ # tensor was a scalar
123
+ shape = (1,) * ndim
124
+ tensor = tensor.expand(shape).contiguous()
125
+ else:
126
+ shape = expand_shape(tensor.shape, axis)
127
+ tensor = tensor.reshape(shape)
128
+ return tensor
129
+
130
+
131
+ def axis_none_flatten(*tensors, axis=None):
132
+ """Flatten the arrays if axis is None."""
133
+ if axis is None:
134
+ tensors = tuple(ar.flatten() for ar in tensors)
135
+ return tensors, 0
136
+ else:
137
+ return tensors, axis
138
+
139
+
140
+ def typecast_tensor(t, target_dtype, casting):
141
+ """Dtype-cast tensor to target_dtype.
142
+
143
+ Parameters
144
+ ----------
145
+ t : torch.Tensor
146
+ The tensor to cast
147
+ target_dtype : torch dtype object
148
+ The array dtype to cast all tensors to
149
+ casting : str
150
+ The casting mode, see `np.can_cast`
151
+
152
+ Returns
153
+ -------
154
+ `torch.Tensor` of the `target_dtype` dtype
155
+
156
+ Raises
157
+ ------
158
+ ValueError
159
+ if the argument cannot be cast according to the `casting` rule
160
+
161
+ """
162
+ can_cast = _dtypes_impl.can_cast_impl
163
+
164
+ if not can_cast(t.dtype, target_dtype, casting=casting):
165
+ raise TypeError(
166
+ f"Cannot cast array data from {t.dtype} to"
167
+ f" {target_dtype} according to the rule '{casting}'"
168
+ )
169
+ return cast_if_needed(t, target_dtype)
170
+
171
+
172
+ def typecast_tensors(tensors, target_dtype, casting):
173
+ return tuple(typecast_tensor(t, target_dtype, casting) for t in tensors)
174
+
175
+
176
+ def _try_convert_to_tensor(obj):
177
+ try:
178
+ tensor = torch.as_tensor(obj)
179
+ except Exception as e:
180
+ mesg = f"failed to convert {obj} to ndarray. \nInternal error is: {str(e)}."
181
+ raise NotImplementedError(mesg) # noqa: TRY200
182
+ return tensor
183
+
184
+
185
+ def _coerce_to_tensor(obj, dtype=None, copy=False, ndmin=0):
186
+ """The core logic of the array(...) function.
187
+
188
+ Parameters
189
+ ----------
190
+ obj : tensor_like
191
+ The thing to coerce
192
+ dtype : torch.dtype object or None
193
+ Coerce to this torch dtype
194
+ copy : bool
195
+ Copy or not
196
+ ndmin : int
197
+ The results as least this many dimensions
198
+ is_weak : bool
199
+ Whether obj is a weakly typed python scalar.
200
+
201
+ Returns
202
+ -------
203
+ tensor : torch.Tensor
204
+ a tensor object with requested dtype, ndim and copy semantics.
205
+
206
+ Notes
207
+ -----
208
+ This is almost a "tensor_like" coersion function. Does not handle wrapper
209
+ ndarrays (those should be handled in the ndarray-aware layer prior to
210
+ invoking this function).
211
+ """
212
+ if isinstance(obj, torch.Tensor):
213
+ tensor = obj
214
+ else:
215
+ # tensor.dtype is the pytorch default, typically float32. If obj's elements
216
+ # are not exactly representable in float32, we've lost precision:
217
+ # >>> torch.as_tensor(1e12).item() - 1e12
218
+ # -4096.0
219
+ default_dtype = torch.get_default_dtype()
220
+ torch.set_default_dtype(_dtypes_impl.get_default_dtype_for(torch.float32))
221
+ try:
222
+ tensor = _try_convert_to_tensor(obj)
223
+ finally:
224
+ torch.set_default_dtype(default_dtype)
225
+
226
+ # type cast if requested
227
+ tensor = cast_if_needed(tensor, dtype)
228
+
229
+ # adjust ndim if needed
230
+ ndim_extra = ndmin - tensor.ndim
231
+ if ndim_extra > 0:
232
+ tensor = tensor.view((1,) * ndim_extra + tensor.shape)
233
+
234
+ # copy if requested
235
+ if copy:
236
+ tensor = tensor.clone()
237
+
238
+ return tensor
239
+
240
+
241
+ def ndarrays_to_tensors(*inputs):
242
+ """Convert all ndarrays from `inputs` to tensors. (other things are intact)"""
243
+ from ._ndarray import ndarray
244
+
245
+ if len(inputs) == 0:
246
+ return ValueError()
247
+ elif len(inputs) == 1:
248
+ input_ = inputs[0]
249
+ if isinstance(input_, ndarray):
250
+ return input_.tensor
251
+ elif isinstance(input_, tuple):
252
+ result = []
253
+ for sub_input in input_:
254
+ sub_result = ndarrays_to_tensors(sub_input)
255
+ result.append(sub_result)
256
+ return tuple(result)
257
+ else:
258
+ return input_
259
+ else:
260
+ assert isinstance(inputs, tuple) # sanity check
261
+ return ndarrays_to_tensors(inputs)
venv/lib/python3.10/site-packages/torch/_numpy/fft.py ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ from __future__ import annotations
4
+
5
+ import functools
6
+
7
+ import torch
8
+
9
+ from . import _dtypes_impl, _util
10
+ from ._normalizations import ArrayLike, normalizer
11
+
12
+
13
+ def upcast(func):
14
+ """NumPy fft casts inputs to 64 bit and *returns 64-bit results*."""
15
+
16
+ @functools.wraps(func)
17
+ def wrapped(tensor, *args, **kwds):
18
+ target_dtype = (
19
+ _dtypes_impl.default_dtypes().complex_dtype
20
+ if tensor.is_complex()
21
+ else _dtypes_impl.default_dtypes().float_dtype
22
+ )
23
+ tensor = _util.cast_if_needed(tensor, target_dtype)
24
+ return func(tensor, *args, **kwds)
25
+
26
+ return wrapped
27
+
28
+
29
+ @normalizer
30
+ @upcast
31
+ def fft(a: ArrayLike, n=None, axis=-1, norm=None):
32
+ return torch.fft.fft(a, n, dim=axis, norm=norm)
33
+
34
+
35
+ @normalizer
36
+ @upcast
37
+ def ifft(a: ArrayLike, n=None, axis=-1, norm=None):
38
+ return torch.fft.ifft(a, n, dim=axis, norm=norm)
39
+
40
+
41
+ @normalizer
42
+ @upcast
43
+ def rfft(a: ArrayLike, n=None, axis=-1, norm=None):
44
+ return torch.fft.rfft(a, n, dim=axis, norm=norm)
45
+
46
+
47
+ @normalizer
48
+ @upcast
49
+ def irfft(a: ArrayLike, n=None, axis=-1, norm=None):
50
+ return torch.fft.irfft(a, n, dim=axis, norm=norm)
51
+
52
+
53
+ @normalizer
54
+ @upcast
55
+ def fftn(a: ArrayLike, s=None, axes=None, norm=None):
56
+ return torch.fft.fftn(a, s, dim=axes, norm=norm)
57
+
58
+
59
+ @normalizer
60
+ @upcast
61
+ def ifftn(a: ArrayLike, s=None, axes=None, norm=None):
62
+ return torch.fft.ifftn(a, s, dim=axes, norm=norm)
63
+
64
+
65
+ @normalizer
66
+ @upcast
67
+ def rfftn(a: ArrayLike, s=None, axes=None, norm=None):
68
+ return torch.fft.rfftn(a, s, dim=axes, norm=norm)
69
+
70
+
71
+ @normalizer
72
+ @upcast
73
+ def irfftn(a: ArrayLike, s=None, axes=None, norm=None):
74
+ return torch.fft.irfftn(a, s, dim=axes, norm=norm)
75
+
76
+
77
+ @normalizer
78
+ @upcast
79
+ def fft2(a: ArrayLike, s=None, axes=(-2, -1), norm=None):
80
+ return torch.fft.fft2(a, s, dim=axes, norm=norm)
81
+
82
+
83
+ @normalizer
84
+ @upcast
85
+ def ifft2(a: ArrayLike, s=None, axes=(-2, -1), norm=None):
86
+ return torch.fft.ifft2(a, s, dim=axes, norm=norm)
87
+
88
+
89
+ @normalizer
90
+ @upcast
91
+ def rfft2(a: ArrayLike, s=None, axes=(-2, -1), norm=None):
92
+ return torch.fft.rfft2(a, s, dim=axes, norm=norm)
93
+
94
+
95
+ @normalizer
96
+ @upcast
97
+ def irfft2(a: ArrayLike, s=None, axes=(-2, -1), norm=None):
98
+ return torch.fft.irfft2(a, s, dim=axes, norm=norm)
99
+
100
+
101
+ @normalizer
102
+ @upcast
103
+ def hfft(a: ArrayLike, n=None, axis=-1, norm=None):
104
+ return torch.fft.hfft(a, n, dim=axis, norm=norm)
105
+
106
+
107
+ @normalizer
108
+ @upcast
109
+ def ihfft(a: ArrayLike, n=None, axis=-1, norm=None):
110
+ return torch.fft.ihfft(a, n, dim=axis, norm=norm)
111
+
112
+
113
+ @normalizer
114
+ def fftfreq(n, d=1.0):
115
+ return torch.fft.fftfreq(n, d)
116
+
117
+
118
+ @normalizer
119
+ def rfftfreq(n, d=1.0):
120
+ return torch.fft.rfftfreq(n, d)
121
+
122
+
123
+ @normalizer
124
+ def fftshift(x: ArrayLike, axes=None):
125
+ return torch.fft.fftshift(x, axes)
126
+
127
+
128
+ @normalizer
129
+ def ifftshift(x: ArrayLike, axes=None):
130
+ return torch.fft.ifftshift(x, axes)
venv/lib/python3.10/site-packages/torch/_numpy/linalg.py ADDED
@@ -0,0 +1,239 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ from __future__ import annotations
4
+
5
+ import functools
6
+ import math
7
+ from typing import Sequence
8
+
9
+ import torch
10
+
11
+ from . import _dtypes_impl, _util
12
+ from ._normalizations import ArrayLike, KeepDims, normalizer
13
+
14
+
15
+ class LinAlgError(Exception):
16
+ pass
17
+
18
+
19
+ def _atleast_float_1(a):
20
+ if not (a.dtype.is_floating_point or a.dtype.is_complex):
21
+ a = a.to(_dtypes_impl.default_dtypes().float_dtype)
22
+ return a
23
+
24
+
25
+ def _atleast_float_2(a, b):
26
+ dtyp = _dtypes_impl.result_type_impl(a, b)
27
+ if not (dtyp.is_floating_point or dtyp.is_complex):
28
+ dtyp = _dtypes_impl.default_dtypes().float_dtype
29
+
30
+ a = _util.cast_if_needed(a, dtyp)
31
+ b = _util.cast_if_needed(b, dtyp)
32
+ return a, b
33
+
34
+
35
+ def linalg_errors(func):
36
+ @functools.wraps(func)
37
+ def wrapped(*args, **kwds):
38
+ try:
39
+ return func(*args, **kwds)
40
+ except torch._C._LinAlgError as e:
41
+ raise LinAlgError(*e.args) # noqa: TRY200
42
+
43
+ return wrapped
44
+
45
+
46
+ # ### Matrix and vector products ###
47
+
48
+
49
+ @normalizer
50
+ @linalg_errors
51
+ def matrix_power(a: ArrayLike, n):
52
+ a = _atleast_float_1(a)
53
+ return torch.linalg.matrix_power(a, n)
54
+
55
+
56
+ @normalizer
57
+ @linalg_errors
58
+ def multi_dot(inputs: Sequence[ArrayLike], *, out=None):
59
+ return torch.linalg.multi_dot(inputs)
60
+
61
+
62
+ # ### Solving equations and inverting matrices ###
63
+
64
+
65
+ @normalizer
66
+ @linalg_errors
67
+ def solve(a: ArrayLike, b: ArrayLike):
68
+ a, b = _atleast_float_2(a, b)
69
+ return torch.linalg.solve(a, b)
70
+
71
+
72
+ @normalizer
73
+ @linalg_errors
74
+ def lstsq(a: ArrayLike, b: ArrayLike, rcond=None):
75
+ a, b = _atleast_float_2(a, b)
76
+ # NumPy is using gelsd: https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/umath_linalg.cpp#L3991
77
+ # on CUDA, only `gels` is available though, so use it instead
78
+ driver = "gels" if a.is_cuda or b.is_cuda else "gelsd"
79
+ return torch.linalg.lstsq(a, b, rcond=rcond, driver=driver)
80
+
81
+
82
+ @normalizer
83
+ @linalg_errors
84
+ def inv(a: ArrayLike):
85
+ a = _atleast_float_1(a)
86
+ result = torch.linalg.inv(a)
87
+ return result
88
+
89
+
90
+ @normalizer
91
+ @linalg_errors
92
+ def pinv(a: ArrayLike, rcond=1e-15, hermitian=False):
93
+ a = _atleast_float_1(a)
94
+ return torch.linalg.pinv(a, rtol=rcond, hermitian=hermitian)
95
+
96
+
97
+ @normalizer
98
+ @linalg_errors
99
+ def tensorsolve(a: ArrayLike, b: ArrayLike, axes=None):
100
+ a, b = _atleast_float_2(a, b)
101
+ return torch.linalg.tensorsolve(a, b, dims=axes)
102
+
103
+
104
+ @normalizer
105
+ @linalg_errors
106
+ def tensorinv(a: ArrayLike, ind=2):
107
+ a = _atleast_float_1(a)
108
+ return torch.linalg.tensorinv(a, ind=ind)
109
+
110
+
111
+ # ### Norms and other numbers ###
112
+
113
+
114
+ @normalizer
115
+ @linalg_errors
116
+ def det(a: ArrayLike):
117
+ a = _atleast_float_1(a)
118
+ return torch.linalg.det(a)
119
+
120
+
121
+ @normalizer
122
+ @linalg_errors
123
+ def slogdet(a: ArrayLike):
124
+ a = _atleast_float_1(a)
125
+ return torch.linalg.slogdet(a)
126
+
127
+
128
+ @normalizer
129
+ @linalg_errors
130
+ def cond(x: ArrayLike, p=None):
131
+ x = _atleast_float_1(x)
132
+
133
+ # check if empty
134
+ # cf: https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/linalg.py#L1744
135
+ if x.numel() == 0 and math.prod(x.shape[-2:]) == 0:
136
+ raise LinAlgError("cond is not defined on empty arrays")
137
+
138
+ result = torch.linalg.cond(x, p=p)
139
+
140
+ # Convert nans to infs (numpy does it in a data-dependent way, depending on
141
+ # whether the input array has nans or not)
142
+ # XXX: NumPy does this: https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/linalg.py#L1744
143
+ return torch.where(torch.isnan(result), float("inf"), result)
144
+
145
+
146
+ @normalizer
147
+ @linalg_errors
148
+ def matrix_rank(a: ArrayLike, tol=None, hermitian=False):
149
+ a = _atleast_float_1(a)
150
+
151
+ if a.ndim < 2:
152
+ return int((a != 0).any())
153
+
154
+ if tol is None:
155
+ # follow https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/linalg.py#L1885
156
+ atol = 0
157
+ rtol = max(a.shape[-2:]) * torch.finfo(a.dtype).eps
158
+ else:
159
+ atol, rtol = tol, 0
160
+ return torch.linalg.matrix_rank(a, atol=atol, rtol=rtol, hermitian=hermitian)
161
+
162
+
163
+ @normalizer
164
+ @linalg_errors
165
+ def norm(x: ArrayLike, ord=None, axis=None, keepdims: KeepDims = False):
166
+ x = _atleast_float_1(x)
167
+ return torch.linalg.norm(x, ord=ord, dim=axis)
168
+
169
+
170
+ # ### Decompositions ###
171
+
172
+
173
+ @normalizer
174
+ @linalg_errors
175
+ def cholesky(a: ArrayLike):
176
+ a = _atleast_float_1(a)
177
+ return torch.linalg.cholesky(a)
178
+
179
+
180
+ @normalizer
181
+ @linalg_errors
182
+ def qr(a: ArrayLike, mode="reduced"):
183
+ a = _atleast_float_1(a)
184
+ result = torch.linalg.qr(a, mode=mode)
185
+ if mode == "r":
186
+ # match NumPy
187
+ result = result.R
188
+ return result
189
+
190
+
191
+ @normalizer
192
+ @linalg_errors
193
+ def svd(a: ArrayLike, full_matrices=True, compute_uv=True, hermitian=False):
194
+ a = _atleast_float_1(a)
195
+ if not compute_uv:
196
+ return torch.linalg.svdvals(a)
197
+
198
+ # NB: ignore the hermitian= argument (no pytorch equivalent)
199
+ result = torch.linalg.svd(a, full_matrices=full_matrices)
200
+ return result
201
+
202
+
203
+ # ### Eigenvalues and eigenvectors ###
204
+
205
+
206
+ @normalizer
207
+ @linalg_errors
208
+ def eig(a: ArrayLike):
209
+ a = _atleast_float_1(a)
210
+ w, vt = torch.linalg.eig(a)
211
+
212
+ if not a.is_complex() and w.is_complex() and (w.imag == 0).all():
213
+ w = w.real
214
+ vt = vt.real
215
+ return w, vt
216
+
217
+
218
+ @normalizer
219
+ @linalg_errors
220
+ def eigh(a: ArrayLike, UPLO="L"):
221
+ a = _atleast_float_1(a)
222
+ return torch.linalg.eigh(a, UPLO=UPLO)
223
+
224
+
225
+ @normalizer
226
+ @linalg_errors
227
+ def eigvals(a: ArrayLike):
228
+ a = _atleast_float_1(a)
229
+ result = torch.linalg.eigvals(a)
230
+ if not a.is_complex() and result.is_complex() and (result.imag == 0).all():
231
+ result = result.real
232
+ return result
233
+
234
+
235
+ @normalizer
236
+ @linalg_errors
237
+ def eigvalsh(a: ArrayLike, UPLO="L"):
238
+ a = _atleast_float_1(a)
239
+ return torch.linalg.eigvalsh(a, UPLO=UPLO)
venv/lib/python3.10/site-packages/torch/_numpy/random.py ADDED
@@ -0,0 +1,191 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ """Wrapper to mimic (parts of) np.random API surface.
4
+
5
+ NumPy has strict guarantees on reproducibility etc; here we don't give any.
6
+
7
+ Q: default dtype is float64 in numpy
8
+
9
+ """
10
+ from __future__ import annotations
11
+
12
+ import functools
13
+ from math import sqrt
14
+ from typing import Optional
15
+
16
+ import torch
17
+
18
+ from . import _dtypes_impl, _util
19
+ from ._normalizations import array_or_scalar, ArrayLike, normalizer
20
+
21
+
22
+ __all__ = [
23
+ "seed",
24
+ "random_sample",
25
+ "sample",
26
+ "random",
27
+ "rand",
28
+ "randn",
29
+ "normal",
30
+ "choice",
31
+ "randint",
32
+ "shuffle",
33
+ "uniform",
34
+ ]
35
+
36
+
37
+ def use_numpy_random():
38
+ # local import to avoid ref cycles
39
+ import torch._dynamo.config as config
40
+
41
+ return config.use_numpy_random_stream
42
+
43
+
44
+ def deco_stream(func):
45
+ @functools.wraps(func)
46
+ def inner(*args, **kwds):
47
+ if not use_numpy_random():
48
+ return func(*args, **kwds)
49
+ else:
50
+ import numpy
51
+
52
+ from ._ndarray import ndarray
53
+
54
+ f = getattr(numpy.random, func.__name__)
55
+
56
+ # numpy funcs accept numpy ndarrays, unwrap
57
+ args = tuple(
58
+ arg.tensor.numpy() if isinstance(arg, ndarray) else arg for arg in args
59
+ )
60
+ kwds = {
61
+ key: val.tensor.numpy() if isinstance(val, ndarray) else val
62
+ for key, val in kwds.items()
63
+ }
64
+
65
+ value = f(*args, **kwds)
66
+
67
+ # `value` can be either numpy.ndarray or python scalar (or None)
68
+ if isinstance(value, numpy.ndarray):
69
+ value = ndarray(torch.as_tensor(value))
70
+
71
+ return value
72
+
73
+ return inner
74
+
75
+
76
+ @deco_stream
77
+ def seed(seed=None):
78
+ if seed is not None:
79
+ torch.random.manual_seed(seed)
80
+
81
+
82
+ @deco_stream
83
+ def random_sample(size=None):
84
+ if size is None:
85
+ size = ()
86
+ dtype = _dtypes_impl.default_dtypes().float_dtype
87
+ values = torch.empty(size, dtype=dtype).uniform_()
88
+ return array_or_scalar(values, return_scalar=size == ())
89
+
90
+
91
+ def rand(*size):
92
+ if size == ():
93
+ size = None
94
+ return random_sample(size)
95
+
96
+
97
+ sample = random_sample
98
+ random = random_sample
99
+
100
+
101
+ @deco_stream
102
+ def uniform(low=0.0, high=1.0, size=None):
103
+ if size is None:
104
+ size = ()
105
+ dtype = _dtypes_impl.default_dtypes().float_dtype
106
+ values = torch.empty(size, dtype=dtype).uniform_(low, high)
107
+ return array_or_scalar(values, return_scalar=size == ())
108
+
109
+
110
+ @deco_stream
111
+ def randn(*size):
112
+ dtype = _dtypes_impl.default_dtypes().float_dtype
113
+ values = torch.randn(size, dtype=dtype)
114
+ return array_or_scalar(values, return_scalar=size == ())
115
+
116
+
117
+ @deco_stream
118
+ def normal(loc=0.0, scale=1.0, size=None):
119
+ if size is None:
120
+ size = ()
121
+ dtype = _dtypes_impl.default_dtypes().float_dtype
122
+ values = torch.empty(size, dtype=dtype).normal_(loc, scale)
123
+ return array_or_scalar(values, return_scalar=size == ())
124
+
125
+
126
+ @deco_stream
127
+ def shuffle(x):
128
+ # no @normalizer because we do not cast e.g. lists to tensors
129
+ from ._ndarray import ndarray
130
+
131
+ if isinstance(x, torch.Tensor):
132
+ tensor = x
133
+ elif isinstance(x, ndarray):
134
+ tensor = x.tensor
135
+ else:
136
+ raise NotImplementedError("We do not random.shuffle lists in-place")
137
+
138
+ perm = torch.randperm(tensor.shape[0])
139
+ xp = tensor[perm]
140
+ tensor.copy_(xp)
141
+
142
+
143
+ @deco_stream
144
+ def randint(low, high=None, size=None):
145
+ if size is None:
146
+ size = ()
147
+ if not isinstance(size, (tuple, list)):
148
+ size = (size,)
149
+ if high is None:
150
+ low, high = 0, low
151
+ values = torch.randint(low, high, size=size)
152
+ return array_or_scalar(values, int, return_scalar=size == ())
153
+
154
+
155
+ @deco_stream
156
+ @normalizer
157
+ def choice(a: ArrayLike, size=None, replace=True, p: Optional[ArrayLike] = None):
158
+ # https://stackoverflow.com/questions/59461811/random-choice-with-pytorch
159
+ if a.numel() == 1:
160
+ a = torch.arange(a)
161
+
162
+ # TODO: check a.dtype is integer -- cf np.random.choice(3.4) which raises
163
+
164
+ # number of draws
165
+ if size is None:
166
+ num_el = 1
167
+ elif _util.is_sequence(size):
168
+ num_el = 1
169
+ for el in size:
170
+ num_el *= el
171
+ else:
172
+ num_el = size
173
+
174
+ # prepare the probabilities
175
+ if p is None:
176
+ p = torch.ones_like(a) / a.shape[0]
177
+
178
+ # cf https://github.com/numpy/numpy/blob/main/numpy/random/mtrand.pyx#L973
179
+ atol = sqrt(torch.finfo(p.dtype).eps)
180
+ if abs(p.sum() - 1.0) > atol:
181
+ raise ValueError("probabilities do not sum to 1.")
182
+
183
+ # actually sample
184
+ indices = torch.multinomial(p, num_el, replacement=replace)
185
+
186
+ if _util.is_sequence(size):
187
+ indices = indices.reshape(size)
188
+
189
+ samples = a[indices]
190
+
191
+ return samples
venv/lib/python3.10/site-packages/torch/nn/backends/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/nn/backends/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (185 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/nn/backends/__pycache__/thnn.cpython-310.pyc ADDED
Binary file (297 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/nn/backends/thnn.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # this is for historical pickle deserialization, it is not used otherwise
2
+
3
+ def _get_thnn_function_backend():
4
+ pass
venv/lib/python3.10/site-packages/torch/nn/parallel/__init__.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .parallel_apply import parallel_apply
2
+ from .replicate import replicate
3
+ from .data_parallel import DataParallel, data_parallel
4
+ from .scatter_gather import gather, scatter
5
+ from .distributed import DistributedDataParallel
6
+
7
+ __all__ = ['replicate', 'scatter', 'parallel_apply', 'gather', 'data_parallel',
8
+ 'DataParallel', 'DistributedDataParallel']
9
+
10
+ def DistributedDataParallelCPU(*args, **kwargs):
11
+ import warnings
12
+ warnings.warn("torch.nn.parallel.DistributedDataParallelCPU is deprecated, "
13
+ "please use torch.nn.parallel.DistributedDataParallel instead.")
14
+ return DistributedDataParallel(*args, **kwargs)
venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (806 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/_functions.cpython-310.pyc ADDED
Binary file (5.76 kB). View file
 
venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/comm.cpython-310.pyc ADDED
Binary file (10.3 kB). View file
 
venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/data_parallel.cpython-310.pyc ADDED
Binary file (10.7 kB). View file
 
venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/distributed.cpython-310.pyc ADDED
Binary file (80.2 kB). View file
 
venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/parallel_apply.cpython-310.pyc ADDED
Binary file (4.11 kB). View file
 
venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/replicate.cpython-310.pyc ADDED
Binary file (5.15 kB). View file
 
venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/scatter_gather.cpython-310.pyc ADDED
Binary file (4.51 kB). View file
 
venv/lib/python3.10/site-packages/torch/nn/parallel/_functions.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+
3
+ import torch
4
+ from . import comm
5
+ from torch.autograd import Function
6
+ from torch._utils import _get_device_index
7
+ from typing import List, Optional
8
+
9
+
10
+ class Broadcast(Function):
11
+
12
+ @staticmethod
13
+ def forward(ctx, target_gpus, *inputs):
14
+ assert all(i.device.type != 'cpu' for i in inputs), (
15
+ 'Broadcast function not implemented for CPU tensors'
16
+ )
17
+ target_gpus = [_get_device_index(x, True) for x in target_gpus]
18
+ ctx.target_gpus = target_gpus
19
+ if len(inputs) == 0:
20
+ return tuple()
21
+ ctx.num_inputs = len(inputs)
22
+ ctx.input_device = inputs[0].get_device()
23
+ outputs = comm.broadcast_coalesced(inputs, ctx.target_gpus)
24
+ non_differentiables = []
25
+ for idx, input_requires_grad in enumerate(ctx.needs_input_grad[1:]):
26
+ if not input_requires_grad:
27
+ for output in outputs:
28
+ non_differentiables.append(output[idx])
29
+ ctx.mark_non_differentiable(*non_differentiables)
30
+ return tuple([t for tensors in outputs for t in tensors])
31
+
32
+ @staticmethod
33
+ def backward(ctx, *grad_outputs):
34
+ return (None,) + ReduceAddCoalesced.apply(ctx.input_device, ctx.num_inputs, *grad_outputs)
35
+
36
+
37
+ class ReduceAddCoalesced(Function):
38
+
39
+ @staticmethod
40
+ def forward(ctx, destination, num_inputs, *grads):
41
+ ctx.target_gpus = [grads[i].get_device() for i in range(0, len(grads), num_inputs)]
42
+
43
+ grads_ = [grads[i:i + num_inputs]
44
+ for i in range(0, len(grads), num_inputs)]
45
+ return comm.reduce_add_coalesced(grads_, destination)
46
+
47
+ @staticmethod
48
+ def backward(ctx, *grad_outputs):
49
+ return (None, None,) + Broadcast.apply(ctx.target_gpus, *grad_outputs)
50
+
51
+
52
+ class Gather(Function):
53
+
54
+ @staticmethod
55
+ def forward(ctx, target_device, dim, *inputs):
56
+ assert all(i.device.type != 'cpu' for i in inputs), (
57
+ 'Gather function not implemented for CPU tensors'
58
+ )
59
+ if (target_device == 'cpu'):
60
+ ctx.target_device = 'cpu'
61
+ else:
62
+ target_device = _get_device_index(target_device, True)
63
+ ctx.target_device = target_device
64
+ ctx.dim = dim
65
+ ctx.input_gpus = tuple(i.get_device() for i in inputs)
66
+ if all(t.dim() == 0 for t in inputs) and dim == 0:
67
+ inputs = tuple(t.view(1) for t in inputs)
68
+ warnings.warn('Was asked to gather along dimension 0, but all '
69
+ 'input tensors were scalars; will instead unsqueeze '
70
+ 'and return a vector.')
71
+ ctx.unsqueezed_scalar = True
72
+ else:
73
+ ctx.unsqueezed_scalar = False
74
+ ctx.input_sizes = tuple(i.size(ctx.dim) for i in inputs)
75
+ return comm.gather(inputs, ctx.dim, ctx.target_device)
76
+
77
+ @staticmethod
78
+ def backward(ctx, grad_output):
79
+ scattered_grads = Scatter.apply(ctx.input_gpus, ctx.input_sizes, ctx.dim, grad_output)
80
+ if ctx.unsqueezed_scalar:
81
+ scattered_grads = tuple(g[0] for g in scattered_grads)
82
+ return (None, None) + scattered_grads
83
+
84
+
85
+ class Scatter(Function):
86
+
87
+ @staticmethod
88
+ def forward(ctx, target_gpus, chunk_sizes, dim, input):
89
+ target_gpus = [_get_device_index(x, True) for x in target_gpus]
90
+ ctx.dim = dim
91
+ ctx.input_device = input.get_device() if input.device.type != "cpu" else -1
92
+ streams = None
93
+ if torch.cuda.is_available() and ctx.input_device == -1:
94
+ # Perform CPU to GPU copies in a background stream
95
+ streams = [_get_stream(torch.device("cuda", device)) for device in target_gpus]
96
+ outputs = comm.scatter(input, target_gpus, chunk_sizes, ctx.dim, streams)
97
+ # Synchronize with the copy stream
98
+ if streams is not None:
99
+ for i, output in enumerate(outputs):
100
+ with torch.cuda.device(target_gpus[i]):
101
+ main_stream = torch.cuda.current_stream()
102
+ main_stream.wait_stream(streams[i])
103
+ output.record_stream(main_stream)
104
+ return outputs
105
+
106
+ @staticmethod
107
+ def backward(ctx, *grad_output):
108
+ return None, None, None, Gather.apply(ctx.input_device, ctx.dim, *grad_output)
109
+
110
+
111
+ # background streams used for copying
112
+ _streams: Optional[List[Optional[torch.Stream]]] = None
113
+
114
+ def _get_stream(device: torch.device):
115
+ """Get a background stream for copying between CPU and target device."""
116
+ global _streams
117
+ if device.type == "cpu":
118
+ return None
119
+ device_mod = getattr(torch, device.type, None)
120
+ if device_mod is None:
121
+ return None
122
+ if _streams is None:
123
+ _streams = [None] * device_mod.device_count()
124
+ if _streams[device.index] is None:
125
+ _streams[device.index] = device_mod.Stream(device.index)
126
+ return _streams[device.index]
venv/lib/python3.10/site-packages/torch/nn/parallel/comm.py ADDED
@@ -0,0 +1,236 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ import torch
3
+ from torch.cuda import nccl
4
+ from torch._utils import _take_tensors, _flatten_dense_tensors, \
5
+ _unflatten_dense_tensors, _reorder_tensors_as, _get_device_index, _handle_complex
6
+ from typing import List
7
+
8
def broadcast(tensor, devices=None, *, out=None):
    r"""Broadcasts a tensor to specified GPU devices.

    Args:
        tensor (Tensor): tensor to broadcast. Can be on CPU or GPU.
        devices (Iterable[torch.device, str or int], optional): an iterable of
          GPU devices, among which to broadcast.
        out (Sequence[Tensor], optional, keyword-only): the GPU tensors to
          store output results.

    .. note::
        Exactly one of :attr:`devices` and :attr:`out` must be specified.

    Returns:
        - If :attr:`devices` is specified,
            a tuple containing copies of :attr:`tensor`, placed on
            :attr:`devices`.
        - If :attr:`out` is specified,
            a tuple containing :attr:`out` tensors, each containing a copy of
            :attr:`tensor`.
    """
    tensor = _handle_complex(tensor)
    # The caller must choose exactly one destination form.
    if (devices is None) == (out is None):
        raise RuntimeError(
            f"Exactly one of 'devices' and 'out' must be specified, but got devices={devices} and out={out}")
    if devices is None:
        return torch._C._broadcast_out(tensor, out)
    device_indices = [_get_device_index(d) for d in devices]
    return torch._C._broadcast(tensor, device_indices)
38
+
39
+
40
def broadcast_coalesced(tensors, devices, buffer_size=10485760):
    """Broadcast a sequence of tensors to the specified GPUs.

    Small tensors are first coalesced into a buffer to reduce the number of synchronizations.

    Args:
        tensors (sequence): tensors to broadcast. Must be on the same device,
          either CPU or GPU.
        devices (Iterable[torch.device, str or int]): an iterable of GPU
          devices, among which to broadcast.
        buffer_size (int): maximum size of the buffer used for coalescing

    Returns:
        A tuple containing copies of :attr:`tensor`, placed on :attr:`devices`.
    """
    device_indices = [_get_device_index(d) for d in devices]
    prepared = [_handle_complex(t) for t in tensors]
    return torch._C._broadcast_coalesced(prepared, device_indices, buffer_size)
58
+
59
+
60
def reduce_add(inputs, destination=None):
    """Sum tensors from multiple GPUs.

    All inputs should have matching shapes, dtype, and layout. The output tensor
    will be of the same shape, dtype, and layout.

    Args:
        inputs (Iterable[Tensor]): an iterable of tensors to add.
        destination (int, optional): a device on which the output will be
          placed (default: current device).

    Returns:
        A tensor containing an elementwise sum of all inputs, placed on the
        :attr:`destination` device.
    """
    destination = _get_device_index(destination, optional=True)
    expected_size = inputs[0].size()
    root_index = None  # index of the input that already lives on `destination`
    for i, inp in enumerate(inputs):
        assert inp.device.type != "cpu", "reduce_add expects all inputs to be on GPUs"
        if inp.get_device() == destination:
            root_index = i
        if inp.size() != expected_size:
            got = 'x'.join(str(x) for x in inp.size())
            expected = 'x'.join(str(x) for x in expected_size)
            raise ValueError(f"input {i} has invalid size: got {got}, but expected {expected}")
    if root_index is None:
        raise RuntimeError("reduce_add expects destination to be on the same GPU with one of the tensors")

    if len(inputs) == 1:
        return inputs[0]

    if nccl.is_available(inputs):
        # Fast path: NCCL reduces directly into a fresh output on the root.
        result = torch.empty_like(inputs[root_index])
        nccl.reduce(inputs, output=result, root=root_index)
        return result

    destination_device = torch.device(inputs[root_index].device.type, destination)
    others = [t for i, t in enumerate(inputs) if i != root_index]
    # Seed the accumulator with an addition (fresh tensor, no explicit clone),
    # then accumulate the remaining inputs in place.
    result = inputs[root_index] + others[0].to(device=destination_device, non_blocking=True)
    for other in others[1:]:
        result.add_(other.to(device=destination_device, non_blocking=True))
    return result
103
+
104
+
105
def reduce_add_coalesced(inputs, destination=None, buffer_size=10485760):
    """Sum tensors from multiple GPUs.

    Small tensors are first coalesced into a buffer to reduce the number
    of synchronizations.

    Args:
        inputs (Iterable[Iterable[Tensor]]): iterable of iterables that
          contain tensors from a single device.
        destination (int, optional): a device on which the output will be
          placed (default: current device).
        buffer_size (int): maximum size of the buffer used for coalescing

    Returns:
        A tuple of tensors containing an elementwise sum of each group of
        inputs, placed on the ``destination`` device.
    """
    # TODO: When `len(inputs) == 1` and all inputs are on `destination`, just
    # return `inputs`.
    per_gpu_dense: List[List] = [[] for _ in inputs]  # shape (num_gpus, num_tensors)
    results = []
    ref_order = []
    # Sparse groups are reduced immediately: they may have different (nnz)
    # sizes on different GPUs, so they cannot be coalesced.
    for group in zip(*inputs):
        if all(t.is_sparse for t in group):
            reduced = reduce_add(group, destination)  # stays sparse
            results.append(reduced)
            ref_order.append(group[0])
        else:
            for bucket, t in zip(per_gpu_dense, group):
                bucket.append(t.to_dense() if t.is_sparse else t)
            ref_order.append(per_gpu_dense[0][-1])
    chunk_iters = [_take_tensors(bucket, buffer_size) for bucket in per_gpu_dense]
    # Dense tensors have consistent sizes, so flatten each chunk, reduce the
    # flat buffers, then split the result back apart.
    for chunks in zip(*chunk_iters):
        flat_per_gpu = [_flatten_dense_tensors(chunk) for chunk in chunks]  # (num_gpus,)
        flat_sum = reduce_add(flat_per_gpu, destination)
        for t in _unflatten_dense_tensors(flat_sum, chunks[0]):
            # The unflattened tensors do not share storage, and we don't expose
            # base flat tensor anyways, so give them different version counters.
            # See NOTE [ Version Counter in comm.*_coalesced ]
            results.append(t.data)
    return tuple(_reorder_tensors_as(results, ref_order))
148
+
149
+
150
def scatter(tensor, devices=None, chunk_sizes=None, dim=0, streams=None, *, out=None):
    """Scatters tensor across multiple GPUs.

    Args:
        tensor (Tensor): tensor to scatter. Can be on CPU or GPU.
        devices (Iterable[torch.device, str or int], optional): an iterable of
          GPU devices, among which to scatter.
        chunk_sizes (Iterable[int], optional): sizes of chunks to be placed on
          each device. It should match :attr:`devices` in length and sums to
          ``tensor.size(dim)``. If not specified, :attr:`tensor` will be divided
          into equal chunks.
        dim (int, optional): A dimension along which to chunk :attr:`tensor`.
          Default: ``0``.
        streams (Iterable[torch.cuda.Stream], optional): an iterable of Streams, among
          which to execute the scatter. If not specified, the default stream will
          be utilized.
        out (Sequence[Tensor], optional, keyword-only): the GPU tensors to
          store output results. Sizes of these tensors must match that of
          :attr:`tensor`, except for :attr:`dim`, where the total size must
          sum to ``tensor.size(dim)``.

    .. note::
        Exactly one of :attr:`devices` and :attr:`out` must be specified. When
        :attr:`out` is specified, :attr:`chunk_sizes` must not be specified and
        will be inferred from sizes of :attr:`out`.

    Returns:
        - If :attr:`devices` is specified,
            a tuple containing chunks of :attr:`tensor`, placed on
            :attr:`devices`.
        - If :attr:`out` is specified,
            a tuple containing :attr:`out` tensors, each containing a chunk of
            :attr:`tensor`.
    """
    tensor = _handle_complex(tensor)
    if out is not None:
        # With preallocated outputs, placement and chunking come from `out`.
        if devices is not None:
            raise RuntimeError(
                f"'devices' must not be specified when 'out' is specified, but got devices={devices}")
        if chunk_sizes is not None:
            raise RuntimeError(
                f"'chunk_sizes' must not be specified when 'out' is specified, but got chunk_sizes={chunk_sizes}")
        return tuple(torch._C._scatter_out(tensor, out, dim, streams))
    device_indices = [_get_device_index(d) for d in devices]
    return tuple(torch._C._scatter(tensor, device_indices, chunk_sizes, dim, streams))
196
+
197
+
198
def gather(tensors, dim=0, destination=None, *, out=None):
    r"""Gathers tensors from multiple GPU devices.

    Args:
        tensors (Iterable[Tensor]): an iterable of tensors to gather.
          Tensor sizes in all dimensions other than :attr:`dim` have to match.
        dim (int, optional): a dimension along which the tensors will be
          concatenated. Default: ``0``.
        destination (torch.device, str, or int, optional): the output device.
          Can be CPU or CUDA. Default: the current CUDA device.
        out (Tensor, optional, keyword-only): the tensor to store gather result.
          Its sizes must match those of :attr:`tensors`, except for :attr:`dim`,
          where the size must equal ``sum(tensor.size(dim) for tensor in tensors)``.
          Can be on CPU or CUDA.

    .. note::
        :attr:`destination` must not be specified when :attr:`out` is specified.

    Returns:
        - If :attr:`destination` is specified,
            a tensor located on :attr:`destination` device, that is a result of
            concatenating :attr:`tensors` along :attr:`dim`.
        - If :attr:`out` is specified,
            the :attr:`out` tensor, now containing results of concatenating
            :attr:`tensors` along :attr:`dim`.
    """
    tensors = [_handle_complex(t) for t in tensors]
    if out is not None:
        if destination is not None:
            raise RuntimeError(
                f"'destination' must not be specified when 'out' is specified, but got destination={destination}")
        return torch._C._gather_out(tensors, out, dim)
    if destination == -1:
        # Legacy CPU spelling; still honored below via allow_cpu=True.
        warnings.warn(
            'Using -1 to represent CPU tensor is deprecated. Please use a '
            'device object or string instead, e.g., "cpu".')
    destination = _get_device_index(destination, allow_cpu=True, optional=True)
    return torch._C._gather(tensors, dim, destination)