applied-ai-018 commited on
Commit
eeb0c04
·
verified ·
1 Parent(s): b81136f

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llmeval-env/lib/python3.10/site-packages/torch/_C/_VariableFunctions.pyi +0 -0
  2. llmeval-env/lib/python3.10/site-packages/torch/_C/__init__.pyi +0 -0
  3. llmeval-env/lib/python3.10/site-packages/torch/_C/_aoti.pyi +3 -0
  4. llmeval-env/lib/python3.10/site-packages/torch/_C/_functions.pyi +11 -0
  5. llmeval-env/lib/python3.10/site-packages/torch/_C/_itt.pyi +5 -0
  6. llmeval-env/lib/python3.10/site-packages/torch/_C/_lazy.pyi +28 -0
  7. llmeval-env/lib/python3.10/site-packages/torch/_C/_lazy_ts_backend.pyi +11 -0
  8. llmeval-env/lib/python3.10/site-packages/torch/_C/_monitor.pyi +44 -0
  9. llmeval-env/lib/python3.10/site-packages/torch/_C/_nvtx.pyi +6 -0
  10. llmeval-env/lib/python3.10/site-packages/torch/_C/_profiler.pyi +238 -0
  11. llmeval-env/lib/python3.10/site-packages/torch/_C/_verbose.pyi +3 -0
  12. llmeval-env/lib/python3.10/site-packages/torch/_prims_common/__init__.py +1985 -0
  13. llmeval-env/lib/python3.10/site-packages/torch/_prims_common/__pycache__/__init__.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/torch/_prims_common/__pycache__/wrappers.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/torch/_prims_common/wrappers.py +401 -0
  16. llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/common_rules.py +289 -0
  17. llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/conv_ops.py +108 -0
  18. llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/embedding_ops.py +313 -0
  19. llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/experimental_ops.py +49 -0
  20. llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/math_ops.py +957 -0
  21. llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/matrix_ops.py +226 -0
  22. llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/random_ops.py +30 -0
  23. llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/apply_optimizer_in_backward.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adamax.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_rmsprop.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_sgd.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/optimizer.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/zero_redundancy_optimizer.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/torch/fft/__init__.py +1360 -0
  30. llmeval-env/lib/python3.10/site-packages/torch/fft/__pycache__/__init__.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/torch/futures/__init__.py +318 -0
  32. llmeval-env/lib/python3.10/site-packages/torch/futures/__pycache__/__init__.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/torch/nn/__init__.py +53 -0
  34. llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/_reduction.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/cpp.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/grad.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/init.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/parameter.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/torch/nn/_reduction.py +47 -0
  40. llmeval-env/lib/python3.10/site-packages/torch/nn/attention/__init__.py +117 -0
  41. llmeval-env/lib/python3.10/site-packages/torch/nn/common_types.py +42 -0
  42. llmeval-env/lib/python3.10/site-packages/torch/nn/cpp.py +88 -0
  43. llmeval-env/lib/python3.10/site-packages/torch/nn/functional.py +0 -0
  44. llmeval-env/lib/python3.10/site-packages/torch/nn/functional.pyi +682 -0
  45. llmeval-env/lib/python3.10/site-packages/torch/nn/grad.py +189 -0
  46. llmeval-env/lib/python3.10/site-packages/torch/nn/init.py +626 -0
  47. llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__init__.py +14 -0
  48. llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/__init__.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/_functions.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/comm.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/torch/_C/_VariableFunctions.pyi ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/torch/_C/__init__.pyi ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/torch/_C/_aoti.pyi ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ # Defined in torch/csrc/inductor/aoti_runner/pybind.cpp
2
+ class AOTIModelContainerRunnerCpu: ...
3
+ class AOTIModelContainerRunnerCuda: ...
llmeval-env/lib/python3.10/site-packages/torch/_C/_functions.pyi ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import AnyStr, List
2
+
3
+ from torch import Tensor
4
+
5
+ class UndefinedGrad:
6
+ def __init__(self) -> None: ...
7
+ def __call__(self, *inputs: Tensor) -> List[Tensor]: ...
8
+
9
+ class DelayedError:
10
+ def __init__(self, msg: AnyStr, num_inputs: int) -> None: ...
11
+ def __call__(self, inputs: List[Tensor]) -> List[Tensor]: ...
llmeval-env/lib/python3.10/site-packages/torch/_C/_itt.pyi ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Defined in torch/csrc/itt.cpp
2
+ def is_available() -> None: ...
3
+ def rangePush(message: str) -> None: ...
4
+ def rangePop() -> None: ...
5
+ def mark(message: str) -> None: ...
llmeval-env/lib/python3.10/site-packages/torch/_C/_lazy.pyi ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List
2
+
3
+ from torch import Tensor
4
+
5
+ # defined in torch/csrc/lazy/python/init.cpp
6
+ def _mark_step(device: str, devices: List[str], wait: bool): ...
7
+ def _wait_device_ops(devices: List[str]): ...
8
+ def _reset_metrics(): ...
9
+ def _counter_names() -> List[str]: ...
10
+ def _counter_value(name: str) -> int: ...
11
+ def _metrics_report() -> str: ...
12
+ def _get_graph_hash(tensors: List[Tensor]) -> str: ...
13
+ def _sync_multi(
14
+ tensors: List[Tensor],
15
+ devices: List[str],
16
+ wait: bool = True,
17
+ sync_ltc_data: bool = True,
18
+ ): ...
19
+ def _get_tensor_id(tensor: Tensor) -> int: ...
20
+ def _get_tensors_text(tensors: List[Tensor]) -> str: ...
21
+ def _get_tensors_dot(tensors: List[Tensor]) -> str: ...
22
+ def _get_tensors_backend(tensors: List[Tensor]) -> str: ...
23
+ def _get_force_fallback() -> str: ...
24
+ def _set_force_fallback(newval: str): ...
25
+ def _clear_ir_cache(): ...
26
+ def _dump_ir_cache(filename: str): ...
27
+ def _set_reuse_ir(val: bool): ...
28
+ def _get_default_device_type(): ...
llmeval-env/lib/python3.10/site-packages/torch/_C/_lazy_ts_backend.pyi ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # defined in torch/csrc/lazy/python/init.cpp
2
+
3
+ from typing import Any, List, Tuple
4
+
5
+ from torch import Tensor
6
+
7
+ def _init(): ...
8
+ def _get_tensors_ts_device_data_node(
9
+ tensors: List[Tensor],
10
+ ) -> Tuple[List[int], List[Any]]: ...
11
+ def _run_cached_graph(hash_str: str, graph_inputs: List[Any]) -> List[Tensor]: ...
llmeval-env/lib/python3.10/site-packages/torch/_C/_monitor.pyi ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Defined in torch/csrc/monitor/python_init.cpp
2
+
3
+ import datetime
4
+ from enum import Enum
5
+ from typing import Callable, Dict, List, Union
6
+
7
+ class Aggregation(Enum):
8
+ VALUE = ...
9
+ MEAN = ...
10
+ COUNT = ...
11
+ SUM = ...
12
+ MAX = ...
13
+ MIN = ...
14
+
15
+ class Stat:
16
+ name: str
17
+ count: int
18
+ def __init__(
19
+ self,
20
+ name: str,
21
+ aggregations: List[Aggregation],
22
+ window_size: int,
23
+ max_samples: int = -1,
24
+ ) -> None: ...
25
+ def add(self, v: float) -> None: ...
26
+ def get(self) -> Dict[Aggregation, float]: ...
27
+
28
+ class Event:
29
+ name: str
30
+ timestamp: datetime.datetime
31
+ data: Dict[str, Union[int, float, bool, str]]
32
+ def __init__(
33
+ self,
34
+ name: str,
35
+ timestamp: datetime.datetime,
36
+ data: Dict[str, Union[int, float, bool, str]],
37
+ ) -> None: ...
38
+
39
+ def log_event(e: Event) -> None: ...
40
+
41
+ class EventHandlerHandle: ...
42
+
43
+ def register_event_handler(handler: Callable[[Event], None]) -> EventHandlerHandle: ...
44
+ def unregister_event_handler(handle: EventHandlerHandle) -> None: ...
llmeval-env/lib/python3.10/site-packages/torch/_C/_nvtx.pyi ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ # Defined in torch/csrc/cuda/shared/nvtx.cpp
2
+ def rangePushA(message: str) -> int: ...
3
+ def rangePop() -> int: ...
4
+ def rangeStartA(message: str) -> int: ...
5
+ def rangeEnd(int) -> None: ...
6
+ def markA(message: str) -> None: ...
llmeval-env/lib/python3.10/site-packages/torch/_C/_profiler.pyi ADDED
@@ -0,0 +1,238 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from enum import Enum
2
+ from typing import Any, Dict, List, Literal, Optional, Tuple, Union
3
+
4
+ from torch._C import device, dtype, layout
5
+ from typing_extensions import TypeAlias
6
+
7
+ # defined in torch/csrc/profiler/python/init.cpp
8
+
9
+ class RecordScope(Enum):
10
+ FUNCTION = ...
11
+ BACKWARD_FUNCTION = ...
12
+ TORCHSCRIPT_FUNCTION = ...
13
+ KERNEL_FUNCTION_DTYPE = ...
14
+ CUSTOM_CLASS = ...
15
+ BUILD_FEATURE = ...
16
+ LITE_INTERPRETER = ...
17
+ USER_SCOPE = ...
18
+ STATIC_RUNTIME_OP = ...
19
+ STATIC_RUNTIME_MODEL = ...
20
+
21
+ class ProfilerState(Enum):
22
+ Disable = ...
23
+ CPU = ...
24
+ CUDA = ...
25
+ NVTX = ...
26
+ ITT = ...
27
+ KINETO = ...
28
+ KINETO_GPU_FALLBACK = ...
29
+ KINETO_PRIVATEUSE1_FALLBACK = ...
30
+ KINETO_PRIVATEUSE1 = ...
31
+
32
+ class ActiveProfilerType(Enum):
33
+ NONE = ...
34
+ LEGACY = ...
35
+ KINETO = ...
36
+ NVTX = ...
37
+ ITT = ...
38
+
39
+ class ProfilerActivity(Enum):
40
+ CPU = ...
41
+ CUDA = ...
42
+ MTIA = ...
43
+ PrivateUse1 = ...
44
+
45
+ class _EventType(Enum):
46
+ TorchOp = ...
47
+ Backend = ...
48
+ Allocation = ...
49
+ OutOfMemory = ...
50
+ PyCall = ...
51
+ PyCCall = ...
52
+ Kineto = ...
53
+
54
+ class _ExperimentalConfig:
55
+ def __init__(
56
+ self,
57
+ profiler_metrics: List[str] = ...,
58
+ profiler_measure_per_kernel: bool = ...,
59
+ verbose: bool = ...,
60
+ performance_events: List[str] = ...,
61
+ enable_cuda_sync_events: bool = ...,
62
+ ) -> None: ...
63
+
64
+ class ProfilerConfig:
65
+ def __init__(
66
+ self,
67
+ state: ProfilerState,
68
+ report_input_shapes: bool,
69
+ profile_memory: bool,
70
+ with_stack: bool,
71
+ with_flops: bool,
72
+ with_modules: bool,
73
+ experimental_config: _ExperimentalConfig,
74
+ ) -> None: ...
75
+
76
+ class _ProfilerEvent:
77
+ start_tid: int
78
+ start_time_ns: int
79
+ children: List[_ProfilerEvent]
80
+
81
+ # TODO(robieta): remove in favor of `self.typed`
82
+ extra_fields: Union[
83
+ _ExtraFields_TorchOp,
84
+ _ExtraFields_Backend,
85
+ _ExtraFields_Allocation,
86
+ _ExtraFields_OutOfMemory,
87
+ _ExtraFields_PyCall,
88
+ _ExtraFields_PyCCall,
89
+ _ExtraFields_Kineto,
90
+ ]
91
+
92
+ @property
93
+ def typed(
94
+ self,
95
+ ) -> Union[
96
+ Tuple[Literal[_EventType.TorchOp], _ExtraFields_TorchOp],
97
+ Tuple[Literal[_EventType.Backend], _ExtraFields_Backend],
98
+ Tuple[Literal[_EventType.Allocation], _ExtraFields_Allocation],
99
+ Tuple[Literal[_EventType.OutOfMemory], _ExtraFields_OutOfMemory],
100
+ Tuple[Literal[_EventType.PyCall], _ExtraFields_PyCall],
101
+ Tuple[Literal[_EventType.PyCCall], _ExtraFields_PyCCall],
102
+ Tuple[Literal[_EventType.Kineto], _ExtraFields_Kineto],
103
+ ]: ...
104
+ @property
105
+ def name(self) -> str: ...
106
+ @property
107
+ def tag(self) -> _EventType: ...
108
+ @property
109
+ def id(self) -> int: ...
110
+ @property
111
+ def parent(self) -> Optional[_ProfilerEvent]: ...
112
+ @property
113
+ def correlation_id(self) -> int: ...
114
+ @property
115
+ def end_time_ns(self) -> int: ...
116
+ @property
117
+ def duration_time_ns(self) -> int: ...
118
+
119
+ class _TensorMetadata:
120
+ impl_ptr: Optional[int]
121
+ storage_data_ptr: Optional[int]
122
+ id: Optional[int]
123
+
124
+ @property
125
+ def allocation_id(self) -> Optional[int]: ...
126
+ @property
127
+ def layout(self) -> layout: ...
128
+ @property
129
+ def device(self) -> device: ...
130
+ @property
131
+ def dtype(self) -> dtype: ...
132
+ @property
133
+ def sizes(self) -> List[int]: ...
134
+ @property
135
+ def strides(self) -> List[int]: ...
136
+
137
+ Scalar: TypeAlias = Union[int, float, bool, complex]
138
+ Input: TypeAlias = Optional[Union[_TensorMetadata, List[_TensorMetadata], Scalar]]
139
+
140
+ class _ExtraFields_TorchOp:
141
+ name: str
142
+ sequence_number: int
143
+ allow_tf32_cublas: bool
144
+
145
+ @property
146
+ def inputs(self) -> List[Input]: ...
147
+ @property
148
+ def scope(self) -> RecordScope: ...
149
+
150
+ class _ExtraFields_Backend: ...
151
+
152
+ class _ExtraFields_Allocation:
153
+ ptr: int
154
+ id: Optional[int]
155
+ alloc_size: int
156
+ total_allocated: int
157
+ total_reserved: int
158
+
159
+ @property
160
+ def allocation_id(self) -> Optional[int]: ...
161
+ @property
162
+ def device(self) -> device: ...
163
+
164
+ class _ExtraFields_OutOfMemory: ...
165
+
166
+ class _PyFrameState:
167
+ line_number: int
168
+ function_name: str
169
+
170
+ @property
171
+ def file_name(self) -> str: ...
172
+
173
+ class _NNModuleInfo:
174
+ @property
175
+ def self_ptr(self) -> int: ...
176
+ @property
177
+ def cls_ptr(self) -> int: ...
178
+ @property
179
+ def cls_name(self) -> str: ...
180
+ @property
181
+ def parameters(
182
+ self,
183
+ ) -> List[Tuple[str, _TensorMetadata, Optional[_TensorMetadata]]]: ...
184
+
185
+ class _OptimizerInfo:
186
+ @property
187
+ def parameters(
188
+ self,
189
+ ) -> List[
190
+ Tuple[
191
+ # Parameter
192
+ _TensorMetadata,
193
+ #
194
+ # Gradient (if present during optimizer.step())
195
+ Optional[_TensorMetadata],
196
+ #
197
+ # Optimizer state for Parameter as (name, tensor) pairs
198
+ List[Tuple[str, _TensorMetadata]],
199
+ ]
200
+ ]: ...
201
+
202
+ class _ExtraFields_PyCCall:
203
+ @property
204
+ def caller(self) -> _PyFrameState: ...
205
+
206
+ class _ExtraFields_PyCall:
207
+ @property
208
+ def callsite(self) -> _PyFrameState: ...
209
+ @property
210
+ def caller(self) -> _PyFrameState: ...
211
+ @property
212
+ def module(self) -> Optional[_NNModuleInfo]: ...
213
+ @property
214
+ def optimizer(self) -> Optional[_OptimizerInfo]: ...
215
+
216
+ class _ExtraFields_Kineto: ...
217
+
218
+ def _add_execution_trace_observer(output_file_path: str) -> bool: ...
219
+ def _remove_execution_trace_observer() -> None: ...
220
+ def _enable_execution_trace_observer() -> None: ...
221
+ def _disable_execution_trace_observer() -> None: ...
222
+ def _set_record_concrete_inputs_enabled_val(val: bool) -> None: ...
223
+ def _set_fwd_bwd_enabled_val(val: bool) -> None: ...
224
+ def _set_cuda_sync_enabled_val(val: bool) -> None: ...
225
+
226
+ class CapturedTraceback: ...
227
+
228
+ def gather_traceback(python: bool, script: bool, cpp: bool) -> CapturedTraceback: ...
229
+
230
+ # The Dict has name, filename, line
231
+ def symbolize_tracebacks(
232
+ to_symbolize: List[CapturedTraceback],
233
+ ) -> List[List[Dict[str, str]]]: ...
234
+
235
+ class _RecordFunctionFast:
236
+ def __init__(self, name: str) -> None: ...
237
+ def __enter__(self) -> None: ...
238
+ def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: ...
llmeval-env/lib/python3.10/site-packages/torch/_C/_verbose.pyi ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ # Defined in torch/csrc/utils/verbose.cpp
2
+ def mkl_set_verbose(enable: int) -> int: ...
3
+ def mkldnn_set_verbose(level: int) -> int: ...
llmeval-env/lib/python3.10/site-packages/torch/_prims_common/__init__.py ADDED
@@ -0,0 +1,1985 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import operator
4
+ import warnings
5
+ import weakref
6
+
7
+ from contextlib import nullcontext
8
+ from enum import Enum
9
+ from functools import cmp_to_key, reduce
10
+ from typing import (
11
+ Any,
12
+ Callable,
13
+ cast,
14
+ List,
15
+ NamedTuple,
16
+ Optional,
17
+ overload,
18
+ Sequence,
19
+ Tuple,
20
+ Type,
21
+ TYPE_CHECKING,
22
+ Union,
23
+ )
24
+
25
+ from typing_extensions import TypeAlias
26
+
27
+
28
+ if TYPE_CHECKING:
29
+ # Import the following modules during type checking to enable code intelligence features,
30
+ # such as auto-completion in tools like pylance, even when these modules are not explicitly
31
+ # imported in user code.
32
+
33
+ import sympy
34
+
35
+ import torch
36
+ from torch import sym_float, sym_int, sym_max
37
+
38
+
39
+ ShapeType: TypeAlias = Union[torch.Size, List[int], Tuple[int, ...]]
40
+ StrideType: TypeAlias = Union[List[int], Tuple[int, ...]]
41
+ DimsType: TypeAlias = Union[int, List[int], Tuple[int, ...]]
42
+ DimsSequenceType: TypeAlias = Union[List[int], Tuple[int, ...]]
43
+ # TODO: Type[torch.SymInt], Type[torch.SymFloat]
44
+ NumberTypeType: TypeAlias = Union[Type[bool], Type[int], Type[float], Type[complex]]
45
+ # TODO: This needs a lot more type annotations
46
+ # NumberType = Union[bool, int, float, complex, torch.SymInt, torch.SymFloat]
47
+ NumberType: TypeAlias = Union[bool, int, float, complex]
48
+ RealNumberType: TypeAlias = Union[bool, int, float]
49
+
50
+ Number = (bool, int, float, complex, torch.SymInt, torch.SymFloat)
51
+ # I don't call it Integral because numbers.Integral includes bool, but IntLike
52
+ # does not
53
+ Dim = int
54
+ IntLike = (int, torch.SymInt)
55
+ FloatLike = (float, torch.SymFloat)
56
+ IntWithoutSymInt = int
57
+ FloatWithoutSymFloat = float
58
+ DeviceLikeType: TypeAlias = Union[str, torch.device, int]
59
+ Tensor = torch.Tensor
60
+
61
+
62
+ torch_function_passthrough = {
63
+ torch.device,
64
+ torch.sym_not,
65
+ torch.sym_float,
66
+ torch.sym_int,
67
+ torch.sym_max,
68
+ torch.sym_min,
69
+ torch._sym_sqrt, # type: ignore[attr-defined]
70
+ torch.sym_ite,
71
+ torch.Tensor.dim,
72
+ torch.Tensor.ndim.__get__, # type: ignore[attr-defined]
73
+ torch.Tensor.numel,
74
+ torch.Tensor.size,
75
+ torch.Tensor.storage_offset,
76
+ torch.Tensor.stride,
77
+ torch.Tensor.dtype.__get__, # type: ignore[attr-defined]
78
+ torch.Tensor.is_sparse.__get__, # type: ignore[attr-defined]
79
+ torch.Tensor.shape.__get__, # type: ignore[attr-defined]
80
+ torch.Tensor.device.__get__, # type: ignore[attr-defined]
81
+ torch.Tensor.requires_grad.__get__, # type: ignore[attr-defined]
82
+ torch.Tensor.layout.__get__, # type: ignore[attr-defined]
83
+ torch.Tensor.is_contiguous,
84
+ # For TorchRefsMode only
85
+ torch.Tensor.__format__,
86
+ torch.Tensor.__repr__,
87
+ torch.Tensor.requires_grad.__get__, # type: ignore[attr-defined]
88
+ }
89
+
90
+
91
+ TensorLikeType = torch.Tensor
92
+ TensorLike = torch.Tensor
93
+ TensorSequenceType: TypeAlias = Union[List[TensorLikeType], Tuple[TensorLikeType, ...]]
94
+ TensorOrNumberLikeType: TypeAlias = Union[TensorLikeType, NumberType]
95
+
96
+ CustomOutParamAnnotation = "__custom_out_param__"
97
+
98
+
99
+ def same_shape(a: ShapeType, b: ShapeType, *, allow_rhs_unbacked=False) -> bool:
100
+ from torch.fx.experimental.symbolic_shapes import guard_size_oblivious
101
+
102
+ if len(a) != len(b):
103
+ return False
104
+
105
+ for x, y in zip(a, b):
106
+ if allow_rhs_unbacked:
107
+ # TODO: We should check that the symbols are consistent
108
+ # with each other
109
+ if isinstance(y, torch.SymInt):
110
+ continue
111
+ # NB: Naively, you would not expect to have to do an oblivious guard
112
+ # here because there is seemingly no broadcasting here, but in fact we
113
+ # use this in some situations to determine if we need to do an expand
114
+ # on the tensor because they don't line up, so you can definitely end
115
+ # up trying to prove u0 != 1 in this situation. See
116
+ # python test/test_proxy_tensor.py -k test_cumsum_unbacked
117
+ if guard_size_oblivious(x != y):
118
+ return False
119
+
120
+ return True
121
+
122
+
123
+ def _maybe_get_pytype(t):
124
+ if t is torch.SymFloat:
125
+ return float
126
+ elif t is torch.SymInt:
127
+ return int
128
+ elif t is torch.SymBool:
129
+ return bool
130
+ else:
131
+ return t
132
+
133
+
134
+ # TODO: look at using torch.testing.assert_close instead with an option
135
+ # to just compare metadata
136
+ def compare_tensor_meta(
137
+ a: TensorLikeType,
138
+ b: TensorLikeType,
139
+ check_strides=False,
140
+ *,
141
+ allow_rhs_unbacked=False,
142
+ check_conj=True,
143
+ ):
144
+ """
145
+ Checks that two tensor likes have the same shape,
146
+ dtype and device.
147
+
148
+ In the future this will validate additional metadata, like
149
+ strides.
150
+ """
151
+ assert isinstance(a, TensorLike)
152
+ assert isinstance(b, TensorLike)
153
+
154
+ if not same_shape(a.shape, b.shape, allow_rhs_unbacked=allow_rhs_unbacked):
155
+ msg = f"Shapes {a.shape} and {b.shape} are not equal!"
156
+ raise AssertionError(msg)
157
+
158
+ if a.dtype != b.dtype:
159
+ msg = f"Dtypes {a.dtype} and {b.dtype} are not equal!"
160
+ raise AssertionError(msg)
161
+
162
+ if a.device != b.device:
163
+ # Handles special cuda:0 vs cuda case
164
+ # TODO: we should review why this happens and see about fixing it
165
+ if (str(a.device) == "cuda:0" or str(a.device) == "cuda") and (
166
+ str(b.device) == "cuda:0" or str(b.device) == "cuda"
167
+ ):
168
+ pass
169
+ else:
170
+ msg = f"Devices {a.device} and {b.device} are not equal!"
171
+ raise AssertionError(msg)
172
+
173
+ # Stride checking is currently disabled, see https://github.com/pytorch/pytorch/issues/78050
174
+ if check_strides:
175
+ same_strides, idx = check_significant_strides(a, b)
176
+ if not same_strides:
177
+ msg = f"Stride mismatch! Strides are {a.stride()} and {b.stride()} (mismatched at {idx})!"
178
+ raise RuntimeError(msg)
179
+
180
+ if a.storage_offset() != b.storage_offset():
181
+ msg = f"Storage offset mismatch! Storage offsets are {a.storage_offset()} and {b.storage_offset()}!"
182
+ raise RuntimeError(msg)
183
+
184
+ if check_conj:
185
+ if a.is_conj() != b.is_conj():
186
+ raise RuntimeError(
187
+ f"Conj mismatch! is_conj is set to {a.is_conj()} and {b.is_conj()}"
188
+ )
189
+
190
+ if a.is_neg() != b.is_neg():
191
+ raise RuntimeError(
192
+ f"Neg mismatch! is_neg is set to {a.is_neg()} and {b.is_neg()}"
193
+ )
194
+
195
+
196
+ def _check_strides_helper(
197
+ a: TensorLikeType, b: TensorLikeType, *, only_cuda=True, significant_only=True
198
+ ) -> Tuple[bool, Optional[int]]:
199
+ # NOTE: only on CUDA because CPU elementwise strides are incorrect in PyTorch
200
+ # See https://github.com/pytorch/pytorch/issues/77553
201
+ # Only compares strides that are "meaningful" -- strides for dimensions with length > 1
202
+ # and for tensors with more than one element
203
+ if (
204
+ not only_cuda or a.device.type == "cuda" or b.device.type == "cuda"
205
+ ) and a.numel() > 0:
206
+ for idx in range(a.ndim):
207
+ check = not significant_only or a.shape[idx] > 1
208
+ if a.stride()[idx] != b.stride()[idx] and check:
209
+ return False, idx
210
+
211
+ return True, None
212
+
213
+
214
+ def check_significant_strides(
215
+ a: TensorLikeType, b: TensorLikeType, *, only_cuda=True
216
+ ) -> Tuple[bool, Optional[int]]:
217
+ return _check_strides_helper(a, b, only_cuda=only_cuda, significant_only=True)
218
+
219
+
220
+ def check_all_strides(
221
+ a: TensorLikeType, b: TensorLikeType, *, only_cuda=True
222
+ ) -> Tuple[bool, Optional[int]]:
223
+ return _check_strides_helper(a, b, only_cuda=only_cuda, significant_only=False)
224
+
225
+
226
+ # This function is equivalent to compute_contiguous() from TensorImpl.cpp
227
+ def is_contiguous(a: TensorLikeType) -> bool:
228
+ """
229
+ Tests whether a tensor is contiguous or not.
230
+
231
+ Tensors are contiguous when they have no elements,
232
+ one element, or when they have "nested" strides.
233
+ """
234
+ from torch.fx.experimental.symbolic_shapes import guard_size_oblivious
235
+
236
+ if guard_size_oblivious(a.numel() < 2):
237
+ return True
238
+
239
+ expected_stride = 1
240
+ for x, y in reversed(tuple(zip(a.shape, a.stride()))):
241
+ # Skips checking strides when a dimension has length 1
242
+ if guard_size_oblivious(x == 1):
243
+ continue
244
+
245
+ if y != expected_stride:
246
+ return False
247
+ expected_stride = expected_stride * x
248
+
249
+ return True
250
+
251
+
252
+ # This function is equivalent to compute_channels_last_contiguous_2d() in TensorImpl.cpp
253
+ def is_channels_last_contiguous_2d(a: Tensor) -> bool:
254
+ # NHWC or not channels last 2D contiguous
255
+ if a.ndim != 4:
256
+ return False
257
+
258
+ expected_stride = 1
259
+ for idx in (1, 3, 2, 0):
260
+ length = a.shape[idx]
261
+ if length == 1:
262
+ continue
263
+
264
+ stride = a.stride()[idx]
265
+ if stride != expected_stride:
266
+ return False
267
+
268
+ expected_stride *= length
269
+
270
+ return True
271
+
272
+
273
+ def is_channels_last_contiguous_3d(a: Tensor) -> bool:
274
+ # NDHWC or not channels last 3D contiguous
275
+ if a.ndim != 5:
276
+ return False
277
+
278
+ expected_stride = 1
279
+ for idx in (1, 4, 3, 2, 0):
280
+ length = a.shape[idx]
281
+ if length == 1:
282
+ continue
283
+
284
+ stride = a.stride()[idx]
285
+ if stride != expected_stride:
286
+ return False
287
+
288
+ expected_stride *= length
289
+
290
+ return True
291
+
292
+
293
+ _memory_formats = {
294
+ torch.contiguous_format,
295
+ torch.preserve_format,
296
+ torch.channels_last,
297
+ torch.channels_last_3d,
298
+ }
299
+
300
+
301
+ def validate_memory_format(memory_format: torch.memory_format):
302
+ torch._check(
303
+ memory_format in _memory_formats,
304
+ lambda: f"Received unknown memory format {memory_format}!",
305
+ )
306
+
307
+
308
+ def is_contiguous_for_memory_format( # type: ignore[return]
309
+ a: Tensor, *, memory_format: torch.memory_format
310
+ ) -> bool:
311
+ validate_memory_format(memory_format)
312
+
313
+ if memory_format == torch.contiguous_format:
314
+ return is_contiguous(a)
315
+ if memory_format == torch.channels_last:
316
+ return is_channels_last_contiguous_2d(a)
317
+ if memory_format == torch.channels_last_3d:
318
+ return is_channels_last_contiguous_3d(a)
319
+
320
+ torch._check(
321
+ False,
322
+ lambda: f"is_contiguous received unsupported memory format {memory_format}",
323
+ )
324
+
325
+
326
+ # NOTE: that tensors with no elements and channels last is ???
327
+ def is_channels_last_contiguous(a: Tensor) -> bool:
328
+ """
329
+ True when a tensor is channels-last contiguous.
330
+
331
+ This requires that:
332
+
333
+ - the tensor is conceptually either 4 (NHWC) or 5 (NDHWC) dimensions
334
+ - if we name the tensor's dimensions NCHW or NCDHW, then the strides are such that the
335
+ stride of the 'C' dimension (Cs) is 1 and the strides corresponding to
336
+ each dimension (Xs) can be ordered Cs <= Ws <= Hs <= (Ds) <= Ns and are
337
+ "nested" -- so Ws = Cs * Cl, where Cl is the length of the 'C' dimension,
338
+ for example.
339
+ """
340
+ return is_channels_last_contiguous_2d(a) or is_channels_last_contiguous_3d(a)
341
+
342
+
343
+ def is_non_overlapping_and_dense(a: Tensor) -> bool:
344
+ """
345
+ True when a tensor is non-overlapping and dense.
346
+
347
+ A tensor is non-overlapping and dense when there exists a permutation of
348
+ its dimensions that is contiguous.
349
+ """
350
+
351
+ from torch.fx.experimental.symbolic_shapes import guard_size_oblivious
352
+
353
+ if a.is_sparse:
354
+ return False
355
+
356
+ # Short-circuits if the tensor is already contiguous or channels-last contiguous
357
+ if is_contiguous(a) or is_channels_last_contiguous(a):
358
+ return True
359
+
360
+ # The following is equivalent to compute_non_overlapping_and_dense in TensorImpl.cpp
361
+
362
+ # Short-circuits for tensors of rank one, which are
363
+ # non-overlapping and "dense" if their stride is one
364
+ if a.ndim == 1:
365
+ return a.stride()[0] == 1
366
+
367
+ # Checks that there exists a permutation of the strides s.t. the tensor would be contiguous
368
+ # Sorts (length, stride) pairs by stride
369
+ #
370
+ # This sort is done in a size-oblivious way, which helps if we do a
371
+ # comparison like 2048*u0 > u0; we just want this to return True
372
+ # (and not worry about what if u0 is zero).
373
+ class K(NamedTuple):
374
+ size: int
375
+ stride: int
376
+
377
+ def __lt__(self, other):
378
+ return guard_size_oblivious(self.stride < other.stride)
379
+
380
+ def __gt__(self, other):
381
+ return guard_size_oblivious(self.stride > other.stride)
382
+
383
+ def __le__(self, other):
384
+ return guard_size_oblivious(self.stride <= other.stride)
385
+
386
+ def __ge__(self, other):
387
+ return guard_size_oblivious(self.stride >= other.stride)
388
+
389
+ def __eq__(self, other):
390
+ return guard_size_oblivious(self.stride == other.stride)
391
+
392
+ lengths_and_strides = sorted(map(K, a.shape, a.stride()))
393
+
394
+ expected_stride = 1
395
+ for length, stride in lengths_and_strides:
396
+ if guard_size_oblivious(length == 1):
397
+ continue
398
+
399
+ if stride != expected_stride:
400
+ return False
401
+
402
+ expected_stride *= length
403
+
404
+ return True
405
+
406
+
407
+ # NOTE: Based on the implementation in TensorIterator.cpp, but note that
408
+ # the note [Computing output strides] is incorrect, because it
409
+ # says that strides will be preserved even if they are not
410
+ # "non overlapping and dense", but this is incorrect. The
411
+ # output of elementwise operations are always given
412
+ # non overlapping and dense strides.
413
+ # This is also INCORRECT because it does not model TensorIterator's
414
+ # short-circuit, which can cause different strides.
415
+ def compute_elementwise_output_logical_to_physical_perm(
416
+ *tensors, _skip_checks=False
417
+ ) -> List[int]:
418
+ from torch.fx.experimental.symbolic_shapes import guard_size_oblivious
419
+
420
+ if not _skip_checks and len(tensors) == 0:
421
+ msg = "Can't compute elementwise output strides for zero tensors!"
422
+ raise ValueError(msg)
423
+
424
+ if not _skip_checks:
425
+ check_same_shape(*tensors, allow_cpu_scalar_tensors=True)
426
+
427
+ # Filters the tensors to actual tensors
428
+ if not _skip_checks:
429
+ tensors = tuple(
430
+ a
431
+ for a in tensors
432
+ if isinstance(a, TensorLike) and not is_cpu_scalar_tensor(a)
433
+ )
434
+
435
+ # Short-circuits for CPU scalar case
436
+ if len(tensors) == 0:
437
+ return []
438
+
439
+ # Short-circuits for shapes with zero or one dimensions
440
+ # TODO: are these necessary?
441
+ ndim = tensors[0].ndim
442
+ if ndim == 0:
443
+ return []
444
+ if ndim == 1:
445
+ return [0]
446
+
447
+ # Short-circuits if contiguous, following the fake fast path.
448
+ # This reduces the number of guards we end up making
449
+ # TODO: do channels last too
450
+ is_contiguous = True
451
+ for t in tensors:
452
+ is_contiguous = is_contiguous and t.is_contiguous(
453
+ memory_format=torch.contiguous_format
454
+ )
455
+
456
+ if is_contiguous:
457
+ return list(range(ndim))
458
+
459
+ shape = tensors[0].shape
460
+
461
+ def should_swap(idx_a, idx_b):
462
+ for tensor in tensors:
463
+ stride_a = tensor.stride()[idx_a]
464
+ stride_b = tensor.stride()[idx_b]
465
+
466
+ if guard_size_oblivious(stride_a == 0) or guard_size_oblivious(
467
+ stride_b == 0
468
+ ):
469
+ continue
470
+
471
+ if guard_size_oblivious(stride_a < stride_b):
472
+ return -1
473
+
474
+ if guard_size_oblivious(stride_a > stride_b):
475
+ return 1
476
+
477
+ # stride_a == stride_b
478
+ if guard_size_oblivious(shape[idx_a] > shape[idx_b]):
479
+ return 1
480
+
481
+ # Note: this case is hit if all strides are zero,
482
+ # or all strides are equal and all dimensions have the same length
483
+ return 0
484
+
485
+ # The "sort" order for the permutation is back-to-front, but
486
+ # the natural order for permutations is front-to-back. Do the
487
+ # sorting back-to-front and then reverse it on output.
488
+ #
489
+ # also, note this returns the logical to physical shape permutation
490
+ perm = list(reversed(range(ndim)))
491
+
492
+ # insertion sort with support for ambiguous comparisons
493
+ for i in range(1, ndim):
494
+ dim1 = i
495
+ for dim0 in reversed(range(i)):
496
+ comparison = should_swap(perm[dim0], perm[dim1])
497
+ if comparison > 0:
498
+ perm[dim0], perm[dim1] = perm[dim1], perm[dim0]
499
+ dim1 = dim0
500
+ elif comparison < 0:
501
+ break
502
+
503
+ return list(reversed(perm))
504
+
505
+
506
+ def compute_elementwise_output_strides(*tensors) -> Tuple[int, ...]:
507
+ """
508
+ Computes the output strides for elementwise operations.
509
+ """
510
+ if len(tensors) == 0:
511
+ msg = "Can't compute elementwise output strides for zero tensors!"
512
+ raise ValueError(msg)
513
+
514
+ check_same_shape(*tensors, allow_cpu_scalar_tensors=True)
515
+
516
+ # Filters the tensors to actual tensors
517
+ tensors = tuple(
518
+ a for a in tensors if isinstance(a, TensorLike) and not is_cpu_scalar_tensor(a)
519
+ )
520
+
521
+ # Short-circuits for CPU scalar case
522
+ if len(tensors) == 0:
523
+ return ()
524
+
525
+ ndim = tensors[0].ndim
526
+ shape = tensors[0].shape
527
+
528
+ if ndim == 0:
529
+ return ()
530
+ if ndim == 1:
531
+ return (1,)
532
+
533
+ logical_to_physical_perm = compute_elementwise_output_logical_to_physical_perm(
534
+ *tensors, _skip_checks=True
535
+ )
536
+ permuted_shape = apply_perm(shape, logical_to_physical_perm) # to physical
537
+
538
+ new_strides = make_contiguous_strides_for(permuted_shape)
539
+ permuted_strides = apply_perm(
540
+ new_strides, invert_perm(logical_to_physical_perm)
541
+ ) # to logical
542
+
543
+ return tuple(permuted_strides)
544
+
545
+
546
+ # Identity permutation is [0, 1, 2]
547
+ def apply_perm(inp, perm):
548
+ ndim = len(inp)
549
+ permuted_inp = [-1] * ndim
550
+ for idx, x in enumerate(perm):
551
+ permuted_inp[idx] = inp[x]
552
+ return permuted_inp
553
+
554
+
555
+ def invert_perm(perm):
556
+ ndim = len(perm)
557
+ new_perm = [-1] * ndim
558
+ for idx, x in enumerate(perm):
559
+ new_perm[x] = idx
560
+ return new_perm
561
+
562
+
563
+ #
564
+ # Common helper functions
565
+ #
566
+
567
+
568
+ def validate_dim_length(length: int):
569
+ """
570
+ Validates that an object represents a valid
571
+ dimension length.
572
+ """
573
+
574
+ if isinstance(length, (int, torch.SymInt)):
575
+ torch._check_is_size(length)
576
+ else:
577
+ # sometimes called with sympy expression by inductor
578
+ assert length >= 0
579
+
580
+
581
+ def validate_shape(shape: ShapeType):
582
+ """
583
+ Validates that a sequence represents a valid shape.
584
+ """
585
+
586
+ assert isinstance(shape, Sequence), type(shape)
587
+ for l in shape:
588
+ validate_dim_length(l)
589
+
590
+
591
+ def validate_strides(strides: StrideType):
592
+ """
593
+ Verifies the object specifies valid strides.
594
+ """
595
+
596
+ assert isinstance(strides, Sequence)
597
+ for stride in strides:
598
+ assert stride >= 0
599
+
600
+
601
+ def validate_idx(rank: int, idx: int):
602
+ """
603
+ Validates that idx is a valid index for the given shape.
604
+ Assumes the index is already canonicalized.
605
+ """
606
+
607
+ assert isinstance(idx, Dim)
608
+ assert isinstance(rank, Dim)
609
+
610
+ assert idx >= 0 and idx < rank or idx == 0
611
+
612
+
613
+ def validate_dimension_indices(rank: int, indices: DimsSequenceType):
614
+ for idx in indices:
615
+ validate_idx(rank, idx)
616
+
617
+
618
+ def validate_exclusive_idx(rank: int, ex_idx: int):
619
+ """
620
+ Validates that ex_idx is a valid exclusive index
621
+ for the given shape.
622
+ """
623
+
624
+ assert isinstance(ex_idx, Dim)
625
+ assert isinstance(rank, Dim)
626
+ assert ex_idx > 0 and ex_idx <= rank
627
+
628
+
629
+ # "Wraps" a dim (up to one time) for the given rank, allowing dims to be
630
+ # specified using negative indices. If `wrap_scalar` is true then scalar
631
+ # tensors of rank 0 will allow dimensions in the range [-1, 0]. Otherwise,
632
+ # idx should be in the range [-rank, rank-1].
633
+ def canonicalize_dim(rank: int, idx: int, wrap_scalar: bool = True) -> int:
634
+ if rank < 0:
635
+ msg = f"Rank cannot be negative but got {rank}"
636
+ raise IndexError(msg)
637
+
638
+ if rank == 0:
639
+ if not wrap_scalar:
640
+ msg = f"Dimension specified as {idx} but tensor has no dimensions"
641
+ raise IndexError(msg)
642
+ rank = 1
643
+
644
+ if idx >= 0 and idx < rank:
645
+ return idx
646
+
647
+ if idx < 0:
648
+ _idx = idx + rank
649
+ else:
650
+ _idx = idx
651
+
652
+ if _idx < 0 or _idx >= rank:
653
+ # Same error message as in aten/src/ATen/WrapDimUtils.h:49
654
+ msg = f"Dimension out of range (expected to be in range of [{-rank}, {rank - 1}], but got {idx})"
655
+ raise IndexError(msg)
656
+
657
+ return _idx
658
+
659
+
660
+ # Takes a dimension or sequence of dimensions and "wraps" them,
661
+ # mapping negative offsets to positive ones
662
+ @overload
663
+ def canonicalize_dims(
664
+ rank: int, indices: Sequence[int], wrap_scalar: bool = True
665
+ ) -> Tuple[int, ...]:
666
+ pass
667
+
668
+
669
+ @overload
670
+ def canonicalize_dims(rank: int, indices: int, wrap_scalar: bool = True) -> int:
671
+ pass
672
+
673
+
674
+ def canonicalize_dims(rank, indices, wrap_scalar=True):
675
+ if isinstance(indices, Dim):
676
+ return canonicalize_dim(rank, indices, wrap_scalar)
677
+
678
+ return tuple(canonicalize_dim(rank, x, wrap_scalar) for x in indices)
679
+
680
+
681
+ def is_valid_permutation(rank: int, perm: DimsSequenceType) -> bool:
682
+ """
683
+ Validates that perm is a permutation of length rank.
684
+ """
685
+
686
+ if not isinstance(perm, Sequence):
687
+ return False
688
+
689
+ if not (tuple(sorted(perm)) == tuple(range(0, rank))):
690
+ return False
691
+
692
+ return True
693
+
694
+
695
+ def is_same_shape(a: Sequence, b: Sequence) -> bool:
696
+ """
697
+ Compares two shapes a and b, returning True if they are the same
698
+ (their ranks and corresponding lengths match) and False otherwise.
699
+ """
700
+
701
+ return tuple(a) == tuple(b)
702
+
703
+
704
+ def is_cpu_scalar_tensor(a: Any) -> bool:
705
+ return isinstance(a, TensorLike) and a.ndim == 0 and a.device.type == "cpu"
706
+
707
+
708
+ def check_same_device(*args, allow_cpu_scalar_tensors):
709
+ """
710
+ Checks that all Tensors in args have the same device.
711
+
712
+ Raises a RuntimeError when:
713
+ - args contains an object whose type is not Tensor or Number
714
+ - two Tensor objects in args have different devices, unless one is a CPU scalar tensor and allow_cpu_scalar_tensors is True
715
+ """
716
+ # Short-circuits if all (one or fewer) arguments are trivially on the same device
717
+ if len(args) <= 1:
718
+ return
719
+
720
+ # Note: cannot initialize device to the first arg's device (it may not have one)
721
+ device = None
722
+ for arg in args:
723
+ if isinstance(arg, Number):
724
+ continue
725
+ elif isinstance(arg, TensorLike):
726
+ if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg):
727
+ continue
728
+
729
+ if device is None:
730
+ device = arg.device
731
+
732
+ if device != arg.device:
733
+ msg = (
734
+ "Tensor on device "
735
+ + str(arg.device)
736
+ + " is not on the expected device "
737
+ + str(device)
738
+ + "!"
739
+ )
740
+ raise RuntimeError(msg)
741
+ else:
742
+ msg = (
743
+ "Unexpected type when checking for same device, " + str(type(arg)) + "!"
744
+ )
745
+ raise RuntimeError(msg)
746
+
747
+
748
+ def canonicalize_device(device: DeviceLikeType) -> torch.device:
749
+ if isinstance(device, torch.device):
750
+ return device
751
+
752
+ assert isinstance(device, str)
753
+ return torch.device(device)
754
+
755
+
756
+ # Asserts if any of the following are true:
757
+ # - a non-scalar or non-Tensor is given
758
+ # - the shape of any tensors is distinct
759
+ def check_same_shape(*args, allow_cpu_scalar_tensors: bool):
760
+ """
761
+ Checks that all Tensors in args have the same shape.
762
+
763
+ Raises a RuntimeError when:
764
+ - args contains an object whose type is not Tensor or Number
765
+ - two Tensor objects in args have different devices
766
+ """
767
+ shape = None
768
+
769
+ for arg in args:
770
+ if isinstance(arg, Number):
771
+ continue
772
+ elif isinstance(arg, TensorLike):
773
+ if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg):
774
+ continue
775
+
776
+ if shape is None:
777
+ shape = arg.shape
778
+
779
+ if not is_same_shape(shape, arg.shape):
780
+ msg = f"Shape {arg.shape} is not the expected shape {shape}!"
781
+ raise RuntimeError(msg)
782
+ else:
783
+ msg = (
784
+ "Unexpected type when checking for same shape, " + str(type(arg)) + "!"
785
+ )
786
+ raise RuntimeError(msg)
787
+
788
+
789
+ # Acquires a common shape, if it exists, from one or more tensor arguments,
790
+ # filtering number arguments
791
+ def extract_shape(*args, allow_cpu_scalar_tensors: bool) -> Optional[ShapeType]:
792
+ shape = None
793
+ scalar_shape = None
794
+
795
+ for arg in args:
796
+ if isinstance(arg, Number):
797
+ continue
798
+ elif isinstance(arg, TensorLike):
799
+ if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg):
800
+ scalar_shape = arg.shape
801
+ continue
802
+
803
+ if shape is None:
804
+ shape = arg.shape
805
+
806
+ if not is_same_shape(shape, arg.shape):
807
+ return None
808
+ else:
809
+ return None
810
+
811
+ return shape if shape is not None else scalar_shape
812
+
813
+
814
+ # Extracts dimensions that might be passed either as a list/tuple or as varargs.
815
+ # A typical case is Tensor.permute .
816
+ def extract_dims_from_varargs(
817
+ dims: Union[DimsSequenceType, Tuple[DimsSequenceType, ...]]
818
+ ) -> DimsSequenceType:
819
+ if dims and isinstance(dims[0], Sequence):
820
+ assert len(dims) == 1
821
+ dims = cast(Tuple[DimsSequenceType], dims)
822
+ return dims[0]
823
+ else:
824
+ return cast(DimsSequenceType, dims)
825
+
826
+
827
+ def extract_shape_from_varargs(
828
+ shape: Union[ShapeType, Tuple[ShapeType]],
829
+ validate=True,
830
+ ) -> Tuple[int, ...]:
831
+ """
832
+ Returns a shape from varargs.
833
+
834
+ In PyTorch, operations that accept shapes often accept them as varargs, like
835
+ foo(*shape). However a user can pass the shape as a sequence of integers,
836
+ like this:
837
+
838
+ foo(1, 2, 3)
839
+
840
+ or as a sequence of integers
841
+
842
+ foo((1, 2, 3))
843
+
844
+ In the first case shape will be a tuple of integers, and in the second case it's a tuple
845
+ containing a tuple of integers. This validates those inputs and canonicalizes them
846
+ to a tuple of integers.
847
+ """
848
+
849
+ # Handles tuple unwrapping
850
+ if len(shape) == 1 and isinstance(shape[0], Sequence):
851
+ shape = shape[0]
852
+
853
+ if validate:
854
+ validate_shape(shape) # type: ignore[arg-type]
855
+ return shape # type: ignore[return-value]
856
+
857
+
858
+ def infer_size_shapes(a: ShapeType, b: ShapeType) -> Tuple[int, ...]:
859
+ ndim = max(len(a), len(b))
860
+ expandedSizes = [0] * ndim
861
+
862
+ for i in range(ndim - 1, -1, -1):
863
+ offset = ndim - 1 - i
864
+ dimA = len(a) - 1 - offset
865
+ dimB = len(b) - 1 - offset
866
+ sizeA = a[dimA] if dimA >= 0 else 1
867
+ sizeB = b[dimB] if dimB >= 0 else 1
868
+
869
+ torch._check(
870
+ (sizeA == sizeB) or (sizeA == 1) or (sizeB == 1),
871
+ lambda: (
872
+ f"The size of tensor a ({sizeA}) must match the size of "
873
+ f"tensor b ({sizeB}) at non-jagged dimension {i}"
874
+ ),
875
+ )
876
+
877
+ # 1s map to the other size (even 0)
878
+ expandedSizes[i] = sizeB if sizeA == 1 else sizeA
879
+
880
+ return tuple(expandedSizes)
881
+
882
+
883
+ def infer_size(shape: ShapeType, numel: int) -> Tuple[int, ...]:
884
+ """
885
+ Infers the size of a dim with size -1, if it exists.
886
+ Also checks that new shape is compatible with the number of elements.
887
+ """
888
+ dim = None
889
+ newsize = 1
890
+ for i, d in enumerate(shape):
891
+ if d == -1:
892
+ torch._check(dim is None, lambda: "only one dimension can be inferred")
893
+ dim = i
894
+ elif d >= 0:
895
+ newsize *= d
896
+ else:
897
+ torch._check(False, lambda: f"invalid shape dimension {d}")
898
+ if dim is None:
899
+ torch._check(
900
+ numel == newsize,
901
+ lambda: f"shape '{list(shape)}' is invalid for input of size {numel}",
902
+ )
903
+ else:
904
+ from torch.fx.experimental.symbolic_shapes import definitely_true
905
+
906
+ torch._check(
907
+ newsize != 0,
908
+ lambda: (
909
+ f"cannot reshape tensor of 0 elements into shape {list(shape)} because the "
910
+ f"unspecified dimension size -1 can be any value and is ambiguous"
911
+ if definitely_true(numel == 0)
912
+ else f"shape '{list(shape)}' is invalid for input of size {numel}"
913
+ ),
914
+ )
915
+ torch._check(
916
+ numel % newsize == 0,
917
+ lambda: f"shape '{list(shape)}' is invalid for input of size {numel}",
918
+ )
919
+ # Convert to list to produce a compatible error message with core
920
+ # PyTorch, which prints sequences in square brackets.
921
+ shape = list(shape)
922
+ shape[dim] = numel // newsize
923
+ # NB: This is pretty important when you have unbacked SymInts.
924
+ # Suppose you have (i0, 12) resizing into (2, -1, 12). The old
925
+ # range for i0 is typically [2, inf], which means if you divide
926
+ # by two the new range should be [1, inf]. But this is bad news
927
+ # if you have an unbacked SymInt: we need to reapply the unsound
928
+ # assumption that the size is >= 2.
929
+ torch._check_is_size(shape[dim])
930
+ return tuple(shape)
931
+
932
+
933
+ _integer_dtypes = (
934
+ torch.uint8,
935
+ torch.uint16,
936
+ torch.uint32,
937
+ torch.uint64,
938
+ torch.int8,
939
+ torch.int16,
940
+ torch.int32,
941
+ torch.int64,
942
+ )
943
+ _low_precision_dtypes = (torch.float16, torch.bfloat16, torch.complex32)
944
+ _complex_dtypes = (torch.complex32, torch.complex64, torch.complex128)
945
+
946
+
947
+ def is_boolean_dtype(dtype: torch.dtype) -> bool:
948
+ assert isinstance(dtype, torch.dtype)
949
+ return dtype is torch.bool
950
+
951
+
952
+ def is_integer_dtype(dtype: torch.dtype) -> bool:
953
+ assert isinstance(dtype, torch.dtype)
954
+ return dtype in _integer_dtypes
955
+
956
+
957
+ def is_low_precision_dtype(dtype: torch.dtype) -> bool:
958
+ assert isinstance(dtype, torch.dtype)
959
+ return dtype in _low_precision_dtypes
960
+
961
+
962
+ def is_float_dtype(dtype: torch.dtype) -> bool:
963
+ assert isinstance(dtype, torch.dtype)
964
+ return dtype.is_floating_point
965
+
966
+
967
+ def is_complex_dtype(dtype: torch.dtype) -> bool:
968
+ assert isinstance(dtype, torch.dtype)
969
+ return dtype in _complex_dtypes
970
+
971
+
972
+ def is_grad_dtype(dtype: torch.dtype) -> bool:
973
+ """
974
+ Checks if the dtype can require a gradient.
975
+ """
976
+ return dtype.is_floating_point or is_complex_dtype(dtype)
977
+
978
+
979
+ _complex_to_real_dtype_map = {
980
+ torch.complex128: torch.float64,
981
+ torch.complex64: torch.float32,
982
+ torch.complex32: torch.float16,
983
+ }
984
+
985
+ _real_to_complex_dtype_map = {
986
+ torch.float16: torch.complex32,
987
+ torch.bfloat16: torch.complex64,
988
+ torch.float32: torch.complex64,
989
+ torch.float64: torch.complex128,
990
+ }
991
+
992
+
993
+ def corresponding_real_dtype(dtype: torch.dtype) -> torch.dtype:
994
+ return _complex_to_real_dtype_map[dtype]
995
+
996
+
997
+ def corresponding_complex_dtype(dtype: torch.dtype) -> torch.dtype:
998
+ return _real_to_complex_dtype_map[dtype]
999
+
1000
+
1001
+ def dtype_to_type(dtype: torch.dtype) -> type:
1002
+ """
1003
+ Computes the corresponding Python type (AKA "type kind") for the
1004
+ given dtype.
1005
+ """
1006
+ assert isinstance(dtype, torch.dtype)
1007
+
1008
+ if dtype is torch.bool:
1009
+ return bool
1010
+ if dtype in _integer_dtypes:
1011
+ return int
1012
+ if dtype.is_floating_point:
1013
+ return float
1014
+ if dtype in _complex_dtypes:
1015
+ return complex
1016
+
1017
+ raise ValueError("Invalid dtype!")
1018
+
1019
+
1020
+ def dtype_to_type_ctor(dtype: torch.dtype) -> Callable[[NumberType], NumberType]:
1021
+ """
1022
+ Computes the corresponding Python type constructor for the
1023
+ given dtype.
1024
+ """
1025
+ assert isinstance(dtype, torch.dtype)
1026
+
1027
+ if dtype is torch.bool:
1028
+ return lambda x: bool(x)
1029
+ if dtype in _integer_dtypes:
1030
+ return sym_int
1031
+ if dtype.is_floating_point:
1032
+ return sym_float
1033
+ if dtype in _complex_dtypes:
1034
+ # TODO: type error here is real, replace with sym_complex
1035
+ return lambda x: complex(x) # type: ignore[arg-type]
1036
+
1037
+ raise ValueError("Invalid dtype!")
1038
+
1039
+
1040
+ def type_to_dtype(typ: type) -> torch.dtype:
1041
+ """
1042
+ Computes the corresponding dtype for a Number type.
1043
+ """
1044
+
1045
+ assert isinstance(typ, type)
1046
+
1047
+ if typ is bool:
1048
+ return torch.bool
1049
+ if typ in [int, torch.SymInt]:
1050
+ return torch.long
1051
+ if typ in [float, torch.SymFloat]:
1052
+ return torch.get_default_dtype()
1053
+ # TODO: sym_complex_float?
1054
+ if typ is complex:
1055
+ return corresponding_complex_dtype(torch.get_default_dtype())
1056
+
1057
+ raise ValueError("Invalid type!")
1058
+
1059
+
1060
+ def get_dtype(x: Union[torch.Tensor, NumberType]):
1061
+ if isinstance(x, torch.Tensor):
1062
+ return x.dtype
1063
+ else:
1064
+ return type_to_dtype(type(x))
1065
+
1066
+
1067
+ _ordered_types = (bool, int, float, complex)
1068
+
1069
+
1070
+ def check_fp_or_complex(
1071
+ dtype: torch.dtype, fn_name: str, allow_low_precision_dtypes: bool = True
1072
+ ):
1073
+ """
1074
+ Checks whether the input is floating point or complex.
1075
+ If allow_low_precision_dtypes is True, it allows having float16, bfloat16, and complex32
1076
+ """
1077
+ torch._check(
1078
+ is_float_dtype(dtype) or is_complex_dtype(dtype),
1079
+ lambda: f"{fn_name}: Expected a floating point or complex tensor as input. Got {dtype}",
1080
+ )
1081
+ torch._check(
1082
+ allow_low_precision_dtypes or not is_low_precision_dtype(dtype),
1083
+ lambda: f"{fn_name}: Half precision dtypes not supported. Got {dtype}",
1084
+ )
1085
+
1086
+
1087
+ def check_is_matrix(A: TensorLikeType, f_name: str, arg_name: str = "A"):
1088
+ torch._check(
1089
+ len(A.shape) >= 2,
1090
+ lambda: f"{f_name}: The input tensor {arg_name} must have at least 2 dimensions.",
1091
+ )
1092
+
1093
+
1094
+ def get_higher_type(a: type, b: type) -> type:
1095
+ """
1096
+ Returns the higher of the two given Number types.
1097
+
1098
+ The types are ordered bool -> int -> float -> complex.
1099
+ """
1100
+ a, b = _maybe_get_pytype(a), _maybe_get_pytype(b)
1101
+ # Type checking
1102
+ if a not in _ordered_types or b not in _ordered_types:
1103
+ raise RuntimeError(f"Expected builtin numeric types, found {a}, {b}")
1104
+
1105
+ if a is b:
1106
+ return a
1107
+
1108
+ for typ in _ordered_types:
1109
+ if a is typ:
1110
+ return b
1111
+ if b is typ:
1112
+ return a
1113
+
1114
+ raise ValueError("Unknown Python scalar type!")
1115
+
1116
+
1117
+ # Returns the higher of two torch datatypes a and b or, if the two
1118
+ # are not ordered relative to each other, the next
1119
+ # higher datatype
1120
+ def get_higher_dtype(
1121
+ a: Optional[Union[torch.dtype, TensorLikeType, NumberType]],
1122
+ b: Optional[Union[torch.dtype, TensorLikeType, NumberType]],
1123
+ ) -> Optional[torch.dtype]:
1124
+ """
1125
+ Computes the "lowest" datatype that is weakly
1126
+ "higher" than both a and b.
1127
+ """
1128
+
1129
+ # Type checking
1130
+ assert a is None or isinstance(a, (torch.dtype, TensorLike, Number))
1131
+ assert b is None or isinstance(b, (torch.dtype, TensorLike, Number))
1132
+
1133
+ def _extract_dtype(
1134
+ x: Optional[Union[torch.dtype, TensorLikeType, NumberType]]
1135
+ ) -> Optional[torch.dtype]:
1136
+ if x is None:
1137
+ return None
1138
+ if isinstance(x, torch.dtype):
1139
+ return x
1140
+ if isinstance(x, TensorLike):
1141
+ return x.dtype
1142
+ if isinstance(x, Number):
1143
+ return type_to_dtype(type(x))
1144
+
1145
+ raise RuntimeError("Unexpected type given to _extract_dtype!")
1146
+
1147
+ a, b = _extract_dtype(a), _extract_dtype(b)
1148
+
1149
+ if a is b:
1150
+ return a
1151
+
1152
+ if a is None:
1153
+ return b
1154
+
1155
+ if b is None:
1156
+ return a
1157
+
1158
+ ordered_datatypes = (
1159
+ (torch.bool,),
1160
+ (torch.uint8, torch.int8),
1161
+ (torch.int16,),
1162
+ (torch.int32,),
1163
+ (torch.int64,),
1164
+ (torch.float16, torch.bfloat16),
1165
+ (torch.float32,),
1166
+ (torch.float64,),
1167
+ (torch.complex32,),
1168
+ (torch.complex64,),
1169
+ (torch.complex128,),
1170
+ )
1171
+
1172
+ for idx, dtypes in enumerate(ordered_datatypes):
1173
+ if a in dtypes and b in dtypes:
1174
+ return ordered_datatypes[idx + 1][0]
1175
+ if a in dtypes:
1176
+ return b
1177
+ if b in dtypes:
1178
+ return a
1179
+
1180
+ raise RuntimeError("Unexpected termination!")
1181
+
1182
+
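# A short sketch of the promotion ordering above; these results follow directly
# from the bucketed table (two dtypes in the same bucket promote to the next bucket):
#
#   >>> get_higher_dtype(torch.uint8, torch.int8)        # torch.int16
#   >>> get_higher_dtype(torch.float16, torch.bfloat16)  # torch.float32
#   >>> get_higher_dtype(torch.int64, torch.float16)     # torch.float16
#   >>> get_higher_dtype(torch.float32, None)            # torch.float32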
1183
+ def check_pin_memory(pin_memory: bool):
1184
+ torch._check_not_implemented(
1185
+ not pin_memory, lambda: "PrimTorch does not support pinned memory"
1186
+ )
1187
+
1188
+
1189
+ def check_layout(layout: torch.layout):
1190
+ torch._check_not_implemented(
1191
+ layout == torch.strided, lambda: f"PrimTorch doesn't support layout={layout}"
1192
+ )
1193
+
1194
+
1195
+ # TODO: maybe unify with can_cast_to?
1196
+ def is_weakly_lesser_type(a: type, b: type) -> bool:
1197
+ """
1198
+ Compares two types, a and b, returning True if a is weakly "less" than b.
1199
+
1200
+ The comparison is determined by the following type ordering: bool, int, float, complex.
1201
+ """
1202
+
1203
+ a, b = _maybe_get_pytype(a), _maybe_get_pytype(b)
1204
+
1205
+ if a not in _ordered_types or b not in _ordered_types:
1206
+ raise RuntimeError(f"Expected builtin numeric types, found {a}, {b}")
1207
+
1208
+ for typ in _ordered_types:
1209
+ if a == typ:
1210
+ return True
1211
+ if b == typ:
1212
+ return False
1213
+
1214
+ raise RuntimeError("Unexpected termination!")
1215
+
1216
+
1217
+ def can_safe_cast_to(*, cast_to: torch.dtype, cast_from: torch.dtype) -> bool:
1218
+ for fn in (is_complex_dtype, is_float_dtype, is_integer_dtype, is_boolean_dtype):
1219
+ if fn(cast_to):
1220
+ return True
1221
+ if fn(cast_from):
1222
+ return False
1223
+
1224
+ raise ValueError(f"Received unknown dtypes {cast_to}, {cast_from}!")
1225
+
1226
+
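# A minimal sketch of the "same or higher kind" rule implemented above:
#
#   >>> can_safe_cast_to(cast_to=torch.float32, cast_from=torch.int64)      # True
#   >>> can_safe_cast_to(cast_to=torch.complex64, cast_from=torch.float64)  # True
#   >>> can_safe_cast_to(cast_to=torch.int32, cast_from=torch.float32)      # False
#   >>> can_safe_cast_to(cast_to=torch.int64, cast_from=torch.bool)         # True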
1227
+ def check_same_dtype(*args):
1228
+ """
1229
+ Checks that all Tensors in args have the same dtype and that all Numbers have the
1230
+ same corresponding Python type.
1231
+
1232
+ Raises a RuntimeError when:
1233
+ - args contains an object whose type is not Tensor or Number
1234
+ - two Tensor objects in args have different dtypes
1235
+ - two Number objects in args have different types
1236
+ - there are Tensors and Numbers in args, and one of those Tensors' corresponding
1237
+ Python types is different from the type of one of those Numbers
1238
+ """
1239
+ full_dtype = None
1240
+ scalar_type = None
1241
+
1242
+ for arg in args:
1243
+ if isinstance(arg, Number):
1244
+ # Scalar type checking is disabled (and may be removed in the future)
1245
+ continue
1246
+ # if scalar_type is None:
1247
+ # scalar_type = type(arg)
1248
+
1249
+ # if scalar_type is not type(arg):
1250
+ # msg = (
1251
+ # "Scalar of type "
1252
+ # + str(type(arg))
1253
+ # + " is not the expected type of "
1254
+ # + str(scalar_type)
1255
+ # + "!"
1256
+ # )
1257
+ # raise RuntimeError(msg)
1258
+ elif isinstance(arg, TensorLike):
1259
+ if full_dtype is None:
1260
+ full_dtype = arg.dtype
1261
+ if scalar_type is None:
1262
+ scalar_type = dtype_to_type(arg.dtype)
1263
+
1264
+ if full_dtype is not arg.dtype:
1265
+ msg = (
1266
+ "Tensor with dtype "
1267
+ + str(arg.dtype)
1268
+ + " is not the expected dtype of "
1269
+ + str(full_dtype)
1270
+ + "!"
1271
+ )
1272
+ raise RuntimeError(msg)
1273
+
1274
+ arg_type = dtype_to_type(arg.dtype)
1275
+ if arg_type is not scalar_type:
1276
+ msg = (
1277
+ "Tensor with corresponding Python type "
1278
+ + str(arg_type)
1279
+ + " is not the expected type of "
1280
+ + str(scalar_type)
1281
+ + "!"
1282
+ )
1283
+ raise RuntimeError(msg)
1284
+ else:
1285
+ msg = (
1286
+ "Unexpected type when checking for same dtype, " + str(type(arg)) + "!"
1287
+ )
1288
+ raise RuntimeError(msg)
1289
+
1290
+
1291
+ # Maps datatypes to their computation types for elementwise operations
1292
+ _computation_dtype_map = {
1293
+ torch.bfloat16: torch.float32,
1294
+ torch.float16: torch.float32,
1295
+ torch.complex32: torch.complex64,
1296
+ }
1297
+
1298
+
1299
+ def get_computation_dtype(dtype: torch.dtype) -> torch.dtype:
1300
+ return _computation_dtype_map.get(dtype, dtype)
1301
+
1302
+
1303
+ _cpu_acc_type_map = {
1304
+ torch.bfloat16: torch.float64,
1305
+ torch.float16: torch.float64,
1306
+ torch.float32: torch.float64,
1307
+ torch.complex32: torch.complex128,
1308
+ torch.complex64: torch.complex128,
1309
+ }
1310
+
1311
+
1312
+ def get_acc_type(dtype: torch.dtype, device: torch.device) -> torch.dtype:
1313
+ # Equivalent to at::toAccumulateType, prefer computation_dtype where possible
1314
+ if device.type == "cpu":
1315
+ return _cpu_acc_type_map.get(dtype, dtype)
1316
+ else:
1317
+ return get_computation_dtype(dtype)
1318
+
1319
+
1320
+ class ELEMENTWISE_TYPE_PROMOTION_KIND(Enum):
1321
+ DEFAULT = (0,)
1322
+ NO_OPMATH = (1,)
1323
+ INT_TO_FLOAT = (2,)
1324
+ ALWAYS_BOOL = (3,)
1325
+ COMPLEX_TO_FLOAT = (4,)
1326
+ BOOL_TO_LONG = (5,)
1327
+
1328
+
1329
+ class REDUCTION_OUTPUT_TYPE_KIND(Enum):
1330
+ SAME = (0,)
1331
+ COMPLEX_TO_FLOAT = (1,) # for complex types outputs corresponding real type
1332
+ KEEP_PROMOTED_TYPE = (2,) # keep output in opmath type, needed for mean
1333
+ ALWAYS_BOOL = (3,)
1334
+
1335
+
1336
+ # Describes the return type of the primitive:
1337
+ #
1338
+ # - NEW, a new tensor is created
1339
+ # - VIEW, a view of an input tensor is returned
1340
+ # - INPLACE, one or more input tensors is modified
1341
+ #
1342
+ # these descriptors are mutually exclusive and exhaustive.
1343
+ class RETURN_TYPE(Enum):
1344
+ NEW = (0,)
1345
+ VIEW = (1,)
1346
+ INPLACE = (2,)
1347
+
1348
+
1349
+ # TODO: when NumberType contains the sym types, can simplify this
1350
+ def number_type(x: Union[NumberType, torch.SymInt, torch.SymFloat]) -> Type:
1351
+ if isinstance(x, torch.SymInt):
1352
+ return int
1353
+ elif isinstance(x, torch.SymFloat):
1354
+ return float
1355
+ else:
1356
+ return type(x)
1357
+
1358
+
1359
+ def expr_type(x: sympy.Expr) -> Type:
1360
+ if x.is_integer: # type: ignore[attr-defined]
1361
+ return int
1362
+ else:
1363
+ # NB: Not strictly correct, but we don't support SymPy complex or bool.
1364
+ return float
1365
+
1366
+
1367
+ # TODO: document type promotion kinds
1368
+ def elementwise_dtypes(
1369
+ *_args,
1370
+ type_promotion_kind: ELEMENTWISE_TYPE_PROMOTION_KIND,
1371
+ ) -> Tuple[torch.dtype, torch.dtype]:
1372
+ """
1373
+ Computes the computation and result dtypes for elementwise type promotion
1374
+ on the given arguments and with the given elementwise type promotion kind.
1375
+
1376
+ Note that not all inputs to an elementwise operation necessarily participate in type promotion.
1377
+ For example, the "alpha" parameter of torch.add does not participate in type promotion,
1378
+ although it may be cast to the Python type corresponding to the computation dtype that
1379
+ the type promotion algorithm determines.
1380
+
1381
+ Default elementwise type promotion, which all other type promotion kinds tweak (see below),
1382
+ first decides which of four ordered types to use:
1383
+
1384
+ bool -> integer -> floating point -> complex
1385
+
1386
+ The selected type is the "lowest" type in the above list such that all number arguments
1387
+ have a weakly "lower" type and all tensor arguments have a weakly lower corresponding
1388
+ type for their dtype.
1389
+
1390
+ Once the type is determined, the particular result dtype is found. The dtypes are
1391
+ partially ordered as follows:
1392
+
1393
+ bool -> uint8, int8 -> int16 -> int32 -> int64 ->
1394
+ float16, bfloat16 -> float32 -> float64 -> complex32 -> complex64 -> complex128
1395
+
1396
+ The result dtype is selected by:
1397
+ - if no tensor's dtype has the same corresponding type as the one selected,
1398
+ then the result dtype is the (default) dtype corresponding to the selected type
1399
+ (for example, 1.5 + an integer tensor has a result dtype of the default floating point dtype)
1400
+ - if the result type is complex then the dtype is:
1401
+ - the default complex dtype if there are no floating point or complex tensors
1402
+ - if there are floating point or complex tensors with one or more dimensions, then
1403
+ the complex dtype corresponding to the highest corresponding complex dtype among those tensors
1404
+ (for example, double + cfloat -> cdouble)
1405
+ - if there are only floating point or complex tensors with zero dimensions, then
1406
+ the complex dtype corresponding to the highest corresponding complex dtype among those tensors
1407
+ - if the first two cases do not apply, the result dtype is the highest dtype among
1408
+ all tensors with one or more dimensions of the output type, and if there are no such
1409
+ tensors then it's the highest dtype among all tensors with zero dimensions of the output type
1410
+ (for example, long + half -> half, even if the half tensor has zero dimensions)
1411
+
1412
+ The "corresponding complex dtypes" are:
1413
+ float16 -> complex32
1414
+ bfloat16 -> complex64
1415
+ float32 -> complex64
1416
+ float64 -> complex128
1417
+ complex32 -> complex32
1418
+ complex64 -> complex64
1419
+ complex128 -> complex128
1420
+
1421
+ The DEFAULT type promotion kind computes per above, and then uses the result dtype to pick a computation
1422
+ dtype by mapping low precision floating point and complex dtypes as follows:
1423
+
1424
+ float16 -> float32
1425
+ bfloat16 -> float32
1426
+ complex32 -> complex64
1427
+
1428
+ This is referred to as "op math", and the NO_OPMATH type promotion kind disables this mapping, making the
1429
+ computation dtype the same as the result dtype when it's selected. NO_OPMATH is appropriate for kernels
1430
+ which perform no mathematical operations on their tensors (see below for examples).
1431
+
1432
+ The INT_TO_FLOAT type promotion kind maps boolean and integer result dtypes to the default floating point dtype,
1433
+ and computation dtypes to the appropriate op math dtype.
1434
+
1435
+ The COMPLEX_TO_FLOAT type promotion kind maps complex result dtypes to the corresponding float dtype, following this
1436
+ mapping:
1437
+
1438
+ complex32 -> float16
1439
+ complex64 -> float32
1440
+ complex128 -> float64
1441
+
1442
+ Note that COMPLEX_TO_FLOAT derives the computation dtype as the DEFAULT setting does.
1443
+
1444
+ The BOOL_TO_LONG type promotion kind maps boolean computation and result dtypes to long.
1445
+
1446
+ The ALWAYS_BOOL type promotion kind always sets the result dtype to bool.
1447
+
1448
+ Example operators for each type promotion option:
1449
+ DEFAULT : add
1450
+ NO_OPMATH : where, nextafter, cat
1451
+ INT_TO_FLOAT : sin
1452
+ COMPLEX_TO_FLOAT : abs
1453
+ BOOL_TO_LONG : pow
1454
+ ALWAYS_BOOL : eq
1455
+
1456
+ """
1457
+
1458
+ args = tuple(x for x in _args if x is not None)
1459
+
1460
+ highest_type: type = bool
1461
+
1462
+ # Import sympy locally, as importing it eagerly at a module level is too slow
1463
+ # See https://dev-discuss.pytorch.org/t/delving-into-what-happens-when-you-import-torch/1589
1464
+ import sympy
1465
+
1466
+ for x in args:
1467
+ if not isinstance(x, (Number, TensorLike, sympy.Expr)):
1468
+ msg = f"Unexpected type {str(type(x))} when computing elementwise type promotion!"
1469
+ raise ValueError(msg)
1470
+
1471
+ if isinstance(x, Number):
1472
+ highest_type = get_higher_type(highest_type, number_type(x))
1473
+ elif isinstance(x, sympy.Expr):
1474
+ highest_type = get_higher_type(highest_type, expr_type(x))
1475
+ else:
1476
+ # x is a TensorLike
1477
+ highest_type = get_higher_type(highest_type, dtype_to_type(x.dtype))
1478
+
1479
+ result_dtype = None
1480
+
1481
+ def _find_highest_dtype_filtered(
1482
+ args, filter, *, float_as_complex=False
1483
+ ) -> Optional[torch.dtype]:
1484
+ zero_dim_tensor_dtype = None
1485
+ one_plus_dim_tensor_dtype = None
1486
+ for x in args:
1487
+ if isinstance(x, TensorLike) and filter(x.dtype):
1488
+ _dtype = x.dtype
1489
+ if float_as_complex and is_float_dtype(_dtype):
1490
+ _dtype = corresponding_complex_dtype(_dtype)
1491
+ if x.ndim == 0:
1492
+ zero_dim_tensor_dtype = get_higher_dtype(
1493
+ zero_dim_tensor_dtype, _dtype
1494
+ )
1495
+ else:
1496
+ # x.ndim > 0
1497
+ one_plus_dim_tensor_dtype = get_higher_dtype(
1498
+ one_plus_dim_tensor_dtype, _dtype
1499
+ )
1500
+
1501
+ # Prefers dtype of tensors with one or more dimensions
1502
+ if one_plus_dim_tensor_dtype is not None:
1503
+ return one_plus_dim_tensor_dtype
1504
+
1505
+ return zero_dim_tensor_dtype
1506
+
1507
+ if highest_type is float:
1508
+ result_dtype = _find_highest_dtype_filtered(args, is_float_dtype)
1509
+ result_dtype = (
1510
+ torch.get_default_dtype() if result_dtype is None else result_dtype
1511
+ )
1512
+ elif highest_type is complex:
1513
+ result_dtype = _find_highest_dtype_filtered(
1514
+ args,
1515
+ lambda x: is_float_dtype(x) or is_complex_dtype(x),
1516
+ float_as_complex=True,
1517
+ )
1518
+ if result_dtype is None:
1519
+ result_dtype = corresponding_complex_dtype(torch.get_default_dtype())
1520
+ elif highest_type is int:
1521
+ result_dtype = _find_highest_dtype_filtered(args, is_integer_dtype)
1522
+ result_dtype = torch.long if result_dtype is None else result_dtype
1523
+ else:
1524
+ # highest_type is bool
1525
+ result_dtype = torch.bool
1526
+
1527
+ if type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT:
1528
+ return get_computation_dtype(result_dtype), result_dtype
1529
+ elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH:
1530
+ return result_dtype, result_dtype
1531
+ elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT:
1532
+ if is_integer_dtype(result_dtype) or is_boolean_dtype(result_dtype):
1533
+ result_dtype = torch.get_default_dtype()
1534
+ return get_computation_dtype(result_dtype), result_dtype
1535
+ elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT:
1536
+ # NOTE: computation can still occur in a complex dtype
1537
+ computation_dtype = get_computation_dtype(result_dtype)
1538
+ if is_complex_dtype(result_dtype):
1539
+ result_dtype = corresponding_real_dtype(result_dtype)
1540
+ return computation_dtype, result_dtype
1541
+ elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.BOOL_TO_LONG:
1542
+ if is_boolean_dtype(result_dtype):
1543
+ return torch.long, torch.long
1544
+ return get_computation_dtype(result_dtype), result_dtype
1545
+ elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL:
1546
+ return get_computation_dtype(result_dtype), torch.bool
1547
+ else:
1548
+ raise ValueError(f"Unknown type promotion kind {str(type_promotion_kind)}")
1549
+
1550
+
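# A couple of worked examples of the promotion rules documented above, assuming
# the default floating point dtype is torch.float32:
#
#   >>> elementwise_dtypes(torch.ones(2, dtype=torch.int64), 2.5,
#   ...                    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)
#   (torch.float32, torch.float32)   # the float scalar wins; default float dtype is used
#   >>> elementwise_dtypes(torch.ones(2, dtype=torch.float16),
#   ...                    torch.ones((), dtype=torch.float64),
#   ...                    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)
#   (torch.float32, torch.float16)   # the 1+ dim tensor beats the 0-dim one; opmath is float32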
1551
+ def reduction_dtypes(
1552
+ arg,
1553
+ output_dtype_kind: REDUCTION_OUTPUT_TYPE_KIND,
1554
+ dtype: Optional[torch.dtype] = None,
1555
+ ) -> Tuple[torch.dtype, Optional[torch.dtype]]:
1556
+ # even though some reductions, like amin or amax, don't strictly require type promotion,
1557
+ # all the math ops (including comparisons) are still defined only for a computation type,
1558
+ # so promotion will still happen. We are doing it explicitly here
1559
+ inp_dtype = dtype if dtype is not None else arg.dtype
1560
+ computation_dtype = get_computation_dtype(inp_dtype)
1561
+ if (
1562
+ output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.SAME
1563
+ or output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT
1564
+ ):
1565
+ result_dtype = dtype if dtype else arg.dtype
1566
+ if (
1567
+ output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT
1568
+ and is_complex_dtype(result_dtype)
1569
+ ):
1570
+ result_dtype = corresponding_real_dtype(result_dtype)
1571
+ elif output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.KEEP_PROMOTED_TYPE:
1572
+ result_dtype = None
1573
+ else: # ALWAYS_BOOL
1574
+ result_dtype = torch.bool
1575
+ return computation_dtype, result_dtype
1576
+
1577
+
1578
+ # This function's logic is borrowed from the following functions defined in C++:
1579
+ # batched_matrix_contiguous_strides and contiguous_strides
1580
+ def make_contiguous_strides_for(
1581
+ shape: ShapeType, row_major: bool = True
1582
+ ) -> Tuple[int, ...]:
1583
+ """
1584
+ Returns the strides of a C-contiguous (row-major) tensor if row_major=True.
1585
+ If row_major=False, it returns the strides of a contiguous batch of Fortran-contiguous matrices.
1586
+ This is often used when calling external libraries like BLAS/LAPACK/cuSolver...
1587
+ """
1588
+ # contiguous_strides from c10/util/strides.h
1589
+ validate_shape(shape)
1590
+ if not shape:
1591
+ return ()
1592
+
1593
+ from torch.fx.experimental.symbolic_shapes import is_nested_int
1594
+
1595
+ multiplier = 1
1596
+ strides = []
1597
+ for l in reversed(shape):
1598
+ strides.append(multiplier)
1599
+ multiplier *= l if is_nested_int(l) else sym_max(l, 1)
1600
+
1601
+ result = tuple(reversed(strides))
1602
+
1603
+ # batched_matrix_contiguous_strides from aten/src/ATen/native/LinearAlgebraUtils.h
1604
+ if row_major:
1605
+ return result
1606
+ else:
1607
+ if len(shape) < 2:
1608
+ return result
1609
+ return result[:-2] + (1, max(shape[-2], 1))
1610
+
1611
+
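# Illustrative values for the helper above:
#
#   >>> make_contiguous_strides_for((2, 3, 4))                   # (12, 4, 1), C-contiguous
#   >>> make_contiguous_strides_for((2, 3, 4), row_major=False)  # (12, 1, 3), a batch of
#   ...                                                          # column-major 3x4 matrices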
1612
+ def make_channels_last_1d_strides_for(shape: ShapeType) -> Tuple[int, ...]:
1613
+ torch._check(
1614
+ len(shape) == 3,
1615
+ lambda: "Only tensors of rank 3 can use the channels_last_1d memory format",
1616
+ )
1617
+
1618
+ multiplier = 1
1619
+ strides = [0] * 3
1620
+ for idx in (1, -1, 0):
1621
+ # NOTE: intentional divergence from make_contiguous_strides_for
1622
+ # This is consistent with eager
1623
+ strides[idx] = multiplier
1624
+ multiplier *= shape[idx]
1625
+
1626
+ return tuple(strides)
1627
+
1628
+
1629
+ def make_channels_last_2d_strides_for(shape: ShapeType) -> Tuple[int, ...]:
1630
+ # TODO: maybe inform the user of channels_last_3d if rank of the tensor is 5?
1631
+ torch._check(
1632
+ len(shape) == 4,
1633
+ lambda: "Only tensors of rank 4 can use the channels_last memory format",
1634
+ )
1635
+
1636
+ multiplier = 1
1637
+ strides = [0] * 4
1638
+ for idx in (1, -1, -2, 0):
1639
+ # NOTE: intentional divergence from make_contiguous_strides_for
1640
+ # This is consistent with eager
1641
+ strides[idx] = multiplier
1642
+ multiplier *= shape[idx]
1643
+
1644
+ return tuple(strides)
1645
+
1646
+
1647
+ def make_channels_last_3d_strides_for(shape: ShapeType) -> Tuple[int, ...]:
1648
+ torch._check(
1649
+ len(shape) == 5,
1650
+ lambda: "Only tensors of rank 5 can use the channels_last_3d memory format",
1651
+ )
1652
+
1653
+ multiplier = 1
1654
+ strides = [0] * 5
1655
+ for idx in (1, -1, -2, -3, 0):
1656
+ # NOTE: intentional divergence from make_contiguous_strides_for
1657
+ # This is consistent with eager
1658
+ strides[idx] = multiplier
1659
+ multiplier *= shape[idx]
1660
+
1661
+ return tuple(strides)
1662
+
1663
+
1664
+ def make_channels_last_strides_for(shape: ShapeType) -> Tuple[int, ...]:
1665
+ ndim = len(shape) if isinstance(shape, Sequence) else 1
1666
+ if ndim == 3:
1667
+ return make_channels_last_1d_strides_for(shape)
1668
+ elif ndim == 4:
1669
+ return make_channels_last_2d_strides_for(shape)
1670
+ elif ndim == 5:
1671
+ return make_channels_last_3d_strides_for(shape)
1672
+ else:
1673
+ raise RuntimeError(
1674
+ f"no channels last format strides exist in {ndim} dimensions"
1675
+ )
1676
+
1677
+
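# For a typical NCHW shape, the channels_last strides produced above look like:
#
#   >>> make_channels_last_2d_strides_for((2, 3, 4, 5))   # (60, 1, 15, 3)
#
# i.e. the channel dimension becomes the fastest-moving one (stride 1), matching
# what eager mode reports for a tensor in torch.channels_last memory format.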
1678
+ def compute_reduction_output_shape(
1679
+ shape: ShapeType, dimensions: Sequence
1680
+ ) -> Tuple[int, ...]:
1681
+ for idx in dimensions:
1682
+ validate_idx(len(shape), idx)
1683
+
1684
+ new_shape = []
1685
+ for idx in range(len(shape)):
1686
+ if idx in dimensions:
1687
+ continue
1688
+
1689
+ new_shape.append(shape[idx])
1690
+
1691
+ return tuple(new_shape)
1692
+
1693
+
1694
+ def validate_no_repeating_dims(dims: Sequence):
1695
+ if len(dims) != len(set(dims)):
1696
+ raise RuntimeError("duplicate value in the list of dims")
1697
+
1698
+
1699
+ def reduction_dims(shape: ShapeType, dims: Optional[Sequence]) -> Tuple[int, ...]:
1700
+ if dims is None:
1701
+ return tuple(range(len(shape)))
1702
+ dims = tuple(canonicalize_dim(len(shape), idx) for idx in dims)
1703
+ validate_no_repeating_dims(dims)
1704
+ return dims
1705
+
1706
+
1707
+ def set_correction(
1708
+ unbiased: Optional[bool] = None,
1709
+ correction: Optional[NumberType] = None,
1710
+ ) -> float:
1711
+ if correction is not None and unbiased is not None:
1712
+ raise RuntimeError("cannot specify both correction and unbiased arguments")
1713
+ elif correction is None and unbiased is None:
1714
+ correction = 1.0
1715
+ elif correction is None and unbiased is not None:
1716
+ correction = 0.0 if unbiased is False else 1.0
1717
+ # NB: we don't actually support symint here, but it's harmless to accept
1718
+ if not isinstance(correction, (IntLike, FloatLike)):
1719
+ raise ValueError("correction argument should be integer or float")
1720
+ if correction < 0:
1721
+ raise ValueError("correction argument should be non-negative")
1722
+ return sym_float(correction)
1723
+
1724
+
1725
+ def compute_required_storage_length(
1726
+ shape: ShapeType, strides: StrideType, storage_offset: int
1727
+ ) -> int:
1728
+ """Computes the minimum storage size to hold the given tensor geometry.
1729
+
1730
+ Example
1731
+ =======
1732
+
1733
+ This is the size of a newly allocated tensor's storage, in units of elements
1734
+
1735
+ >>> t = torch.empty((10, 20))
1736
+ >>> compute_required_storage_length(t.shape, t.stride(), t.storage_offset())
1737
+ 200
1738
+
1739
+ >>> # xdoctest: +SKIP(failing)
1740
+ >>> t2 = torch.empty_strided((1, 2, 3), (5, 7, 11))
1741
+ >>> size = compute_required_storage_length(t2.shape, t2.stride(), t2.storage_offset())
1742
+ >>> size == t.storage().size()
1743
+ True
1744
+
1745
+ A valid tensor may have a larger storage size, but never smaller
1746
+
1747
+ >>> slice = torch.empty(100)[20:40]
1748
+ >>> slice.storage().size()
1749
+ 100
1750
+
1751
+ >>> compute_required_storage_length(slice.shape, slice.stride(), slice.storage_offset())
1752
+ 40
1753
+
1754
+ """
1755
+ from torch.fx.experimental.symbolic_shapes import guard_size_oblivious
1756
+
1757
+ # Short-circuits if the shape has no elements
1758
+ if guard_size_oblivious(reduce(operator.mul, shape, 1) == 0):
1759
+ return 0
1760
+
1761
+ max_offset = sum((x - 1) * y for x, y in zip(shape, strides))
1762
+ # +1 to account for the first element, from which the offsets are taken
1763
+ return 1 + storage_offset + max_offset
1764
+
1765
+
1766
+ def check_in_bounds_for_storage(
1767
+ a: torch.TypedStorage, shape: ShapeType, strides: StrideType, storage_offset: int
1768
+ ):
1769
+ """
1770
+ Determines if the given shape, strides, and offset are valid for the given storage.
1771
+ """
1772
+
1773
+ required_length = compute_required_storage_length(shape, strides, storage_offset)
1774
+ if a.size() < required_length:
1775
+ msg = (
1776
+ "Can't view a storage of size {} with an offset of {}, shape of {}, and strides of {}, "
1777
+ "which requires a storage of size {}".format(
1778
+ a.size(), storage_offset, str(shape), str(strides), required_length
1779
+ )
1780
+ )
1781
+ raise ValueError(msg)
1782
+
1783
+
1784
+ # NOTE: This function should ideally be removed, but some Meta internal models
1785
+ # packaged with `torch.package` are using it, so it will have to be removed
1786
+ # at some point in the future when those models no longer use this function.
1787
+ def check(
1788
+ b: bool, s: Callable[[], str], exc_type: Type[Exception] = RuntimeError
1789
+ ) -> None:
1790
+ """
1791
+ Helper function for raising an error_type (default: RuntimeError) if a boolean condition fails.
1792
+ Error message is a callable producing a string (to avoid wasting time
1793
+ string formatting in non-error case, and also to make it easier for torchdynamo
1794
+ to trace.)
1795
+
1796
+ .. note:: This function is planned for removal in the future. Please use
1797
+ `torch._check*` functions instead.
1798
+ """
1799
+ warnings.warn(
1800
+ DeprecationWarning(
1801
+ "'torch._prims_common.check' will be removed in the future. Please use "
1802
+ "'torch._check*' functions instead"
1803
+ )
1804
+ )
1805
+ torch._check_with(exc_type, b, s)
1806
+
1807
+
1808
+ # This combines is_channels_last_strides_2d and is_channels_last_strides_3d in
1809
+ # c10/core/MemoryFormat.h into one function
1810
+ def are_strides_like_channels_last(
1811
+ shape: Sequence[int], strides: Sequence[int]
1812
+ ) -> bool:
1813
+ ndim = len(shape)
1814
+
1815
+ if ndim == 4:
1816
+ # Check for channels_last_2d
1817
+ dim_order = [1, 3, 2, 0]
1818
+ elif ndim == 5:
1819
+ # Check for channels_last_3d
1820
+ dim_order = [1, 4, 3, 2, 0]
1821
+ else:
1822
+ return False
1823
+
1824
+ if strides[1] == 0:
1825
+ return False
1826
+
1827
+ min = 0
1828
+ for d in dim_order:
1829
+ if shape[d] == 0:
1830
+ return False
1831
+ if strides[d] < min:
1832
+ return False
1833
+ if d == 0 and min == strides[1]:
1834
+ return False
1835
+ min = strides[d]
1836
+ if strides[d] > 1:
1837
+ min *= shape[d]
1838
+ return True
1839
+
1840
+
1841
+ def suggest_memory_format(x: TensorLikeType) -> torch.memory_format:
1842
+ if x.layout != torch.strided:
1843
+ return torch.contiguous_format
1844
+
1845
+ if are_strides_like_channels_last(x.shape, x.stride()):
1846
+ return torch.channels_last if x.ndim == 4 else torch.channels_last_3d
1847
+
1848
+ return torch.contiguous_format
1849
+
1850
+
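# A quick sketch of how the stride check above classifies a real tensor:
#
#   >>> x = torch.empty(2, 3, 4, 5).to(memory_format=torch.channels_last)
#   >>> x.stride()                                        # (60, 1, 15, 3)
#   >>> suggest_memory_format(x)                          # torch.channels_last
#   >>> suggest_memory_format(torch.empty(2, 3, 4, 5))    # torch.contiguous_format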
1851
+ def prod(xs: Sequence[NumberType]) -> NumberType:
1852
+ """Product of elements in input sequence. Returns 1 for empty sequence"""
1853
+ return reduce(operator.mul, xs, 1)
1854
+
1855
+
1856
+ def is_expandable_to(shape: ShapeType, desired: ShapeType) -> bool:
1857
+ """Checks if a shape can be expanded to another shape.
1858
+ This is equivalent to checking if the two shapes are broadcastable.
1859
+ """
1860
+ # This is a Python implementation of
1861
+ # aten/src/ATen/ExpandUtils.h:is_expandable_to
1862
+ if len(shape) > len(desired):
1863
+ return False
1864
+ for i in range(len(shape)):
1865
+ if shape[-i - 1] != desired[-i - 1] and shape[-i - 1] != 1:
1866
+ return False
1867
+ return True
1868
+
1869
+
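# Broadcasting examples for the check above:
#
#   >>> is_expandable_to((3, 1), (2, 3, 4))   # True: trailing dims match or are 1
#   >>> is_expandable_to((2, 3), (3, 3))      # False: 2 != 3 and is not 1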
1870
+ def mask_tensor(mask: TensorLikeType, t: TensorLikeType):
1871
+ """
1872
+ Similar to torch.where(mask, t, 0) but if t is boolean,
1873
+ result is also boolean and not promoted to int.
1874
+ """
1875
+ # torch.where(mask, t, False) is equivalent
1876
+ # but feels hacky and might break in the future
1877
+ if t.dtype is torch.bool:
1878
+ return mask.logical_and(t)
1879
+ else:
1880
+ return torch.where(mask, t, 0)
1881
+
1882
+
1883
+ def get_aten_op(fn: Callable, name: str):
1884
+ """
1885
+ Given the __module__ of a reference and its name, it returns
1886
+ (our best guess of) the ATen name of the associated operation
1887
+
1888
+ Note: In ATen, the __name__ of a function within a module often
1889
+ starts with the module name, e.g. linalg_eigh or special_zeta
1890
+ """
1891
+ module = fn.__module__
1892
+ prefix = "torch._refs"
1893
+ assert module.startswith(prefix)
1894
+ module = module[len(prefix) :]
1895
+ # We want to go from .special / .nn.functional
1896
+ # to special and special_ / nn_functional_
1897
+ if module:
1898
+ module = module[1:]
1899
+ module = module.replace(".", "_")
1900
+ module = module + "_"
1901
+ return getattr(torch._ops.ops.aten, f"{module}{name}")
1902
+
1903
+
1904
+ def dtype_or_default(dtype: Optional[torch.dtype]) -> torch.dtype:
1905
+ return dtype if dtype is not None else torch.get_default_dtype()
1906
+
1907
+
1908
+ def device_or_default(device: Optional[DeviceLikeType]) -> DeviceLikeType:
1909
+ return device if device is not None else torch.device("cpu")
1910
+
1911
+
1912
+ def layout_or_default(layout: Optional[torch.layout]) -> torch.layout:
1913
+ return layout if layout is not None else torch.strided
1914
+
1915
+
1916
+ def clone_preserve_strides(x):
1917
+ needed_size = compute_required_storage_length(
1918
+ x.size(), x.stride(), x.storage_offset()
1919
+ )
1920
+ # Our eager implementations for *_scatter ops are all primitives w.r.t autograd,
1921
+ # so these as_strided() calls are not seen by autograd.
1922
+ # We need to mimic this behavior in our ref/prim implementations.
1923
+ # TODO: a better way to handle this would be with a new op, "_unsafe_as_strided"
1924
+ # We should revisit this when we add a compositional as_strided op,
1925
+ # and also as part of https://github.com/pytorch/pytorch/issues/90507
1926
+ try:
1927
+ old = torch._C._dispatch_tls_is_dispatch_key_excluded(
1928
+ torch._C.DispatchKey.ADInplaceOrView
1929
+ )
1930
+ torch._C._dispatch_tls_set_dispatch_key_excluded(
1931
+ torch._C.DispatchKey.ADInplaceOrView, True
1932
+ )
1933
+ buffer = torch.as_strided(x, (needed_size,), (1,), 0).clone()
1934
+ return torch.as_strided(buffer, x.size(), x.stride(), x.storage_offset())
1935
+ finally:
1936
+ torch._C._dispatch_tls_set_dispatch_key_excluded(
1937
+ torch._C.DispatchKey.ADInplaceOrView, old
1938
+ )
1939
+
1940
+
1941
+ def alert_not_deterministic(caller: str):
1942
+ if torch.are_deterministic_algorithms_enabled():
1943
+ if torch.is_deterministic_algorithms_warn_only_enabled():
1944
+ warnings.warn(
1945
+ f"{caller} does not have a deterministic implementation, but you set "
1946
+ f"'torch.use_deterministic_algorithms(True, warn_only=True)'. "
1947
+ f"You can file an issue at https://github.com/pytorch/pytorch/issues "
1948
+ f"to help us prioritize adding deterministic support for this operation."
1949
+ )
1950
+ else:
1951
+ torch._check(
1952
+ False,
1953
+ lambda: (
1954
+ f"{caller} does not have a deterministic implementation, but you set "
1955
+ f"'torch.use_deterministic_algorithms(True)'. You can turn off "
1956
+ f"determinism just for this operation, or you can use the "
1957
+ f"'warn_only=True' option, if that's acceptable for your application. "
1958
+ f"You can also file an issue at https://github.com/pytorch/pytorch/issues "
1959
+ f"to help us prioritize adding deterministic support for this operation."
1960
+ ),
1961
+ )
1962
+
1963
+
1964
+ class CUDARngStateHelper:
1965
+ @staticmethod
1966
+ def get_torch_state_as_tuple(fake_mode=nullcontext()):
1967
+ if not torch.cuda.is_available():
1968
+ raise RuntimeError("CUDA not available")
1969
+
1970
+ with fake_mode:
1971
+ seed = torch.tensor(torch.cuda.initial_seed())
1972
+ offset = torch.tensor(torch.cuda._get_rng_state_offset())
1973
+ return seed, offset
1974
+
1975
+ @staticmethod
1976
+ def set_torch_state_tensor(seed, offset):
1977
+ # Rng state is [64-bit seed, 64-bit offset]
1978
+ seed_portion = seed.reshape([1]).view(torch.uint8)
1979
+ offset_portion = offset.reshape([1]).view(torch.uint8)
1980
+ new_state = torch.cat([seed_portion, offset_portion])
1981
+ torch.cuda.set_rng_state(new_state)
1982
+
1983
+ @staticmethod
1984
+ def set_new_offset(relative_offset):
1985
+ torch.cuda._set_rng_state_offset(relative_offset.item())
llmeval-env/lib/python3.10/site-packages/torch/_prims_common/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (49.8 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_prims_common/__pycache__/wrappers.cpython-310.pyc ADDED
Binary file (12.3 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_prims_common/wrappers.py ADDED
@@ -0,0 +1,401 @@
1
+ import inspect
2
+ import warnings
3
+ from functools import wraps
4
+ from itertools import chain
5
+
6
+ from typing import Callable, NamedTuple, Optional, overload, Sequence, Tuple
7
+
8
+ import torch
9
+ import torch._prims_common as utils
10
+ from torch._prims_common import (
11
+ CustomOutParamAnnotation,
12
+ ELEMENTWISE_TYPE_PROMOTION_KIND,
13
+ Number,
14
+ NumberType,
15
+ ShapeType,
16
+ TensorLike,
17
+ TensorLikeType,
18
+ )
19
+ from torch.utils import _pytree as pytree
20
+ from torch.utils._pytree import tree_flatten, tree_unflatten
21
+
22
+
23
+ @overload
24
+ def _maybe_convert_to_dtype(a: TensorLikeType, dtype: torch.dtype) -> TensorLikeType:
25
+ pass
26
+
27
+
28
+ @overload
29
+ def _maybe_convert_to_dtype(a: NumberType, dtype: torch.dtype) -> NumberType:
30
+ pass
31
+
32
+
33
+ @overload
34
+ def _maybe_convert_to_dtype(a: Sequence, dtype: torch.dtype) -> Sequence:
35
+ pass
36
+
37
+
38
+ @overload
39
+ def _maybe_convert_to_dtype(a: None, dtype: torch.dtype) -> None:
40
+ pass
41
+
42
+
43
+ # TODO: implement ref.cast with an option to enforce safe casting
44
+ def _maybe_convert_to_dtype(a, dtype):
45
+ if isinstance(a, TensorLike):
46
+ if a.dtype != dtype:
47
+ return a.to(dtype)
48
+ return a
49
+ if isinstance(a, Number):
50
+ return utils.dtype_to_type_ctor(dtype)(a) # type: ignore[arg-type]
51
+ if isinstance(a, Sequence):
52
+ return tuple(_maybe_convert_to_dtype(x, dtype) for x in a)
53
+ # Passthrough None because some functions wrapped with type promotion
54
+ # wrapper might have optional args
55
+ if a is None:
56
+ return None
57
+
58
+ raise ValueError(f"Received type {type(a)} that is neither a tensor or a number!")
59
+
60
+
61
+ def _maybe_convert_to_type(a: NumberType, typ: type) -> NumberType:
62
+ if not isinstance(a, Number):
63
+ msg = f"Found unknown type {type(a)} when trying to convert scalars!"
64
+ raise ValueError(msg)
65
+ if not utils.is_weakly_lesser_type(type(a), typ):
66
+ msg = f"Scalar {a} of type {type(a)} cannot be safely cast to type {typ}!"
67
+ raise ValueError(msg)
68
+
69
+ return typ(a)
70
+
71
+
72
+ def _annotation_has_type(*, typ, annotation):
73
+ if hasattr(annotation, "__args__"):
74
+ for a in annotation.__args__:
75
+ if _annotation_has_type(typ=typ, annotation=a):
76
+ return True
77
+ return False
78
+
79
+ return typ is annotation
80
+
81
+
82
+ class elementwise_type_promotion_wrapper:
83
+ """
84
+ Adds elementwise type promotion to a Python reference implementation.
85
+
86
+ Takes two kwargs, type_promoting_args and type_promotion_kind.
87
+
88
+ type_promoting_args must be a string Sequence specifying the argument names of all
89
+ arguments that participate in type promotion (and should be type promoted). If the
90
+ arg specifies a Sequence-type then every element of the Sequence will participate in
91
+ type promotion.
92
+
93
+ type_promotion_kind must be one of the kinds specified by ELEMENTWISE_TYPE_PROMOTION_KIND.
94
+ See its documentation for details.
95
+
96
+ The return_dtype will be coerced to the wrapped function's dtype arg if it is available and
97
+ not None.
98
+
99
+ Other type promotion behavior, like validating the Python type of scalar arguments, must
100
+ be handled separately.
101
+ """
102
+
103
+ def __init__(
104
+ self,
105
+ *,
106
+ type_promotion_kind: ELEMENTWISE_TYPE_PROMOTION_KIND,
107
+ type_promoting_args: Optional[Sequence[str]] = None,
108
+ ):
109
+ self.type_promoting_arg_names = type_promoting_args
110
+ self.type_promotion_kind = type_promotion_kind
111
+
112
+ def __call__(self, fn: Callable) -> Callable:
113
+ sig = inspect.signature(fn)
114
+
115
+ @wraps(fn)
116
+ def _fn(*args, **kwargs):
117
+ bound = sig.bind(*args, **kwargs)
118
+ type_promoting_args = tuple(
119
+ bound.arguments[x]
120
+ for x in self.type_promoting_arg_names # type: ignore[union-attr]
121
+ if x in bound.arguments.keys()
122
+ )
123
+
124
+ flattened_type_promoting_args = pytree.arg_tree_leaves(*type_promoting_args)
125
+ compute_dtype, result_dtype = utils.elementwise_dtypes(
126
+ *flattened_type_promoting_args,
127
+ type_promotion_kind=self.type_promotion_kind,
128
+ )
129
+
130
+ promoted_args = {
131
+ x: _maybe_convert_to_dtype(bound.arguments[x], compute_dtype)
132
+ for x in self.type_promoting_arg_names # type: ignore[union-attr]
133
+ if x in bound.arguments.keys()
134
+ }
135
+ bound.arguments.update(promoted_args)
136
+
137
+ result = fn(**bound.arguments)
138
+
139
+ # Override the return_dtype if a dtype arg is present and not None
140
+ if "dtype" in bound.arguments:
141
+ maybe_dtype = bound.arguments["dtype"]
142
+ if maybe_dtype: # dtype cannot be None
143
+ result_dtype = maybe_dtype
144
+
145
+ if isinstance(result, TensorLike):
146
+ return _maybe_convert_to_dtype(result, result_dtype)
147
+ if isinstance(result, Sequence):
148
+ return tuple(_maybe_convert_to_dtype(x, result_dtype) for x in result)
149
+ raise AssertionError(f"Unhandled result type: {type(result)}")
150
+
151
+ _fn.__signature__ = sig # type: ignore[attr-defined]
152
+ return _fn
153
+
154
+
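# A minimal usage sketch of the decorator above. "my_add" is a hypothetical
# reference used only for illustration; it is not part of this module:
#
#   @elementwise_type_promotion_wrapper(
#       type_promoting_args=("a", "b"),
#       type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
#   )
#   def my_add(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType:
#       # a and b arrive here already cast to the computation dtype; the wrapper
#       # then casts the returned tensor to the promoted result dtype
#       return torch.add(a, b)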
155
+ # Returns True if resize is necessary
156
+ def _resize_output_check(out: TensorLikeType, shape: ShapeType):
157
+ # If the shapes are correct there's nothing to do
158
+ if utils.same_shape(out.shape, shape):
159
+ return False
160
+ if out.numel() != 0:
161
+ msg = (
162
+ f"An output with one or more elements was resized since it had shape {str(out.shape)} "
163
+ "which does not match the required output shape {str(shape)}. "
164
+ "This behavior is deprecated, and in a future PyTorch release outputs will not "
165
+ "be resized unless they have zero elements. "
166
+ "You can explicitly reuse an out tensor t by resizing it, inplace, to zero elements with t.resize_(0)."
167
+ )
168
+ warnings.warn(msg)
169
+ return True
170
+
171
+
172
+ # TODO: handle tuples of tensors
173
+ def _maybe_resize_out(out: TensorLikeType, shape: ShapeType):
174
+ if _resize_output_check(out, shape):
175
+ return out.resize_(shape)
176
+ else:
177
+ return out
178
+
179
+
180
+ def _safe_copy_out(
181
+ *, copy_from: TensorLikeType, copy_to: TensorLikeType, exact_dtype: bool = False
182
+ ):
183
+ # Checks same device
184
+ if copy_from.device != copy_to.device:
185
+ msg = "Attempting to copy from device {} to device {}, but cross-device copies are not allowed!".format(
186
+ copy_from.device, copy_to.device
187
+ )
188
+ raise RuntimeError(msg)
189
+
190
+ # Checks safe cast
191
+ if exact_dtype:
192
+ torch._check(
193
+ copy_from.dtype == copy_to.dtype,
194
+ lambda: f"Expected out tensor to have dtype {copy_from.dtype} "
195
+ f"but got {copy_to.dtype} instead",
196
+ )
197
+ else:
198
+ torch._check(
199
+ utils.can_safe_cast_to(cast_from=copy_from.dtype, cast_to=copy_to.dtype),
200
+ lambda: f"Attempting to cast from {copy_from.dtype} to out tensor with dtype {copy_to.dtype}, "
201
+ "but this can't be cast because it is not safe!",
202
+ )
203
+
204
+ return copy_to.copy_(copy_from)
205
+
206
+
207
+ def out_wrapper(*out_names: str, exact_dtype: bool = False, pass_is_out: bool = False):
208
+ # The wrapped function needs to convert the output parameters to ensure
209
+ # compatibility between the Python API (which always uses "out" as the
210
+ # parameter name and may be a tuple) and the Aten API (which may have
211
+ # multiple output parameters and use different parameter names such as
212
+ # "grad_input", "indices" or "values".)
213
+
214
+ default_out_names = ("out",)
215
+ if len(out_names) == 0:
216
+ # Use the default out name
217
+ out_names = default_out_names
218
+
219
+ is_tensor = len(out_names) == 1
220
+
221
+ def _out_wrapper(fn: Callable) -> Callable:
222
+ """
223
+ Adds the out parameter to a Python reference.
224
+ """
225
+ out_type = (
226
+ TensorLikeType
227
+ if is_tensor
228
+ else Tuple[tuple(TensorLikeType for _ in range(len(out_names)))]
229
+ )
230
+ return_type = (
231
+ TensorLikeType
232
+ if is_tensor
233
+ else NamedTuple(
234
+ f"return_types_{fn.__name__}", [(o, TensorLikeType) for o in out_names]
235
+ )
236
+ )
237
+
238
+ sig = inspect.signature(fn)
239
+ factory_kwargs = ("device", "dtype")
240
+ is_factory_fn = all(p in sig.parameters for p in factory_kwargs)
241
+
242
+ @wraps(fn)
243
+ def _fn(*args, out=None, **kwargs):
244
+ if is_factory_fn and out is not None:
245
+ for k in factory_kwargs:
246
+ out_attr = getattr(out, k)
247
+ if k not in kwargs:
248
+ kwargs[k] = out_attr
249
+ if pass_is_out:
250
+ result = fn(*args, is_out=(out is not None), **kwargs)
251
+ else:
252
+ result = fn(*args, **kwargs)
253
+ assert (
254
+ isinstance(result, TensorLike)
255
+ and is_tensor
256
+ or isinstance(result, Tuple) # type: ignore[arg-type]
257
+ and len(result) == len(out_names)
258
+ )
259
+ if out is not None:
260
+ # Naively you might expect this assert to be true, but
261
+ # it's not:
262
+ #
263
+ # assert type(out) == type(result)
264
+ #
265
+ # The reason is that functions under this wrapper can
266
+ # get registered to the Meta dispatch key, and that
267
+ # means they can be executed in a context where tensor
268
+ # subclasses are disabled (with no_dispatch), which is a
269
+ # handy way for an is-a tensor subclass (e.g.,
270
+ # FakeTensor) to have the normal meta backend create a
271
+ # meta tensor, to be wrapped once it gets returned.
272
+ # In this situation, you will get a FakeTensor as
273
+ # the output tensor, but not the result--which will
274
+ # be a normal meta tensor, but this is perfectly
275
+ # harmless.
276
+ if is_tensor:
277
+ assert isinstance(out, TensorLike)
278
+ # These two operations are done in-place
279
+ _maybe_resize_out(out, result.shape)
280
+ _safe_copy_out(copy_from=result, copy_to=out, exact_dtype=exact_dtype) # type: ignore[arg-type]
281
+ else:
282
+ assert isinstance(out, Tuple) # type: ignore[arg-type]
283
+ torch._check_type(
284
+ len(out) == len(result),
285
+ lambda: f"expected tuple of {len(result)} elements but got {len(out)}",
286
+ )
287
+ for r, o in zip(result, out):
288
+ # These two operations are done in-place
289
+ _maybe_resize_out(o, r.shape)
290
+ _safe_copy_out(copy_from=r, copy_to=o, exact_dtype=exact_dtype) # type: ignore[arg-type]
291
+ else:
292
+ out = result
293
+ # mypy does not see through the definition of out_type given that it's in a different scope
294
+ return out if is_tensor else return_type(*out) # type: ignore[operator]
295
+
296
+ out_param = inspect.Parameter(
297
+ "out",
298
+ kind=inspect.Parameter.KEYWORD_ONLY,
299
+ default=None,
300
+ annotation=out_type,
301
+ )
302
+ # Mark that the function now returns a tuple
303
+ assert isinstance(sig.return_annotation, str) or sig.return_annotation in (
304
+ sig.empty,
305
+ out_type,
306
+ )
307
+ params = chain(sig.parameters.values(), (out_param,))
308
+ _fn.__signature__ = inspect.Signature( # type: ignore[attr-defined]
309
+ parameters=params, return_annotation=return_type # type: ignore[arg-type]
310
+ )
311
+
312
+ _fn.__annotations__ = fn.__annotations__
313
+ _fn.__annotations__["out"] = out_type
314
+ _fn.__annotations__["return"] = return_type
315
+
316
+ # In the special case of having a single tensor out parameter with a
317
+ # name other than out, add a special annotation to name the parameter
318
+ if is_tensor and out_names != default_out_names:
319
+ _fn.__annotations__[CustomOutParamAnnotation] = out_names[0]
320
+
321
+ # Add an indicator attribute that can be used in special cases
322
+ # where having a function wrapped by `out_wrapper` is not desirable e.g.
323
+ # jit
324
+ _fn._torch_decompositions_out_wrapper = f"This function is wrapped by {out_wrapper.__module__}.out_wrapper" # type: ignore[attr-defined]
325
+
326
+ return _fn
327
+
328
+ return _out_wrapper
329
+
330
+
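# A minimal usage sketch of out_wrapper. "my_abs" is a hypothetical reference,
# shown only to illustrate the added keyword-only `out` parameter:
#
#   @out_wrapper()
#   def my_abs(a: TensorLikeType) -> TensorLikeType:
#       return torch.abs(a)
#
#   t = torch.tensor([-1.0, 2.0])
#   buf = torch.empty(2)
#   my_abs(t, out=buf)   # the result is (resized if needed and) copied into buf,
#                        # and buf itself is returned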
331
+ def _maybe_remove_out_wrapper(fn: Callable):
332
+ return inspect.unwrap(
333
+ fn,
334
+ stop=lambda f: not hasattr(f, "_torch_decompositions_out_wrapper"),
335
+ )
336
+
337
+
338
+ def backwards_not_supported(prim):
339
+ def redispatch_prim(args, kwargs):
340
+ with torch._C._AutoDispatchBelowAutograd():
341
+ old = torch._C._dispatch_tls_is_dispatch_key_excluded(
342
+ torch._C.DispatchKey.ADInplaceOrView
343
+ )
344
+ return prim(*args, **kwargs)
345
+
346
+ class BackwardsNotSupported(torch.autograd.Function):
347
+ @staticmethod
348
+ def forward(ctx, args_spec, *flat_args):
349
+ args, kwargs = tree_unflatten(flat_args, args_spec) # type: ignore[arg-type]
350
+ return redispatch_prim(args, kwargs)
351
+
352
+ @staticmethod
353
+ def backward(ctx, *args):
354
+ raise RuntimeError("backwards not supported on prim")
355
+
356
+ @wraps(prim)
357
+ def _autograd_impl(*args, **kwargs):
358
+ flat_args, args_spec = tree_flatten((args, kwargs))
359
+ if torch.is_grad_enabled() and any(
360
+ a.requires_grad for a in flat_args if isinstance(a, torch.Tensor)
361
+ ):
362
+ # TODO: There is a subtle bug here: prims like copy_to
363
+ # return their input argument after mutating it; and custom
364
+ # autograd function will incorrectly turn the result into
365
+ # a view which will fail test_python_ref_executor tests.
366
+ # At the moment, we sidestep this by observing that the
367
+ # unit tests don't ever try to run the executor with
368
+ # autograd, so we don't exercise the buggy case, but if
369
+ # you ever want to feed autograd through this, be aware
370
+ # of it! We need a way of properly implementing autograd
371
+ # for mutating operations in Python to do this.
372
+ return BackwardsNotSupported.apply(args_spec, *flat_args)
373
+ else:
374
+ return redispatch_prim(args, kwargs)
375
+
376
+ return _autograd_impl
377
+
378
+
379
+ # TODO: when tracing this will add torch tensors and not TensorMeta objects
380
+ # to the trace -- we should fix this by adding a tracing context and NumberMeta classes
381
+ # TODO: this wrapper is currently untested
382
+ def elementwise_unary_scalar_wrapper(fn: Callable) -> Callable:
383
+ """
384
+ Allows unary operators that accept tensors to work with Python numbers.
385
+ """
386
+ sig = inspect.signature(fn)
387
+
388
+ @wraps(fn)
389
+ def _fn(*args, **kwargs):
390
+ if len(args) > 0 and isinstance(args[0], Number):
391
+ dtype = utils.type_to_dtype(type(args[0]))
392
+ args_ = list(args)
393
+ args_[0] = torch.tensor(args[0], dtype=dtype)
394
+ result = fn(*args_, **kwargs)
395
+ assert isinstance(result, torch.Tensor)
396
+ return result.item()
397
+
398
+ return fn(*args, **kwargs)
399
+
400
+ _fn.__signature__ = sig # type: ignore[attr-defined]
401
+ return _fn
llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/common_rules.py ADDED
@@ -0,0 +1,289 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates
2
+ from typing import cast, Dict, List, Optional, Tuple
3
+
4
+ import torch
5
+ from torch.distributed._tensor._utils import compute_local_shape
6
+ from torch.distributed._tensor.op_schema import (
7
+ _is_inplace_op,
8
+ _is_out_variant_op,
9
+ OpSchema,
10
+ OutputSharding,
11
+ )
12
+ from torch.distributed._tensor.ops.utils import prod
13
+ from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta
14
+
15
+
16
+ def _replace_char_in_str(string: str, new_char: str, idx: int) -> str:
17
+ return string[:idx] + new_char + string[idx + 1 :]
18
+
19
+
20
+ def _gen_reshard_suggestions(
21
+ op_schema: OpSchema,
22
+ input_dims: List[str],
23
+ input_specs: Tuple[DTensorSpec, ...],
24
+ dim_to_sharding: Dict[str, int],
25
+ pending_sum: List[int],
26
+ ) -> OutputSharding:
27
+ suggested_arg_specs: List[DTensorSpec] = []
28
+ for input_dim, input_spec in zip(input_dims, input_specs):
29
+ dim_map = [dim_to_sharding[dim] for dim in input_dim]
30
+ suggested_arg_specs.append(
31
+ DTensorSpec.from_dim_map(
32
+ mesh=input_spec.mesh,
33
+ dim_map=dim_map,
34
+ sums=pending_sum,
35
+ tensor_meta=input_spec.tensor_meta,
36
+ )
37
+ )
38
+ suggested_schema = OpSchema(op_schema.op, tuple(suggested_arg_specs), {})
39
+ suggested_schema._inplace_rewrap_schema_suggestion(op_schema)
40
+ return OutputSharding(
41
+ None,
42
+ schema_suggestions=[suggested_schema],
43
+ failed_reason="Input placements op sharding propagation failed, need to reshard!",
44
+ )
45
+
46
+
47
+ def einop_rule(
48
+ equation: str,
49
+ op_schema: OpSchema,
50
+ *,
51
+ linearity: bool = False,
52
+ enforce_sharding: Optional[Dict[str, int]] = None,
53
+ ) -> OutputSharding:
54
+ """
55
+ Propagate the sharding of inputs to output for ops whose data moves according to einsum notation.
56
+
57
+ This is mostly borrowed from @zdevito's sharding simulator. Examples:
58
+ mk,kn->mn - einsum
59
+ ij,ij->ij - addition
60
+ ij,j->ij - broadcasted addition
61
+ ij->i - reduction
62
+ Other ops could use this propagation algorithm when applicable; note
63
+ that einsum propagation only deals with lists of specs (DTensor specs),
64
+ as it only works on lists of tensors!
65
+
66
+ linearity in einop_rule means that the calling op `f` follows this rule:
67
+ f(a + b) = f(a) + f(b)
68
+
69
+ In this case we can propagate the partial sum, note that linearity in einop
70
+ only applies to partial sum, not other operations like min/max (which are
71
+ associative but not linear).
72
+ """
73
+ # parse einop equation and extract arg specs
74
+ inputs, outputs = equation.split("->")
75
+ input_dims, output_dims = inputs.split(","), outputs.split(",")
76
+ input_specs = op_schema.args_spec
77
+ # NOTE: only support single output unless needed in future
78
+ output_dim = output_dims[0]
79
+
80
+ dim_to_sharding: Dict[str, int] = {}
81
+ dim_to_size: Dict[str, int] = {}
82
+ # record pending sum, key is mesh dimension, value is pending sum
83
+ # counter across input specs
84
+ pending_sums_counter: Dict[int, int] = {}
85
+ seen_shardings: Dict[int, str] = {}
86
+ needs_reshard = False
87
+
88
+ def merge_sharding(dim: str, a: int, b: int) -> int:
89
+ # merge the sharding of inputs if it's able to merge, i.e. we can merge
90
+ # replicate and shard to shard, but this will trigger a reshard operation
91
+ if a != b:
92
+ if a == -1 or b == -1:
93
+ # reshard the replicate to match the sharded one
94
+ nonlocal needs_reshard
95
+ needs_reshard = True
96
+ return a if a != -1 else b
97
+ else:
98
+ # TODO: further merge the sharding properly (i.e. reshard one input to replicate)
99
+ raise RuntimeError(
100
+ f"{equation}: dim {dim} sharded two different ways: {a} and {b}"
101
+ )
102
+ else:
103
+ return a
104
+
105
+ for input_dim, input_spec in zip(input_dims, input_specs):
106
+ # deal with partial sums
107
+ input_sums = input_spec.sums
108
+ for sum_dim in input_sums:
109
+ if sum_dim not in pending_sums_counter:
110
+ seen_shardings[sum_dim] = "+"
111
+ # update pending sum counter for pending sum mesh
112
+ # dimension with the occurrence from each input
113
+ pending_sums_counter[sum_dim] = pending_sums_counter.get(sum_dim, 0) + 1
114
+
115
+ for idx, (dim, mesh_dim) in enumerate(zip(input_dim, input_spec.dim_map)):
116
+ if enforce_sharding and dim in enforce_sharding:
117
+ if enforce_sharding[dim] != mesh_dim:
118
+ needs_reshard = True
119
+ dim_to_sharding[dim] = enforce_sharding[dim]
120
+ dim_to_size[dim] = input_spec.shape[idx]
121
+ elif dim not in dim_to_sharding:
122
+ dim_to_sharding[dim] = mesh_dim
123
+ dim_to_size[dim] = input_spec.shape[idx]
124
+ else:
125
+ dim_to_sharding[dim] = merge_sharding(
126
+ dim, dim_to_sharding[dim], mesh_dim
127
+ )
128
+ assert dim_to_size[dim] == input_spec.shape[idx]
129
+
130
+ # after merging sharding, we check if there're multiple
131
+ # sharding on the same mesh dim.
132
+ merged_sharding_for_dim = dim_to_sharding[dim]
133
+ if merged_sharding_for_dim != -1:
134
+ if (
135
+ merged_sharding_for_dim in seen_shardings
136
+ and dim != seen_shardings[merged_sharding_for_dim]
137
+ ):
138
+ needs_reshard = True
139
+ seen_shardings[merged_sharding_for_dim] += dim
140
+ else:
141
+ seen_shardings[merged_sharding_for_dim] = dim
142
+
143
+ if pending_sums_counter and not linearity:
144
+ # return reshard suggestion with no pending sum, because we already properly
145
+ # merge the sharding, this reshard suggestion is legit to use
146
+ return _gen_reshard_suggestions(
147
+ op_schema, input_dims, input_specs, dim_to_sharding, []
148
+ )
149
+ else:
150
+ # It's an op that supports linearity, but not all input arguments are partial;
151
+ # we fail the sharding propagation with a suggestion to make all inputs
152
+ # partial on the corresponding mesh dim (all inputs should be partial for
153
+ # the mesh dims in order to execute locally and delay the sum reduction)
154
+ for value in pending_sums_counter.values():
155
+ if value != len(input_specs):
156
+ needs_reshard = True
157
+
158
+ for mesh_dim, dims in seen_shardings.items():
159
+ if len(dims) > 1:
160
+ # we found different input dims are being sharded on the same mesh dim
161
+ # in order to perform local op computation, we need to reshard inputs
162
+ # based on some simple heuristics; for now we simply pick the one with the least comm
164
+ # volume (i.e. the input with the smallest size)
164
+ # TODO: consider a more advanced heuristic to pick the best sharding
165
+ costs = []
166
+ for d in dims:
167
+ cost = 0
168
+ for input_dim, input_spec in zip(input_dims, input_specs):
169
+ if (
170
+ d in input_dim
171
+ and input_spec.dim_map[input_dim.index(d)] == mesh_dim
172
+ ):
173
+ assert input_spec.tensor_meta is not None
174
+ global_shape = input_spec.tensor_meta.shape
175
+ local_shape = compute_local_shape(
176
+ global_shape, input_spec.mesh, input_spec.placements
177
+ )
178
+ cost += prod(local_shape) * input_spec.mesh.size(mesh_dim)
179
+ costs.append(cost)
180
+ d_to_keep_sharding = dims[costs.index(max(costs))]
181
+ for d in dims:
182
+ # update dim_to_sharding to keep the sharding of the dim with
183
+ # highest comm and make the rest of the dims replicated
184
+ if d != d_to_keep_sharding:
185
+ dim_to_sharding[d] = -1
186
+
187
+ pending_sums = list(pending_sums_counter.keys())
188
+ if needs_reshard:
189
+ return _gen_reshard_suggestions(
190
+ op_schema, input_dims, input_specs, dim_to_sharding, pending_sums
191
+ )
192
+
193
+ # generate output pending sum if a dim is sharded, and it appears in input
194
+ # but not output
195
+ for dim, shard_on_mesh in dim_to_sharding.items():
196
+ if dim not in output_dims[0] and shard_on_mesh != -1:
197
+ pending_sums.append(shard_on_mesh)
198
+
199
+ # if no need to reshard, we directly generate the output sharding
200
+ output_dim_map = []
201
+ output_shape = []
202
+ for dim in output_dim:
203
+ if dim == "1":
204
+ # find output dim that is a singleton dimension, mark sharding and shape
205
+ output_dim_map.append(-1)
206
+ output_shape.append(1)
207
+ else:
208
+ output_dim_map.append(dim_to_sharding[dim])
209
+ output_shape.append(dim_to_size[dim])
210
+
211
+ # XXX: since we still need to have intermediate shape calculation, we need
212
+ # to pass in the shape here. We should remove this once sharding decomp works
213
+ # for ops like addmm
214
+ assert input_specs[0].tensor_meta is not None
215
+ tensor_meta = TensorMeta(
216
+ torch.Size(output_shape),
217
+ input_specs[0].tensor_meta.stride,
218
+ input_specs[0].tensor_meta.dtype,
219
+ )
220
+ return OutputSharding(
221
+ DTensorSpec.from_dim_map(
222
+ input_specs[0].mesh,
223
+ output_dim_map,
224
+ pending_sums,
225
+ tensor_meta=tensor_meta,
226
+ )
227
+ )
228
+
229
+
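# A short sketch of how the einsum notation drives the propagation above, using
# a matmul-like equation "mk,kn->mn" as an example:
#
#   - lhs sharded on "m" (some mesh dim), rhs replicated: the shardings merge
#     cleanly and the output is sharded on "m" with no resharding needed;
#   - lhs and rhs both sharded on "k" on the same mesh dim: "k" does not appear
#     in the output, so that mesh dim is recorded as a pending (partial) sum;
#   - lhs sharded on "m" and rhs sharded on "n" on the *same* mesh dim: two
#     different dims share one mesh dim, so a reshard suggestion is generated.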
230
+ def pointwise_rule(op_schema: OpSchema, linearity: bool = False) -> OutputSharding:
231
+ """
232
+ Propagate the sharding for pointwise operations.
233
+
234
+ Examples:
235
+ ij,ij->ij - addition/mul
236
+ ij,j->ij - broadcasted addition
237
+ """
238
+ alphabet = "abcdefghijklmnopqrstuvwxyz"
239
+ # find the max_dim first in case we need broadcasting
240
+ input_specs = op_schema.args_spec
241
+ max_dim = max(input.ndim for input in input_specs)
242
+ dimchars = []
243
+ singleton_counter: List[int] = [0] * max_dim
244
+ for input in input_specs:
245
+ start_dim = max_dim - input.ndim
246
+ p = alphabet[start_dim:max_dim]
247
+ # handle the "broadcasting to a common shape" case
248
+ # see https://pytorch.org/docs/stable/notes/broadcasting.html
249
+ # If any of the dimensions is a singleton dimension (i.e. 1),
250
+ # we mark the dim char as a special "1" to distinguish it from
251
+ # the non-singleton dimension, so that sharding propagation
252
+ # should just ignore the singleton dimension.
253
+ if len(input_specs) > 1:
254
+ for i in range(max_dim):
255
+ if i < start_dim:
256
+ # treat the leading missing dim chars as singletons
257
+ singleton_counter[i] += 1
258
+ elif input.shape[i - start_dim] == 1:
259
+ # mark singleton dim char as a special "1" in einop rule
260
+ singleton_counter[i] += 1
261
+ p = _replace_char_in_str(p, "1", (i - start_dim))
262
+
263
+ dimchars.append(p)
264
+ out_dimchars = alphabet[:max_dim]
265
+ # check if we replaced all inputs' dim chars with the singleton dimension;
266
+ # if so, we also need to replace the output dimension.
267
+ for output_dim_idx in range(len(out_dimchars)):
268
+ out_dimchar = out_dimchars[output_dim_idx]
269
+ if singleton_counter[output_dim_idx] == len(input_specs):
270
+ out_dimchars = _replace_char_in_str(out_dimchars, "1", output_dim_idx)
271
+
272
+ fmt = f"{','.join(p for p in dimchars)}->{out_dimchars}"
273
+
274
+ enforce_sharding: Dict[str, int] = {}
275
+ if _is_inplace_op(op_schema.op):
276
+ # inplace op should keep the input sharding it writes to
277
+ for out_dimchar, mesh_dim in zip(out_dimchars, input_specs[0].dim_map):
278
+ enforce_sharding[out_dimchar] = mesh_dim
279
+ elif _is_out_variant_op(op_schema.op):
280
+ out_spec = cast(DTensorSpec, op_schema.kwargs_schema["out"])
281
+ for out_dimchar, mesh_dim in zip(out_dimchars, out_spec.dim_map):
282
+ enforce_sharding[out_dimchar] = mesh_dim
283
+
284
+ return einop_rule(
285
+ fmt,
286
+ op_schema,
287
+ linearity=linearity,
288
+ enforce_sharding=enforce_sharding,
289
+ )
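To make the broadcast handling in pointwise_rule concrete, here is a minimal standalone sketch (the helper name and shapes are hypothetical, not part of this module) that rebuilds the einsum-like format string for broadcastable inputs:

def pointwise_fmt(shapes):
    # mirror of the dim-char construction above: pad to a common rank and
    # mark singleton dims (size 1 or missing leading dims) with "1"
    alphabet = "abcdefghijklmnopqrstuvwxyz"
    max_dim = max(len(s) for s in shapes)
    singleton_counter = [0] * max_dim
    dimchars = []
    for shape in shapes:
        start_dim = max_dim - len(shape)
        p = list(alphabet[start_dim:max_dim])
        for i in range(max_dim):
            if i < start_dim:
                singleton_counter[i] += 1
            elif shape[i - start_dim] == 1:
                singleton_counter[i] += 1
                p[i - start_dim] = "1"
        dimchars.append("".join(p))
    out = "".join(
        "1" if singleton_counter[i] == len(shapes) else alphabet[i]
        for i in range(max_dim)
    )
    return f"{','.join(dimchars)}->{out}"

print(pointwise_fmt([(4, 8), (8,)]))    # ab,b->ab  (broadcasted add)
print(pointwise_fmt([(4, 1), (4, 8)]))  # a1,ab->ab

einop_rule then treats the "1" dim chars as unshardable, which is how the singleton dimensions end up being ignored during sharding propagation.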
llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/conv_ops.py ADDED
@@ -0,0 +1,108 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates
2
+ # implement convolution related ops for distributed tensor
3
+ from typing import List
4
+
5
+ import torch
6
+ from torch.distributed._tensor.op_schema import OpSchema, OutputSharding
7
+ from torch.distributed._tensor.ops.utils import register_prop_rule
8
+ from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta
9
+
10
+ aten = torch.ops.aten
11
+
12
+
13
+ @register_prop_rule(aten.convolution.default)
14
+ def convolution_rules(op_schema: OpSchema) -> OutputSharding:
15
+ (
16
+ input_spec,
17
+ weight_spec,
18
+ bias_spec,
19
+ stride,
20
+ padding,
21
+ dilation,
22
+ transposed,
23
+ output_padding,
24
+ groups,
25
+ ) = op_schema.args_schema
26
+
27
+ assert isinstance(input_spec, DTensorSpec)
28
+ assert isinstance(weight_spec, DTensorSpec)
29
+ assert isinstance(bias_spec, DTensorSpec)
30
+ assert input_spec.tensor_meta is not None
31
+ assert weight_spec.tensor_meta is not None
32
+ in_shape = input_spec.tensor_meta.shape
33
+ weight_shape = weight_spec.tensor_meta.shape
34
+ assert isinstance(stride, List)
35
+ assert isinstance(padding, List)
36
+ assert isinstance(dilation, List)
37
+ assert isinstance(weight_shape, torch.Size)
38
+ N, C_in, H_in, W_in = in_shape[0], in_shape[1], in_shape[2], in_shape[3]
39
+ C_out = weight_shape[0]
40
+ H_out = (H_in + 2 * padding[0] - dilation[0] * (weight_shape[2] - 1) - 1) // stride[
41
+ 0
42
+ ] + 1
43
+ W_out = (W_in + 2 * padding[1] - dilation[1] * (weight_shape[3] - 1) - 1) // stride[
44
+ 1
45
+ ] + 1
46
+ output_shape = [N, C_out, H_out, W_out]
47
+ output_stride = (C_out * H_out * W_out, H_out * W_out, W_out, 1)
48
+ output_dim_map = input_spec.dim_map
49
+ pending_sums = input_spec.sums
50
+
51
+ tensor_meta = TensorMeta(
52
+ torch.Size(output_shape),
53
+ output_stride,
54
+ input_spec.tensor_meta.dtype,
55
+ )
56
+ return OutputSharding(
57
+ DTensorSpec.from_dim_map(
58
+ input_spec.mesh,
59
+ output_dim_map,
60
+ pending_sums,
61
+ tensor_meta=tensor_meta,
62
+ )
63
+ )
64
+
65
+
66
+ @register_prop_rule(aten.convolution_backward.default)
67
+ def convolution_backward_rules(op_schema: OpSchema) -> OutputSharding:
68
+ input_spec = op_schema.args_schema[0]
69
+ (
70
+ grad_output_spec,
71
+ input_spec,
72
+ weight_spec,
73
+ bias_shape_opt,
74
+ stride,
75
+ padding,
76
+ dilation,
77
+ transposed,
78
+ output_padding,
79
+ groups,
80
+ output_mask,
81
+ ) = op_schema.args_schema
82
+
83
+ assert isinstance(grad_output_spec, DTensorSpec)
84
+ assert isinstance(input_spec, DTensorSpec)
85
+ assert isinstance(weight_spec, DTensorSpec)
86
+ assert isinstance(bias_shape_opt, List)
87
+ assert input_spec.tensor_meta is not None
88
+ weight_tensor_meta = weight_spec.tensor_meta
89
+ bias_tensor_meta = TensorMeta(
90
+ torch.Size(bias_shape_opt),
91
+ (1,),
92
+ input_spec.tensor_meta.dtype,
93
+ )
94
+
95
+ grad_input_spec = input_spec
96
+ grad_weight_spec = DTensorSpec.from_dim_map(
97
+ input_spec.mesh,
98
+ [-1, -1, -1, -1],
99
+ [0],
100
+ tensor_meta=weight_tensor_meta,
101
+ )
102
+ grad_bias_spec = DTensorSpec.from_dim_map(
103
+ input_spec.mesh,
104
+ [-1],
105
+ [0],
106
+ tensor_meta=bias_tensor_meta,
107
+ )
108
+ return OutputSharding([grad_input_spec, grad_weight_spec, grad_bias_spec])
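The H_out/W_out arithmetic in convolution_rules above is the standard convolution output-size formula; a quick standalone check against eager conv2d (the sizes below are arbitrary example values):

import torch
import torch.nn.functional as F

N, C_in, H_in, W_in = 2, 3, 32, 32
C_out, kH, kW = 8, 3, 3
stride, padding, dilation = (2, 2), (1, 1), (1, 1)

H_out = (H_in + 2 * padding[0] - dilation[0] * (kH - 1) - 1) // stride[0] + 1
W_out = (W_in + 2 * padding[1] - dilation[1] * (kW - 1) - 1) // stride[1] + 1

out = F.conv2d(
    torch.randn(N, C_in, H_in, W_in),
    torch.randn(C_out, C_in, kH, kW),
    stride=stride,
    padding=padding,
    dilation=dilation,
)
assert out.shape == (N, C_out, H_out, W_out)  # (2, 8, 16, 16)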
llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/embedding_ops.py ADDED
@@ -0,0 +1,313 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates
2
+ # implement embedding related ops for distributed tensor
3
+ import itertools
4
+ from dataclasses import dataclass, field
5
+ from typing import cast, List, Optional
6
+
7
+ import torch
8
+ import torch.distributed._functional_collectives as funcol
9
+ from torch.distributed._tensor.op_schema import (
10
+ OpSchema,
11
+ OpStrategy,
12
+ PlacementStrategy,
13
+ StrategyType,
14
+ )
15
+ from torch.distributed._tensor.ops.utils import (
16
+ generate_redistribute_costs,
17
+ is_tensor_shardable,
18
+ register_op_strategy,
19
+ )
20
+
21
+ from torch.distributed._tensor.placement_types import (
22
+ _Partial,
23
+ DTensorSpec,
24
+ Placement,
25
+ Replicate,
26
+ Shard,
27
+ )
28
+
29
+ from torch.distributed.device_mesh import DeviceMesh
30
+
31
+ aten = torch.ops.aten
32
+
33
+
34
+ @dataclass
35
+ class MaskBuffer:
36
+ data: Optional[torch.Tensor] = None
37
+
38
+ def materialize_mask(self, mask):
39
+ if self.data is not None:
40
+ raise RuntimeError("MaskBuffer has already been materialized")
41
+ self.data = mask
42
+
43
+ def release_mask(self):
44
+ # TODO: evaluate if we need to release the mask buffer or the buffer
45
+ # can just have the same lifetime as the _Partial placement
46
+ if self.data is None:
47
+ raise RuntimeError("MaskBuffer has not been materialized")
48
+ self.data = None
49
+
50
+ def apply_mask(self, tensor):
51
+ if self.data is None:
52
+ raise RuntimeError("MaskBuffer has not been materialized")
53
+
54
+ # NOTE: _MaskPartial is being used by the embedding op and the gather op.
55
+ # For gather, the mask has the same dimension as the output tensor, whereas
56
+ # the output of the embedding op has an additional dimension compared to the input,
57
+ # hence the output masking logic below having two different cases.
58
+ if tensor.ndim == self.data.ndim:
59
+ tensor[self.data] = 0.0
60
+ else:
61
+ tensor[self.data, :] = 0.0
62
+
63
+
64
+ @dataclass(frozen=True)
65
+ class _MaskPartial(_Partial):
66
+ """
67
+ A partial mask placement devised for rowwise sharded embedding op, where we need
68
+ to mask and adjust the indices to the local embedding shard; embedding masking
69
+ is a special type of the Partial placement
70
+
71
+ NOTE: the lifecycle of this MaskPartial placement follows the corresponding DTensor
72
+ lifecycle, i.e. the indices_mask would only be alive during the lifetime of the DTensor.
73
+ """
74
+
75
+ logical_dim_size: int = -1
76
+ mask_buffer: MaskBuffer = field(default_factory=MaskBuffer)
77
+
78
+ def _partition_value(
79
+ self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int
80
+ ) -> torch.Tensor:
81
+ # override parent logic to perform partial mask for embedding
82
+ num_chunks = mesh.size(mesh_dim)
83
+ # get local shard size and offset on the embedding_dim
84
+ local_shard_size, local_offset_on_dim = Shard._local_shard_size_on_dim(
85
+ self.logical_dim_size,
86
+ num_chunks,
87
+ mesh.get_local_rank(mesh_dim),
88
+ return_offset=True,
89
+ )
90
+ # Build the input mask and save it for the current partial placement
91
+ # this is so that the output of embedding op can reuse the same partial
92
+ # placement saved mask to perform mask + reduction
93
+ mask = (tensor < local_offset_on_dim) | (
94
+ tensor >= local_offset_on_dim + local_shard_size
95
+ )
96
+ # mask the input tensor
97
+ masked_tensor = tensor.clone() - local_offset_on_dim
98
+ masked_tensor[mask] = 0
99
+ # materialize the mask buffer to be used for reduction
100
+ self.mask_buffer.materialize_mask(mask)
101
+ return masked_tensor
102
+
103
+ def _reduce_value(
104
+ self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int
105
+ ) -> torch.Tensor:
106
+ # by the time we need reduction, we should have already saved the mask
107
+ assert self.mask_buffer.data is not None
108
+
109
+ # apply the mask to the tensor that is pending reduction
110
+ self.mask_buffer.apply_mask(tensor)
111
+
112
+ # clear the mask buffer
113
+ self.mask_buffer.release_mask()
114
+
115
+ # perform sum reduction
116
+ return funcol.all_reduce(
117
+ tensor, reduceOp=self.reduce_op.name, group=(mesh, mesh_dim)
118
+ )
119
+
120
+ def _reduce_shard_value(
121
+ self,
122
+ tensor: torch.Tensor,
123
+ mesh: DeviceMesh,
124
+ mesh_dim: int,
125
+ shard_spec: Placement,
126
+ ) -> torch.Tensor:
127
+ # by the time we need reduction, we should have already saved the mask
128
+ assert self.mask_buffer.data is not None
129
+
130
+ # apply the mask to the tensor that is pending reduction
131
+ self.mask_buffer.apply_mask(tensor)
132
+
133
+ # clear the mask buffer
134
+ self.mask_buffer.release_mask()
135
+
136
+ # call reduce_shard_tensor of the shard_spec.
137
+ shard_spec = cast(Shard, shard_spec)
138
+ return shard_spec._reduce_shard_tensor(tensor, mesh, self.reduce_op, mesh_dim)
139
+
140
+ def __eq__(self, other: object) -> bool:
141
+ if not isinstance(other, _MaskPartial):
142
+ return False
143
+
144
+ # if either data is not None, we invalidate the sharding cache, as this indicates
145
+ # the current MaskPartial placement is still in use and should not be used for cache hit.
146
+ if self.mask_buffer.data is not None or other.mask_buffer.data is not None:
147
+ return False
148
+
149
+ return (
150
+ self.reduce_op == other.reduce_op
151
+ and self.logical_dim_size == other.logical_dim_size
152
+ )
153
+
154
+ def __hash__(self) -> int:
155
+ return 1 + hash(
156
+ (self.logical_dim_size, id(self.mask_buffer.data), self.reduce_op)
157
+ )
158
+
159
+ def __repr__(self) -> str:
160
+ """
161
+ machine readable representation of the MaskPartial placement
162
+ """
163
+ return f"_MaskPartial(logical_dim_size={self.logical_dim_size})"
164
+
165
+ def __str__(self) -> str:
166
+ """
167
+ human readable representation of the MaskPartial placement
168
+ """
169
+ return "MaskP"
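To illustrate what _partition_value computes for one rank, here is a minimal standalone sketch with plain tensors (the shard bounds are hypothetical; the real values come from Shard._local_shard_size_on_dim):

import torch

# this rank owns embedding rows [4, 8) of a 16-row table
local_offset_on_dim, local_shard_size = 4, 4
indices = torch.tensor([1, 5, 7, 12])

mask = (indices < local_offset_on_dim) | (
    indices >= local_offset_on_dim + local_shard_size
)
masked_indices = indices.clone() - local_offset_on_dim
masked_indices[mask] = 0
print(mask)            # tensor([ True, False, False,  True])
print(masked_indices)  # tensor([0, 1, 3, 0])

Rows that fall outside the local shard look up local row 0; their contributions are zeroed by apply_mask before the all-reduce in _reduce_value, so the summed embedding output is correct.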
170
+
171
+
172
+ @register_op_strategy(aten.embedding.default)
173
+ def embedding_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType:
174
+ """
175
+ This strategy handles embedding op. We have two possible embedding shardings:
176
+ rowwise and colwise
177
+ # TODO: implement rowwise sharding
178
+ """
179
+ weight_strategy = cast(OpStrategy, op_schema.args_schema[0])
180
+ indices_strategy = cast(OpStrategy, op_schema.args_schema[1])
181
+
182
+ weight_shape = weight_strategy.output_shape
183
+ indices_shape = indices_strategy.output_shape
184
+ output_emd_dim = len(indices_shape)
185
+
186
+ all_mesh_dim_strategies = []
187
+
188
+ for mesh_dim in range(mesh.ndim):
189
+ single_mesh_dim_strategies = []
190
+
191
+ # placement list stores placements of [output, weight, input_indices]
192
+ # first we always have replicate all for inputs and output
193
+ all_replicate: List[Placement] = [Replicate()] * 3
194
+ single_mesh_dim_strategies.append(all_replicate)
195
+
196
+ # colwise sharding, output shard on last dim, weight shard on dim 1, input replicate
197
+ colwise_sharding = [Shard(output_emd_dim), Shard(1), Replicate()]
198
+ single_mesh_dim_strategies.append(colwise_sharding)
199
+
200
+ # rowwise sharding, output is embedding partial, weight shard on dim 0, input accepts embedding partial
201
+ embedding_partial_placement = _MaskPartial(logical_dim_size=weight_shape[0])
202
+
203
+ # NOTE we want to reuse the same mask partial placement so that we can reuse the same mask generated
204
+ # from the input indices and use it for output reduction
205
+ rowwise_sharding = [
206
+ embedding_partial_placement,
207
+ Shard(0),
208
+ embedding_partial_placement,
209
+ ]
210
+ single_mesh_dim_strategies.append(rowwise_sharding)
211
+
212
+ # batch dim sharding, weight replicated, input can shard on any dim, output follows input
213
+ for input_dim in range(len(indices_shape)):
214
+ batch_sharding = [Shard(input_dim), Replicate(), Shard(input_dim)]
215
+ single_mesh_dim_strategies.append(batch_sharding)
216
+
217
+ all_mesh_dim_strategies.append(single_mesh_dim_strategies)
218
+
219
+ strategy_combs = itertools.product(*all_mesh_dim_strategies)
220
+
221
+ all_strategies = []
222
+ for strategy_comb in strategy_combs:
223
+ spec_list = []
224
+ for specs in zip(*strategy_comb):
225
+ spec_list.append(DTensorSpec(mesh, tuple(specs)))
226
+
227
+ if is_tensor_shardable(weight_shape, spec_list[1]) and is_tensor_shardable(
228
+ indices_shape, spec_list[2]
229
+ ):
230
+ # only add to the strategy list when both weight and indices are shardable
231
+ weight_spec, indices_spec = spec_list[1:]
232
+ redistribute_cost = [
233
+ generate_redistribute_costs(weight_strategy, weight_spec),
234
+ generate_redistribute_costs(indices_strategy, indices_spec),
235
+ ]
236
+ strat = PlacementStrategy(
237
+ output_specs=spec_list[0],
238
+ input_specs=spec_list[1:],
239
+ redistribute_cost=redistribute_cost,
240
+ )
241
+ all_strategies.append(strat)
242
+
243
+ return OpStrategy(all_strategies)
244
+
245
+
246
+ @register_op_strategy(aten.embedding_dense_backward.default)
247
+ def embedding_dense_backward_strategy(
248
+ mesh: DeviceMesh, op_schema: OpSchema
249
+ ) -> StrategyType:
250
+ """
251
+ This strategy handles embedding op. We have two possible embedding shardings:
252
+ rowwise and colwise
253
+ # TODO: implement rowwise sharding backward
254
+ """
255
+ grad_out_strategy = cast(OpStrategy, op_schema.args_schema[0])
256
+ indices_strategy = cast(OpStrategy, op_schema.args_schema[1])
257
+
258
+ grad_out_shape = grad_out_strategy.output_shape
259
+ indices_shape = indices_strategy.output_shape
260
+ grad_out_ndim = len(grad_out_shape)
261
+
262
+ all_mesh_dim_strategies = []
263
+
264
+ for mesh_dim in range(mesh.ndim):
265
+ single_mesh_dim_strategies = []
266
+
267
+ # placement list stores placements of [output, weight, input_indices]
268
+ # first we always have replicate all for inputs and output
269
+ all_replicate: List[Placement] = [Replicate()] * 3
270
+ single_mesh_dim_strategies.append(all_replicate)
271
+
272
+ # colwise sharding backward, grad_out shard on last dim, input replicate,
273
+ # weight grad shard colwise
274
+ colwise_sharding = [Shard(1), Shard(grad_out_ndim - 1), Replicate()]
275
+ single_mesh_dim_strategies.append(colwise_sharding)
276
+
277
+ # batch dim sharding, weight replicated, grad_out/input have same sharding
278
+ # that can shard on any dim, weight grad partial
279
+ for input_dim in range(len(indices_shape)):
280
+ batch_sharding = [_Partial(), Shard(input_dim), Shard(input_dim)]
281
+ single_mesh_dim_strategies.append(batch_sharding)
282
+
283
+ # grad_out partial, input replicate, weight grad keep partial
284
+ partial_sharding = [_Partial(), _Partial(), Replicate()]
285
+ single_mesh_dim_strategies.append(partial_sharding)
286
+
287
+ all_mesh_dim_strategies.append(single_mesh_dim_strategies)
288
+
289
+ strategy_combs = itertools.product(*all_mesh_dim_strategies)
290
+
291
+ all_strategies = []
292
+ for strategy_comb in strategy_combs:
293
+ spec_list = []
294
+ for specs in zip(*strategy_comb):
295
+ spec_list.append(DTensorSpec(mesh, tuple(specs)))
296
+
297
+ if is_tensor_shardable(grad_out_shape, spec_list[1]) and is_tensor_shardable(
298
+ indices_shape, spec_list[2]
299
+ ):
300
+ # only add to the strategy list when both grad_out and indices are shardable
301
+ grad_out_spec, indices_spec = spec_list[1:]
302
+ redistribute_cost = [
303
+ generate_redistribute_costs(grad_out_strategy, grad_out_spec),
304
+ generate_redistribute_costs(indices_strategy, indices_spec),
305
+ ]
306
+ strat = PlacementStrategy(
307
+ output_specs=spec_list[0],
308
+ input_specs=spec_list[1:],
309
+ redistribute_cost=redistribute_cost,
310
+ )
311
+ all_strategies.append(strat)
312
+
313
+ return OpStrategy(all_strategies)
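Both strategy functions above follow the same enumeration pattern: per mesh dim, collect a list of [output, weight, indices] placement options, expand them with itertools.product, and regroup per tensor with zip. A small standalone sketch, with purely illustrative placement strings:

import itertools

mesh_dim0 = [["Rep", "Rep", "Rep"], ["Shard(out)", "Shard(1)", "Rep"]]
mesh_dim1 = [["Rep", "Rep", "Rep"], ["MaskPartial", "Shard(0)", "MaskPartial"]]
for comb in itertools.product(mesh_dim0, mesh_dim1):
    out_p, weight_p, indices_p = zip(*comb)
    # each tensor ends up with one placement per mesh dim, e.g.
    # out_p == ("Shard(out)", "MaskPartial") for the mixed combination
    print(out_p, weight_p, indices_p)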
llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/experimental_ops.py ADDED
@@ -0,0 +1,49 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates
2
+ # implement experimental ops for distributed tensor
3
+ from typing import List
4
+
5
+ try:
6
+ import numpy as np
7
+ except ModuleNotFoundError:
8
+ np = None # type: ignore[assignment]
9
+
10
+ import torch
11
+ from torch.distributed._tensor.op_schema import OpSchema, OutputSharding
12
+ from torch.distributed._tensor.ops.utils import register_prop_rule
13
+ from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta
14
+
15
+ aten = torch.ops.aten
16
+
17
+
18
+ @register_prop_rule(aten.slice_backward.default)
19
+ def slice_backward_rules(op_schema: OpSchema) -> OutputSharding:
20
+ grad_output_spec, input_sizes, dim, start, end, step = op_schema.args_schema
21
+ assert isinstance(grad_output_spec, DTensorSpec)
22
+ assert isinstance(input_sizes, List)
23
+ assert grad_output_spec.tensor_meta is not None
24
+ grad_input_stride = list(np.cumprod(input_sizes[::-1])[:-1][::-1])
25
+ grad_input_stride.append(1)
26
+ dim_map = grad_output_spec.dim_map
27
+ sums = grad_output_spec.sums
28
+
29
+ grad_input_tensor_meta = TensorMeta(
30
+ torch.Size(input_sizes),
31
+ tuple(grad_input_stride),
32
+ grad_output_spec.tensor_meta.dtype,
33
+ )
34
+ grad_input_spec = DTensorSpec.from_dim_map(
35
+ grad_output_spec.mesh,
36
+ dim_map,
37
+ sums,
38
+ tensor_meta=grad_input_tensor_meta,
39
+ )
40
+
41
+ return OutputSharding(grad_input_spec)
42
+
43
+
44
+ @register_prop_rule(aten.bernoulli.default)
45
+ @register_prop_rule(aten.bernoulli_.float)
46
+ def bernoulli_rules(op_schema: OpSchema) -> OutputSharding:
47
+ input_spec = op_schema.args_schema[0]
48
+ assert isinstance(input_spec, DTensorSpec)
49
+ return OutputSharding(input_spec)
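The cumprod-of-reversed-sizes trick in slice_backward_rules above produces the contiguous (row-major) strides for the grad input shape; a quick standalone check (the example shape is arbitrary):

import numpy as np
import torch

input_sizes = [2, 3, 4]
grad_input_stride = list(np.cumprod(input_sizes[::-1])[:-1][::-1])
grad_input_stride.append(1)
assert tuple(grad_input_stride) == torch.empty(input_sizes).stride()  # (12, 4, 1)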
llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/math_ops.py ADDED
@@ -0,0 +1,957 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates
2
+ from dataclasses import dataclass
3
+ from enum import Enum
4
+ from typing import cast, List, Optional, Sequence, Tuple, Union
5
+
6
+ import torch
7
+
8
+ import torch.distributed.distributed_c10d as c10d
9
+ from torch.distributed._tensor.op_schema import (
10
+ OpSchema,
11
+ OpStrategy,
12
+ PlacementStrategy,
13
+ RuntimeSchemaInfo,
14
+ TupleStrategy,
15
+ )
16
+ from torch.distributed._tensor.ops.utils import (
17
+ as_list,
18
+ generate_redistribute_costs,
19
+ is_tensor_evenly_shardable,
20
+ normalize_dim,
21
+ normalize_dims,
22
+ normalize_to_torch_size,
23
+ register_op_strategy,
24
+ )
25
+ from torch.distributed._tensor.placement_types import (
26
+ _Partial,
27
+ DTensorSpec,
28
+ Placement,
29
+ Replicate,
30
+ Shard,
31
+ )
32
+ from torch.distributed.device_mesh import DeviceMesh
33
+
34
+
35
+ aten = torch.ops.aten
36
+
37
+
38
+ class Reduction(Enum):
39
+ NONE = 0
40
+ MEAN = 1
41
+ SUM = 2
42
+
43
+
44
+ @dataclass(frozen=True)
45
+ class NormReduction:
46
+ norm_type: Union[int, float, str]
47
+
48
+
49
+ ReductionOpType = Union[NormReduction, c10d.ReduceOp.RedOpType]
50
+
51
+
52
+ @dataclass(frozen=True)
53
+ class _NormPartial(_Partial):
54
+ """
55
+ This placement is used for partial vector norm.
56
+
57
+ For p-norms (where p is not inf or -inf), the p-norm over n elements computes
58
+ (sum_i x_i^p)^(1/p)
59
+ where the sum is from i=1 to n. The reduction op is the p-norm itself.
60
+ For example, consider 2 ranks, a (4,) tensor sharded on dim-0, and 2-norm:
61
+ Rank 0: [t1, t2] | Rank 1: [t3, t4]
62
+ After computing 2-norm per gradient (partial placement):
63
+ Rank 0: [sqrt(t1^2 + t2^2)] | Rank 1: [sqrt(t3^2 + t4^2)]
64
+ Converting from partial to replicate wants to ultimately get:
65
+ Rank 0/1: [sqrt(t1^2 + t2^2 + t3^2 + t4^2)]
66
+ This can be achieved by computing 2-norm on each rank's result. This holds
67
+ similarly for inf and -inf norm. For 0-norm, the reduction op is sum.
68
+ """
69
+
70
+ norm_type: Union[int, float, str] = 2
71
+
72
+ def __post_init__(self):
73
+ """Set the appropriate reduce op based on the norm type."""
74
+ # Use `object.__setattr__` to bypass frozen checks
75
+ if self.norm_type in (float("inf"), "inf"):
76
+ object.__setattr__(self, "reduce_op", c10d.ReduceOp.MAX)
77
+ elif self.norm_type in (float("-inf"), "-inf"):
78
+ object.__setattr__(self, "reduce_op", c10d.ReduceOp.MIN)
79
+ elif isinstance(self.norm_type, (int, float)):
80
+ object.__setattr__(self, "reduce_op", c10d.ReduceOp.SUM)
81
+ else:
82
+ raise NotImplementedError(f"Unsupported norm type: {self.norm_type}")
83
+
84
+ def _partition_value(
85
+ self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int
86
+ ) -> torch.Tensor:
87
+ if self.reduce_op in (c10d.ReduceOp.MAX, c10d.ReduceOp.MIN):
88
+ return tensor
89
+ elif self.reduce_op == c10d.ReduceOp.SUM:
90
+ return tensor / mesh.size(mesh_dim=mesh_dim)
91
+ raise NotImplementedError(self.reduce_op)
92
+
93
+ def _reduce_shard_value(
94
+ self,
95
+ tensor: torch.Tensor,
96
+ mesh: DeviceMesh,
97
+ mesh_dim: int,
98
+ shard_spec: Placement,
99
+ ) -> torch.Tensor:
100
+ assert isinstance(shard_spec, Shard), f"{shard_spec}"
101
+ tensor = self._pre_reduce_transform(tensor)
102
+ reduced_tensor = super()._reduce_shard_value(tensor, mesh, mesh_dim, shard_spec)
103
+ return self._post_reduce_transform(reduced_tensor)
104
+
105
+ def _reduce_value(
106
+ self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int
107
+ ) -> torch.Tensor:
108
+ tensor = self._pre_reduce_transform(tensor)
109
+ reduced_tensor = super()._reduce_value(tensor, mesh, mesh_dim)
110
+ return self._post_reduce_transform(reduced_tensor)
111
+
112
+ def _pre_reduce_transform(self, tensor: torch.Tensor) -> torch.Tensor:
113
+ if self.reduce_op == c10d.ReduceOp.SUM:
114
+ assert isinstance(self.norm_type, (int, float)), f"{self.norm_type}"
115
+ if self.norm_type != 0 and self.norm_type != 1:
116
+ return tensor**self.norm_type
117
+ return tensor
118
+
119
+ def _post_reduce_transform(self, tensor: torch.Tensor) -> torch.Tensor:
120
+ if self.reduce_op == c10d.ReduceOp.SUM:
121
+ assert isinstance(self.norm_type, (int, float)), f"{self.norm_type}"
122
+ if self.norm_type != 0 and self.norm_type != 1:
123
+ return tensor ** (1.0 / self.norm_type)
124
+ return tensor
125
+
126
+
127
+ def _infer_reduction_dims(dims_arg: object, ndim: int) -> Optional[List[int]]:
128
+ if dims_arg is None:
129
+ return None
130
+ dims = cast(List[int], as_list(dims_arg))
131
+ dims = cast(List[int], normalize_dims(dims, ndim))
132
+ empty_dims = [[0], [-1], []]
133
+ if ndim == 0 and dims_arg in empty_dims:
134
+ return None
135
+ return dims
136
+
137
+
138
+ def _infer_reduce_dims_map(
139
+ reduction_dims: List[int], input_ndim: int, keep_dim=False
140
+ ) -> List[int]:
141
+ reduction_dims_map = []
142
+ new_dim_count = 0
143
+ for input_dim in range(input_ndim):
144
+ if input_dim in reduction_dims and not keep_dim:
145
+ # if input dim in reduction dims, mark it as -1
146
+ reduction_dims_map.append(-1)
147
+ else:
148
+ # otherwise mark it as the new dim
149
+ reduction_dims_map.append(new_dim_count)
150
+ new_dim_count += 1
151
+
152
+ return reduction_dims_map
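As a concrete illustration (a hypothetical call, not part of this module): reducing dim 1 of a 3-D input without keepdim gives

_infer_reduce_dims_map([1], input_ndim=3)  # -> [0, -1, 1]

so in map_placements_after_reduction below, a Shard(2) input placement maps to Shard(1) on the output, while a Shard(1) placement collapses into a pending partial reduction.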
153
+
154
+
155
+ def replicate_reduction_dims(
156
+ placements: Tuple[Placement, ...], reduction_dims: List[int]
157
+ ) -> Tuple[Placement, ...]:
158
+ # replicate the reduction dims if not reduction_linear
159
+ new_placements: List[Placement] = []
160
+
161
+ for p in placements:
162
+ if p.is_partial():
163
+ new_placements.append(Replicate())
164
+ elif isinstance(p, Shard) and p.dim in reduction_dims:
165
+ new_placements.append(Replicate())
166
+ else:
167
+ new_placements.append(p)
168
+
169
+ return tuple(new_placements)
170
+
171
+
172
+ def map_placements_after_reduction(
173
+ placements: Tuple[Placement, ...],
174
+ reduction_dims: List[int],
175
+ reduction_dims_map: List[int],
176
+ reduction_op: ReductionOpType,
177
+ ) -> Tuple[Placement, ...]:
178
+ """
179
+ Map each placement based on the output shape after reduction.
180
+ """
181
+ new_placements: List[Placement] = []
182
+ for placement in placements:
183
+ if isinstance(placement, (Replicate, _Partial)):
184
+ new_placements.append(placement)
185
+ else:
186
+ assert isinstance(placement, Shard)
187
+ shard_dim = placement.dim
188
+ new_shard_dim = reduction_dims_map[shard_dim]
189
+ if new_shard_dim == -1 or shard_dim in reduction_dims:
190
+ # if new_shard_dim is collapsed or it's in the reduction dims
191
+ # (i.e. for the case where keepdims=True), we generate partial
192
+ new_placements.append(get_placement_from_reduction_op(reduction_op))
193
+ else:
194
+ new_placements.append(Shard(new_shard_dim))
195
+ return tuple(new_placements)
196
+
197
+
198
+ def get_placement_from_reduction_op(reduction_op: ReductionOpType) -> Placement:
199
+ if isinstance(reduction_op, NormReduction):
200
+ return _NormPartial(norm_type=reduction_op.norm_type)
201
+ return _Partial(reduction_op)
202
+
203
+
204
+ def common_reduction_strategy(
205
+ mesh: DeviceMesh,
206
+ input_strategy: OpStrategy,
207
+ reduce_dims: List[int],
208
+ keep_dim: bool = False,
209
+ reduction_linear: bool = True,
210
+ reduction_op: ReductionOpType = c10d.ReduceOp.SUM,
211
+ ) -> OpStrategy:
212
+ """
213
+ reduction_linear means that the reduction `f` follows this rule:
214
+ f([f(a), f(b)]) = f([a, b])
215
+
216
+ reduction_linear should be a superset of linearity.
217
+ """
218
+ # by default follow reduction input strategy
219
+ reduction_strategy = OpStrategy([])
220
+
221
+ for strtg in input_strategy.strategies:
222
+ if not reduction_linear:
223
+ # input placements for this strategy should clear out pending sum and sharding
224
+ # on the reduction dimension
225
+ input_placements = replicate_reduction_dims(
226
+ strtg.output_spec.placements, reduce_dims
227
+ )
228
+ else:
229
+ input_placements = strtg.output_spec.placements
230
+
231
+ input_spec = DTensorSpec(
232
+ mesh=mesh,
233
+ placements=input_placements,
234
+ tensor_meta=strtg.output_spec.tensor_meta,
235
+ )
236
+
237
+ reduce_dims_map = _infer_reduce_dims_map(reduce_dims, input_spec.ndim, keep_dim)
238
+ out_placements = map_placements_after_reduction(
239
+ input_spec.placements, reduce_dims, reduce_dims_map, reduction_op
240
+ )
241
+ redistribute_cost = [generate_redistribute_costs(input_strategy, input_spec)]
242
+ reduction_strategy.strategies.append(
243
+ PlacementStrategy(
244
+ output_specs=DTensorSpec(
245
+ mesh=mesh,
246
+ placements=out_placements,
247
+ ),
248
+ input_specs=(input_spec,),
249
+ redistribute_cost=redistribute_cost,
250
+ )
251
+ )
252
+
253
+ return reduction_strategy
254
+
255
+
256
+ LINEAR_REDUCTION_OP_MAP = {
257
+ aten.all.default: c10d.ReduceOp.SUM,
258
+ aten.all.dim: c10d.ReduceOp.SUM,
259
+ aten.sum.default: c10d.ReduceOp.SUM,
260
+ aten.sum.dim_IntList: c10d.ReduceOp.SUM,
261
+ aten.prod.default: c10d.ReduceOp.PRODUCT,
262
+ aten.prod.dim_int: c10d.ReduceOp.PRODUCT,
263
+ aten.prod.int_out: c10d.ReduceOp.PRODUCT,
264
+ aten.mean.default: c10d.ReduceOp.AVG,
265
+ aten.mean.dim: c10d.ReduceOp.AVG,
266
+ aten.mean.out: c10d.ReduceOp.AVG,
267
+ aten.max.default: c10d.ReduceOp.MAX,
268
+ aten.max.dim: c10d.ReduceOp.MAX,
269
+ aten.max.out: c10d.ReduceOp.MAX,
270
+ aten.min.default: c10d.ReduceOp.MIN,
271
+ aten.min.dim: c10d.ReduceOp.MIN,
272
+ aten.min.out: c10d.ReduceOp.MIN,
273
+ }
274
+
275
+
276
+ @register_op_strategy(
277
+ list(LINEAR_REDUCTION_OP_MAP.keys()), schema_info=RuntimeSchemaInfo(1)
278
+ )
279
+ def linear_reduction_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy:
280
+ args_schema = op_schema.args_schema
281
+ input_strategy = args_schema[0]
282
+ assert isinstance(input_strategy, OpStrategy)
283
+ dims = None
284
+ if len(op_schema.args_schema) > 1:
285
+ dims = _infer_reduction_dims(args_schema[1], input_strategy.output_ndim)
286
+
287
+ reduce_dims = list(range(input_strategy.output_ndim)) if dims is None else dims
288
+
289
+ keep_dim = len(op_schema.args_schema) > 2 and bool(op_schema.args_schema[2])
290
+ reduction_op = LINEAR_REDUCTION_OP_MAP[op_schema.op]
291
+ return common_reduction_strategy(
292
+ mesh,
293
+ input_strategy,
294
+ reduce_dims,
295
+ keep_dim=keep_dim,
296
+ reduction_linear=True,
297
+ reduction_op=reduction_op,
298
+ )
299
+
300
+
301
+ @register_op_strategy(
302
+ [aten.var.correction, aten.var.correction_out],
303
+ schema_info=RuntimeSchemaInfo(1, ["keepdim"]),
304
+ )
305
+ def var_reduction_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy:
306
+ args_schema = op_schema.args_schema
307
+ input_strategy = args_schema[0]
308
+ assert isinstance(input_strategy, OpStrategy)
309
+ dims = None
310
+ if len(op_schema.args_schema) > 1:
311
+ dims = _infer_reduction_dims(args_schema[1], input_strategy.output_ndim)
312
+
313
+ reduce_dims = list(range(input_strategy.output_ndim)) if dims is None else dims
314
+
315
+ keep_dim = cast(bool, op_schema.kwargs_schema.get("keepdim", False))
316
+ return common_reduction_strategy(
317
+ mesh, input_strategy, reduce_dims, keep_dim=keep_dim, reduction_linear=False
318
+ )
319
+
320
+
321
+ @register_op_strategy(
322
+ [aten.linalg_vector_norm.default], schema_info=RuntimeSchemaInfo(1)
323
+ )
324
+ def vector_norm_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy:
325
+ args_schema = op_schema.args_schema
326
+ input_strategy = args_schema[0]
327
+ assert isinstance(input_strategy, OpStrategy)
328
+ norm_type = args_schema[1] if len(args_schema) > 1 else 2
329
+ assert isinstance(norm_type, (int, float, str)), f"{norm_type}"
330
+ dim = args_schema[2] if len(args_schema) > 2 else None
331
+ keepdim = args_schema[3] if len(args_schema) > 3 else False
332
+ dims = _infer_reduction_dims(dim, input_strategy.output_ndim)
333
+ reduce_dims = list(range(input_strategy.output_ndim)) if dims is None else dims
334
+ return common_reduction_strategy(
335
+ mesh,
336
+ input_strategy,
337
+ reduce_dims,
338
+ keep_dim=cast(bool, keepdim),
339
+ reduction_linear=True,
340
+ reduction_op=NormReduction(norm_type),
341
+ )
342
+
343
+
344
+ @register_op_strategy(
345
+ [aten._foreach_norm.Scalar], schema_info=RuntimeSchemaInfo(1, needs_pytree=True)
346
+ )
347
+ def foreach_norm_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> TupleStrategy:
348
+ args_schema = op_schema.args_schema
349
+ input_tuple_strategy = args_schema[0]
350
+ assert isinstance(input_tuple_strategy, TupleStrategy)
351
+ norm_type = args_schema[1]
352
+ assert isinstance(norm_type, (int, float, str)), f"{norm_type}"
353
+ output_tuple_strategy_childs: List[OpStrategy] = []
354
+ for op_strategy in input_tuple_strategy.childs:
355
+ assert isinstance(op_strategy, OpStrategy), f"{op_strategy}"
356
+ reduce_dims = list(range(op_strategy.output_ndim))
357
+ output_strategy = common_reduction_strategy(
358
+ mesh,
359
+ op_strategy,
360
+ reduce_dims,
361
+ reduction_linear=True,
362
+ reduction_op=NormReduction(norm_type),
363
+ )
364
+ output_tuple_strategy_childs.append(output_strategy)
365
+ return TupleStrategy(output_tuple_strategy_childs)
366
+
367
+
368
+ @register_op_strategy(
369
+ [aten._log_softmax.default, aten._softmax.default], schema_info=RuntimeSchemaInfo(1)
370
+ )
371
+ def softmax_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy:
372
+ input_strategy, softmax_dim, _ = op_schema.args_schema
373
+ input_strategy = cast(OpStrategy, input_strategy)
374
+ softmax_dim = cast(int, softmax_dim)
375
+ softmax_dim = normalize_dim(softmax_dim, input_strategy.output_ndim)
376
+
377
+ output_strategy = OpStrategy([])
378
+ for idx, input_placement_strategy in enumerate(input_strategy.strategies):
379
+ redistribute_costs = []
380
+ input_src_spec = input_placement_strategy.output_spec
381
+
382
+ # make sure input is replicated along the softmax dim
383
+ input_target_spec = DTensorSpec(
384
+ mesh=mesh,
385
+ placements=replicate_reduction_dims(
386
+ input_src_spec.placements, [softmax_dim]
387
+ ),
388
+ tensor_meta=input_src_spec.tensor_meta,
389
+ )
390
+ redistribute_costs.append(
391
+ generate_redistribute_costs(input_strategy, input_target_spec)
392
+ )
393
+ output_target_spec = input_target_spec
394
+ output_strategy.strategies.append(
395
+ PlacementStrategy(
396
+ output_specs=output_target_spec,
397
+ input_specs=[input_target_spec],
398
+ redistribute_cost=redistribute_costs,
399
+ )
400
+ )
401
+
402
+ return output_strategy
403
+
404
+
405
+ @register_op_strategy(
406
+ [
407
+ aten._log_softmax_backward_data.default,
408
+ aten._softmax_backward_data.default,
409
+ ],
410
+ schema_info=RuntimeSchemaInfo(2),
411
+ )
412
+ def softmax_backward_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy:
413
+ grad_out_strategy, out_strategy, softmax_dim, _ = op_schema.args_schema
414
+ grad_out_strategy = cast(OpStrategy, grad_out_strategy)
415
+ out_strategy = cast(OpStrategy, out_strategy)
416
+ softmax_dim = cast(int, softmax_dim)
417
+ softmax_dim = normalize_dim(softmax_dim, grad_out_strategy.output_ndim)
418
+
419
+ grad_in_strategy = OpStrategy([])
420
+ for grad_out_placement_strat, out_placement_strat in zip(
421
+ grad_out_strategy.strategies, out_strategy.strategies
422
+ ):
423
+ # follow the sharding of the grad_out or out depending on which has more shards
424
+ grad_out_src_spec = grad_out_placement_strat.output_spec
425
+ out_src_spec = out_placement_strat.output_spec
426
+ src_spec = (
427
+ grad_out_src_spec
428
+ if grad_out_src_spec.num_shards >= out_src_spec.num_shards
429
+ else out_src_spec
430
+ )
431
+
432
+ # make sure inputs are replicated along the softmax dim
433
+ tgt_spec = DTensorSpec(
434
+ mesh=mesh,
435
+ placements=replicate_reduction_dims(src_spec.placements, [softmax_dim]),
436
+ )
437
+ redist_grad_out_cost = generate_redistribute_costs(grad_out_strategy, tgt_spec)
438
+ redist_out_cost = generate_redistribute_costs(out_strategy, tgt_spec)
439
+ grad_in_strategy.strategies.append(
440
+ PlacementStrategy(
441
+ output_specs=tgt_spec,
442
+ redistribute_cost=[redist_grad_out_cost, redist_out_cost],
443
+ )
444
+ )
445
+
446
+ return grad_in_strategy
447
+
448
+
449
+ @register_op_strategy(
450
+ [aten.nll_loss_forward.default, aten.nll_loss2d_forward.default],
451
+ schema_info=RuntimeSchemaInfo(3),
452
+ )
453
+ def nll_loss_forward_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy:
454
+ assert len(op_schema.args_schema) == 5
455
+ (
456
+ input_strategy,
457
+ target_strategy,
458
+ weight_strategy,
459
+ reduction,
460
+ _,
461
+ ) = op_schema.args_schema
462
+ input_strategy = cast(OpStrategy, input_strategy)
463
+ target_strategy = cast(OpStrategy, target_strategy)
464
+ reduction = cast(int, reduction)
465
+
466
+ input_shape = input_strategy.output_shape
467
+ channel_dim = 1 if len(input_shape) >= 2 else 0
468
+
469
+ output_strategy = OpStrategy([])
470
+ for idx, input_placement_strategy in enumerate(input_strategy.strategies):
471
+ op_args_target_specs = []
472
+ redistribute_costs = []
473
+
474
+ # make sure input is replicated along the channel dim
475
+ input_src_spec = input_placement_strategy.output_spec
476
+ input_expected_spec = DTensorSpec(
477
+ mesh=mesh,
478
+ placements=replicate_reduction_dims(
479
+ input_src_spec.placements, [channel_dim]
480
+ ),
481
+ tensor_meta=input_src_spec.tensor_meta,
482
+ )
483
+ op_args_target_specs.append(input_expected_spec)
484
+ redistribute_costs.append(
485
+ generate_redistribute_costs(input_strategy, input_expected_spec)
486
+ )
487
+
488
+ # target doesn't have channel dim, and it follows input on other dims
489
+ target_src_spec = target_strategy.strategies[idx].output_spec
490
+ target_expected_spec = DTensorSpec(
491
+ mesh=mesh,
492
+ placements=_skip_dim(input_expected_spec.placements, channel_dim),
493
+ tensor_meta=target_src_spec.tensor_meta,
494
+ )
495
+ op_args_target_specs.append(target_expected_spec)
496
+ redistribute_costs.append(
497
+ generate_redistribute_costs(target_strategy, target_expected_spec)
498
+ )
499
+
500
+ # weight tensor, if given, has to be a Tensor of size input_shape[channel_dim]
501
+ # make sure it is replicated
502
+ if weight_strategy is not None:
503
+ assert isinstance(weight_strategy, OpStrategy)
504
+ weight_src_spec = weight_strategy.strategies[idx].output_spec
505
+ weight_expected_spec = DTensorSpec(
506
+ mesh=mesh,
507
+ placements=_replicate_dims_start_at(weight_src_spec.placements),
508
+ tensor_meta=weight_src_spec.tensor_meta,
509
+ )
510
+ op_args_target_specs.append(weight_expected_spec)
511
+ redistribute_costs.append(
512
+ generate_redistribute_costs(weight_strategy, weight_expected_spec)
513
+ )
514
+
515
+ if reduction == Reduction.NONE.value:
516
+ output_expected_spec = target_expected_spec
517
+ total_weight_expected_spec = DTensorSpec(
518
+ mesh=mesh, placements=tuple([Replicate()] * mesh.ndim)
519
+ )
520
+ else:
521
+ if reduction == Reduction.MEAN.value:
522
+ reduction_op = c10d.ReduceOp.AVG
523
+ if not is_tensor_evenly_shardable(
524
+ target_expected_spec.shape, target_expected_spec
525
+ ):
526
+ raise ValueError(
527
+ "The intermediate results of nll_loss cannot be evenly sharded, \
528
+ resulting in biased mean result."
529
+ )
530
+ else: # reduction == Reduction.SUM.value:
531
+ reduction_op = c10d.ReduceOp.SUM
532
+ reduce_dims = list(range(target_expected_spec.ndim))
533
+ reduce_dims_map = _infer_reduce_dims_map(
534
+ reduce_dims, target_expected_spec.ndim, keep_dim=False
535
+ )
536
+ out_placements = map_placements_after_reduction(
537
+ target_expected_spec.placements,
538
+ reduce_dims,
539
+ reduce_dims_map,
540
+ reduction_op,
541
+ )
542
+ output_expected_spec = DTensorSpec(
543
+ mesh=mesh,
544
+ placements=out_placements,
545
+ )
546
+
547
+ # whether reduction is sum or mean, the total weight has to be summed up if not replicated
548
+ total_weight_placements = map_placements_after_reduction(
549
+ target_expected_spec.placements,
550
+ reduce_dims,
551
+ reduce_dims_map,
552
+ c10d.ReduceOp.SUM,
553
+ )
554
+ total_weight_expected_spec = DTensorSpec(
555
+ mesh=mesh,
556
+ placements=total_weight_placements,
557
+ )
558
+
559
+ output_strategy.strategies.append(
560
+ PlacementStrategy(
561
+ output_specs=(output_expected_spec, total_weight_expected_spec),
562
+ input_specs=op_args_target_specs,
563
+ redistribute_cost=redistribute_costs,
564
+ )
565
+ )
566
+
567
+ return output_strategy
568
+
569
+
570
+ @register_op_strategy(
571
+ [aten.nll_loss_backward.default, aten.nll_loss2d_backward.default],
572
+ schema_info=RuntimeSchemaInfo(4),
573
+ )
574
+ def nll_loss_backward_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy:
575
+ assert len(op_schema.args_schema) == 7
576
+ (
577
+ grad_out_strategy,
578
+ input_strategy,
579
+ target_strategy,
580
+ weight_strategy,
581
+ reduction,
582
+ _,
583
+ total_weight_strategy,
584
+ ) = op_schema.args_schema
585
+ grad_out_strategy = cast(OpStrategy, grad_out_strategy)
586
+ input_strategy = cast(OpStrategy, input_strategy)
587
+ target_strategy = cast(OpStrategy, target_strategy)
588
+ reduction = cast(int, reduction)
589
+ total_weight_strategy = cast(OpStrategy, total_weight_strategy)
590
+
591
+ input_shape = input_strategy.output_shape
592
+ channel_dim = 1 if len(input_shape) >= 2 else 0
593
+
594
+ grad_in_strategy = OpStrategy([])
595
+ for idx, input_placement_strategy in enumerate(input_strategy.strategies):
596
+ op_args_target_specs = []
597
+ redistribute_costs = []
598
+
599
+ # make sure input is replicated along the channel dim
600
+ input_src_spec = input_placement_strategy.output_spec
601
+ input_expected_spec = DTensorSpec(
602
+ mesh=mesh,
603
+ placements=replicate_reduction_dims(
604
+ input_src_spec.placements, [channel_dim]
605
+ ),
606
+ tensor_meta=input_src_spec.tensor_meta,
607
+ )
608
+ op_args_target_specs.append(input_expected_spec)
609
+ redistribute_costs.append(
610
+ generate_redistribute_costs(input_strategy, input_expected_spec)
611
+ )
612
+
613
+ # target doesn't have channel dim, and it follows input on other dims
614
+ target_src_spec = target_strategy.strategies[idx].output_spec
615
+ target_expected_spec = DTensorSpec(
616
+ mesh=mesh,
617
+ placements=_skip_dim(input_expected_spec.placements, channel_dim),
618
+ tensor_meta=target_src_spec.tensor_meta,
619
+ )
620
+ op_args_target_specs.append(target_expected_spec)
621
+ redistribute_costs.append(
622
+ generate_redistribute_costs(target_strategy, target_expected_spec)
623
+ )
624
+
625
+ # grad_out follows target if there is no reduction;
626
+ # otherwise, it should be a replicated scalar.
627
+ grad_out_src_spec = grad_out_strategy.strategies[idx].output_spec
628
+ if reduction == Reduction.NONE.value:
629
+ grad_out_expected_spec = target_expected_spec
630
+ else:
631
+ grad_out_expected_spec = DTensorSpec(
632
+ mesh=mesh,
633
+ placements=_replicate_dims_start_at(grad_out_src_spec.placements),
634
+ tensor_meta=grad_out_src_spec.tensor_meta,
635
+ )
636
+ op_args_target_specs.insert(0, grad_out_expected_spec)
637
+ redistribute_costs.insert(
638
+ 0, generate_redistribute_costs(grad_out_strategy, grad_out_expected_spec)
639
+ )
640
+
641
+ # weight tensor, if given, has to be a Tensor of size input_shape[channel_dim]
642
+ # make sure it is replicated
643
+ if weight_strategy is not None:
644
+ assert isinstance(weight_strategy, OpStrategy)
645
+ weight_src_spec = weight_strategy.strategies[idx].output_spec
646
+ weight_expected_spec = DTensorSpec(
647
+ mesh=mesh,
648
+ placements=_replicate_dims_start_at(weight_src_spec.placements),
649
+ tensor_meta=weight_src_spec.tensor_meta,
650
+ )
651
+ op_args_target_specs.append(weight_expected_spec)
652
+ redistribute_costs.append(
653
+ generate_redistribute_costs(weight_strategy, weight_expected_spec)
654
+ )
655
+
656
+ # total_weight should always be replicated
657
+ total_weight_src_spec = total_weight_strategy.strategies[idx].output_spec
658
+ total_weight_expected_spec = DTensorSpec(
659
+ mesh=mesh,
660
+ placements=_replicate_dims_start_at(total_weight_src_spec.placements),
661
+ tensor_meta=total_weight_src_spec.tensor_meta,
662
+ )
663
+ op_args_target_specs.append(total_weight_expected_spec)
664
+ redistribute_costs.append(
665
+ generate_redistribute_costs(
666
+ total_weight_strategy, total_weight_expected_spec
667
+ )
668
+ )
669
+
670
+ grad_in_expected_spec = input_expected_spec
671
+ grad_in_strategy.strategies.append(
672
+ PlacementStrategy(
673
+ output_specs=grad_in_expected_spec,
674
+ input_specs=op_args_target_specs,
675
+ redistribute_cost=redistribute_costs,
676
+ )
677
+ )
678
+
679
+ return grad_in_strategy
680
+
681
+
682
+ @register_op_strategy(
683
+ [aten.native_layer_norm.default],
684
+ schema_info=RuntimeSchemaInfo(1),
685
+ )
686
+ def layer_norm_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy:
687
+ # args must be: input, normalized_shape, weight, bias, eps
688
+ # for None weight and bias, their corresponding objects will
689
+ # be None as well. layer_norm_strategy returns one OpStrategy
690
+ # for the triple return values (out, mean, rstd).
691
+ assert len(op_schema.args_schema) == 5
692
+ (
693
+ input_strategy,
694
+ normalized_shape,
695
+ weight_strategy,
696
+ bias_strategy,
697
+ _,
698
+ ) = op_schema.args_schema
699
+
700
+ # the current layer norm implementation requires that all
701
+ # input DTensor's sharding must be in form of OpStrategy
702
+ assert isinstance(input_strategy, OpStrategy)
703
+ assert isinstance(normalized_shape, (int, Sequence, torch.Size))
704
+ normalized_size = normalize_to_torch_size(normalized_shape)
705
+
706
+ input_ndim = input_strategy.output_ndim
707
+ axis = input_ndim - len(normalized_size)
708
+
709
+ # we use OpStrategy because the output (out, mean, rstd)
710
+ # should have the same placements
711
+ output_strategy = OpStrategy([])
712
+ for idx, input_placement_strategy in enumerate(input_strategy.strategies):
713
+ op_args_target_specs = []
714
+ redistribute_costs = []
715
+ input_src_spec = input_placement_strategy.output_spec
716
+
717
+ # for the input tensor, we replicate it on the inner dims if necessary
718
+ # TODO: we can avoid forcing the redistribution once we figure out
719
+ # how to decompose layer norm
720
+ input_target_spec = DTensorSpec(
721
+ mesh=mesh,
722
+ placements=_replicate_dims_start_at(input_src_spec.placements, axis),
723
+ tensor_meta=input_src_spec.tensor_meta,
724
+ )
725
+ op_args_target_specs.append(input_target_spec)
726
+ redistribute_costs.append(
727
+ generate_redistribute_costs(input_strategy, input_target_spec)
728
+ )
729
+
730
+ if weight_strategy is not None:
731
+ assert isinstance(weight_strategy, OpStrategy)
732
+ weight_src_spec = weight_strategy.strategies[idx].output_spec
733
+
734
+ # for the weight tensor, we replicate it on all dims if necessary
735
+ # TODO: we can avoid forcing the redistribution once we figure out
736
+ # how to decompose layer norm
737
+ weight_target_spec = DTensorSpec(
738
+ mesh=mesh,
739
+ placements=_replicate_dims_start_at(weight_src_spec.placements),
740
+ tensor_meta=weight_src_spec.tensor_meta,
741
+ )
742
+ op_args_target_specs.append(weight_target_spec)
743
+ redistribute_costs.append(
744
+ generate_redistribute_costs(weight_strategy, weight_target_spec)
745
+ )
746
+
747
+ if bias_strategy is not None:
748
+ assert isinstance(bias_strategy, OpStrategy)
749
+ bias_src_spec = bias_strategy.strategies[idx].output_spec
750
+
751
+ # for the bias tensor, we replicate it on all dims if necessary
752
+ # TODO: we can avoid forcing the redistribution once we figure out
753
+ # how to decompose layer norm
754
+ bias_target_spec = DTensorSpec(
755
+ mesh=mesh,
756
+ placements=_replicate_dims_start_at(bias_src_spec.placements),
757
+ tensor_meta=bias_src_spec.tensor_meta,
758
+ )
759
+ op_args_target_specs.append(bias_target_spec)
760
+ redistribute_costs.append(
761
+ generate_redistribute_costs(bias_strategy, bias_target_spec)
762
+ )
763
+
764
+ # the output spec is the same as input spec
765
+ output_target_spec = input_target_spec
766
+ output_strategy.strategies.append(
767
+ PlacementStrategy(
768
+ output_specs=output_target_spec,
769
+ input_specs=op_args_target_specs,
770
+ redistribute_cost=redistribute_costs,
771
+ )
772
+ )
773
+
774
+ return output_strategy
775
+
776
+
777
+ @register_op_strategy(
778
+ [aten.native_layer_norm_backward.default],
779
+ schema_info=RuntimeSchemaInfo(2),
780
+ )
781
+ def layer_norm_bwd_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy:
782
+ # args must be: grad_out, input, normalized_shape, mean, rstd,
783
+ # weight, bias, output_mask. For None weight and bias, their
784
+ # corresponding objects will be None as well.
785
+ assert len(op_schema.args_schema) == 8
786
+ (
787
+ grad_out_strategy,
788
+ input_strategy,
789
+ normalized_shape,
790
+ mean_strategy,
791
+ rstd_strategy,
792
+ weight_strategy,
793
+ bias_strategy,
794
+ output_mask,
795
+ ) = op_schema.args_schema
796
+
797
+ assert isinstance(grad_out_strategy, OpStrategy)
798
+ assert isinstance(input_strategy, OpStrategy)
799
+ assert isinstance(mean_strategy, OpStrategy)
800
+ assert isinstance(rstd_strategy, OpStrategy)
801
+
802
+ assert isinstance(normalized_shape, (int, Sequence, torch.Size))
803
+ normalized_size = normalize_to_torch_size(normalized_shape)
804
+ input_ndim = input_strategy.output_ndim
805
+ axis = input_ndim - len(normalized_size)
806
+ outer_dims = list(range(axis))
807
+
808
+ assert isinstance(output_mask, List) and len(output_mask) == 3
809
+
810
+ # output triple: (d_input, d_weight, d_bias)
811
+ out_tuple_strategy = OpStrategy([])
812
+ for idx, input_placement_strategy in enumerate(input_strategy.strategies):
813
+ # args for PlacementStrategy
814
+ output_specs_list: List[Optional[DTensorSpec]] = []
815
+ op_args_target_specs = []
816
+ redistribute_costs = []
817
+
818
+ input_src_spec = input_placement_strategy.output_spec
819
+ # arg: grad_out
820
+ # TODO: change the strategy to the following rule.
821
+ # d_input is basically a product of element-wise mul of
822
+ # grad_out, rstd, and normalized input, among which rstd
823
+ # and normalized input (x_hat) should have the same sharding
824
+ # placements, and grad_out's sharding is determined by the
825
+ # pointwise result of x_hat and weight/bias.
826
+ if output_mask[0]:
827
+ # TODO: now grad_out spec follows input spec. we may need
828
+ # to change it to apply a pointwise rule over grad_out,
829
+ # input, and weight.
830
+ grad_out_target_spec = DTensorSpec(
831
+ mesh=mesh,
832
+ placements=_replicate_dims_start_at(input_src_spec.placements, axis),
833
+ tensor_meta=input_src_spec.tensor_meta,
834
+ )
835
+ op_args_target_specs.append(grad_out_target_spec)
836
+ redistribute_costs.append(
837
+ generate_redistribute_costs(grad_out_strategy, grad_out_target_spec)
838
+ )
839
+ output_specs_list.append(grad_out_target_spec)
840
+ else:
841
+ output_specs_list.append(None)
842
+
843
+ # arg: input
844
+ input_target_spec = DTensorSpec(
845
+ mesh=mesh,
846
+ placements=_replicate_dims_start_at(input_src_spec.placements, axis),
847
+ tensor_meta=input_src_spec.tensor_meta,
848
+ )
849
+ op_args_target_specs.append(input_target_spec)
850
+ redistribute_costs.append(
851
+ generate_redistribute_costs(input_strategy, input_target_spec)
852
+ )
853
+
854
+ # arg: mean, rstd
855
+ mean_src_spec = mean_strategy.strategies[idx].output_spec
856
+ op_args_target_specs.append(mean_src_spec)
857
+ redistribute_costs.append([0.0 for _ in mean_strategy.strategies])
858
+ rstd_src_spec = rstd_strategy.strategies[idx].output_spec
859
+ op_args_target_specs.append(rstd_src_spec)
860
+ redistribute_costs.append([0.0 for _ in rstd_strategy.strategies])
861
+
862
+ # arg: weight
863
+ # d_weight = sum(grad_out * (input - mean) / rstd, outer_dim, keepdim=False)
864
+ if output_mask[1]:
865
+ assert isinstance(weight_strategy, OpStrategy)
866
+ weight_src_spec = weight_strategy.strategies[idx].output_spec
867
+ # no need to redistribute weight since it should be replicated
868
+ # in forward pass
869
+ op_args_target_specs.append(weight_src_spec)
870
+ redistribute_costs.append([0.0 for _ in weight_strategy.strategies])
871
+ # TODO: now d_weight spec follows input spec w/ a reduction.
872
+ # we may need to change to a pointwise rule over grad_out and
873
+ # input, then apply a reduction.
874
+ inp_placements = _replicate_dims_start_at(input_src_spec.placements, axis)
875
+ reduce_dims_map = _infer_reduce_dims_map(
876
+ outer_dims, input_src_spec.ndim, False
877
+ )
878
+ out_placements = map_placements_after_reduction(
879
+ inp_placements, outer_dims, reduce_dims_map, c10d.ReduceOp.SUM
880
+ )
881
+ output_specs_list.append(
882
+ DTensorSpec(
883
+ mesh=mesh,
884
+ placements=out_placements,
885
+ tensor_meta=weight_src_spec.tensor_meta,
886
+ )
887
+ )
888
+ else:
889
+ output_specs_list.append(None)
890
+
891
+ # arg: bias
892
+ # d_bias = sum(grad_out, outer_dim, keepdim=False)
893
+ if output_mask[2]:
894
+ assert isinstance(bias_strategy, OpStrategy)
895
+ bias_src_spec = bias_strategy.strategies[idx].output_spec
896
+ # no need to redistribute bias since it should be replicated
897
+ # in forward pass
898
+ op_args_target_specs.append(bias_src_spec)
899
+ redistribute_costs.append([0.0 for _ in bias_strategy.strategies])
900
+ # Currently we do not support the case where output_mask[0] is False while
901
+ # output_mask[1] is True. But it's easy to support that by accessing
902
+ # grad_out_spec via a local variable rather than the list. We just don't
903
+ # see the case.
904
+ grad_out_spec = output_specs_list[0]
905
+ assert isinstance(grad_out_spec, DTensorSpec)
906
+ # d_bias spec follows a reduction over grad_out
907
+ inp_placements = _replicate_dims_start_at(grad_out_spec.placements, axis)
908
+ reduce_dims_map = _infer_reduce_dims_map(
909
+ outer_dims, grad_out_spec.ndim, False
910
+ )
911
+ out_placements = map_placements_after_reduction(
912
+ inp_placements, outer_dims, reduce_dims_map, c10d.ReduceOp.SUM
913
+ )
914
+ output_specs_list.append(
915
+ DTensorSpec(
916
+ mesh=mesh,
917
+ placements=out_placements,
918
+ tensor_meta=bias_src_spec.tensor_meta,
919
+ )
920
+ )
921
+ else:
922
+ output_specs_list.append(None)
923
+
924
+ out_tuple_strategy.strategies.append(
925
+ PlacementStrategy(
926
+ output_specs=tuple(output_specs_list),
927
+ input_specs=op_args_target_specs,
928
+ redistribute_cost=redistribute_costs,
929
+ )
930
+ )
931
+
932
+ return out_tuple_strategy
933
+
934
+
935
+ def _replicate_dims_start_at(
936
+ placements: Sequence[Placement], start_dim: int = 0
937
+ ) -> Tuple[Placement, ...]:
938
+ new_placements: List[Placement] = []
939
+ for p in placements:
940
+ if p.is_partial() or (isinstance(p, Shard) and p.dim >= start_dim):
941
+ new_placements.append(Replicate()) # make it replicate
942
+ else:
943
+ new_placements.append(p) # keep the placement
944
+ return tuple(new_placements)
945
+
946
+
947
+ # return new_placements that mirror `placements` after tensor dim `skipped_dim` is removed
948
+ def _skip_dim(
949
+ placements: Tuple[Placement, ...], skipped_dim: int
950
+ ) -> Tuple[Placement, ...]:
951
+ new_placements: List[Placement] = []
952
+ for p in placements:
953
+ if isinstance(p, Shard) and p.dim >= skipped_dim:
954
+ new_placements.append(Shard(p.dim - 1))
955
+ else:
956
+ new_placements.append(p)
957
+ return tuple(new_placements)
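A minimal sketch of how the two placement helpers above behave, assuming this hunk is the tail of torch/distributed/_tensor/ops/math_ops.py so the private helpers are importable from that module:

    from torch.distributed._tensor.ops.math_ops import (
        _replicate_dims_start_at,
        _skip_dim,
    )
    from torch.distributed._tensor.placement_types import Replicate, Shard

    placements = (Shard(0), Shard(2))
    # Shard(2) sits at/after start_dim=1, so it is forced to Replicate();
    # Shard(0) is kept as-is.
    assert _replicate_dims_start_at(placements, start_dim=1) == (Shard(0), Replicate())
    # Dropping tensor dim 1 shifts Shard(2) down to Shard(1); Shard(0) is unchanged.
    assert _skip_dim(placements, skipped_dim=1) == (Shard(0), Shard(1))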
llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/matrix_ops.py ADDED
@@ -0,0 +1,226 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates
2
+ # implement matrix-related ops for distributed tensor
3
+ import itertools
4
+ from typing import List, Optional
5
+
6
+ import torch
7
+ from torch.distributed._tensor.op_schema import (
8
+ OpSchema,
9
+ OpStrategy,
10
+ OutputSharding,
11
+ PlacementStrategy,
12
+ )
13
+ from torch.distributed._tensor.ops.basic_strategy import gen_einsum_strategies
14
+ from torch.distributed._tensor.ops.common_rules import einop_rule
15
+ from torch.distributed._tensor.ops.utils import (
16
+ generate_redistribute_costs,
17
+ infer_broadcast_dims_map,
18
+ is_tensor_shardable,
19
+ map_placements_after_broadcast,
20
+ register_op_strategy,
21
+ register_prop_rule,
22
+ )
23
+ from torch.distributed._tensor.placement_types import (
24
+ DTensorSpec,
25
+ Placement,
26
+ Replicate,
27
+ Shard,
28
+ )
29
+
30
+ from torch.distributed.device_mesh import DeviceMesh
31
+
32
+ aten = torch.ops.aten
33
+
34
+
35
+ @register_prop_rule(aten.t.default)
36
+ def transpose_rule(op_schema: OpSchema) -> OutputSharding:
37
+ return einop_rule("ij->ji", op_schema, linearity=True)
38
+
39
+
40
+ def _mm_like_strategy(
41
+ mm_equation: str, mesh: DeviceMesh, op_schema: OpSchema
42
+ ) -> OpStrategy:
43
+ self_strategy, mat2_strategy = op_schema.args_schema
44
+ assert isinstance(self_strategy, OpStrategy)
45
+ assert isinstance(mat2_strategy, OpStrategy)
46
+ # generate all possible strategies for mm
47
+ mm_strategy = gen_einsum_strategies(mm_equation, mesh)
48
+ # filter out invalid strategies and associate costs
49
+ strategies = mm_strategy.strategies
50
+ filtered_strategies = []
51
+ for strtg in strategies:
52
+ assert strtg.input_specs is not None
53
+ self_spec = strtg.input_specs[0]
54
+ mat2_spec = strtg.input_specs[1]
55
+ if is_tensor_shardable(
56
+ self_strategy.output_shape, self_spec
57
+ ) and is_tensor_shardable(mat2_strategy.output_shape, mat2_spec):
58
+ redistribute_cost = [
59
+ generate_redistribute_costs(self_strategy, self_spec),
60
+ generate_redistribute_costs(mat2_strategy, mat2_spec),
61
+ ]
62
+ strtg.redistribute_cost = redistribute_cost
63
+ filtered_strategies.append(strtg)
64
+
65
+ mm_strategy.strategies = filtered_strategies
66
+
67
+ return mm_strategy
68
+
69
+
70
+ def _addmm_like_strategy(
71
+ mm_equation: str, mesh: DeviceMesh, op_schema: OpSchema
72
+ ) -> OpStrategy:
73
+ self_strategy, mat1_strategy, mat2_strategy = op_schema.args_schema
74
+ assert isinstance(self_strategy, OpStrategy)
75
+ assert isinstance(mat1_strategy, OpStrategy)
76
+ assert isinstance(mat2_strategy, OpStrategy)
77
+ self_shape = self_strategy.output_shape
78
+ mm_out_shape = torch.Size(
79
+ [
80
+ mat2_strategy.output_shape[-1]
81
+ if i == len(mat1_strategy.output_shape) - 1
82
+ else dim_size
83
+ for i, dim_size in enumerate(mat1_strategy.output_shape)
84
+ ]
85
+ )
86
+ # generate all possible strategies for mm
87
+ mm_strategy = gen_einsum_strategies(mm_equation, mesh)
88
+ # filter out invalid strategies and associate costs
89
+ strategies = mm_strategy.strategies
90
+ filtered_strategies = []
91
+ for strtg in strategies:
92
+ # construct new strategy by considering the self arg
93
+ assert strtg.input_specs is not None
94
+ mat1_spec = strtg.input_specs[0]
95
+ mat2_spec = strtg.input_specs[1]
96
+ out_spec = strtg.output_spec
97
+
98
+ # self arg's spec should follow the output of mm, but need
99
+ # to consider broadcast for the self arg
100
+ broadcast_dims_map = infer_broadcast_dims_map(mm_out_shape, self_shape)
101
+ self_placements = map_placements_after_broadcast(
102
+ out_spec.placements, mm_out_shape, broadcast_dims_map
103
+ )
104
+ self_spec = DTensorSpec(mesh=mesh, placements=self_placements)
105
+
106
+ if is_tensor_shardable(
107
+ mat1_strategy.output_shape, mat1_spec
108
+ ) and is_tensor_shardable(mat2_strategy.output_shape, mat2_spec):
109
+ # update input specs with new self spec
110
+ strtg.input_specs = (self_spec, mat1_spec, mat2_spec)
111
+
112
+ # associate costs
113
+ redistribute_cost = [
114
+ generate_redistribute_costs(self_strategy, self_spec),
115
+ generate_redistribute_costs(mat1_strategy, mat1_spec),
116
+ generate_redistribute_costs(mat2_strategy, mat2_spec),
117
+ ]
118
+ strtg.redistribute_cost = redistribute_cost
119
+ filtered_strategies.append(strtg)
120
+
121
+ mm_strategy.strategies = filtered_strategies
122
+
123
+ return mm_strategy
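# A self-contained sketch (not part of this module) of the shape arithmetic used in
# _addmm_like_strategy above: the matmul output keeps mat1's leading dims and takes
# its last dim from mat2, which is what the mm_out_shape comprehension computes.
import torch

mat1_shape, mat2_shape = torch.Size([8, 4]), torch.Size([4, 16])
mm_out_shape_example = torch.Size(
    [
        mat2_shape[-1] if i == len(mat1_shape) - 1 else dim_size
        for i, dim_size in enumerate(mat1_shape)
    ]
)
assert mm_out_shape_example == torch.Size([8, 16])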
124
+
125
+
126
+ @register_op_strategy(aten.mm.default)
127
+ def mm_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy:
128
+ return _mm_like_strategy("mk,kn->mn", mesh, op_schema)
129
+
130
+
131
+ @register_op_strategy(aten.addmm.default)
132
+ def addmm_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy:
133
+ return _addmm_like_strategy("mk,kn->mn", mesh, op_schema)
134
+
135
+
136
+ @register_op_strategy(aten.bmm.default)
137
+ def bmm_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy:
138
+ return _mm_like_strategy("bmk,bkn->bmn", mesh, op_schema)
139
+
140
+
141
+ @register_op_strategy(aten.baddbmm.default)
142
+ def baddmm_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy:
143
+ return _addmm_like_strategy("bmk,bkn->bmn", mesh, op_schema)
144
+
145
+
146
+ @register_op_strategy(aten._scaled_dot_product_flash_attention.default)
147
+ def scaled_dot_product_attention_strategy(
148
+ mesh: DeviceMesh, op_schema: OpSchema
149
+ ) -> OpStrategy:
150
+ # NOTE: currently we only support a few simple sharding strategies, enough for tensor parallelism
151
+ # TODO: sdpa might be a good candidate for us to explore decomposed sharding propagation
152
+ # as it involves: matmul, pointwise, reduction ops together.
153
+ return_debug_mask = len(op_schema.args_schema) >= 6 and op_schema.args_schema[5]
154
+ q_input_strategy = op_schema.args_schema[0]
155
+ assert isinstance(q_input_strategy, OpStrategy)
156
+ # q/k/v have the same shape
157
+ qkv_shape = q_input_strategy.output_shape
158
+
159
+ all_mesh_dim_strategies = []
160
+
161
+ for mesh_dim in range(mesh.ndim):
162
+ single_mesh_dim_strategies = []
163
+
164
+ # placement list stores placements of [outputs, inputs]
165
+ # in the sdpa case, we have 3 valid tensor outputs and 3 tensor inputs
166
+ # first we can always accept full replication for inputs and output
167
+ all_replicate: List[Placement] = [Replicate()] * 6
168
+ single_mesh_dim_strategies.append(all_replicate)
169
+
170
+ # second we can accept the sharding pattern of tensor parallelism, which
171
+ # shards on the num-of-heads dim
172
+ qkv_sharding = Shard(1) # num head dim
173
+ output_sharding = Shard(1) # num head dim
174
+ logsumexp_sharding = Shard(1) # num head dim
175
+ if return_debug_mask:
176
+ debug_attn_mask_sharding: Placement = Shard(1) # num head dim
177
+ else:
178
+ # empty debug mask, replicated
179
+ debug_attn_mask_sharding = Replicate()
180
+
181
+ num_heads_dim_sharding = [
182
+ output_sharding,
183
+ logsumexp_sharding,
184
+ debug_attn_mask_sharding,
185
+ qkv_sharding,
186
+ qkv_sharding,
187
+ qkv_sharding,
188
+ ]
189
+ single_mesh_dim_strategies.append(num_heads_dim_sharding)
190
+
191
+ all_mesh_dim_strategies.append(single_mesh_dim_strategies)
192
+
193
+ strategy_combs = itertools.product(*all_mesh_dim_strategies)
194
+
195
+ all_strategies = []
196
+ for strategy_comb in strategy_combs:
197
+ spec_list = []
198
+ for specs in zip(*strategy_comb):
199
+ spec_list.append(DTensorSpec(mesh, tuple(specs)))
200
+
201
+ assert len(spec_list) == 6
202
+ input_expected_specs = spec_list[3:]
203
+ output_specs: List[Optional[DTensorSpec]] = list(spec_list[:3])
204
+ # fix up output_specs and fill in None for the int and empty tensor return values
205
+ for i in range(2, 8):
206
+ output_specs.insert(i, None)
207
+ if all(is_tensor_shardable(qkv_shape, spec) for spec in input_expected_specs):
208
+ # only add to the strategy list when all inputs are shardable
209
+ redistribute_cost = []
210
+ for input_idx, spec in enumerate(input_expected_specs):
211
+ qkv_strategy = op_schema.args_schema[input_idx]
212
+ assert isinstance(qkv_strategy, OpStrategy)
213
+ qkv_tensor_meta = qkv_strategy.strategies[0].output_spec.tensor_meta
214
+ spec.tensor_meta = qkv_tensor_meta
215
+ redistribute_cost.append(
216
+ generate_redistribute_costs(qkv_strategy, spec)
217
+ )
218
+
219
+ strat = PlacementStrategy(
220
+ output_specs=tuple(output_specs),
221
+ input_specs=tuple(input_expected_specs),
222
+ redistribute_cost=redistribute_cost,
223
+ )
224
+ all_strategies.append(strat)
225
+
226
+ return OpStrategy(all_strategies)
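The padding loop in scaled_dot_product_attention_strategy above lines the three tensor output specs up with the full return list of aten._scaled_dot_product_flash_attention: output and logsumexp first, then the int/empty-tensor returns (filled with None), then debug_attn_mask. A small self-contained sketch of that reshuffle:

    output_specs = ["out", "logsumexp", "debug_attn_mask"]
    for i in range(2, 8):
        output_specs.insert(i, None)
    # The three tensor specs land in slots 0, 1 and 8; the six None entries stand in
    # for the op's non-tensor / empty-tensor outputs.
    assert output_specs == [
        "out", "logsumexp", None, None, None, None, None, None, "debug_attn_mask"
    ]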
llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/random_ops.py ADDED
@@ -0,0 +1,30 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates
2
+ import torch
3
+ from torch.distributed._tensor.op_schema import (
4
+ OpSchema,
5
+ OpStrategy,
6
+ PlacementStrategy,
7
+ StrategyType,
8
+ )
9
+ from torch.distributed._tensor.ops.utils import is_tensor_partial, register_op_strategy
10
+ from torch.distributed.device_mesh import DeviceMesh
11
+
12
+ aten = torch.ops.aten
13
+
14
+
15
+ @register_op_strategy(
16
+ [aten.normal_.default, aten.uniform_.default, aten.native_dropout.default]
17
+ )
18
+ def random_op_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType:
19
+ self_strategy = op_schema.args_schema[0]
20
+ assert isinstance(self_strategy, OpStrategy)
21
+
22
+ random_strategy = OpStrategy([])
23
+ for arg_strategy in self_strategy.strategies:
24
+ arg_spec = arg_strategy.output_spec
25
+ if is_tensor_partial(arg_spec):
26
+ # TODO: figure out how inplace random op should behave when it's partial
27
+ raise RuntimeError(f"{op_schema.op} with _Partial is not supported yet!")
28
+ random_strategy.strategies.append(PlacementStrategy(output_specs=arg_spec))
29
+
30
+ return random_strategy
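A rough usage sketch for the in-place random strategy above. It assumes a multi-process launch (e.g. torchrun --nproc_per_node=2) so that a device mesh can be created; on non-CUDA meshes DTensor may warn that its random-number state is not fully synchronized across ranks:

    import torch
    from torch.distributed._tensor import Shard, distribute_tensor
    from torch.distributed.device_mesh import init_device_mesh

    mesh = init_device_mesh("cpu", (2,))
    dt = distribute_tensor(torch.zeros(8, 4), mesh, [Shard(0)])
    dt.uniform_()  # dispatched through random_op_strategy; the Shard(0) placement is kept
    # A DTensor with a partial placement would instead hit the RuntimeError above.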
llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/apply_optimizer_in_backward.cpython-310.pyc ADDED
Binary file (4.18 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adamax.cpython-310.pyc ADDED
Binary file (2.86 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_rmsprop.cpython-310.pyc ADDED
Binary file (2.73 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_sgd.cpython-310.pyc ADDED
Binary file (3.25 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/optimizer.cpython-310.pyc ADDED
Binary file (8.82 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/zero_redundancy_optimizer.cpython-310.pyc ADDED
Binary file (53.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/fft/__init__.py ADDED
@@ -0,0 +1,1360 @@
1
+ import sys
2
+
3
+ import torch
4
+ from torch._C import _add_docstr, _fft # type: ignore[attr-defined]
5
+ from torch._torch_docs import factory_common_args, common_args
6
+
7
+ __all__ = ['fft', 'ifft', 'fft2', 'ifft2', 'fftn', 'ifftn',
8
+ 'rfft', 'irfft', 'rfft2', 'irfft2', 'rfftn', 'irfftn',
9
+ 'hfft', 'ihfft', 'fftfreq', 'rfftfreq', 'fftshift', 'ifftshift',
10
+ 'Tensor']
11
+
12
+ Tensor = torch.Tensor
13
+
14
+ # Note: This not only adds the doc strings for the spectral ops, but
15
+ # also connects the torch.fft Python namespace to the torch._C._fft builtins.
16
+
17
+ fft = _add_docstr(_fft.fft_fft, r"""
18
+ fft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor
19
+
20
+ Computes the one dimensional discrete Fourier transform of :attr:`input`.
21
+
22
+ Note:
23
+ The Fourier domain representation of any real signal satisfies the
24
+ Hermitian property: `X[i] = conj(X[-i])`. This function always returns both
25
+ the positive and negative frequency terms even though, for real inputs, the
26
+ negative frequencies are redundant. :func:`~torch.fft.rfft` returns the
27
+ more compact one-sided representation where only the positive frequencies
28
+ are returned.
29
+
30
+ Note:
31
+ Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
32
+ However it only supports powers of 2 signal length in every transformed dimension.
33
+
34
+ Args:
35
+ input (Tensor): the input tensor
36
+ n (int, optional): Signal length. If given, the input will either be zero-padded
37
+ or trimmed to this length before computing the FFT.
38
+ dim (int, optional): The dimension along which to take the one dimensional FFT.
39
+ norm (str, optional): Normalization mode. For the forward transform
40
+ (:func:`~torch.fft.fft`), these correspond to:
41
+
42
+ * ``"forward"`` - normalize by ``1/n``
43
+ * ``"backward"`` - no normalization
44
+ * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the FFT orthonormal)
45
+
46
+ Calling the backward transform (:func:`~torch.fft.ifft`) with the same
47
+ normalization mode will apply an overall normalization of ``1/n`` between
48
+ the two transforms. This is required to make :func:`~torch.fft.ifft`
49
+ the exact inverse.
50
+
51
+ Default is ``"backward"`` (no normalization).
52
+
53
+ Keyword args:
54
+ {out}
55
+
56
+ Example:
57
+
58
+ >>> t = torch.arange(4)
59
+ >>> t
60
+ tensor([0, 1, 2, 3])
61
+ >>> torch.fft.fft(t)
62
+ tensor([ 6.+0.j, -2.+2.j, -2.+0.j, -2.-2.j])
63
+
64
+ >>> t = torch.tensor([0.+1.j, 2.+3.j, 4.+5.j, 6.+7.j])
65
+ >>> torch.fft.fft(t)
66
+ tensor([12.+16.j, -8.+0.j, -4.-4.j, 0.-8.j])
67
+ """.format(**common_args))
68
+
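# A self-contained sketch (not part of this module): for any of the three norm modes
# described above, fft followed by ifft with the *same* mode recovers the input,
# because the overall 1/n factor is applied exactly once across the pair.
import torch

t = torch.randn(16, dtype=torch.complex64)
for norm in ("forward", "backward", "ortho"):
    roundtrip = torch.fft.ifft(torch.fft.fft(t, norm=norm), norm=norm)
    torch.testing.assert_close(roundtrip, t)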
69
+ ifft = _add_docstr(_fft.fft_ifft, r"""
70
+ ifft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor
71
+
72
+ Computes the one dimensional inverse discrete Fourier transform of :attr:`input`.
73
+
74
+ Note:
75
+ Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
76
+ However it only supports powers of 2 signal length in every transformed dimension.
77
+
78
+ Args:
79
+ input (Tensor): the input tensor
80
+ n (int, optional): Signal length. If given, the input will either be zero-padded
81
+ or trimmed to this length before computing the IFFT.
82
+ dim (int, optional): The dimension along which to take the one dimensional IFFT.
83
+ norm (str, optional): Normalization mode. For the backward transform
84
+ (:func:`~torch.fft.ifft`), these correspond to:
85
+
86
+ * ``"forward"`` - no normalization
87
+ * ``"backward"`` - normalize by ``1/n``
88
+ * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the IFFT orthonormal)
89
+
90
+ Calling the forward transform (:func:`~torch.fft.fft`) with the same
91
+ normalization mode will apply an overall normalization of ``1/n`` between
92
+ the two transforms. This is required to make :func:`~torch.fft.ifft`
93
+ the exact inverse.
94
+
95
+ Default is ``"backward"`` (normalize by ``1/n``).
96
+
97
+ Keyword args:
98
+ {out}
99
+
100
+ Example:
101
+
102
+ >>> t = torch.tensor([ 6.+0.j, -2.+2.j, -2.+0.j, -2.-2.j])
103
+ >>> torch.fft.ifft(t)
104
+ tensor([0.+0.j, 1.+0.j, 2.+0.j, 3.+0.j])
105
+ """.format(**common_args))
106
+
107
+ fft2 = _add_docstr(_fft.fft_fft2, r"""
108
+ fft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor
109
+
110
+ Computes the 2 dimensional discrete Fourier transform of :attr:`input`.
111
+ Equivalent to :func:`~torch.fft.fftn` but FFTs only the last two dimensions by default.
112
+
113
+ Note:
114
+ The Fourier domain representation of any real signal satisfies the
115
+ Hermitian property: ``X[i, j] = conj(X[-i, -j])``. This
116
+ function always returns all positive and negative frequency terms even
117
+ though, for real inputs, half of these values are redundant.
118
+ :func:`~torch.fft.rfft2` returns the more compact one-sided representation
119
+ where only the positive frequencies of the last dimension are returned.
120
+
121
+ Note:
122
+ Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
123
+ However it only supports powers of 2 signal length in every transformed dimensions.
124
+
125
+ Args:
126
+ input (Tensor): the input tensor
127
+ s (Tuple[int], optional): Signal size in the transformed dimensions.
128
+ If given, each dimension ``dim[i]`` will either be zero-padded or
129
+ trimmed to the length ``s[i]`` before computing the FFT.
130
+ If a length ``-1`` is specified, no padding is done in that dimension.
131
+ Default: ``s = [input.size(d) for d in dim]``
132
+ dim (Tuple[int], optional): Dimensions to be transformed.
133
+ Default: last two dimensions.
134
+ norm (str, optional): Normalization mode. For the forward transform
135
+ (:func:`~torch.fft.fft2`), these correspond to:
136
+
137
+ * ``"forward"`` - normalize by ``1/n``
138
+ * ``"backward"`` - no normalization
139
+ * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the FFT orthonormal)
140
+
141
+ Where ``n = prod(s)`` is the logical FFT size.
142
+ Calling the backward transform (:func:`~torch.fft.ifft2`) with the same
143
+ normalization mode will apply an overall normalization of ``1/n``
144
+ between the two transforms. This is required to make
145
+ :func:`~torch.fft.ifft2` the exact inverse.
146
+
147
+ Default is ``"backward"`` (no normalization).
148
+
149
+ Keyword args:
150
+ {out}
151
+
152
+ Example:
153
+
154
+ >>> x = torch.rand(10, 10, dtype=torch.complex64)
155
+ >>> fft2 = torch.fft.fft2(x)
156
+
157
+ The discrete Fourier transform is separable, so :func:`~torch.fft.fft2`
158
+ here is equivalent to two one-dimensional :func:`~torch.fft.fft` calls:
159
+
160
+ >>> two_ffts = torch.fft.fft(torch.fft.fft(x, dim=0), dim=1)
161
+ >>> torch.testing.assert_close(fft2, two_ffts, check_stride=False)
162
+
163
+ """.format(**common_args))
164
+
165
+ ifft2 = _add_docstr(_fft.fft_ifft2, r"""
166
+ ifft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor
167
+
168
+ Computes the 2 dimensional inverse discrete Fourier transform of :attr:`input`.
169
+ Equivalent to :func:`~torch.fft.ifftn` but IFFTs only the last two dimensions by default.
170
+
171
+ Note:
172
+ Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
173
+ However it only supports powers of 2 signal length in every transformed dimensions.
174
+
175
+ Args:
176
+ input (Tensor): the input tensor
177
+ s (Tuple[int], optional): Signal size in the transformed dimensions.
178
+ If given, each dimension ``dim[i]`` will either be zero-padded or
179
+ trimmed to the length ``s[i]`` before computing the IFFT.
180
+ If a length ``-1`` is specified, no padding is done in that dimension.
181
+ Default: ``s = [input.size(d) for d in dim]``
182
+ dim (Tuple[int], optional): Dimensions to be transformed.
183
+ Default: last two dimensions.
184
+ norm (str, optional): Normalization mode. For the backward transform
185
+ (:func:`~torch.fft.ifft2`), these correspond to:
186
+
187
+ * ``"forward"`` - no normalization
188
+ * ``"backward"`` - normalize by ``1/n``
189
+ * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the IFFT orthonormal)
190
+
191
+ Where ``n = prod(s)`` is the logical IFFT size.
192
+ Calling the forward transform (:func:`~torch.fft.fft2`) with the same
193
+ normalization mode will apply an overall normalization of ``1/n`` between
194
+ the two transforms. This is required to make :func:`~torch.fft.ifft2`
195
+ the exact inverse.
196
+
197
+ Default is ``"backward"`` (normalize by ``1/n``).
198
+
199
+ Keyword args:
200
+ {out}
201
+
202
+ Example:
203
+
204
+ >>> x = torch.rand(10, 10, dtype=torch.complex64)
205
+ >>> ifft2 = torch.fft.ifft2(x)
206
+
207
+ The discrete Fourier transform is separable, so :func:`~torch.fft.ifft2`
208
+ here is equivalent to two one-dimensional :func:`~torch.fft.ifft` calls:
209
+
210
+ >>> two_iffts = torch.fft.ifft(torch.fft.ifft(x, dim=0), dim=1)
211
+ >>> torch.testing.assert_close(ifft2, two_iffts, check_stride=False)
212
+
213
+ """.format(**common_args))
214
+
215
+ fftn = _add_docstr(_fft.fft_fftn, r"""
216
+ fftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor
217
+
218
+ Computes the N dimensional discrete Fourier transform of :attr:`input`.
219
+
220
+ Note:
221
+ The Fourier domain representation of any real signal satisfies the
222
+ Hermitian property: ``X[i_1, ..., i_n] = conj(X[-i_1, ..., -i_n])``. This
223
+ function always returns all positive and negative frequency terms even
224
+ though, for real inputs, half of these values are redundant.
225
+ :func:`~torch.fft.rfftn` returns the more compact one-sided representation
226
+ where only the positive frequencies of the last dimension are returned.
227
+
228
+ Note:
229
+ Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
230
+ However it only supports powers of 2 signal length in every transformed dimensions.
231
+
232
+ Args:
233
+ input (Tensor): the input tensor
234
+ s (Tuple[int], optional): Signal size in the transformed dimensions.
235
+ If given, each dimension ``dim[i]`` will either be zero-padded or
236
+ trimmed to the length ``s[i]`` before computing the FFT.
237
+ If a length ``-1`` is specified, no padding is done in that dimension.
238
+ Default: ``s = [input.size(d) for d in dim]``
239
+ dim (Tuple[int], optional): Dimensions to be transformed.
240
+ Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given.
241
+ norm (str, optional): Normalization mode. For the forward transform
242
+ (:func:`~torch.fft.fftn`), these correspond to:
243
+
244
+ * ``"forward"`` - normalize by ``1/n``
245
+ * ``"backward"`` - no normalization
246
+ * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the FFT orthonormal)
247
+
248
+ Where ``n = prod(s)`` is the logical FFT size.
249
+ Calling the backward transform (:func:`~torch.fft.ifftn`) with the same
250
+ normalization mode will apply an overall normalization of ``1/n``
251
+ between the two transforms. This is required to make
252
+ :func:`~torch.fft.ifftn` the exact inverse.
253
+
254
+ Default is ``"backward"`` (no normalization).
255
+
256
+ Keyword args:
257
+ {out}
258
+
259
+ Example:
260
+
261
+ >>> x = torch.rand(10, 10, dtype=torch.complex64)
262
+ >>> fftn = torch.fft.fftn(x)
263
+
264
+ The discrete Fourier transform is separable, so :func:`~torch.fft.fftn`
265
+ here is equivalent to two one-dimensional :func:`~torch.fft.fft` calls:
266
+
267
+ >>> two_ffts = torch.fft.fft(torch.fft.fft(x, dim=0), dim=1)
268
+ >>> torch.testing.assert_close(fftn, two_ffts, check_stride=False)
269
+
270
+ """.format(**common_args))
271
+
272
+ ifftn = _add_docstr(_fft.fft_ifftn, r"""
273
+ ifftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor
274
+
275
+ Computes the N dimensional inverse discrete Fourier transform of :attr:`input`.
276
+
277
+ Note:
278
+ Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
279
+ However it only supports powers of 2 signal length in every transformed dimensions.
280
+
281
+ Args:
282
+ input (Tensor): the input tensor
283
+ s (Tuple[int], optional): Signal size in the transformed dimensions.
284
+ If given, each dimension ``dim[i]`` will either be zero-padded or
285
+ trimmed to the length ``s[i]`` before computing the IFFT.
286
+ If a length ``-1`` is specified, no padding is done in that dimension.
287
+ Default: ``s = [input.size(d) for d in dim]``
288
+ dim (Tuple[int], optional): Dimensions to be transformed.
289
+ Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given.
290
+ norm (str, optional): Normalization mode. For the backward transform
291
+ (:func:`~torch.fft.ifftn`), these correspond to:
292
+
293
+ * ``"forward"`` - no normalization
294
+ * ``"backward"`` - normalize by ``1/n``
295
+ * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the IFFT orthonormal)
296
+
297
+ Where ``n = prod(s)`` is the logical IFFT size.
298
+ Calling the forward transform (:func:`~torch.fft.fftn`) with the same
299
+ normalization mode will apply an overall normalization of ``1/n`` between
300
+ the two transforms. This is required to make :func:`~torch.fft.ifftn`
301
+ the exact inverse.
302
+
303
+ Default is ``"backward"`` (normalize by ``1/n``).
304
+
305
+ Keyword args:
306
+ {out}
307
+
308
+ Example:
309
+
310
+ >>> x = torch.rand(10, 10, dtype=torch.complex64)
311
+ >>> ifftn = torch.fft.ifftn(x)
312
+
313
+ The discrete Fourier transform is separable, so :func:`~torch.fft.ifftn`
314
+ here is equivalent to two one-dimensional :func:`~torch.fft.ifft` calls:
315
+
316
+ >>> two_iffts = torch.fft.ifft(torch.fft.ifft(x, dim=0), dim=1)
317
+ >>> torch.testing.assert_close(ifftn, two_iffts, check_stride=False)
318
+
319
+ """.format(**common_args))
320
+
321
+ rfft = _add_docstr(_fft.fft_rfft, r"""
322
+ rfft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor
323
+
324
+ Computes the one dimensional Fourier transform of real-valued :attr:`input`.
325
+
326
+ The FFT of a real signal is Hermitian-symmetric, ``X[i] = conj(X[-i])`` so
327
+ the output contains only the positive frequencies below the Nyquist frequency.
328
+ To compute the full output, use :func:`~torch.fft.fft`
329
+
330
+ Note:
331
+ Supports torch.half on CUDA with GPU Architecture SM53 or greater.
332
+ However it only supports powers of 2 signal length in every transformed dimension.
333
+
334
+ Args:
335
+ input (Tensor): the real input tensor
336
+ n (int, optional): Signal length. If given, the input will either be zero-padded
337
+ or trimmed to this length before computing the real FFT.
338
+ dim (int, optional): The dimension along which to take the one dimensional real FFT.
339
+ norm (str, optional): Normalization mode. For the forward transform
340
+ (:func:`~torch.fft.rfft`), these correspond to:
341
+
342
+ * ``"forward"`` - normalize by ``1/n``
343
+ * ``"backward"`` - no normalization
344
+ * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the FFT orthonormal)
345
+
346
+ Calling the backward transform (:func:`~torch.fft.irfft`) with the same
347
+ normalization mode will apply an overall normalization of ``1/n`` between
348
+ the two transforms. This is required to make :func:`~torch.fft.irfft`
349
+ the exact inverse.
350
+
351
+ Default is ``"backward"`` (no normalization).
352
+
353
+ Keyword args:
354
+ {out}
355
+
356
+ Example:
357
+
358
+ >>> t = torch.arange(4)
359
+ >>> t
360
+ tensor([0, 1, 2, 3])
361
+ >>> torch.fft.rfft(t)
362
+ tensor([ 6.+0.j, -2.+2.j, -2.+0.j])
363
+
364
+ Compare against the full output from :func:`~torch.fft.fft`:
365
+
366
+ >>> torch.fft.fft(t)
367
+ tensor([ 6.+0.j, -2.+2.j, -2.+0.j, -2.-2.j])
368
+
369
+ Notice that the symmetric element ``T[-1] == T[1].conj()`` is omitted.
370
+ At the Nyquist frequency ``T[-2] == T[2]`` is its own symmetric pair,
371
+ and therefore must always be real-valued.
372
+ """.format(**common_args))
373
+
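# A self-contained sketch (not part of this module): rfft keeps exactly the
# n // 2 + 1 non-redundant bins of the full FFT of a real signal, as described above.
import torch

t = torch.randn(10)
full = torch.fft.fft(t)
half = torch.fft.rfft(t)
assert half.shape[-1] == t.numel() // 2 + 1
torch.testing.assert_close(half, full[: t.numel() // 2 + 1])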
374
+ irfft = _add_docstr(_fft.fft_irfft, r"""
375
+ irfft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor
376
+
377
+ Computes the inverse of :func:`~torch.fft.rfft`.
378
+
379
+ :attr:`input` is interpreted as a one-sided Hermitian signal in the Fourier
380
+ domain, as produced by :func:`~torch.fft.rfft`. By the Hermitian property, the
381
+ output will be real-valued.
382
+
383
+ Note:
384
+ Some input frequencies must be real-valued to satisfy the Hermitian
385
+ property. In these cases the imaginary component will be ignored.
386
+ For example, any imaginary component in the zero-frequency term cannot
387
+ be represented in a real output and so will always be ignored.
388
+
389
+ Note:
390
+ The correct interpretation of the Hermitian input depends on the length of
391
+ the original data, as given by :attr:`n`. This is because each input shape
392
+ could correspond to either an odd or even length signal. By default, the
393
+ signal is assumed to be even length and odd signals will not round-trip
394
+ properly. So, it is recommended to always pass the signal length :attr:`n`.
395
+
396
+ Note:
397
+ Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
398
+ However it only supports powers of 2 signal length in every transformed dimension.
399
+ With default arguments, size of the transformed dimension should be (2^n + 1) as argument
400
+ `n` defaults to even output size = 2 * (transformed_dim_size - 1)
401
+
402
+ Args:
403
+ input (Tensor): the input tensor representing a half-Hermitian signal
404
+ n (int, optional): Output signal length. This determines the length of the
405
+ output signal. If given, the input will either be zero-padded or trimmed to this
406
+ length before computing the real IFFT.
407
+ Defaults to even output: ``n=2*(input.size(dim) - 1)``.
408
+ dim (int, optional): The dimension along which to take the one dimensional real IFFT.
409
+ norm (str, optional): Normalization mode. For the backward transform
410
+ (:func:`~torch.fft.irfft`), these correspond to:
411
+
412
+ * ``"forward"`` - no normalization
413
+ * ``"backward"`` - normalize by ``1/n``
414
+ * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the real IFFT orthonormal)
415
+
416
+ Calling the forward transform (:func:`~torch.fft.rfft`) with the same
417
+ normalization mode will apply an overall normalization of ``1/n`` between
418
+ the two transforms. This is required to make :func:`~torch.fft.irfft`
419
+ the exact inverse.
420
+
421
+ Default is ``"backward"`` (normalize by ``1/n``).
422
+
423
+ Keyword args:
424
+ {out}
425
+
426
+ Example:
427
+
428
+ >>> t = torch.linspace(0, 1, 5)
429
+ >>> t
430
+ tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000])
431
+ >>> T = torch.fft.rfft(t)
432
+ >>> T
433
+ tensor([ 2.5000+0.0000j, -0.6250+0.8602j, -0.6250+0.2031j])
434
+
435
+ Without specifying the output length to :func:`~torch.fft.irfft`, the output
436
+ will not round-trip properly because the input is odd-length:
437
+
438
+ >>> torch.fft.irfft(T)
439
+ tensor([0.1562, 0.3511, 0.7812, 1.2114])
440
+
441
+ So, it is recommended to always pass the signal length :attr:`n`:
442
+
443
+ >>> roundtrip = torch.fft.irfft(T, t.numel())
444
+ >>> torch.testing.assert_close(roundtrip, t, check_stride=False)
445
+
446
+ """.format(**common_args))
447
+
448
+ rfft2 = _add_docstr(_fft.fft_rfft2, r"""
449
+ rfft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor
450
+
451
+ Computes the 2-dimensional discrete Fourier transform of real :attr:`input`.
452
+ Equivalent to :func:`~torch.fft.rfftn` but FFTs only the last two dimensions by default.
453
+
454
+ The FFT of a real signal is Hermitian-symmetric, ``X[i, j] = conj(X[-i, -j])``,
455
+ so the full :func:`~torch.fft.fft2` output contains redundant information.
456
+ :func:`~torch.fft.rfft2` instead omits the negative frequencies in the last
457
+ dimension.
458
+
459
+ Note:
460
+ Supports torch.half on CUDA with GPU Architecture SM53 or greater.
461
+ However it only supports powers of 2 signal length in every transformed dimensions.
462
+
463
+ Args:
464
+ input (Tensor): the input tensor
465
+ s (Tuple[int], optional): Signal size in the transformed dimensions.
466
+ If given, each dimension ``dim[i]`` will either be zero-padded or
467
+ trimmed to the length ``s[i]`` before computing the real FFT.
468
+ If a length ``-1`` is specified, no padding is done in that dimension.
469
+ Default: ``s = [input.size(d) for d in dim]``
470
+ dim (Tuple[int], optional): Dimensions to be transformed.
471
+ Default: last two dimensions.
472
+ norm (str, optional): Normalization mode. For the forward transform
473
+ (:func:`~torch.fft.rfft2`), these correspond to:
474
+
475
+ * ``"forward"`` - normalize by ``1/n``
476
+ * ``"backward"`` - no normalization
477
+ * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the real FFT orthonormal)
478
+
479
+ Where ``n = prod(s)`` is the logical FFT size.
480
+ Calling the backward transform (:func:`~torch.fft.irfft2`) with the same
481
+ normalization mode will apply an overall normalization of ``1/n`` between
482
+ the two transforms. This is required to make :func:`~torch.fft.irfft2`
483
+ the exact inverse.
484
+
485
+ Default is ``"backward"`` (no normalization).
486
+
487
+ Keyword args:
488
+ {out}
489
+
490
+ Example:
491
+
492
+ >>> t = torch.rand(10, 10)
493
+ >>> rfft2 = torch.fft.rfft2(t)
494
+ >>> rfft2.size()
495
+ torch.Size([10, 6])
496
+
497
+ Compared against the full output from :func:`~torch.fft.fft2`, we have all
498
+ elements up to the Nyquist frequency.
499
+
500
+ >>> fft2 = torch.fft.fft2(t)
501
+ >>> torch.testing.assert_close(fft2[..., :6], rfft2, check_stride=False)
502
+
503
+ The discrete Fourier transform is separable, so :func:`~torch.fft.rfft2`
504
+ here is equivalent to a combination of :func:`~torch.fft.fft` and
505
+ :func:`~torch.fft.rfft`:
506
+
507
+ >>> two_ffts = torch.fft.fft(torch.fft.rfft(t, dim=1), dim=0)
508
+ >>> torch.testing.assert_close(rfft2, two_ffts, check_stride=False)
509
+
510
+ """.format(**common_args))
511
+
512
+ irfft2 = _add_docstr(_fft.fft_irfft2, r"""
513
+ irfft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor
514
+
515
+ Computes the inverse of :func:`~torch.fft.rfft2`.
516
+ Equivalent to :func:`~torch.fft.irfftn` but IFFTs only the last two dimensions by default.
517
+
518
+ :attr:`input` is interpreted as a one-sided Hermitian signal in the Fourier
519
+ domain, as produced by :func:`~torch.fft.rfft2`. By the Hermitian property, the
520
+ output will be real-valued.
521
+
522
+ Note:
523
+ Some input frequencies must be real-valued to satisfy the Hermitian
524
+ property. In these cases the imaginary component will be ignored.
525
+ For example, any imaginary component in the zero-frequency term cannot
526
+ be represented in a real output and so will always be ignored.
527
+
528
+ Note:
529
+ The correct interpretation of the Hermitian input depends on the length of
530
+ the original data, as given by :attr:`s`. This is because each input shape
531
+ could correspond to either an odd or even length signal. By default, the
532
+ signal is assumed to be even length and odd signals will not round-trip
533
+ properly. So, it is recommended to always pass the signal shape :attr:`s`.
534
+
535
+ Note:
536
+ Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
537
+ However it only supports powers of 2 signal length in every transformed dimensions.
538
+ With default arguments, the size of last dimension should be (2^n + 1) as argument
539
+ `s` defaults to even output size = 2 * (last_dim_size - 1)
540
+
541
+ Args:
542
+ input (Tensor): the input tensor
543
+ s (Tuple[int], optional): Signal size in the transformed dimensions.
544
+ If given, each dimension ``dim[i]`` will either be zero-padded or
545
+ trimmed to the length ``s[i]`` before computing the real FFT.
546
+ If a length ``-1`` is specified, no padding is done in that dimension.
547
+ Defaults to even output in the last dimension:
548
+ ``s[-1] = 2*(input.size(dim[-1]) - 1)``.
549
+ dim (Tuple[int], optional): Dimensions to be transformed.
550
+ The last dimension must be the half-Hermitian compressed dimension.
551
+ Default: last two dimensions.
552
+ norm (str, optional): Normalization mode. For the backward transform
553
+ (:func:`~torch.fft.irfft2`), these correspond to:
554
+
555
+ * ``"forward"`` - no normalization
556
+ * ``"backward"`` - normalize by ``1/n``
557
+ * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the real IFFT orthonormal)
558
+
559
+ Where ``n = prod(s)`` is the logical IFFT size.
560
+ Calling the forward transform (:func:`~torch.fft.rfft2`) with the same
561
+ normalization mode will apply an overall normalization of ``1/n`` between
562
+ the two transforms. This is required to make :func:`~torch.fft.irfft2`
563
+ the exact inverse.
564
+
565
+ Default is ``"backward"`` (normalize by ``1/n``).
566
+
567
+ Keyword args:
568
+ {out}
569
+
570
+ Example:
571
+
572
+ >>> t = torch.rand(10, 9)
573
+ >>> T = torch.fft.rfft2(t)
574
+
575
+ Without specifying the output length to :func:`~torch.fft.irfft2`, the output
576
+ will not round-trip properly because the input is odd-length in the last
577
+ dimension:
578
+
579
+ >>> torch.fft.irfft2(T).size()
580
+ torch.Size([10, 8])
581
+
582
+ So, it is recommended to always pass the signal shape :attr:`s`.
583
+
584
+ >>> roundtrip = torch.fft.irfft2(T, t.size())
585
+ >>> roundtrip.size()
586
+ torch.Size([10, 9])
587
+ >>> torch.testing.assert_close(roundtrip, t, check_stride=False)
588
+
589
+ """.format(**common_args))
590
+
591
+ rfftn = _add_docstr(_fft.fft_rfftn, r"""
592
+ rfftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor
593
+
594
+ Computes the N-dimensional discrete Fourier transform of real :attr:`input`.
595
+
596
+ The FFT of a real signal is Hermitian-symmetric,
597
+ ``X[i_1, ..., i_n] = conj(X[-i_1, ..., -i_n])`` so the full
598
+ :func:`~torch.fft.fftn` output contains redundant information.
599
+ :func:`~torch.fft.rfftn` instead omits the negative frequencies in the
600
+ last dimension.
601
+
602
+ Note:
603
+ Supports torch.half on CUDA with GPU Architecture SM53 or greater.
604
+ However it only supports powers of 2 signal length in every transformed dimensions.
605
+
606
+ Args:
607
+ input (Tensor): the input tensor
608
+ s (Tuple[int], optional): Signal size in the transformed dimensions.
609
+ If given, each dimension ``dim[i]`` will either be zero-padded or
610
+ trimmed to the length ``s[i]`` before computing the real FFT.
611
+ If a length ``-1`` is specified, no padding is done in that dimension.
612
+ Default: ``s = [input.size(d) for d in dim]``
613
+ dim (Tuple[int], optional): Dimensions to be transformed.
614
+ Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given.
615
+ norm (str, optional): Normalization mode. For the forward transform
616
+ (:func:`~torch.fft.rfftn`), these correspond to:
617
+
618
+ * ``"forward"`` - normalize by ``1/n``
619
+ * ``"backward"`` - no normalization
620
+ * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the real FFT orthonormal)
621
+
622
+ Where ``n = prod(s)`` is the logical FFT size.
623
+ Calling the backward transform (:func:`~torch.fft.irfftn`) with the same
624
+ normalization mode will apply an overall normalization of ``1/n`` between
625
+ the two transforms. This is required to make :func:`~torch.fft.irfftn`
626
+ the exact inverse.
627
+
628
+ Default is ``"backward"`` (no normalization).
629
+
630
+ Keyword args:
631
+ {out}
632
+
633
+ Example:
634
+
635
+ >>> t = torch.rand(10, 10)
636
+ >>> rfftn = torch.fft.rfftn(t)
637
+ >>> rfftn.size()
638
+ torch.Size([10, 6])
639
+
640
+ Compared against the full output from :func:`~torch.fft.fftn`, we have all
641
+ elements up to the Nyquist frequency.
642
+
643
+ >>> fftn = torch.fft.fftn(t)
644
+ >>> torch.testing.assert_close(fftn[..., :6], rfftn, check_stride=False)
645
+
646
+ The discrete Fourier transform is separable, so :func:`~torch.fft.rfftn`
647
+ here is equivalent to a combination of :func:`~torch.fft.fft` and
648
+ :func:`~torch.fft.rfft`:
649
+
650
+ >>> two_ffts = torch.fft.fft(torch.fft.rfft(t, dim=1), dim=0)
651
+ >>> torch.testing.assert_close(rfftn, two_ffts, check_stride=False)
652
+
653
+ """.format(**common_args))
654
+
655
+ irfftn = _add_docstr(_fft.fft_irfftn, r"""
656
+ irfftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor
657
+
658
+ Computes the inverse of :func:`~torch.fft.rfftn`.
659
+
660
+ :attr:`input` is interpreted as a one-sided Hermitian signal in the Fourier
661
+ domain, as produced by :func:`~torch.fft.rfftn`. By the Hermitian property, the
662
+ output will be real-valued.
663
+
664
+ Note:
665
+ Some input frequencies must be real-valued to satisfy the Hermitian
666
+ property. In these cases the imaginary component will be ignored.
667
+ For example, any imaginary component in the zero-frequency term cannot
668
+ be represented in a real output and so will always be ignored.
669
+
670
+ Note:
671
+ The correct interpretation of the Hermitian input depends on the length of
672
+ the original data, as given by :attr:`s`. This is because each input shape
673
+ could correspond to either an odd or even length signal. By default, the
674
+ signal is assumed to be even length and odd signals will not round-trip
675
+ properly. So, it is recommended to always pass the signal shape :attr:`s`.
676
+
677
+ Note:
678
+ Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
679
+ However it only supports powers of 2 signal length in every transformed dimensions.
680
+ With default arguments, the size of last dimension should be (2^n + 1) as argument
681
+ `s` defaults to even output size = 2 * (last_dim_size - 1)
682
+
683
+ Args:
684
+ input (Tensor): the input tensor
685
+ s (Tuple[int], optional): Signal size in the transformed dimensions.
686
+ If given, each dimension ``dim[i]`` will either be zero-padded or
687
+ trimmed to the length ``s[i]`` before computing the real FFT.
688
+ If a length ``-1`` is specified, no padding is done in that dimension.
689
+ Defaults to even output in the last dimension:
690
+ ``s[-1] = 2*(input.size(dim[-1]) - 1)``.
691
+ dim (Tuple[int], optional): Dimensions to be transformed.
692
+ The last dimension must be the half-Hermitian compressed dimension.
693
+ Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given.
694
+ norm (str, optional): Normalization mode. For the backward transform
695
+ (:func:`~torch.fft.irfftn`), these correspond to:
696
+
697
+ * ``"forward"`` - no normalization
698
+ * ``"backward"`` - normalize by ``1/n``
699
+ * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the real IFFT orthonormal)
700
+
701
+ Where ``n = prod(s)`` is the logical IFFT size.
702
+ Calling the forward transform (:func:`~torch.fft.rfftn`) with the same
703
+ normalization mode will apply an overall normalization of ``1/n`` between
704
+ the two transforms. This is required to make :func:`~torch.fft.irfftn`
705
+ the exact inverse.
706
+
707
+ Default is ``"backward"`` (normalize by ``1/n``).
708
+
709
+ Keyword args:
710
+ {out}
711
+
712
+ Example:
713
+
714
+ >>> t = torch.rand(10, 9)
715
+ >>> T = torch.fft.rfftn(t)
716
+
717
+ Without specifying the output length to :func:`~torch.fft.irfftn`, the output
718
+ will not round-trip properly because the input is odd-length in the last
719
+ dimension:
720
+
721
+ >>> torch.fft.irfftn(T).size()
722
+ torch.Size([10, 8])
723
+
724
+ So, it is recommended to always pass the signal shape :attr:`s`.
725
+
726
+ >>> roundtrip = torch.fft.irfftn(T, t.size())
727
+ >>> roundtrip.size()
728
+ torch.Size([10, 9])
729
+ >>> torch.testing.assert_close(roundtrip, t, check_stride=False)
730
+
731
+ """.format(**common_args))
732
+
733
+ hfft = _add_docstr(_fft.fft_hfft, r"""
734
+ hfft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor
735
+
736
+ Computes the one dimensional discrete Fourier transform of a Hermitian
737
+ symmetric :attr:`input` signal.
738
+
739
+ Note:
740
+
741
+ :func:`~torch.fft.hfft`/:func:`~torch.fft.ihfft` are analogous to
742
+ :func:`~torch.fft.rfft`/:func:`~torch.fft.irfft`. The real FFT expects
743
+ a real signal in the time-domain and gives a Hermitian symmetry in the
744
+ frequency-domain. The Hermitian FFT is the opposite; Hermitian symmetric in
745
+ the time-domain and real-valued in the frequency-domain. For this reason,
746
+ special care needs to be taken with the length argument :attr:`n`, in the
747
+ same way as with :func:`~torch.fft.irfft`.
748
+
749
+ Note:
750
+ Because the signal is Hermitian in the time-domain, the result will be
751
+ real in the frequency domain. Note that some input frequencies must be
752
+ real-valued to satisfy the Hermitian property. In these cases the imaginary
753
+ component will be ignored. For example, any imaginary component in
754
+ ``input[0]`` would result in one or more complex frequency terms which
755
+ cannot be represented in a real output and so will always be ignored.
756
+
757
+ Note:
758
+ The correct interpretation of the Hermitian input depends on the length of
759
+ the original data, as given by :attr:`n`. This is because each input shape
760
+ could correspond to either an odd or even length signal. By default, the
761
+ signal is assumed to be even length and odd signals will not round-trip
762
+ properly. So, it is recommended to always pass the signal length :attr:`n`.
763
+
764
+ Note:
765
+ Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
766
+ However it only supports powers of 2 signal length in every transformed dimension.
767
+ With default arguments, size of the transformed dimension should be (2^n + 1) as argument
768
+ `n` defaults to even output size = 2 * (transformed_dim_size - 1)
769
+
770
+ Args:
771
+ input (Tensor): the input tensor representing a half-Hermitian signal
772
+ n (int, optional): Output signal length. This determines the length of the
773
+ real output. If given, the input will either be zero-padded or trimmed to this
774
+ length before computing the Hermitian FFT.
775
+ Defaults to even output: ``n=2*(input.size(dim) - 1)``.
776
+ dim (int, optional): The dimension along which to take the one dimensional Hermitian FFT.
777
+ norm (str, optional): Normalization mode. For the forward transform
778
+ (:func:`~torch.fft.hfft`), these correspond to:
779
+
780
+ * ``"forward"`` - normalize by ``1/n``
781
+ * ``"backward"`` - no normalization
782
+ * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the Hermitian FFT orthonormal)
783
+
784
+ Calling the backward transform (:func:`~torch.fft.ihfft`) with the same
785
+ normalization mode will apply an overall normalization of ``1/n`` between
786
+ the two transforms. This is required to make :func:`~torch.fft.ihfft`
787
+ the exact inverse.
788
+
789
+ Default is ``"backward"`` (no normalization).
790
+
791
+ Keyword args:
792
+ {out}
793
+
794
+ Example:
795
+
796
+ Taking a real-valued frequency signal and bringing it into the time domain
797
+ gives Hermitian symmetric output:
798
+
799
+ >>> t = torch.linspace(0, 1, 5)
800
+ >>> t
801
+ tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000])
802
+ >>> T = torch.fft.ifft(t)
803
+ >>> T
804
+ tensor([ 0.5000-0.0000j, -0.1250-0.1720j, -0.1250-0.0406j, -0.1250+0.0406j,
805
+ -0.1250+0.1720j])
806
+
807
+ Note that ``T[1] == T[-1].conj()`` and ``T[2] == T[-2].conj()`` is
808
+ redundant. We can thus compute the forward transform without considering
809
+ negative frequencies:
810
+
811
+ >>> torch.fft.hfft(T[:3], n=5)
812
+ tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000])
813
+
814
+ Like with :func:`~torch.fft.irfft`, the output length must be given in order
815
+ to recover an even length output:
816
+
817
+ >>> torch.fft.hfft(T[:3])
818
+ tensor([0.1250, 0.2809, 0.6250, 0.9691])
819
+ """.format(**common_args))
820
+
821
+ ihfft = _add_docstr(_fft.fft_ihfft, r"""
822
+ ihfft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor
823
+
824
+ Computes the inverse of :func:`~torch.fft.hfft`.
825
+
826
+ :attr:`input` must be a real-valued signal, interpreted in the Fourier domain.
827
+ The IFFT of a real signal is Hermitian-symmetric, ``X[i] = conj(X[-i])``.
828
+ :func:`~torch.fft.ihfft` represents this in the one-sided form where only the
829
+ positive frequencies below the Nyquist frequency are included. To compute the
830
+ full output, use :func:`~torch.fft.ifft`.
831
+
832
+ Note:
833
+ Supports torch.half on CUDA with GPU Architecture SM53 or greater.
834
+ However it only supports powers of 2 signal length in every transformed dimension.
835
+
836
+ Args:
837
+ input (Tensor): the real input tensor
838
+ n (int, optional): Signal length. If given, the input will either be zero-padded
839
+ or trimmed to this length before computing the Hermitian IFFT.
840
+ dim (int, optional): The dimension along which to take the one dimensional Hermitian IFFT.
841
+ norm (str, optional): Normalization mode. For the backward transform
842
+ (:func:`~torch.fft.ihfft`), these correspond to:
843
+
844
+ * ``"forward"`` - no normalization
845
+ * ``"backward"`` - normalize by ``1/n``
846
+ * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the IFFT orthonormal)
847
+
848
+ Calling the forward transform (:func:`~torch.fft.hfft`) with the same
849
+ normalization mode will apply an overall normalization of ``1/n`` between
850
+ the two transforms. This is required to make :func:`~torch.fft.ihfft`
851
+ the exact inverse.
852
+
853
+ Default is ``"backward"`` (normalize by ``1/n``).
854
+
855
+ Keyword args:
856
+ {out}
857
+
858
+ Example:
859
+
860
+ >>> t = torch.arange(5)
861
+ >>> t
862
+ tensor([0, 1, 2, 3, 4])
863
+ >>> torch.fft.ihfft(t)
864
+ tensor([ 2.0000-0.0000j, -0.5000-0.6882j, -0.5000-0.1625j])
865
+
866
+ Compare against the full output from :func:`~torch.fft.ifft`:
867
+
868
+ >>> torch.fft.ifft(t)
869
+ tensor([ 2.0000-0.0000j, -0.5000-0.6882j, -0.5000-0.1625j, -0.5000+0.1625j,
870
+ -0.5000+0.6882j])
871
+ """.format(**common_args))
872
+
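# A self-contained sketch (not part of this module): with the default "backward" norm,
# ihfft of a real signal is the conjugate of rfft scaled by 1/n, which matches the
# example values above.
import torch

t = torch.arange(5, dtype=torch.float32)
torch.testing.assert_close(
    torch.fft.ihfft(t), torch.conj(torch.fft.rfft(t)) / t.numel()
)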
873
+ hfft2 = _add_docstr(_fft.fft_hfft2, r"""
874
+ hfft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor
875
+
876
+ Computes the 2-dimensional discrete Fourier transform of a Hermitian symmetric
877
+ :attr:`input` signal. Equivalent to :func:`~torch.fft.hfftn` but only
878
+ transforms the last two dimensions by default.
879
+
880
+ :attr:`input` is interpreted as a one-sided Hermitian signal in the time
881
+ domain. By the Hermitian property, the Fourier transform will be real-valued.
882
+
883
+ Note:
884
+ Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
885
+ However it only supports powers of 2 signal length in every transformed dimensions.
886
+ With default arguments, the size of last dimension should be (2^n + 1) as argument
887
+ `s` defaults to even output size = 2 * (last_dim_size - 1)
888
+
889
+ Args:
890
+ input (Tensor): the input tensor
891
+ s (Tuple[int], optional): Signal size in the transformed dimensions.
892
+ If given, each dimension ``dim[i]`` will either be zero-padded or
893
+ trimmed to the length ``s[i]`` before computing the Hermitian FFT.
894
+ If a length ``-1`` is specified, no padding is done in that dimension.
895
+ Defaults to even output in the last dimension:
896
+ ``s[-1] = 2*(input.size(dim[-1]) - 1)``.
897
+ dim (Tuple[int], optional): Dimensions to be transformed.
898
+ The last dimension must be the half-Hermitian compressed dimension.
899
+ Default: last two dimensions.
900
+ norm (str, optional): Normalization mode. For the forward transform
901
+ (:func:`~torch.fft.hfft2`), these correspond to:
902
+
903
+ * ``"forward"`` - normalize by ``1/n``
904
+ * ``"backward"`` - no normalization
905
+ * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the Hermitian FFT orthonormal)
906
+
907
+ Where ``n = prod(s)`` is the logical FFT size.
908
+ Calling the backward transform (:func:`~torch.fft.ihfft2`) with the same
909
+ normalization mode will apply an overall normalization of ``1/n`` between
910
+ the two transforms. This is required to make :func:`~torch.fft.ihfft2`
911
+ the exact inverse.
912
+
913
+ Default is ``"backward"`` (no normalization).
914
+
915
+ Keyword args:
916
+ {out}
917
+
918
+ Example:
919
+
920
+ Starting from a real frequency-space signal, we can generate a
921
+ Hermitian-symmetric time-domain signal:
922
+ >>> T = torch.rand(10, 9)
923
+ >>> t = torch.fft.ihfft2(T)
924
+
925
+ Without specifying the output length to :func:`~torch.fft.hfft2`, the
926
+ output will not round-trip properly because the input is odd-length in the
927
+ last dimension:
928
+
929
+ >>> torch.fft.hfft2(t).size()
930
+ torch.Size([10, 10])
931
+
932
+ So, it is recommended to always pass the signal shape :attr:`s`.
933
+
934
+ >>> roundtrip = torch.fft.hfft2(t, T.size())
935
+ >>> roundtrip.size()
936
+ torch.Size([10, 9])
937
+ >>> torch.allclose(roundtrip, T)
938
+ True
939
+
940
+ """.format(**common_args))
941
+
942
+ ihfft2 = _add_docstr(_fft.fft_ihfft2, r"""
943
+ ihfft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor
944
+
945
+ Computes the 2-dimensional inverse discrete Fourier transform of real
946
+ :attr:`input`. Equivalent to :func:`~torch.fft.ihfftn` but transforms only the
947
+ last two dimensions by default.
948
+
949
+ Note:
950
+ Supports torch.half on CUDA with GPU Architecture SM53 or greater.
951
+ However, it only supports powers-of-two signal lengths in every transformed dimension.
952
+
953
+ Args:
954
+ input (Tensor): the input tensor
955
+ s (Tuple[int], optional): Signal size in the transformed dimensions.
956
+ If given, each dimension ``dim[i]`` will either be zero-padded or
957
+ trimmed to the length ``s[i]`` before computing the Hermitian IFFT.
958
+ If a length ``-1`` is specified, no padding is done in that dimension.
959
+ Default: ``s = [input.size(d) for d in dim]``
960
+ dim (Tuple[int], optional): Dimensions to be transformed.
961
+ Default: last two dimensions.
962
+ norm (str, optional): Normalization mode. For the backward transform
963
+ (:func:`~torch.fft.ihfft2`), these correspond to:
964
+
965
+ * ``"forward"`` - no normalization
966
+ * ``"backward"`` - normalize by ``1/n``
967
+ * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the Hermitian IFFT orthonormal)
968
+
969
+ Where ``n = prod(s)`` is the logical IFFT size.
970
+ Calling the forward transform (:func:`~torch.fft.hfft2`) with the same
971
+ normalization mode will apply an overall normalization of ``1/n`` between
972
+ the two transforms. This is required to make :func:`~torch.fft.ihfft2`
973
+ the exact inverse.
974
+
975
+ Default is ``"backward"`` (normalize by ``1/n``).
976
+
977
+ Keyword args:
978
+ {out}
979
+
980
+ Example:
981
+
982
+ >>> T = torch.rand(10, 10)
983
+ >>> t = torch.fft.ihfft2(T)
984
+ >>> t.size()
985
+ torch.Size([10, 6])
986
+
987
+ Compared against the full output from :func:`~torch.fft.ifft2`, the
988
+ Hermitian time-space signal takes up only half the space.
989
+
990
+ >>> fftn = torch.fft.ifft2(T)
991
+ >>> torch.allclose(fftn[..., :6], t)
992
+ True
993
+
994
+ The discrete Fourier transform is separable, so :func:`~torch.fft.ihfft2`
995
+ here is equivalent to a combination of :func:`~torch.fft.ifft` and
996
+ :func:`~torch.fft.ihfft`:
997
+
998
+ >>> two_ffts = torch.fft.ifft(torch.fft.ihfft(T, dim=1), dim=0)
999
+ >>> torch.allclose(t, two_ffts)
1000
+ True
1001
+
1002
+ """.format(**common_args))
1003
+
1004
+ hfftn = _add_docstr(_fft.fft_hfftn, r"""
1005
+ hfftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor
1006
+
1007
+ Computes the n-dimensional discrete Fourier transform of a Hermitian symmetric
1008
+ :attr:`input` signal.
1009
+
1010
+ :attr:`input` is interpreted as a one-sided Hermitian signal in the time
1011
+ domain. By the Hermitian property, the Fourier transform will be real-valued.
1012
+
1013
+ Note:
1014
+ :func:`~torch.fft.hfftn`/:func:`~torch.fft.ihfftn` are analogous to
1015
+ :func:`~torch.fft.rfftn`/:func:`~torch.fft.irfftn`. The real FFT expects
1016
+ a real signal in the time-domain and gives Hermitian symmetry in the
1017
+ frequency-domain. The Hermitian FFT is the opposite; Hermitian symmetric in
1018
+ the time-domain and real-valued in the frequency-domain. For this reason,
1019
+ special care needs to be taken with the shape argument :attr:`s`, in the
1020
+ same way as with :func:`~torch.fft.irfftn`.
1021
+
1022
+ Note:
1023
+ Some input frequencies must be real-valued to satisfy the Hermitian
1024
+ property. In these cases the imaginary component will be ignored.
1025
+ For example, any imaginary component in the zero-frequency term cannot
1026
+ be represented in a real output and so will always be ignored.
1027
+
1028
+ Note:
1029
+ The correct interpretation of the Hermitian input depends on the length of
1030
+ the original data, as given by :attr:`s`. This is because each input shape
1031
+ could correspond to either an odd or even length signal. By default, the
1032
+ signal is assumed to be even length and odd signals will not round-trip
1033
+ properly. It is recommended to always pass the signal shape :attr:`s`.
1034
+
1035
+ Note:
1036
+ Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
1037
+ However it only supports powers of 2 signal length in every transformed dimensions.
1038
+ With default arguments, the size of the last dimension should be (2^n + 1), since the argument
1039
+ `s` defaults to an even output size of 2 * (last_dim_size - 1).
1040
+
1041
+ Args:
1042
+ input (Tensor): the input tensor
1043
+ s (Tuple[int], optional): Signal size in the transformed dimensions.
1044
+ If given, each dimension ``dim[i]`` will either be zero-padded or
1045
+ trimmed to the length ``s[i]`` before computing the Hermitian FFT.
1046
+ If a length ``-1`` is specified, no padding is done in that dimension.
1047
+ Defaults to even output in the last dimension:
1048
+ ``s[-1] = 2*(input.size(dim[-1]) - 1)``.
1049
+ dim (Tuple[int], optional): Dimensions to be transformed.
1050
+ The last dimension must be the half-Hermitian compressed dimension.
1051
+ Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given.
1052
+ norm (str, optional): Normalization mode. For the forward transform
1053
+ (:func:`~torch.fft.hfftn`), these correspond to:
1054
+
1055
+ * ``"forward"`` - normalize by ``1/n``
1056
+ * ``"backward"`` - no normalization
1057
+ * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the Hermitian FFT orthonormal)
1058
+
1059
+ Where ``n = prod(s)`` is the logical FFT size.
1060
+ Calling the backward transform (:func:`~torch.fft.ihfftn`) with the same
1061
+ normalization mode will apply an overall normalization of ``1/n`` between
1062
+ the two transforms. This is required to make :func:`~torch.fft.ihfftn`
1063
+ the exact inverse.
1064
+
1065
+ Default is ``"backward"`` (no normalization).
1066
+
1067
+ Keyword args:
1068
+ {out}
1069
+
1070
+ Example:
1071
+
1072
+ Starting from a real frequency-space signal, we can generate a
1073
+ Hermitian-symmetric time-domain signal:
1074
+ >>> T = torch.rand(10, 9)
1075
+ >>> t = torch.fft.ihfftn(T)
1076
+
1077
+ Without specifying the output length to :func:`~torch.fft.hfftn`, the
1078
+ output will not round-trip properly because the input is odd-length in the
1079
+ last dimension:
1080
+
1081
+ >>> torch.fft.hfftn(t).size()
1082
+ torch.Size([10, 10])
1083
+
1084
+ So, it is recommended to always pass the signal shape :attr:`s`.
1085
+
1086
+ >>> roundtrip = torch.fft.hfftn(t, T.size())
1087
+ >>> roundtrip.size()
1088
+ torch.Size([10, 9])
1089
+ >>> torch.allclose(roundtrip, T)
1090
+ True
1091
+
1092
+ """.format(**common_args))
1093
+
1094
+ ihfftn = _add_docstr(_fft.fft_ihfftn, r"""
1095
+ ihfftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor
1096
+
1097
+ Computes the N-dimensional inverse discrete Fourier transform of real :attr:`input`.
1098
+
1099
+ :attr:`input` must be a real-valued signal, interpreted in the Fourier domain.
1100
+ The n-dimensional IFFT of a real signal is Hermitian-symmetric,
1101
+ ``X[i, j, ...] = conj(X[-i, -j, ...])``. :func:`~torch.fft.ihfftn` represents
1102
+ this in the one-sided form where only the positive frequencies below the
1103
+ Nyquist frequency are included in the last signal dimension. To compute the
1104
+ full output, use :func:`~torch.fft.ifftn`.
1105
+
1106
+ Note:
1107
+ Supports torch.half on CUDA with GPU Architecture SM53 or greater.
1108
+ However, it only supports powers-of-two signal lengths in every transformed dimension.
1109
+
1110
+ Args:
1111
+ input (Tensor): the input tensor
1112
+ s (Tuple[int], optional): Signal size in the transformed dimensions.
1113
+ If given, each dimension ``dim[i]`` will either be zero-padded or
1114
+ trimmed to the length ``s[i]`` before computing the Hermitian IFFT.
1115
+ If a length ``-1`` is specified, no padding is done in that dimension.
1116
+ Default: ``s = [input.size(d) for d in dim]``
1117
+ dim (Tuple[int], optional): Dimensions to be transformed.
1118
+ Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given.
1119
+ norm (str, optional): Normalization mode. For the backward transform
1120
+ (:func:`~torch.fft.ihfftn`), these correspond to:
1121
+
1122
+ * ``"forward"`` - no normalization
1123
+ * ``"backward"`` - normalize by ``1/n``
1124
+ * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the Hermitian IFFT orthonormal)
1125
+
1126
+ Where ``n = prod(s)`` is the logical IFFT size.
1127
+ Calling the forward transform (:func:`~torch.fft.hfftn`) with the same
1128
+ normalization mode will apply an overall normalization of ``1/n`` between
1129
+ the two transforms. This is required to make :func:`~torch.fft.ihfftn`
1130
+ the exact inverse.
1131
+
1132
+ Default is ``"backward"`` (normalize by ``1/n``).
1133
+
1134
+ Keyword args:
1135
+ {out}
1136
+
1137
+ Example:
1138
+
1139
+ >>> T = torch.rand(10, 10)
1140
+ >>> ihfftn = torch.fft.ihfftn(T)
1141
+ >>> ihfftn.size()
1142
+ torch.Size([10, 6])
1143
+
1144
+ Compared against the full output from :func:`~torch.fft.ifftn`, we have all
1145
+ elements up to the Nyquist frequency.
1146
+
1147
+ >>> ifftn = torch.fft.ifftn(T)
1148
+ >>> torch.allclose(ifftn[..., :6], ihfftn)
1149
+ True
1150
+
1151
+ The discrete Fourier transform is separable, so :func:`~torch.fft.ihfftn`
1152
+ here is equivalent to a combination of :func:`~torch.fft.ihfft` and
1153
+ :func:`~torch.fft.ifft`:
1154
+
1155
+ >>> two_iffts = torch.fft.ifft(torch.fft.ihfft(T, dim=1), dim=0)
1156
+ >>> torch.allclose(ihfftn, two_iffts)
1157
+ True
1158
+
1159
+ """.format(**common_args))
1160
+
1161
+ fftfreq = _add_docstr(_fft.fft_fftfreq, r"""
1162
+ fftfreq(n, d=1.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
1163
+
1164
+ Computes the discrete Fourier Transform sample frequencies for a signal of size :attr:`n`.
1165
+
1166
+ Note:
1167
+ By convention, :func:`~torch.fft.fft` returns positive frequency terms
1168
+ first, followed by the negative frequencies in reverse order, so that
1169
+ ``f[-i]`` for all :math:`0 < i \leq n/2` in Python gives the negative
1170
+ frequency terms. For an FFT of length :attr:`n` and with inputs spaced in
1171
+ length unit :attr:`d`, the frequencies are::
1172
+
1173
+ f = [0, 1, ..., (n - 1) // 2, -(n // 2), ..., -1] / (d * n)
1174
+
1175
+ Note:
1176
+ For even lengths, the Nyquist frequency at ``f[n/2]`` can be thought of as
1177
+ either negative or positive. :func:`~torch.fft.fftfreq` follows NumPy's
1178
+ convention of taking it to be negative.
1179
+
1180
+ Args:
1181
+ n (int): the FFT length
1182
+ d (float, optional): The sampling length scale.
1183
+ The spacing between individual samples of the FFT input.
1184
+ The default assumes unit spacing; dividing that result by the actual
1185
+ spacing gives the result in physical frequency units.
1186
+
1187
+ Keyword Args:
1188
+ {out}
1189
+ {dtype}
1190
+ {layout}
1191
+ {device}
1192
+ {requires_grad}
1193
+
1194
+ Example:
1195
+
1196
+ >>> torch.fft.fftfreq(5)
1197
+ tensor([ 0.0000, 0.2000, 0.4000, -0.4000, -0.2000])
1198
+
1199
+ For even input, we can see the Nyquist frequency at ``f[2]`` is given as
1200
+ negative:
1201
+
1202
+ >>> torch.fft.fftfreq(4)
1203
+ tensor([ 0.0000, 0.2500, -0.5000, -0.2500])
1204
+
1205
+ """.format(**factory_common_args))
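The docstring above notes that ``d`` only rescales the unit-spacing bins. A minimal sketch of that relationship (the 0.1-second sample spacing is an illustrative assumption, not from the source):

    import torch
    # Unit spacing gives frequencies in cycles-per-sample; with d=0.1 the same
    # bins come out in cycles-per-second, i.e. divided by (d * n).
    print(torch.fft.fftfreq(4))          # tensor([ 0.0000,  0.2500, -0.5000, -0.2500])
    print(torch.fft.fftfreq(4, d=0.1))   # tensor([ 0.0000,  2.5000, -5.0000, -2.5000])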
1206
+
1207
+ rfftfreq = _add_docstr(_fft.fft_rfftfreq, r"""
1208
+ rfftfreq(n, d=1.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
1209
+
1210
+ Computes the sample frequencies for :func:`~torch.fft.rfft` with a signal of size :attr:`n`.
1211
+
1212
+ Note:
1213
+ :func:`~torch.fft.rfft` returns Hermitian one-sided output, so only the
1214
+ positive frequency terms are returned. For a real FFT of length :attr:`n`
1215
+ and with inputs spaced in length unit :attr:`d`, the frequencies are::
1216
+
1217
+ f = torch.arange((n + 1) // 2) / (d * n)
1218
+
1219
+ Note:
1220
+ For even lengths, the Nyquist frequency at ``f[n/2]`` can be thought of as
1221
+ either negative or positive. Unlike :func:`~torch.fft.fftfreq`,
1222
+ :func:`~torch.fft.rfftfreq` always returns it as positive.
1223
+
1224
+ Args:
1225
+ n (int): the real FFT length
1226
+ d (float, optional): The sampling length scale.
1227
+ The spacing between individual samples of the FFT input.
1228
+ The default assumes unit spacing; dividing that result by the actual
1229
+ spacing gives the result in physical frequency units.
1230
+
1231
+ Keyword Args:
1232
+ {out}
1233
+ {dtype}
1234
+ {layout}
1235
+ {device}
1236
+ {requires_grad}
1237
+
1238
+ Example:
1239
+
1240
+ >>> torch.fft.rfftfreq(5)
1241
+ tensor([0.0000, 0.2000, 0.4000])
1242
+
1243
+ >>> torch.fft.rfftfreq(4)
1244
+ tensor([0.0000, 0.2500, 0.5000])
1245
+
1246
+ Compared to the output from :func:`~torch.fft.fftfreq`, we see that the
1247
+ Nyquist frequency at ``f[2]`` has changed sign:
+
1248
+ >>> torch.fft.fftfreq(4)
1249
+ tensor([ 0.0000, 0.2500, -0.5000, -0.2500])
1250
+
1251
+ """.format(**factory_common_args))
1252
+
1253
+ fftshift = _add_docstr(_fft.fft_fftshift, r"""
1254
+ fftshift(input, dim=None) -> Tensor
1255
+
1256
+ Reorders n-dimensional FFT data, as provided by :func:`~torch.fft.fftn`, to have
1257
+ negative frequency terms first.
1258
+
1259
+ This performs a periodic shift of n-dimensional data such that the origin
1260
+ ``(0, ..., 0)`` is moved to the center of the tensor. Specifically, to
1261
+ ``input.shape[dim] // 2`` in each selected dimension.
1262
+
1263
+ Note:
1264
+ By convention, the FFT returns positive frequency terms first, followed by
1265
+ the negative frequencies in reverse order, so that ``f[-i]`` for all
1266
+ :math:`0 < i \leq n/2` in Python gives the negative frequency terms.
1267
+ :func:`~torch.fft.fftshift` rearranges all frequencies into ascending order
1268
+ from negative to positive with the zero-frequency term in the center.
1269
+
1270
+ Note:
1271
+ For even lengths, the Nyquist frequency at ``f[n/2]`` can be thought of as
1272
+ either negative or positive. :func:`~torch.fft.fftshift` always puts the
1273
+ Nyquist term at the 0-index. This is the same convention used by
1274
+ :func:`~torch.fft.fftfreq`.
1275
+
1276
+ Args:
1277
+ input (Tensor): the tensor in FFT order
1278
+ dim (int, Tuple[int], optional): The dimensions to rearrange.
1279
+ Only dimensions specified here will be rearranged, any other dimensions
1280
+ will be left in their original order.
1281
+ Default: All dimensions of :attr:`input`.
1282
+
1283
+ Example:
1284
+
1285
+ >>> f = torch.fft.fftfreq(4)
1286
+ >>> f
1287
+ tensor([ 0.0000, 0.2500, -0.5000, -0.2500])
1288
+
1289
+ >>> torch.fft.fftshift(f)
1290
+ tensor([-0.5000, -0.2500, 0.0000, 0.2500])
1291
+
1292
+ Also notice that the Nyquist frequency term at ``f[2]`` was moved to the
1293
+ beginning of the tensor.
1294
+
1295
+ This also works for multi-dimensional transforms:
1296
+
1297
+ >>> x = torch.fft.fftfreq(5, d=1/5) + 0.1 * torch.fft.fftfreq(5, d=1/5).unsqueeze(1)
1298
+ >>> x
1299
+ tensor([[ 0.0000, 1.0000, 2.0000, -2.0000, -1.0000],
1300
+ [ 0.1000, 1.1000, 2.1000, -1.9000, -0.9000],
1301
+ [ 0.2000, 1.2000, 2.2000, -1.8000, -0.8000],
1302
+ [-0.2000, 0.8000, 1.8000, -2.2000, -1.2000],
1303
+ [-0.1000, 0.9000, 1.9000, -2.1000, -1.1000]])
1304
+
1305
+ >>> torch.fft.fftshift(x)
1306
+ tensor([[-2.2000, -1.2000, -0.2000, 0.8000, 1.8000],
1307
+ [-2.1000, -1.1000, -0.1000, 0.9000, 1.9000],
1308
+ [-2.0000, -1.0000, 0.0000, 1.0000, 2.0000],
1309
+ [-1.9000, -0.9000, 0.1000, 1.1000, 2.1000],
1310
+ [-1.8000, -0.8000, 0.2000, 1.2000, 2.2000]])
1311
+
1312
+ :func:`~torch.fft.fftshift` can also be useful for spatial data. If our
1313
+ data is defined on a centered grid (``[-(N//2), (N-1)//2]``) then we can
1314
+ use the standard FFT defined on an uncentered grid (``[0, N)``) by first
1315
+ applying an :func:`~torch.fft.ifftshift`.
1316
+
1317
+ >>> x_centered = torch.arange(-5, 5)
1318
+ >>> x_uncentered = torch.fft.ifftshift(x_centered)
1319
+ >>> fft_uncentered = torch.fft.fft(x_uncentered)
1320
+
1321
+ Similarly, we can convert the frequency domain components to centered
1322
+ convention by applying :func:`~torch.fft.fftshift`.
1323
+
1324
+ >>> fft_centered = torch.fft.fftshift(fft_uncentered)
1325
+
1326
+ The inverse transform, from centered Fourier space back to centered spatial
1327
+ data, can be performed by applying the inverse shifts in reverse order:
1328
+
1329
+ >>> x_centered_2 = torch.fft.fftshift(torch.fft.ifft(torch.fft.ifftshift(fft_centered)))
1330
+ >>> torch.testing.assert_close(x_centered.to(torch.complex64), x_centered_2, check_stride=False)
1331
+
1332
+
1333
+ """)
1334
+
1335
+ ifftshift = _add_docstr(_fft.fft_ifftshift, r"""
1336
+ ifftshift(input, dim=None) -> Tensor
1337
+
1338
+ Inverse of :func:`~torch.fft.fftshift`.
1339
+
1340
+ Args:
1341
+ input (Tensor): the tensor in FFT order
1342
+ dim (int, Tuple[int], optional): The dimensions to rearrange.
1343
+ Only dimensions specified here will be rearranged, any other dimensions
1344
+ will be left in their original order.
1345
+ Default: All dimensions of :attr:`input`.
1346
+
1347
+ Example:
1348
+
1349
+ >>> f = torch.fft.fftfreq(5)
1350
+ >>> f
1351
+ tensor([ 0.0000, 0.2000, 0.4000, -0.4000, -0.2000])
1352
+
1353
+ A round-trip through :func:`~torch.fft.fftshift` and
1354
+ :func:`~torch.fft.ifftshift` gives the same result:
1355
+
1356
+ >>> shifted = torch.fft.fftshift(f)
1357
+ >>> torch.fft.ifftshift(shifted)
1358
+ tensor([ 0.0000, 0.2000, 0.4000, -0.4000, -0.2000])
1359
+
1360
+ """)
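The hfft-family docstrings above all stress passing the original signal shape ``s`` so that odd-length signals round-trip. A minimal sketch of that rule, assuming only a working torch install:

    import torch
    T = torch.rand(10, 9)                                    # odd length in the last dimension
    t = torch.fft.ihfftn(T)                                   # one-sided Hermitian representation
    assert torch.fft.hfftn(t).shape == (10, 10)               # default assumes an even-length signal
    assert torch.allclose(torch.fft.hfftn(t, T.size()), T)    # explicit s recovers T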
llmeval-env/lib/python3.10/site-packages/torch/fft/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (54.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/futures/__init__.py ADDED
@@ -0,0 +1,318 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import cast, Callable, Generic, List, Optional, Type, TypeVar, Union
4
+
5
+ import torch
6
+
7
+ __all__ = ['Future', 'collect_all', 'wait_all']
8
+
9
+ T = TypeVar("T")
10
+ S = TypeVar("S")
11
+
12
+
13
+ class _PyFutureMeta(type(torch._C.Future), type(Generic)): # type: ignore[misc, no-redef]
14
+ pass
15
+
16
+
17
+ class Future(torch._C.Future, Generic[T], metaclass=_PyFutureMeta):
18
+ r"""
19
+ Wrapper around a ``torch._C.Future`` which encapsulates an asynchronous
20
+ execution of a callable, e.g. :meth:`~torch.distributed.rpc.rpc_async`. It
21
+ also exposes a set of APIs to add callback functions and set results.
22
+
23
+ .. warning:: GPU support is a beta feature, subject to changes.
24
+ """
25
+
26
+ def __init__(self, *, devices: Optional[List[Union[int, str, torch.device]]] = None):
27
+ r"""
28
+ Create an empty unset ``Future``. If the future is intended to hold
29
+ values containing CUDA tensors, (a superset of) their CUDA devices must
30
+ be specified at construction. (This is only supported if
31
+ ``torch.cuda.is_available()`` returns ``True``). This is needed to
32
+ ensure proper CUDA stream synchronization. The child futures, returned
33
+ by the ``then`` method, will inherit these devices.
34
+
35
+ Args:
36
+ devices(``List[Union[int, str, torch.device]]``, optional): the set
37
+ of devices on which tensors contained in this future's value are
38
+ allowed to reside and on which callbacks are allowed to operate.
39
+ """
40
+ if devices is None:
41
+ devices = []
42
+ super().__init__([torch.device(d) for d in devices])
43
+
44
+ def done(self) -> bool:
45
+ r"""
46
+ Return ``True`` if this ``Future`` is done. A ``Future`` is done if it
47
+ has a result or an exception.
48
+
49
+ If the value contains tensors that reside on GPUs, ``Future.done()``
50
+ will return ``True`` even if the asynchronous kernels that are
51
+ populating those tensors haven't yet completed running on the device,
52
+ because at such stage the result is already usable, provided one
53
+ performs the appropriate synchronizations (see :meth:`wait`).
54
+ """
55
+ return super().done()
56
+
57
+ def wait(self) -> T:
58
+ r"""
59
+ Block until the value of this ``Future`` is ready.
60
+
61
+ If the value contains tensors that reside on GPUs, then an additional
62
+ synchronization is performed with the kernels (executing on the device)
63
+ which may be asynchronously populating those tensors. Such sync is
64
+ non-blocking, which means that ``wait()`` will insert the necessary
65
+ instructions in the current streams to ensure that further operations
66
+ enqueued on those streams will be properly scheduled after the async
67
+ kernels but, once that is done, ``wait()`` will return, even if those
68
+ kernels are still running. No further synchronization is required when
69
+ accessing and using the values, as long as one doesn't change streams.
70
+
71
+ Returns:
72
+ The value held by this ``Future``. If the function (callback or RPC)
73
+ creating the value has thrown an error, this ``wait`` method will
74
+ also throw an error.
75
+ """
76
+ return super().wait()
77
+
78
+ def value(self) -> T:
79
+ r"""
80
+ Obtain the value of an already-completed future.
81
+
82
+ This method should only be called after a call to :meth:`wait` has
83
+ completed, or inside a callback function passed to :meth:`then`. In
84
+ other cases this ``Future`` may not yet hold a value and calling
85
+ ``value()`` could fail.
86
+
87
+ If the value contains tensors that reside on GPUs, then this method will
88
+ *not* perform any additional synchronization. This should be done
89
+ beforehand, separately, through a call to :meth:`wait` (except within
90
+ callbacks, for which it's already being taken care of by :meth:`then`).
91
+
92
+ Returns:
93
+ The value held by this ``Future``. If the function (callback or RPC)
94
+ creating the value has thrown an error, this ``value()`` method will
95
+ also throw an error.
96
+ """
97
+ return super().value()
98
+
99
+ def then(self, callback: Callable[[Future[T]], S]) -> Future[S]:
100
+ r"""
101
+ Append the given callback function to this ``Future``, which will be run
102
+ when the ``Future`` is completed. Multiple callbacks can be added to
103
+ the same ``Future``, but the order in which they will be executed cannot
104
+ be guaranteed (to enforce a certain order consider chaining:
105
+ ``fut.then(cb1).then(cb2)``). The callback must take one argument, which
106
+ is the reference to this ``Future``. The callback function can use the
107
+ :meth:`value` method to get the value. Note that if this ``Future`` is
108
+ already completed, the given callback will be run immediately inline.
109
+
110
+ If the ``Future``'s value contains tensors that reside on GPUs, the
111
+ callback might be invoked while the async kernels that are populating
112
+ those tensors haven't yet finished executing on the device. However, the
113
+ callback will be invoked with some dedicated streams set as current
114
+ (fetched from a global pool) which will be synchronized with those
115
+ kernels. Hence any operation performed by the callback on these tensors
116
+ will be scheduled on the device after the kernels complete. In other
117
+ words, as long as the callback doesn't switch streams, it can safely
118
+ manipulate the result without any additional synchronization. This is
119
+ similar to the non-blocking behavior of :meth:`wait`.
120
+
121
+ Similarly, if the callback returns a value that contains tensors that
122
+ reside on a GPU, it can do so even if the kernels that are producing
123
+ these tensors are still running on the device, as long as the callback
124
+ didn't change streams during its execution. If one wants to change
125
+ streams, one must be careful to re-synchronize them with the original
126
+ streams, that is, those that were current when the callback was invoked.
127
+
128
+ Args:
129
+ callback(``Callable``): a ``Callable`` that takes this ``Future`` as
130
+ the only argument.
131
+
132
+ Returns:
133
+ A new ``Future`` object that holds the return value of the
134
+ ``callback`` and will be marked as completed when the given
135
+ ``callback`` finishes.
136
+
137
+ .. note:: Note that if the callback function throws, either
138
+ through the original future being completed with an exception and
139
+ calling ``fut.wait()``, or through other code in the callback, the
140
+ future returned by ``then`` will be marked appropriately with the
141
+ encountered error. However, if this callback later completes
142
+ additional futures, those futures are not marked as completed with
143
+ an error and the user is responsible for handling completion/waiting
144
+ on those futures independently.
145
+
146
+ Example::
147
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
148
+ >>> def callback(fut):
149
+ ... print(f"RPC return value is {fut.wait()}.")
150
+ >>> fut = torch.futures.Future()
151
+ >>> # The inserted callback will print the return value when
152
+ >>> # receiving the response from "worker1"
153
+ >>> cb_fut = fut.then(callback)
154
+ >>> chain_cb_fut = cb_fut.then(
155
+ ... lambda x : print(f"Chained cb done. {x.wait()}")
156
+ ... )
157
+ >>> fut.set_result(5)
158
+ RPC return value is 5.
159
+ Chained cb done. None
160
+ """
161
+ return cast(Future[S], super().then(callback))
162
+
163
+ def add_done_callback(self, callback: Callable[[Future[T]], None]) -> None:
164
+ r"""
165
+ Append the given callback function to this ``Future``, which will be run
166
+ when the ``Future`` is completed. Multiple callbacks can be added to
167
+ the same ``Future``, but the order in which they will be executed cannot
168
+ be guaranteed. The callback must take one argument, which is the
169
+ reference to this ``Future``. The callback function can use the
170
+ :meth:`value` method to get the value. Note that if this ``Future`` is
171
+ already completed, the given callback will be run inline.
172
+
173
+ We recommend that you use the :meth:`then` method as it provides a way
174
+ to synchronize after your callback has completed. ``add_done_callback``
175
+ can be cheaper if your callback does not return anything. But both
176
+ :meth:`then` and ``add_done_callback`` use the same callback
177
+ registration API under the hood.
178
+
179
+ With respect to GPU tensors, this method behaves in the same way as
180
+ :meth:`then`.
181
+
182
+ Args:
183
+ callback(``Future``): a ``Callable`` that takes in one argument,
184
+ which is the reference to this ``Future``.
185
+
186
+ .. note:: Note that if the callback function throws, either
187
+ through the original future being completed with an exception and
188
+ calling ``fut.wait()``, or through other code in the callback,
189
+ error handling must be carefully taken care of. For example, if
190
+ this callback later completes additional futures, those futures are
191
+ not marked as completed with an error and the user is responsible
192
+ for handling completion/waiting on those futures independently.
193
+
194
+ Example::
195
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
196
+ >>> def callback(fut):
197
+ ... print("This will run after the future has finished.")
198
+ ... print(fut.wait())
199
+ >>> fut = torch.futures.Future()
200
+ >>> fut.add_done_callback(callback)
201
+ >>> fut.set_result(5)
202
+ This will run after the future has finished.
203
+ 5
204
+ """
205
+ super().add_done_callback(callback)
206
+
207
+ def set_result(self, result: T) -> None:
208
+ r"""
209
+ Set the result for this ``Future``, which will mark this ``Future`` as
210
+ completed and trigger all attached callbacks. Note that a ``Future``
211
+ cannot be marked completed twice.
212
+
213
+ If the result contains tensors that reside on GPUs, this method can be
214
+ called even if the asynchronous kernels that are populating those
215
+ tensors haven't yet completed running on the device, provided that the
216
+ streams on which those kernels were enqueued are set as the current ones
217
+ when this method is called. Put simply, it's safe to call this method
218
+ immediately after launching those kernels, without any additional
219
+ synchronization, as long as one doesn't change streams in between. This
220
+ method will record events on all the relevant current streams and will
221
+ use them to ensure proper scheduling for all the consumers of this
222
+ ``Future``.
223
+
224
+ Args:
225
+ result (object): the result object of this ``Future``.
226
+
227
+ Example::
228
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
229
+ >>> import threading
230
+ >>> import time
231
+ >>> def slow_set_future(fut, value):
232
+ ... time.sleep(0.5)
233
+ ... fut.set_result(value)
234
+ >>> fut = torch.futures.Future()
235
+ >>> t = threading.Thread(
236
+ ... target=slow_set_future,
237
+ ... args=(fut, torch.ones(2) * 3)
238
+ ... )
239
+ >>> t.start()
240
+ >>> print(fut.wait())
241
+ tensor([3., 3.])
242
+ >>> t.join()
243
+ """
244
+ super().set_result(result)
245
+
246
+ def set_exception(self, result: T) -> None:
247
+ r"""
248
+ Set an exception for this ``Future``, which will mark this ``Future`` as
249
+ completed with an error and trigger all attached callbacks. Note that
250
+ when calling wait()/value() on this ``Future``, the exception set here
251
+ will be raised inline.
252
+
253
+ Args:
254
+ result (BaseException): the exception for this ``Future``.
255
+
256
+ Example::
257
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
258
+ >>> fut = torch.futures.Future()
259
+ >>> fut.set_exception(ValueError("foo"))
260
+ >>> fut.wait()
261
+ Traceback (most recent call last):
262
+ ...
263
+ ValueError: foo
264
+ """
265
+ assert isinstance(result, Exception), f"{result} is of type {type(result)}, not an Exception."
266
+
267
+ def raise_error(fut_result):
268
+ raise fut_result
269
+
270
+ super()._set_unwrap_func(raise_error)
271
+ self.set_result(result) # type: ignore[arg-type]
272
+
273
+
274
+ def collect_all(futures: List[Future]) -> Future[List[Future]]:
275
+ r"""
276
+ Collects the provided :class:`~torch.futures.Future` objects into a single
277
+ combined :class:`~torch.futures.Future` that is completed when all of the
278
+ sub-futures are completed.
279
+
280
+ Args:
281
+ futures (list): a list of :class:`~torch.futures.Future` objects.
282
+
283
+ Returns:
284
+ Returns a :class:`~torch.futures.Future` object to a list of the passed
285
+ in Futures.
286
+
287
+ Example::
288
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
289
+ >>> fut0 = torch.futures.Future()
290
+ >>> fut1 = torch.futures.Future()
291
+ >>> fut = torch.futures.collect_all([fut0, fut1])
292
+ >>> fut0.set_result(0)
293
+ >>> fut1.set_result(1)
294
+ >>> fut_list = fut.wait()
295
+ >>> print(f"fut0 result = {fut_list[0].wait()}")
296
+ fut0 result = 0
297
+ >>> print(f"fut1 result = {fut_list[1].wait()}")
298
+ fut1 result = 1
299
+ """
300
+ return cast(Future[List[Future]], torch._C._collect_all(cast(List[torch._C.Future], futures)))
301
+
302
+
303
+ def wait_all(futures: List[Future]) -> List:
304
+ r"""
305
+ Waits for all provided futures to be complete, and returns
306
+ the list of completed values. If any of the futures encounters an error,
307
+ the method will exit early and report the error, without waiting for the other
308
+ futures to complete.
309
+
310
+ Args:
311
+ futures (list): a list of :class:`~torch.futures.Future` object.
312
+
313
+ Returns:
314
+ A list of the completed :class:`~torch.futures.Future` results. This
315
+ method will throw an error if ``wait`` on any
316
+ :class:`~torch.futures.Future` throws.
317
+ """
318
+ return [fut.wait() for fut in torch._C._collect_all(cast(List[torch._C.Future], futures)).wait()]
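``wait_all`` (unlike ``collect_all``) unwraps the values, so the caller gets plain results rather than Future objects. A minimal sketch, assuming only the module above:

    import torch
    fut0, fut1 = torch.futures.Future(), torch.futures.Future()
    fut0.set_result(0)
    fut1.set_result(1)
    # Blocks until every future is complete, then returns the unwrapped values.
    assert torch.futures.wait_all([fut0, fut1]) == [0, 1]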
llmeval-env/lib/python3.10/site-packages/torch/futures/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (15.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/nn/__init__.py ADDED
@@ -0,0 +1,53 @@
1
+ from .modules import * # noqa: F403
2
+ from .parameter import (
3
+ Parameter as Parameter,
4
+ UninitializedParameter as UninitializedParameter,
5
+ UninitializedBuffer as UninitializedBuffer,
6
+ )
7
+ from .parallel import DataParallel as DataParallel
8
+ from . import init
9
+ from . import functional
10
+ from . import utils
11
+ from . import attention
12
+
13
+
14
+ def factory_kwargs(kwargs):
15
+ r"""Return a canonicalized dict of factory kwargs.
16
+
17
+ Given kwargs, returns a canonicalized dict of factory kwargs that can be directly passed
18
+ to factory functions like torch.empty, or errors if unrecognized kwargs are present.
19
+
20
+ This function makes it simple to write code like this::
21
+
22
+ class MyModule(nn.Module):
23
+ def __init__(self, **kwargs):
24
+ factory_kwargs = torch.nn.factory_kwargs(kwargs)
25
+ self.weight = Parameter(torch.empty(10, **factory_kwargs))
26
+
27
+ Why should you use this function instead of just passing `kwargs` along directly?
28
+
29
+ 1. This function does error validation, so if there are unexpected kwargs we will
30
+ immediately report an error, instead of deferring it to the factory call
31
+ 2. This function supports a special `factory_kwargs` argument, which can be used to
32
+ explicitly specify a kwarg to be used for factory functions, in the event one of the
33
+ factory kwargs conflicts with an already existing argument in the signature (e.g.
34
+ in the signature ``def f(dtype, **kwargs)``, you can specify ``dtype`` for factory
35
+ functions, as distinct from the dtype argument, by saying
36
+ ``f(dtype1, factory_kwargs={"dtype": dtype2})``)
37
+ """
38
+ if kwargs is None:
39
+ return {}
40
+ simple_keys = {"device", "dtype", "memory_format"}
41
+ expected_keys = simple_keys | {"factory_kwargs"}
42
+ if not kwargs.keys() <= expected_keys:
43
+ raise TypeError(f"unexpected kwargs {kwargs.keys() - expected_keys}")
44
+
45
+ # copy the dict so the caller's factory_kwargs input is left untouched
46
+ r = dict(kwargs.get("factory_kwargs", {}))
47
+ for k in simple_keys:
48
+ if k in kwargs:
49
+ if k in r:
50
+ raise TypeError(f"{k} specified twice, in **kwargs and in factory_kwargs")
51
+ r[k] = kwargs[k]
52
+
53
+ return r
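A minimal sketch of the two behaviours the docstring above describes, canonicalizing plain kwargs and merging the special ``factory_kwargs`` key (the values are illustrative):

    import torch
    # Plain device/dtype kwargs are passed through unchanged.
    assert torch.nn.factory_kwargs({"device": "cpu", "dtype": torch.float32}) == {
        "device": "cpu", "dtype": torch.float32}
    # A nested factory_kwargs dict is merged in; repeating a key in both places raises TypeError.
    assert torch.nn.factory_kwargs({"factory_kwargs": {"dtype": torch.float64}}) == {
        "dtype": torch.float64}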
llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/_reduction.cpython-310.pyc ADDED
Binary file (1.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/cpp.cpython-310.pyc ADDED
Binary file (3.46 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/grad.cpython-310.pyc ADDED
Binary file (8.46 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/init.cpython-310.pyc ADDED
Binary file (19.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/parameter.cpython-310.pyc ADDED
Binary file (9.02 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/nn/_reduction.py ADDED
@@ -0,0 +1,47 @@
1
+ from typing import Optional
2
+ import warnings
3
+
4
+ # NB: Keep this file in sync with enums in aten/src/ATen/core/Reduction.h
5
+
6
+
7
+ def get_enum(reduction: str) -> int:
8
+ if reduction == 'none':
9
+ ret = 0
10
+ elif reduction == 'mean':
11
+ ret = 1
12
+ elif reduction == 'elementwise_mean':
13
+ warnings.warn("reduction='elementwise_mean' is deprecated, please use reduction='mean' instead.")
14
+ ret = 1
15
+ elif reduction == 'sum':
16
+ ret = 2
17
+ else:
18
+ ret = -1 # TODO: remove once JIT exceptions support control flow
19
+ raise ValueError(f"{reduction} is not a valid value for reduction")
20
+ return ret
21
+
22
+ # In order to support previous versions, accept boolean size_average and reduce
23
+ # and convert them into the new constants for now
24
+
25
+
26
+ # We use these functions in torch/legacy as well, in which case we'll silence the warning
27
+ def legacy_get_string(size_average: Optional[bool], reduce: Optional[bool], emit_warning: bool = True) -> str:
28
+ warning = "size_average and reduce args will be deprecated, please use reduction='{}' instead."
29
+
30
+ if size_average is None:
31
+ size_average = True
32
+ if reduce is None:
33
+ reduce = True
34
+
35
+ if size_average and reduce:
36
+ ret = 'mean'
37
+ elif reduce:
38
+ ret = 'sum'
39
+ else:
40
+ ret = 'none'
41
+ if emit_warning:
42
+ warnings.warn(warning.format(ret))
43
+ return ret
44
+
45
+
46
+ def legacy_get_enum(size_average: Optional[bool], reduce: Optional[bool], emit_warning: bool = True) -> int:
47
+ return get_enum(legacy_get_string(size_average, reduce, emit_warning))
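A minimal sketch of how the legacy ``size_average``/``reduce`` booleans map onto the reduction strings and enum values defined above (``emit_warning=False`` just silences the deprecation warning):

    from torch.nn import _reduction
    # size_average and reduce -> 'mean'; reduce only -> 'sum'; neither -> 'none'
    assert _reduction.legacy_get_string(True, True, emit_warning=False) == 'mean'
    assert _reduction.legacy_get_string(True, False, emit_warning=False) == 'none'
    assert _reduction.legacy_get_enum(False, True, emit_warning=False) == _reduction.get_enum('sum')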
llmeval-env/lib/python3.10/site-packages/torch/nn/attention/__init__.py ADDED
@@ -0,0 +1,117 @@
1
+ """ This module contains functions and classes that alter the behavior of torch.nn.functional.scaled_dot_product_attention """
2
+ import contextlib
3
+ from typing import List, Union
4
+ from warnings import warn
5
+
6
+ from torch.backends.cuda import (
7
+ can_use_efficient_attention,
8
+ can_use_flash_attention,
9
+ enable_flash_sdp,
10
+ enable_math_sdp,
11
+ enable_mem_efficient_sdp,
12
+ flash_sdp_enabled,
13
+ math_sdp_enabled,
14
+ mem_efficient_sdp_enabled,
15
+ SDPAParams,
16
+ )
17
+
18
+ __all__: List[str] = ["SDPBackend", "sdpa_kernel", "WARN_FOR_UNFUSED_KERNELS"]
19
+
20
+ # Note: [SDPA warnings]
21
+ # TODO: Consider using this for sdpa regardless of subclasses
22
+ # This only affects users of bias subclasses
23
+ # If this is set to True, we will warn the user if they are not using the fused kernels
24
+ # As well, it will raise warnings for all the reasons why the fused kernels can't be run.
25
+ # To set this to True, run
26
+ # torch.nn.attention.WARN_FOR_UNFUSED_KERNELS = True
27
+ WARN_FOR_UNFUSED_KERNELS = False
28
+
29
+
30
+ from torch._C import _SDPBackend as SDPBackend
31
+
32
+ # Hacks for Sphinx documentation:
33
+ # https://stackoverflow.com/questions/38765577/overriding-sphinx-autodoc-alias-of-for-import-of-private-class
34
+ SDPBackend = SDPBackend
35
+ r"""An enum-like class that contains the different backends for scaled dot product attention.
36
+ This backend class is designed to be used with the sdpa_kernel context manager.
37
+
38
+ The following Enums are available:
39
+ - ERROR: An error occurred when trying to determine the backend.
40
+ - MATH: The math backend for scaled dot product attention.
41
+ - FLASH_ATTENTION: The flash attention backend for scaled dot product attention.
42
+ - EFFICIENT_ATTENTION: The efficient attention backend for scaled dot product attention.
43
+ - CUDNN_ATTENTION: The cuDNN backend for scaled dot product attention.
44
+
45
+ See :func:`torch.nn.attention.sdpa_kernel` for more details.
46
+
47
+ .. warning:: This class is in beta and subject to change.
48
+ """
49
+ SDPBackend.__module__ = __name__
50
+ SDPBackend.__name__ = "SDPBackend"
51
+
52
+
53
+ def _raise_kernel_warnings(params: SDPAParams) -> None:
54
+ """
55
+ If WARN_FOR_UNFUSED_KERNELS is set to True, this will raise warnings
56
+ for all the reasons why the fused kernels can't be run.
57
+ """
58
+ if WARN_FOR_UNFUSED_KERNELS:
59
+ if not can_use_efficient_attention(params):
60
+ warn("Efficient attention can't be used because:")
61
+ can_use_efficient_attention(params, True)
62
+ if not can_use_flash_attention(params):
63
+ warn("Flash attention can't be used because:")
64
+ can_use_flash_attention(params, True)
65
+
66
+
67
+ @contextlib.contextmanager
68
+ def sdpa_kernel(backends: Union[List[SDPBackend], SDPBackend]):
69
+ r"""
70
+ Context manager to select which backend to use for scaled dot product attention.
71
+
72
+ .. warning:: This function is beta and subject to change.
73
+
74
+ Args:
75
+ backends (Union[List[SDPBackend], SDPBackend]): A backend or list of backends for scaled dot product attention.
76
+
77
+ Example:
78
+
79
+ .. code-block:: python
80
+
81
+ from torch.nn.functional import scaled_dot_product_attention
82
+ from torch.nn.attention import SDPBackend, sdpa_kernel
83
+ # Only enable flash attention backend
84
+ with sdpa_kernel(SDPBackend.FLASH_ATTENTION):
85
+ scaled_dot_product_attention(...)
86
+
87
+ # Enable the Math or Efficient attention backends
88
+ with sdpa_kernel([SDPBackend.MATH, SDPBackend.EFFICIENT_ATTENTION]):
89
+ scaled_dot_product_attention(...)
90
+
91
+ This context manager can be used to select which backend to use for scaled dot product attention.
92
+ Upon exiting the context manager, the previous state of the flags will be restored (by default, all backends enabled).
93
+ """
94
+ assert isinstance(
95
+ backends, (list, SDPBackend)
96
+ ), "Backend must be an instance of SDPBackend or a list of SDPBackend instances"
97
+
98
+ if isinstance(backends, SDPBackend):
99
+ backends = [backends]
100
+
101
+ backends = set(backends)
102
+ previous_flash: bool = flash_sdp_enabled()
103
+ previous_mem_efficient: bool = mem_efficient_sdp_enabled()
104
+ previous_math: bool = math_sdp_enabled()
105
+ try:
106
+ enable_flash = SDPBackend.FLASH_ATTENTION in backends
107
+ enable_mem_efficient = SDPBackend.EFFICIENT_ATTENTION in backends
108
+ enable_math = SDPBackend.MATH in backends
109
+
110
+ enable_flash_sdp(enable_flash)
111
+ enable_mem_efficient_sdp(enable_mem_efficient)
112
+ enable_math_sdp(enable_math)
113
+ yield {}
114
+ finally:
115
+ enable_flash_sdp(previous_flash)
116
+ enable_mem_efficient_sdp(previous_mem_efficient)
117
+ enable_math_sdp(previous_math)
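The try/finally above is what makes ``sdpa_kernel`` restore the backend flags on exit. A minimal sketch, assuming a torch build that exposes these ``torch.backends.cuda`` toggles:

    import torch
    from torch.nn.attention import SDPBackend, sdpa_kernel

    before = torch.backends.cuda.math_sdp_enabled()
    with sdpa_kernel(SDPBackend.FLASH_ATTENTION):
        # Only the requested backend is enabled inside the block.
        assert not torch.backends.cuda.math_sdp_enabled()
    # On exit the previous flag values are restored.
    assert torch.backends.cuda.math_sdp_enabled() == before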
llmeval-env/lib/python3.10/site-packages/torch/nn/common_types.py ADDED
@@ -0,0 +1,42 @@
1
+ from typing import TypeVar, Union, Tuple, Optional
2
+ from .. import Tensor
3
+
4
+ # Create some useful type aliases
5
+
6
+ # Template for arguments which can be supplied as a tuple, or which can be a scalar which PyTorch will internally
7
+ # broadcast to a tuple.
8
+ # Comes in several variants: A tuple of unknown size, and a fixed-size tuple for 1d, 2d, or 3d operations.
9
+ T = TypeVar('T')
10
+ _scalar_or_tuple_any_t = Union[T, Tuple[T, ...]]
11
+ _scalar_or_tuple_1_t = Union[T, Tuple[T]]
12
+ _scalar_or_tuple_2_t = Union[T, Tuple[T, T]]
13
+ _scalar_or_tuple_3_t = Union[T, Tuple[T, T, T]]
14
+ _scalar_or_tuple_4_t = Union[T, Tuple[T, T, T, T]]
15
+ _scalar_or_tuple_5_t = Union[T, Tuple[T, T, T, T, T]]
16
+ _scalar_or_tuple_6_t = Union[T, Tuple[T, T, T, T, T, T]]
17
+
18
+ # For arguments which represent size parameters (eg, kernel size, padding)
19
+ _size_any_t = _scalar_or_tuple_any_t[int]
20
+ _size_1_t = _scalar_or_tuple_1_t[int]
21
+ _size_2_t = _scalar_or_tuple_2_t[int]
22
+ _size_3_t = _scalar_or_tuple_3_t[int]
23
+ _size_4_t = _scalar_or_tuple_4_t[int]
24
+ _size_5_t = _scalar_or_tuple_5_t[int]
25
+ _size_6_t = _scalar_or_tuple_6_t[int]
26
+
27
+ # For arguments which represent optional size parameters (eg, adaptive pool parameters)
28
+ _size_any_opt_t = _scalar_or_tuple_any_t[Optional[int]]
29
+ _size_2_opt_t = _scalar_or_tuple_2_t[Optional[int]]
30
+ _size_3_opt_t = _scalar_or_tuple_3_t[Optional[int]]
31
+
32
+ # For arguments that represent a ratio to adjust each dimension of an input with (eg, upsampling parameters)
33
+ _ratio_2_t = _scalar_or_tuple_2_t[float]
34
+ _ratio_3_t = _scalar_or_tuple_3_t[float]
35
+ _ratio_any_t = _scalar_or_tuple_any_t[float]
36
+
37
+ _tensor_list_t = _scalar_or_tuple_any_t[Tensor]
38
+
39
+ # For the return value of max pooling operations that may or may not return indices.
40
+ # With the proposed 'Literal' feature to Python typing, it might be possible to
41
+ # eventually eliminate this.
42
+ _maybe_indices_t = _scalar_or_tuple_2_t[Tensor]
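A minimal sketch of how an alias like ``_size_2_t`` is meant to be read; ``normalize_kernel_size`` is a hypothetical helper written out here rather than PyTorch's internal broadcasting utility:

    from typing import Tuple, Union

    _size_2_t = Union[int, Tuple[int, int]]  # same shape as the alias defined above

    def normalize_kernel_size(kernel_size: _size_2_t) -> Tuple[int, int]:
        # A scalar is broadcast to a tuple, mirroring what the layers do internally.
        if isinstance(kernel_size, int):
            return (kernel_size, kernel_size)
        return kernel_size

    assert normalize_kernel_size(3) == (3, 3)
    assert normalize_kernel_size((3, 5)) == (3, 5)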
llmeval-env/lib/python3.10/site-packages/torch/nn/cpp.py ADDED
@@ -0,0 +1,88 @@
1
+ """Functionality for Python <-> C++ frontend inter-op."""
2
+
3
+ from torch import nn
4
+
5
+
6
+ class OrderedDictWrapper:
7
+ """A wrapper around a C++ OrderedDict.
8
+
9
+ It dynamically evaluates the OrderedDict getter on a bound C++ module, such
10
+ that new changes on the C++ side are picked up. Otherwise accessing e.g.
11
+ ``cpp_module._parameters`` just once would get a frozen copy of the parameters
12
+ at the time of access. ``torch.nn.Module`` accesses ``_parameters`` et al. via ``self.__dict__``
13
+ so using properties does not work.
14
+ """
15
+
16
+ def __init__(self, cpp_module, attr):
17
+ self.cpp_module = cpp_module
18
+ self.attr = attr
19
+
20
+ @property
21
+ def cpp_dict(self):
22
+ return getattr(self.cpp_module, self.attr)
23
+
24
+ # Magic methods cannot be assigned dynamically and bypass ``getattr``, so we
25
+ # must manually override them.
26
+
27
+ def items(self):
28
+ return self.cpp_dict.items()
29
+
30
+ def keys(self):
31
+ return self.cpp_dict.keys()
32
+
33
+ def values(self):
34
+ return self.cpp_dict.values()
35
+
36
+ def __iter__(self):
37
+ return self.cpp_dict.__iter__()
38
+
39
+ def __len__(self):
40
+ return self.cpp_dict.__len__()
41
+
42
+ def __contains__(self, key):
43
+ return self.cpp_dict.__contains__(key)
44
+
45
+ def __getitem__(self, key):
46
+ return self.cpp_dict.__getitem__(key)
47
+
48
+
49
+ class ModuleWrapper(nn.Module):
50
+ """A subclass of ``torch.nn.Module`` that wraps a C++ frontend module and delegates all access."""
51
+
52
+ def __init__(self, cpp_module):
53
+ # Assign before the super class constructor so ``self.training`` can be
54
+ # assigned to in the super class constructor.
55
+ self.cpp_module = cpp_module
56
+ super().__init__()
57
+ self._parameters = OrderedDictWrapper(cpp_module, "_parameters") # type: ignore[assignment]
58
+ self._buffers: OrderedDictWrapper = OrderedDictWrapper(cpp_module, "_buffers") # type: ignore[assignment]
59
+ self._modules: OrderedDictWrapper = OrderedDictWrapper(cpp_module, "_modules") # type: ignore[assignment]
60
+ for attr in dir(cpp_module):
61
+ # Skip magic methods and the three attributes above.
62
+ if not attr.startswith("_"):
63
+ setattr(self, attr, getattr(self.cpp_module, attr))
64
+
65
+ def _apply(self, fn, recurse=True):
66
+ for param in self.parameters():
67
+ # Tensors stored in modules are graph leaves, and we don't
68
+ # want to create copy nodes, so we have to unpack the data.
69
+ param.data = fn(param.data)
70
+ if param._grad is not None:
71
+ param._grad.data = fn(param._grad.data)
72
+
73
+ for buf in self.buffers():
74
+ buf.data = fn(buf.data)
75
+
76
+ return self
77
+
78
+ # nn.Module defines training as a boolean
79
+ @property # type: ignore[override]
80
+ def training(self):
81
+ return self.cpp_module.training
82
+
83
+ @training.setter
84
+ def training(self, mode):
85
+ self.cpp_module.train(mode)
86
+
87
+ def __repr__(self):
88
+ return self.cpp_module.__repr__()
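``OrderedDictWrapper`` only needs the bound object to expose the named attribute, so a plain Python stand-in is enough to illustrate the dynamic lookup; ``fake_cpp_module`` below is a hypothetical stand-in, not a real C++ frontend module:

    from types import SimpleNamespace
    from torch.nn.cpp import OrderedDictWrapper

    fake_cpp_module = SimpleNamespace(_parameters={"weight": 1.0})
    params = OrderedDictWrapper(fake_cpp_module, "_parameters")
    assert list(params.keys()) == ["weight"]
    # Changes made on the wrapped object later are still visible, because
    # cpp_dict is re-evaluated on every access.
    fake_cpp_module._parameters["bias"] = 0.0
    assert len(params) == 2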
llmeval-env/lib/python3.10/site-packages/torch/nn/functional.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/torch/nn/functional.pyi ADDED
@@ -0,0 +1,682 @@
1
+ from typing import (
2
+ Any,
3
+ Callable,
4
+ Dict,
5
+ List,
6
+ Literal,
7
+ Optional,
8
+ overload,
9
+ Sequence,
10
+ Tuple,
11
+ Union,
12
+ )
13
+
14
+ from torch import Tensor
15
+ from torch.types import _dtype, _int, _size
16
+
17
+ from .common_types import (
18
+ _ratio_any_t,
19
+ _size_1_t,
20
+ _size_2_opt_t,
21
+ _size_2_t,
22
+ _size_3_opt_t,
23
+ _size_3_t,
24
+ _size_any_t,
25
+ )
26
+
27
+ # 'TypedDict' is a new accepted type that represents a dictionary with a fixed set of allowed keys.
28
+ # It is standards-track but not in `typing` yet. We leave this here to be uncommented once the feature
29
+ # is widespread.
30
+
31
+ # from mypy_extensions import TypedDict
32
+
33
+ # GRID_SAMPLE_INTERPOLATION_MODES = TypedDict('GRID_SAMPLE_INTERPOLATION_MODES', {'bilinear': int, 'nearest': int})
34
+ # GRID_SAMPLE_PADDING_MODES = TypedDict('GRID_SAMPLE_PADDING_MODES', {'zeros': int, 'border': int, 'reflection': int})
35
+
36
+ GRID_SAMPLE_INTERPOLATION_MODES = Dict[str, int]
37
+ GRID_SAMPLE_PADDING_MODES = Dict[str, int]
38
+
39
+ # These stubs were generated by running stubgen (`stubgen --parse-only functional.py`), followed by manual cleaning.
40
+ #
41
+ # The 'BroadcastingList{1,2,3}' types were replaced by `_size` or _output_ratio, as appropriate.
42
+ # This was necessary since the JIT uses BroadcastingList* types but static checking with mypy etc requires a `Sequence`
43
+ # type. There is no way to express the expected lengths of these lists in the current Python typing system.
44
+ #
45
+ # Functions created via `_add_docstr` in `functional.py` were merely typed as `Any` by `stubgen`, so those were
46
+ # deleted from the stub and replaced by generated declarations. See `gen_pyi` for the implementation of the code
47
+ # generation logic for those functions. In the future, it might be worth looking into using the mypy plugin system
48
+ # to encode the type semantics of `_add_docstr`, should that system ever become widespread.
49
+ def fractional_max_pool2d_with_indices(
50
+ input: Tensor,
51
+ kernel_size: _size,
52
+ output_size: Optional[_size] = ...,
53
+ output_ratio: Optional[_ratio_any_t] = ...,
54
+ return_indices: bool = ...,
55
+ _random_samples: Optional[Tensor] = ...,
56
+ ) -> Tuple[Tensor, Tensor]: ...
57
+ def fractional_max_pool3d_with_indices(
58
+ input: Tensor,
59
+ kernel_size: _size,
60
+ output_size: Optional[_size] = ...,
61
+ output_ratio: Optional[_ratio_any_t] = ...,
62
+ return_indices: bool = ...,
63
+ _random_samples: Optional[Tensor] = ...,
64
+ ) -> Tuple[Tensor, Tensor]: ...
65
+ def max_pool1d_with_indices(
66
+ input: Tensor,
67
+ kernel_size: _size,
68
+ stride: Optional[_size] = ...,
69
+ padding: _size = ...,
70
+ dilation: _size = ...,
71
+ ceil_mode: bool = ...,
72
+ return_indices: bool = ...,
73
+ ) -> Tuple[Tensor, Tensor]: ...
74
+ def max_pool2d_with_indices(
75
+ input: Tensor,
76
+ kernel_size: _size,
77
+ stride: Optional[_size] = ...,
78
+ padding: _size = ...,
79
+ dilation: _size = ...,
80
+ ceil_mode: bool = ...,
81
+ return_indices: bool = ...,
82
+ ) -> Tuple[Tensor, Tensor]: ...
83
+ def max_pool3d_with_indices(
84
+ input: Tensor,
85
+ kernel_size: _size,
86
+ stride: Optional[_size] = ...,
87
+ padding: _size = ...,
88
+ dilation: _size = ...,
89
+ ceil_mode: bool = ...,
90
+ return_indices: bool = ...,
91
+ ) -> Tuple[Tensor, Tensor]: ...
92
+ def max_unpool1d(
93
+ input: Tensor,
94
+ indices: Tensor,
95
+ kernel_size: _size,
96
+ stride: Optional[_size] = ...,
97
+ padding: _size = ...,
98
+ output_size: Optional[_size] = ...,
99
+ ) -> Tensor: ...
100
+ def max_unpool2d(
101
+ input: Tensor,
102
+ indices: Tensor,
103
+ kernel_size: _size,
104
+ stride: Optional[_size] = ...,
105
+ padding: _size = ...,
106
+ output_size: Optional[_size] = ...,
107
+ ) -> Tensor: ...
108
+ def max_unpool3d(
109
+ input: Tensor,
110
+ indices: Tensor,
111
+ kernel_size: _size,
112
+ stride: Optional[_size] = ...,
113
+ padding: _size = ...,
114
+ output_size: Optional[_size] = ...,
115
+ ) -> Tensor: ...
116
+ def lp_pool1d(
117
+ input: Tensor,
118
+ norm_type: float,
119
+ kernel_size: _size_1_t,
120
+ stride: Union[Optional[_size], Optional[int]] = ...,
121
+ ceil_mode: bool = ...,
122
+ ) -> Tensor: ...
123
+ def lp_pool2d(
124
+ input: Tensor,
125
+ norm_type: float,
126
+ kernel_size: _size_2_t,
127
+ stride: Union[Optional[_size], Optional[int]] = ...,
128
+ ceil_mode: bool = ...,
129
+ ) -> Tensor: ...
130
+ def lp_pool3d(
131
+ input: Tensor,
132
+ norm_type: float,
133
+ kernel_size: _size_3_t,
134
+ stride: Union[Optional[_size], Optional[int]] = ...,
135
+ ceil_mode: bool = ...,
136
+ ) -> Tensor: ...
137
+ def adaptive_max_pool1d_with_indices(
138
+ input: Tensor,
139
+ output_size: _size,
140
+ return_indices: bool = ...,
141
+ ) -> Tuple[Tensor, Tensor]: ...
142
+ def adaptive_max_pool2d_with_indices(
143
+ input: Tensor,
144
+ output_size: _size_2_opt_t,
145
+ return_indices: bool = ...,
146
+ ) -> Tuple[Tensor, Tensor]: ...
147
+ def adaptive_max_pool3d_with_indices(
148
+ input: Tensor,
149
+ output_size: _size_3_opt_t,
150
+ return_indices: bool = ...,
151
+ ) -> Tuple[Tensor, Tensor]: ...
152
+ def adaptive_avg_pool2d(input: Tensor, output_size: _size_2_opt_t) -> Tensor: ...
153
+ def adaptive_avg_pool3d(input: Tensor, output_size: _size_3_opt_t) -> Tensor: ...
154
+ def dropout(
155
+ input: Tensor,
156
+ p: float = ...,
157
+ training: bool = ...,
158
+ inplace: bool = ...,
159
+ ) -> Tensor: ...
160
+ def alpha_dropout(
161
+ input: Tensor,
162
+ p: float = ...,
163
+ training: bool = ...,
164
+ inplace: bool = ...,
165
+ ) -> Tensor: ...
166
+ def dropout1d(
167
+ input: Tensor,
168
+ p: float = ...,
169
+ training: bool = ...,
170
+ inplace: bool = ...,
171
+ ) -> Tensor: ...
172
+ def dropout2d(
173
+ input: Tensor,
174
+ p: float = ...,
175
+ training: bool = ...,
176
+ inplace: bool = ...,
177
+ ) -> Tensor: ...
178
+ def dropout3d(
179
+ input: Tensor,
180
+ p: float = ...,
181
+ training: bool = ...,
182
+ inplace: bool = ...,
183
+ ) -> Tensor: ...
184
+ def feature_alpha_dropout(
185
+ input: Tensor,
186
+ p: float = ...,
187
+ training: bool = ...,
188
+ inplace: bool = ...,
189
+ ) -> Tensor: ...
190
+ def threshold(
191
+ input: Tensor,
192
+ threshold: float,
193
+ value: float,
194
+ inplace: bool = ...,
195
+ ) -> Tensor: ...
196
+ def relu(input: Tensor, inplace: bool = ...) -> Tensor: ...
197
+ def glu(input: Tensor, dim: int = ...) -> Tensor: ...
198
+ def hardtanh(
199
+ input: Tensor,
200
+ min_val: float = ...,
201
+ max_val: float = ...,
202
+ inplace: bool = ...,
203
+ ) -> Tensor: ...
204
+ def relu6(input: Tensor, inplace: bool = ...) -> Tensor: ...
205
+ def elu(input: Tensor, alpha: float = ..., inplace: bool = ...) -> Tensor: ...
206
+ def selu(input: Tensor, inplace: bool = ...) -> Tensor: ...
207
+ def celu(input: Tensor, alpha: float = ..., inplace: bool = ...) -> Tensor: ...
208
+ def leaky_relu(
209
+ input: Tensor,
210
+ negative_slope: float = ...,
211
+ inplace: bool = ...,
212
+ ) -> Tensor: ...
213
+ def rrelu(
214
+ input: Tensor,
215
+ lower: float = ...,
216
+ upper: float = ...,
217
+ training: bool = ...,
218
+ inplace: bool = ...,
219
+ ) -> Tensor: ...
220
+ def tanhshrink(input: Any): ...
221
+ def softsign(input: Any): ...
222
+ def softmin(
223
+ input: Tensor,
224
+ dim: Optional[int] = ...,
225
+ _stacklevel: int = ...,
226
+ dtype: Optional[_dtype] = ...,
227
+ ) -> Tensor: ...
228
+ def softmax(
229
+ input: Tensor,
230
+ dim: Optional[int] = ...,
231
+ _stacklevel: int = ...,
232
+ dtype: Optional[_dtype] = ...,
233
+ ) -> Tensor: ...
234
+ def gumbel_softmax(
235
+ logits: Tensor,
236
+ tau: float = ...,
237
+ hard: bool = ...,
238
+ eps: float = ...,
239
+ dim: int = ...,
240
+ ) -> Tensor: ...
241
+ def log_softmax(
242
+ input: Tensor,
243
+ dim: Optional[int] = ...,
244
+ _stacklevel: int = ...,
245
+ dtype: Optional[_dtype] = ...,
246
+ ) -> Tensor: ...
247
+ def tanh(input: Any): ...
248
+ def sigmoid(input: Any) -> Tensor: ...
249
+ def hardsigmoid(input: Tensor, inplace: bool = False) -> Tensor: ...
250
+ def silu(input: Tensor, inplace: bool = False) -> Tensor: ...
251
+ def mish(input: Tensor, inplace: bool = False) -> Tensor: ...
252
+ def hardswish(input: Tensor, inplace: bool = False) -> Tensor: ...
253
+ def embedding(
254
+ input: Tensor,
255
+ weight: Tensor,
256
+ padding_idx: Optional[int] = ...,
257
+ max_norm: Optional[float] = ...,
258
+ norm_type: float = ...,
259
+ scale_grad_by_freq: bool = ...,
260
+ sparse: bool = ...,
261
+ ) -> Tensor: ...
262
+ def embedding_bag(
263
+ input: Tensor,
264
+ weight: Tensor,
265
+ offsets: Optional[Tensor] = ...,
266
+ max_norm: Optional[float] = ...,
267
+ norm_type: float = ...,
268
+ scale_grad_by_freq: bool = ...,
269
+ mode: str = ...,
270
+ sparse: bool = ...,
271
+ per_sample_weights: Optional[Tensor] = ...,
272
+ include_last_offset: bool = ...,
273
+ padding_idx: Optional[int] = ...,
274
+ ) -> Tensor: ...
275
+ def batch_norm(
276
+ input: Tensor,
277
+ running_mean: Optional[Tensor],
278
+ running_var: Optional[Tensor],
279
+ weight: Optional[Tensor] = ...,
280
+ bias: Optional[Tensor] = ...,
281
+ training: bool = ...,
282
+ momentum: float = ...,
283
+ eps: float = ...,
284
+ ) -> Tensor: ...
285
+ def instance_norm(
286
+ input: Tensor,
287
+ running_mean: Optional[Tensor] = ...,
288
+ running_var: Optional[Tensor] = ...,
289
+ weight: Optional[Tensor] = ...,
290
+ bias: Optional[Tensor] = ...,
291
+ use_input_stats: bool = ...,
292
+ momentum: float = ...,
293
+ eps: float = ...,
294
+ ) -> Tensor: ...
295
+ def layer_norm(
296
+ input: Tensor,
297
+ normalized_shape: Sequence[int],
298
+ weight: Optional[Tensor] = ...,
299
+ bias: Optional[Tensor] = ...,
300
+ eps: float = ...,
301
+ ) -> Tensor: ...
302
+ def group_norm(
303
+ input: Tensor,
304
+ num_groups: int,
305
+ weight: Optional[Tensor] = ...,
306
+ bias: Optional[Tensor] = ...,
307
+ eps: float = ...,
308
+ ) -> Tensor: ...
309
+ def local_response_norm(
310
+ input: Tensor,
311
+ size: int,
312
+ alpha: float = ...,
313
+ beta: float = ...,
314
+ k: float = ...,
315
+ ) -> Tensor: ...
316
+ def ctc_loss(
317
+ log_probs: Tensor,
318
+ targets: Tensor,
319
+ input_lengths: Tensor,
320
+ target_lengths: Tensor,
321
+ blank: int = ...,
322
+ reduction: str = ...,
323
+ zero_infinity: bool = ...,
324
+ ) -> Tensor: ...
325
+ def nll_loss(
326
+ input: Tensor,
327
+ target: Tensor,
328
+ weight: Optional[Tensor] = ...,
329
+ size_average: Optional[bool] = ...,
330
+ ignore_index: int = ...,
331
+ reduce: Optional[bool] = ...,
332
+ reduction: str = ...,
333
+ ) -> Tensor: ...
334
+ def poisson_nll_loss(
335
+ input: Tensor,
336
+ target: Tensor,
337
+ log_input: bool = ...,
338
+ full: bool = ...,
339
+ size_average: Optional[bool] = ...,
340
+ eps: float = ...,
341
+ reduce: Optional[bool] = ...,
342
+ reduction: str = ...,
343
+ ) -> Tensor: ...
344
+ def gaussian_nll_loss(
345
+ input: Tensor,
346
+ target: Tensor,
347
+ var: Tensor,
348
+ full: Optional[bool] = ...,
349
+ eps: Optional[float] = ...,
350
+ reduction: Optional[str] = ...,
351
+ ) -> Tensor: ...
352
+ def kl_div(
353
+ input: Tensor,
354
+ target: Tensor,
355
+ size_average: Optional[bool] = ...,
356
+ reduce: Optional[bool] = ...,
357
+ reduction: str = ...,
358
+ log_target: bool = ...,
359
+ ) -> Tensor: ...
360
+ def cross_entropy(
361
+ input: Tensor,
362
+ target: Tensor,
363
+ weight: Optional[Tensor] = ...,
364
+ size_average: Optional[bool] = ...,
365
+ ignore_index: int = ...,
366
+ reduce: Optional[bool] = ...,
367
+ reduction: str = ...,
368
+ label_smoothing: float = ...,
369
+ ) -> Tensor: ...
370
+ def binary_cross_entropy(
371
+ input: Tensor,
372
+ target: Tensor,
373
+ weight: Optional[Tensor] = ...,
374
+ size_average: Optional[bool] = ...,
375
+ reduce: Optional[bool] = ...,
376
+ reduction: str = ...,
377
+ ) -> Tensor: ...
378
+ def binary_cross_entropy_with_logits(
379
+ input: Tensor,
380
+ target: Tensor,
381
+ weight: Optional[Tensor] = ...,
382
+ size_average: Optional[bool] = ...,
383
+ reduce: Optional[bool] = ...,
384
+ reduction: str = ...,
385
+ pos_weight: Optional[Tensor] = ...,
386
+ ) -> Tensor: ...
387
+ def smooth_l1_loss(
388
+ input: Tensor,
389
+ target: Tensor,
390
+ size_average: Optional[bool] = ...,
391
+ reduce: Optional[bool] = ...,
392
+ reduction: str = ...,
393
+ beta: float = ...,
394
+ ) -> Tensor: ...
395
+ def huber_loss(
396
+ input: Tensor,
397
+ target: Tensor,
398
+ reduction: str = ...,
399
+ delta: float = ...,
400
+ ) -> Tensor: ...
401
+ def l1_loss(
402
+ input: Tensor,
403
+ target: Tensor,
404
+ size_average: Optional[bool] = ...,
405
+ reduce: Optional[bool] = ...,
406
+ reduction: str = ...,
407
+ ) -> Tensor: ...
408
+ def mse_loss(
409
+ input: Tensor,
410
+ target: Tensor,
411
+ size_average: Optional[bool] = ...,
412
+ reduce: Optional[bool] = ...,
413
+ reduction: str = ...,
414
+ ) -> Tensor: ...
415
+ def margin_ranking_loss(
416
+ input1: Tensor,
417
+ input2: Tensor,
418
+ target: Tensor,
419
+ margin: float = ...,
420
+ size_average: Optional[bool] = ...,
421
+ reduce: Optional[bool] = ...,
422
+ reduction: str = ...,
423
+ ) -> Tensor: ...
424
+ def hinge_embedding_loss(
425
+ input: Tensor,
426
+ target: Tensor,
427
+ margin: float = ...,
428
+ size_average: Optional[bool] = ...,
429
+ reduce: Optional[bool] = ...,
430
+ reduction: str = ...,
431
+ ) -> Tensor: ...
432
+ def multilabel_margin_loss(
433
+ input: Tensor,
434
+ target: Tensor,
435
+ size_average: Optional[bool] = ...,
436
+ reduce: Optional[bool] = ...,
437
+ reduction: str = ...,
438
+ ) -> Tensor: ...
439
+ def soft_margin_loss(
440
+ input: Tensor,
441
+ target: Tensor,
442
+ size_average: Optional[bool] = ...,
443
+ reduce: Optional[bool] = ...,
444
+ reduction: str = ...,
445
+ ) -> Tensor: ...
446
+ def multilabel_soft_margin_loss(
447
+ input: Tensor,
448
+ target: Tensor,
449
+ weight: Optional[Tensor] = ...,
450
+ size_average: Optional[bool] = ...,
451
+ reduce: Optional[bool] = ...,
452
+ reduction: str = ...,
453
+ ) -> Tensor: ...
454
+ def cosine_embedding_loss(
455
+ input1: Tensor,
456
+ input2: Tensor,
457
+ target: Tensor,
458
+ margin: float = ...,
459
+ size_average: Optional[bool] = ...,
460
+ reduce: Optional[bool] = ...,
461
+ reduction: str = ...,
462
+ ) -> Tensor: ...
463
+ def multi_margin_loss(
464
+ input: Tensor,
465
+ target: Tensor,
466
+ p: int = ...,
467
+ margin: float = ...,
468
+ weight: Optional[Tensor] = ...,
469
+ size_average: Optional[bool] = ...,
470
+ reduce: Optional[bool] = ...,
471
+ reduction: str = ...,
472
+ ) -> Tensor: ...
473
+ def upsample(
474
+ input: Any,
475
+ size: Optional[Any] = ...,
476
+ scale_factor: Optional[Any] = ...,
477
+ mode: str = ...,
478
+ align_corners: Optional[Any] = ...,
479
+ ): ...
480
+ def interpolate(
481
+ input: Any,
482
+ size: Optional[Any] = ...,
483
+ scale_factor: Optional[Any] = ...,
484
+ mode: str = ...,
485
+ align_corners: Optional[Any] = ...,
486
+ recompute_scale_factor: Optional[Any] = ...,
487
+ antialias: bool = ...,
488
+ ): ...
489
+ def upsample_nearest(
490
+ input: Any,
491
+ size: Optional[Any] = ...,
492
+ scale_factor: Optional[Any] = ...,
493
+ ): ...
494
+ def upsample_bilinear(
495
+ input: Any,
496
+ size: Optional[Any] = ...,
497
+ scale_factor: Optional[Any] = ...,
498
+ ): ...
499
+ def grid_sample(
500
+ input: Tensor,
501
+ grid: Tensor,
502
+ mode: str = ...,
503
+ padding_mode: str = ...,
504
+ align_corners: Optional[Any] = ...,
505
+ ) -> Tensor: ...
506
+ def affine_grid(
507
+ theta: Tensor,
508
+ size: List[int],
509
+ align_corners: Optional[Any] = ...,
510
+ ) -> Tensor: ...
511
+ def triplet_margin_loss(
512
+ anchor: Tensor,
513
+ positive: Tensor,
514
+ negative: Tensor,
515
+ margin: float = ...,
516
+ p: float = ...,
517
+ eps: float = ...,
518
+ swap: bool = ...,
519
+ size_average: Optional[bool] = ...,
520
+ reduce: Optional[bool] = ...,
521
+ reduction: str = ...,
522
+ ) -> Tensor: ...
523
+ def triplet_margin_with_distance_loss(
524
+ anchor: Tensor,
525
+ positive: Tensor,
526
+ negative: Tensor,
527
+ *,
528
+ distance_function: Optional[Callable[[Tensor, Tensor], Tensor]] = ...,
529
+ margin: float = ...,
530
+ swap: bool = ...,
531
+ reduction: str = ...,
532
+ ) -> Tensor: ...
533
+ def normalize(
534
+ input: Tensor,
535
+ p: float = ...,
536
+ dim: int = ...,
537
+ eps: float = ...,
538
+ out: Optional[Tensor] = ...,
539
+ ) -> Tensor: ...
540
+ def assert_int_or_pair(
541
+ arg: Any,
542
+ arg_name: Any,
543
+ message: Any,
544
+ ) -> None: ...
545
+ def unfold(
546
+ input: Tensor,
547
+ kernel_size: _size_any_t,
548
+ dilation: _size_any_t = ...,
549
+ padding: _size_any_t = ...,
550
+ stride: _size_any_t = ...,
551
+ ) -> Tensor: ...
552
+ def fold(
553
+ input: Tensor,
554
+ output_size: _size_any_t,
555
+ kernel_size: _size_any_t,
556
+ dilation: _size_any_t = ...,
557
+ padding: _size_any_t = ...,
558
+ stride: _size_any_t = ...,
559
+ ) -> Tensor: ...
560
+ def _canonical_mask(
561
+ mask: Optional[Tensor],
562
+ mask_name: str,
563
+ other_type: Optional[_dtype],
564
+ other_name: str,
565
+ target_type: _dtype,
566
+ check_other: bool = True,
567
+ ) -> Optional[Tensor]: ...
568
+ def _none_or_dtype(input: Optional[Tensor]) -> Optional[_dtype]: ...
569
+ def multi_head_attention_forward(
570
+ query: Tensor,
571
+ key: Tensor,
572
+ value: Tensor,
573
+ embed_dim_to_check: int,
574
+ num_heads: int,
575
+ in_proj_weight: Optional[Tensor],
576
+ in_proj_bias: Optional[Tensor],
577
+ bias_k: Optional[Tensor],
578
+ bias_v: Optional[Tensor],
579
+ add_zero_attn: bool,
580
+ dropout_p: float,
581
+ out_proj_weight: Tensor,
582
+ out_proj_bias: Optional[Tensor],
583
+ training: bool = True,
584
+ key_padding_mask: Optional[Tensor] = None,
585
+ need_weights: bool = True,
586
+ attn_mask: Optional[Tensor] = None,
587
+ use_separate_proj_weight: bool = False,
588
+ q_proj_weight: Optional[Tensor] = None,
589
+ k_proj_weight: Optional[Tensor] = None,
590
+ v_proj_weight: Optional[Tensor] = None,
591
+ static_k: Optional[Tensor] = None,
592
+ static_v: Optional[Tensor] = None,
593
+ average_attn_weights: bool = True,
594
+ is_causal: bool = False,
595
+ ) -> Tuple[Tensor, Optional[Tensor]]: ...
596
+
597
+ from .. import conv1d as conv1d
598
+ from .. import conv2d as conv2d
599
+ from .. import conv3d as conv3d
600
+ from .. import conv_transpose1d as conv_transpose1d
601
+ from .. import conv_transpose2d as conv_transpose2d
602
+ from .. import conv_transpose3d as conv_transpose3d
603
+ from .. import conv_tbc as conv_tbc
604
+ from .. import avg_pool1d as avg_pool1d
605
+ from .. import adaptive_avg_pool1d as adaptive_avg_pool1d
606
+ from .. import relu_ as relu_
607
+ from .. import selu_ as selu_
608
+ from .. import celu_ as celu_
609
+ from .. import prelu as prelu
610
+ from .. import rrelu_ as rrelu_
611
+ from .. import hardshrink as hardshrink
612
+ from .. import bilinear as bilinear
613
+ from .. import pixel_shuffle as pixel_shuffle
614
+ from .. import pixel_unshuffle as pixel_unshuffle
615
+ from .. import channel_shuffle as channel_shuffle
616
+ from .. import native_channel_shuffle as native_channel_shuffle
617
+ from .. import pairwise_distance as pairwise_distance
618
+ from .. import pdist as pdist
619
+ from .. import cosine_similarity as cosine_similarity
620
+ from .._C._nn import avg_pool2d as avg_pool2d
621
+ from .._C._nn import avg_pool3d as avg_pool3d
622
+ from .._C._nn import hardtanh_ as hardtanh_
623
+ from .._C._nn import elu_ as elu_
624
+ from .._C._nn import leaky_relu_ as leaky_relu_
625
+ from .._C._nn import gelu as gelu
626
+ from .._C._nn import softplus as softplus
627
+ from .._C._nn import softshrink as softshrink
628
+ from .._C._nn import linear as linear
629
+ from .._C._nn import pad as pad
630
+ from .._C._nn import one_hot as one_hot
631
+ from .._C._nn import scaled_dot_product_attention as scaled_dot_product_attention
632
+ from .._C._nn import log_sigmoid
633
+ logsigmoid = log_sigmoid
634
+
635
+ @overload
636
+ def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[False] = False) -> Tensor: ...
637
+ @overload
638
+ def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ...
639
+ @overload
640
+ def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size], *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ...
641
+ @overload
642
+ def adaptive_max_pool2d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[False] = False) -> Tensor: ...
643
+ @overload
644
+ def adaptive_max_pool2d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ...
645
+ @overload
646
+ def adaptive_max_pool2d(input: Tensor, output_size: Union[_int, _size], *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ...
647
+ @overload
648
+ def adaptive_max_pool3d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[False] = False) -> Tensor: ...
649
+ @overload
650
+ def adaptive_max_pool3d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ...
651
+ @overload
652
+ def adaptive_max_pool3d(input: Tensor, output_size: Union[_int, _size], *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ...
653
+ @overload
654
+ def fractional_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]] = None, output_ratio: Optional[_ratio_any_t] = None, return_indices: Literal[False] = False, _random_samples: Optional[Tensor] = None) -> Tensor: ...
655
+ @overload
656
+ def fractional_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]], output_ratio: Optional[_ratio_any_t], return_indices: Literal[True], /, _random_samples: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ...
657
+ @overload
658
+ def fractional_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]] = None, output_ratio: Optional[_ratio_any_t] = None, *, return_indices: Literal[True], _random_samples: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ...
659
+ @overload
660
+ def fractional_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]] = None, output_ratio: Optional[_ratio_any_t] = None, return_indices: Literal[False] = False, _random_samples: Optional[Tensor] = None) -> Tensor: ...
661
+ @overload
662
+ def fractional_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]], output_ratio: Optional[_ratio_any_t], return_indices: Literal[True], /, _random_samples: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ...
663
+ @overload
664
+ def fractional_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]] = None, output_ratio: Optional[_ratio_any_t] = None, *, return_indices: Literal[True], _random_samples: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ...
665
+ @overload
666
+ def max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, return_indices: Literal[False] = False) -> Tensor: ...
667
+ @overload
668
+ def max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]], padding: Union[_int, _size], dilation: Union[_int, _size], ceil_mode: bool, return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ...
669
+ @overload
670
+ def max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ...
671
+ @overload
672
+ def max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, return_indices: Literal[False] = False) -> Tensor: ...
673
+ @overload
674
+ def max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]], padding: Union[_int, _size], dilation: Union[_int, _size], ceil_mode: bool, return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ...
675
+ @overload
676
+ def max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ...
677
+ @overload
678
+ def max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, return_indices: Literal[False] = False) -> Tensor: ...
679
+ @overload
680
+ def max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]], padding: Union[_int, _size], dilation: Union[_int, _size], ceil_mode: bool, return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ...
681
+ @overload
682
+ def max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ...
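Note (illustrative, not part of the stub file above): the paired `Literal[False]` / `Literal[True]` overloads let a static type checker infer whether a pooling call returns a single `Tensor` or a `(values, indices)` pair. A minimal usage sketch against the public `torch.nn.functional` API:

    import torch
    import torch.nn.functional as F

    x = torch.randn(1, 3, 8, 8)
    # Matches the Literal[False] overload: a plain Tensor comes back.
    pooled = F.max_pool2d(x, kernel_size=2)
    # Matches the Literal[True] overload: a (values, indices) pair comes back.
    pooled, indices = F.max_pool2d(x, kernel_size=2, return_indices=True)
    print(pooled.shape, indices.shape)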
llmeval-env/lib/python3.10/site-packages/torch/nn/grad.py ADDED
@@ -0,0 +1,189 @@
1
+ """Gradient interface."""
2
+
3
+ import torch
4
+ from .modules.utils import _single, _pair, _triple
5
+
6
+
7
+ def conv1d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):
8
+ r"""Compute the gradient of conv1d with respect to the input of the convolution.
9
+
10
+ This is the same as the 1D transposed convolution operator under the hood but requires
11
+ the shape of the gradient w.r.t. input to be specified explicitly.
12
+
13
+ Args:
14
+ input_size : Shape of the input gradient tensor
15
+ weight: weight tensor (out_channels x in_channels/groups x kW)
16
+ grad_output : output gradient tensor (minibatch x out_channels x oW)
17
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
18
+ padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
19
+ dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
20
+ groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
21
+
22
+ Examples::
23
+
24
+ >>> input = torch.randn(1, 1, 3, requires_grad=True)
25
+ >>> weight = torch.randn(1, 1, 1, requires_grad=True)
26
+ >>> output = F.conv1d(input, weight)
27
+ >>> grad_output = torch.randn(output.shape)
28
+ >>> grad_input = torch.autograd.grad(output, input, grad_output)
29
+ >>> F.grad.conv1d_input(input.shape, weight, grad_output)
30
+
31
+ """
32
+ input = grad_output.new_empty(1).expand(input_size)
33
+
34
+ return torch.ops.aten.convolution_backward(grad_output, input, weight, None,
35
+ _single(stride), _single(padding), _single(dilation),
36
+ False, [0], groups, (True, False, False))[0]
37
+
38
+
39
+ def conv1d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
40
+ r"""Compute the gradient of conv1d with respect to the weight of the convolution.
41
+
42
+ Args:
43
+ input: input tensor of shape (minibatch x in_channels x iW)
44
+ weight_size : Shape of the weight gradient tensor
45
+ grad_output : output gradient tensor (minibatch x out_channels x oW)
46
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
47
+ padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
48
+ dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
49
+ groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
50
+
51
+ Examples::
52
+
53
+ >>> input = torch.randn(1, 1, 3, requires_grad=True)
54
+ >>> weight = torch.randn(1, 1, 1, requires_grad=True)
55
+ >>> output = F.conv1d(input, weight)
56
+ >>> grad_output = torch.randn(output.shape)
57
+ >>> # xdoctest: +SKIP
58
+ >>> grad_weight = torch.autograd.grad(output, weight, grad_output)
59
+ >>> F.grad.conv1d_weight(input, weight.shape, grad_output)
60
+
61
+ """
62
+ weight = grad_output.new_empty(1).expand(weight_size)
63
+
64
+ return torch.ops.aten.convolution_backward(grad_output, input, weight, None,
65
+ _single(stride), _single(padding), _single(dilation),
66
+ False, [0], groups, (False, True, False))[1]
67
+
68
+
69
+ def conv2d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):
70
+ r"""Compute the gradient of conv2d with respect to the input of the convolution.
71
+
72
+ This is the same as the 2D transposed convolution operator under the hood but requires
73
+ the shape of the gradient w.r.t. input to be specified explicitly.
74
+
75
+ Args:
76
+ input_size : Shape of the input gradient tensor
77
+ weight: weight tensor (out_channels x in_channels/groups x kH x kW)
78
+ grad_output : output gradient tensor (minibatch x out_channels x oH x oW)
79
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
80
+ padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
81
+ dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
82
+ groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
83
+
84
+ Examples::
85
+
86
+ >>> input = torch.randn(1, 1, 3, 3, requires_grad=True)
87
+ >>> weight = torch.randn(1, 1, 1, 2, requires_grad=True)
88
+ >>> output = F.conv2d(input, weight)
89
+ >>> grad_output = torch.randn(output.shape)
90
+ >>> grad_input = torch.autograd.grad(output, input, grad_output)
91
+ >>> F.grad.conv2d_input(input.shape, weight, grad_output)
92
+
93
+ """
94
+ input = grad_output.new_empty(1).expand(input_size)
95
+
96
+ return torch.ops.aten.convolution_backward(grad_output, input, weight, None,
97
+ _pair(stride), _pair(padding), _pair(dilation),
98
+ False, [0], groups, (True, False, False))[0]
99
+
100
+
101
+ def conv2d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
102
+ r"""Compute the gradient of conv2d with respect to the weight of the convolution.
103
+
104
+ Args:
105
+ input: input tensor of shape (minibatch x in_channels x iH x iW)
106
+ weight_size : Shape of the weight gradient tensor
107
+ grad_output : output gradient tensor (minibatch x out_channels x oH x oW)
108
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
109
+ padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
110
+ dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
111
+ groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
112
+
113
+ Examples::
114
+
115
+ >>> input = torch.randn(1, 1, 3, 3, requires_grad=True)
116
+ >>> weight = torch.randn(1, 1, 1, 2, requires_grad=True)
117
+ >>> output = F.conv2d(input, weight)
118
+ >>> grad_output = torch.randn(output.shape)
119
+ >>> # xdoctest: +SKIP
120
+ >>> grad_weight = torch.autograd.grad(output, weight, grad_output)
121
+ >>> F.grad.conv2d_weight(input, weight.shape, grad_output)
122
+
123
+ """
124
+ weight = grad_output.new_empty(1).expand(weight_size)
125
+
126
+ return torch.ops.aten.convolution_backward(grad_output, input, weight, None,
127
+ _pair(stride), _pair(padding), _pair(dilation),
128
+ False, [0], groups, (False, True, False))[1]
129
+
130
+
131
+ def conv3d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):
132
+ r"""Compute the gradient of conv3d with respect to the input of the convolution.
133
+
134
+ This is the same as the 3D transposed convolution operator under the hood but requires
135
+ the shape of the gradient w.r.t. input to be specified explicitly.
136
+
137
+ Args:
138
+ input_size : Shape of the input gradient tensor
139
+ weight: weights tensor (out_channels x in_channels/groups x kT x kH x kW)
140
+ grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)
141
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
142
+ padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
143
+ dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
144
+ groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
145
+
146
+ Examples::
147
+
148
+ >>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True)
149
+ >>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True)
150
+ >>> output = F.conv3d(input, weight)
151
+ >>> grad_output = torch.randn(output.shape)
152
+ >>> grad_input = torch.autograd.grad(output, input, grad_output)
153
+ >>> F.grad.conv3d_input(input.shape, weight, grad_output)
154
+
155
+ """
156
+ input = grad_output.new_empty(1).expand(input_size)
157
+
158
+ return torch.ops.aten.convolution_backward(grad_output, input, weight, None,
159
+ _triple(stride), _triple(padding), _triple(dilation),
160
+ False, [0], groups, (True, False, False))[0]
161
+
162
+
163
+ def conv3d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
164
+ r"""Compute the gradient of conv3d with respect to the weight of the convolution.
165
+
166
+ Args:
167
+ input: input tensor of shape (minibatch x in_channels x iT x iH x iW)
168
+ weight_size : Shape of the weight gradient tensor
169
+ grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)
170
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
171
+ padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
172
+ dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
173
+ groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
174
+
175
+ Examples::
176
+
177
+ >>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True)
178
+ >>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True)
179
+ >>> output = F.conv3d(input, weight)
180
+ >>> grad_output = torch.randn(output.shape)
181
+ >>> grad_weight = torch.autograd.grad(output, weight, grad_output)
182
+ >>> F.grad.conv3d_weight(input, weight.shape, grad_output)
183
+
184
+ """
185
+ weight = grad_output.new_empty(1).expand(weight_size)
186
+
187
+ return torch.ops.aten.convolution_backward(grad_output, input, weight, None,
188
+ _triple(stride), _triple(padding), _triple(dilation),
189
+ False, [0], groups, (False, True, False))[1]
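Note (illustrative, not part of grad.py above): these helpers compute gradients through `torch.ops.aten.convolution_backward`, so they should agree with what autograd produces for the same convolution. A minimal sanity-check sketch, assuming the module is imported as `torch.nn.grad`:

    import torch
    import torch.nn.functional as F
    from torch.nn.grad import conv2d_input, conv2d_weight

    inp = torch.randn(1, 1, 5, 5, requires_grad=True)
    weight = torch.randn(1, 1, 3, 3, requires_grad=True)
    out = F.conv2d(inp, weight)
    grad_out = torch.randn(out.shape)

    # Reference gradients from autograd.
    gi_ref, gw_ref = torch.autograd.grad(out, (inp, weight), grad_out)

    # Manual gradients from the helpers defined above.
    gi = conv2d_input(inp.shape, weight, grad_out)
    gw = conv2d_weight(inp, weight.shape, grad_out)

    assert torch.allclose(gi_ref, gi, atol=1e-6)
    assert torch.allclose(gw_ref, gw, atol=1e-6)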
llmeval-env/lib/python3.10/site-packages/torch/nn/init.py ADDED
@@ -0,0 +1,626 @@
1
+ """This file contains utilities for initializing neural network parameters."""
2
+ import math
3
+ import warnings
4
+
5
+ from torch import Tensor
6
+ import torch
7
+ from typing import Optional as _Optional
8
+
9
+ # These no_grad_* functions are necessary as wrappers around the parts of these
10
+ # functions that use `with torch.no_grad()`. The JIT doesn't support context
11
+ # managers, so these need to be implemented as builtins. Using these wrappers
12
+ # lets us keep those builtins small and re-usable.
13
+ def _no_grad_uniform_(tensor, a, b, generator=None):
14
+ with torch.no_grad():
15
+ return tensor.uniform_(a, b, generator=generator)
16
+
17
+
18
+ def _no_grad_normal_(tensor, mean, std, generator=None):
19
+ with torch.no_grad():
20
+ return tensor.normal_(mean, std, generator=generator)
21
+
22
+
23
+ def _no_grad_trunc_normal_(tensor, mean, std, a, b, generator=None):
24
+ # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
25
+ def norm_cdf(x):
26
+ # Computes standard normal cumulative distribution function
27
+ return (1. + math.erf(x / math.sqrt(2.))) / 2.
28
+
29
+ if (mean < a - 2 * std) or (mean > b + 2 * std):
30
+ warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
31
+ "The distribution of values may be incorrect.",
32
+ stacklevel=2)
33
+
34
+ with torch.no_grad():
35
+ # Values are generated by using a truncated uniform distribution and
36
+ # then using the inverse CDF for the normal distribution.
37
+ # Get upper and lower cdf values
38
+ l = norm_cdf((a - mean) / std)
39
+ u = norm_cdf((b - mean) / std)
40
+
41
+ # Uniformly fill tensor with values from [l, u], then translate to
42
+ # [2l-1, 2u-1].
43
+ tensor.uniform_(2 * l - 1, 2 * u - 1, generator=generator)
44
+
45
+ # Use inverse cdf transform for normal distribution to get truncated
46
+ # standard normal
47
+ tensor.erfinv_()
48
+
49
+ # Transform to proper mean, std
50
+ tensor.mul_(std * math.sqrt(2.))
51
+ tensor.add_(mean)
52
+
53
+ # Clamp to ensure it's in the proper range
54
+ tensor.clamp_(min=a, max=b)
55
+ return tensor
56
+
57
+
58
+ def _no_grad_fill_(tensor, val):
59
+ with torch.no_grad():
60
+ return tensor.fill_(val)
61
+
62
+
63
+ def _no_grad_zero_(tensor):
64
+ with torch.no_grad():
65
+ return tensor.zero_()
66
+
67
+
68
+ def calculate_gain(nonlinearity, param=None):
69
+ r"""Return the recommended gain value for the given nonlinearity function.
70
+
71
+ The values are as follows:
72
+
73
+ ================= ====================================================
74
+ nonlinearity gain
75
+ ================= ====================================================
76
+ Linear / Identity :math:`1`
77
+ Conv{1,2,3}D :math:`1`
78
+ Sigmoid :math:`1`
79
+ Tanh :math:`\frac{5}{3}`
80
+ ReLU :math:`\sqrt{2}`
81
+ Leaky Relu :math:`\sqrt{\frac{2}{1 + \text{negative\_slope}^2}}`
82
+ SELU :math:`\frac{3}{4}`
83
+ ================= ====================================================
84
+
85
+ .. warning::
86
+ In order to implement `Self-Normalizing Neural Networks`_ ,
87
+ you should use ``nonlinearity='linear'`` instead of ``nonlinearity='selu'``.
88
+ This gives the initial weights a variance of ``1 / N``,
89
+ which is necessary to induce a stable fixed point in the forward pass.
90
+ In contrast, the default gain for ``SELU`` sacrifices the normalization
91
+ effect for more stable gradient flow in rectangular layers.
92
+
93
+ Args:
94
+ nonlinearity: the non-linear function (`nn.functional` name)
95
+ param: optional parameter for the non-linear function
96
+
97
+ Examples:
98
+ >>> gain = nn.init.calculate_gain('leaky_relu', 0.2) # leaky_relu with negative_slope=0.2
99
+
100
+ .. _Self-Normalizing Neural Networks: https://papers.nips.cc/paper/2017/hash/5d44ee6f2c3f71b73125876103c8f6c4-Abstract.html
101
+ """
102
+ linear_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']
103
+ if nonlinearity in linear_fns or nonlinearity == 'sigmoid':
104
+ return 1
105
+ elif nonlinearity == 'tanh':
106
+ return 5.0 / 3
107
+ elif nonlinearity == 'relu':
108
+ return math.sqrt(2.0)
109
+ elif nonlinearity == 'leaky_relu':
110
+ if param is None:
111
+ negative_slope = 0.01
112
+ elif (not isinstance(param, bool) and isinstance(param, int)) or isinstance(param, float):
113
+ # True/False are instances of int, hence check above
114
+ negative_slope = param
115
+ else:
116
+ raise ValueError(f"negative_slope {param} not a valid number")
117
+ return math.sqrt(2.0 / (1 + negative_slope ** 2))
118
+ elif nonlinearity == 'selu':
119
+ return 3.0 / 4 # Value found empirically (https://github.com/pytorch/pytorch/pull/50664)
120
+ else:
121
+ raise ValueError(f"Unsupported nonlinearity {nonlinearity}")
122
+
123
+
124
+ def uniform_(
125
+ tensor: Tensor,
126
+ a: float = 0.0,
127
+ b: float = 1.0,
128
+ generator: _Optional[torch.Generator] = None,
129
+ ) -> Tensor:
130
+ r"""Fill the input Tensor with values drawn from the uniform distribution.
131
+
132
+ :math:`\mathcal{U}(a, b)`.
133
+
134
+ Args:
135
+ tensor: an n-dimensional `torch.Tensor`
136
+ a: the lower bound of the uniform distribution
137
+ b: the upper bound of the uniform distribution
138
+ generator: the torch Generator to sample from (default: None)
139
+
140
+ Examples:
141
+ >>> w = torch.empty(3, 5)
142
+ >>> nn.init.uniform_(w)
143
+ """
144
+ if torch.overrides.has_torch_function_variadic(tensor):
145
+ return torch.overrides.handle_torch_function(
146
+ uniform_, (tensor,), tensor=tensor, a=a, b=b, generator=generator
147
+ )
148
+ return _no_grad_uniform_(tensor, a, b, generator)
149
+
150
+
151
+ def normal_(
152
+ tensor: Tensor,
153
+ mean: float = 0.0,
154
+ std: float = 1.0,
155
+ generator: _Optional[torch.Generator] = None,
156
+ ) -> Tensor:
157
+ r"""Fill the input Tensor with values drawn from the normal distribution.
158
+
159
+ :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.
160
+
161
+ Args:
162
+ tensor: an n-dimensional `torch.Tensor`
163
+ mean: the mean of the normal distribution
164
+ std: the standard deviation of the normal distribution
165
+ generator: the torch Generator to sample from (default: None)
166
+
167
+ Examples:
168
+ >>> w = torch.empty(3, 5)
169
+ >>> nn.init.normal_(w)
170
+ """
171
+ if torch.overrides.has_torch_function_variadic(tensor):
172
+ return torch.overrides.handle_torch_function(
173
+ normal_, (tensor,), tensor=tensor, mean=mean, std=std, generator=generator
174
+ )
175
+ return _no_grad_normal_(tensor, mean, std, generator)
176
+
177
+ def trunc_normal_(
178
+ tensor: Tensor,
179
+ mean: float = 0.,
180
+ std: float = 1.,
181
+ a: float = -2.,
182
+ b: float = 2.,
183
+ generator: _Optional[torch.Generator] = None
184
+ ) -> Tensor:
185
+ r"""Fill the input Tensor with values drawn from a truncated normal distribution.
186
+
187
+ The values are effectively drawn from the
188
+ normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
189
+ with values outside :math:`[a, b]` redrawn until they are within
190
+ the bounds. The method used for generating the random values works
191
+ best when :math:`a \leq \text{mean} \leq b`.
192
+
193
+ Args:
194
+ tensor: an n-dimensional `torch.Tensor`
195
+ mean: the mean of the normal distribution
196
+ std: the standard deviation of the normal distribution
197
+ a: the minimum cutoff value
198
+ b: the maximum cutoff value
199
+ generator: the torch Generator to sample from (default: None)
200
+
201
+ Examples:
202
+ >>> w = torch.empty(3, 5)
203
+ >>> nn.init.trunc_normal_(w)
204
+ """
205
+ return _no_grad_trunc_normal_(tensor, mean, std, a, b, generator=generator)
206
+
207
+
208
+ def constant_(tensor: Tensor, val: float) -> Tensor:
209
+ r"""Fill the input Tensor with the value :math:`\text{val}`.
210
+
211
+ Args:
212
+ tensor: an n-dimensional `torch.Tensor`
213
+ val: the value to fill the tensor with
214
+
215
+ Examples:
216
+ >>> w = torch.empty(3, 5)
217
+ >>> nn.init.constant_(w, 0.3)
218
+ """
219
+ if torch.overrides.has_torch_function_variadic(tensor):
220
+ return torch.overrides.handle_torch_function(constant_, (tensor,), tensor=tensor, val=val)
221
+ return _no_grad_fill_(tensor, val)
222
+
223
+
224
+ def ones_(tensor: Tensor) -> Tensor:
225
+ r"""Fill the input Tensor with the scalar value `1`.
226
+
227
+ Args:
228
+ tensor: an n-dimensional `torch.Tensor`
229
+
230
+ Examples:
231
+ >>> w = torch.empty(3, 5)
232
+ >>> nn.init.ones_(w)
233
+ """
234
+ return _no_grad_fill_(tensor, 1.)
235
+
236
+
237
+ def zeros_(tensor: Tensor) -> Tensor:
238
+ r"""Fill the input Tensor with the scalar value `0`.
239
+
240
+ Args:
241
+ tensor: an n-dimensional `torch.Tensor`
242
+
243
+ Examples:
244
+ >>> w = torch.empty(3, 5)
245
+ >>> nn.init.zeros_(w)
246
+ """
247
+ return _no_grad_zero_(tensor)
248
+
249
+
250
+ def eye_(tensor):
251
+ r"""Fill the 2-dimensional input `Tensor` with the identity matrix.
252
+
253
+ Preserves the identity of the inputs in `Linear` layers, where as
254
+ many inputs are preserved as possible.
255
+
256
+ Args:
257
+ tensor: a 2-dimensional `torch.Tensor`
258
+
259
+ Examples:
260
+ >>> w = torch.empty(3, 5)
261
+ >>> nn.init.eye_(w)
262
+ """
263
+ if tensor.ndimension() != 2:
264
+ raise ValueError("Only tensors with 2 dimensions are supported")
265
+
266
+ with torch.no_grad():
267
+ torch.eye(*tensor.shape, out=tensor, requires_grad=tensor.requires_grad)
268
+ return tensor
269
+
270
+
271
+ def dirac_(tensor, groups=1):
272
+ r"""Fill the {3, 4, 5}-dimensional input `Tensor` with the Dirac delta function.
273
+
274
+ Preserves the identity of the inputs in `Convolutional`
275
+ layers, where as many input channels are preserved as possible. In case
276
+ of groups>1, each group of channels preserves identity
277
+
278
+ Args:
279
+ tensor: a {3, 4, 5}-dimensional `torch.Tensor`
280
+ groups (int, optional): number of groups in the conv layer (default: 1)
281
+ Examples:
282
+ >>> w = torch.empty(3, 16, 5, 5)
283
+ >>> nn.init.dirac_(w)
284
+ >>> w = torch.empty(3, 24, 5, 5)
285
+ >>> nn.init.dirac_(w, 3)
286
+ """
287
+ dimensions = tensor.ndimension()
288
+ if dimensions not in [3, 4, 5]:
289
+ raise ValueError("Only tensors with 3, 4, or 5 dimensions are supported")
290
+
291
+ sizes = tensor.size()
292
+
293
+ if sizes[0] % groups != 0:
294
+ raise ValueError('dim 0 must be divisible by groups')
295
+
296
+ out_chans_per_grp = sizes[0] // groups
297
+ min_dim = min(out_chans_per_grp, sizes[1])
298
+
299
+ with torch.no_grad():
300
+ tensor.zero_()
301
+
302
+ for g in range(groups):
303
+ for d in range(min_dim):
304
+ if dimensions == 3: # Temporal convolution
305
+ tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2] = 1
306
+ elif dimensions == 4: # Spatial convolution
307
+ tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2,
308
+ tensor.size(3) // 2] = 1
309
+ else: # Volumetric convolution
310
+ tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2,
311
+ tensor.size(3) // 2, tensor.size(4) // 2] = 1
312
+ return tensor
313
+
314
+
315
+ def _calculate_fan_in_and_fan_out(tensor):
316
+ dimensions = tensor.dim()
317
+ if dimensions < 2:
318
+ raise ValueError("Fan in and fan out can not be computed for tensor with fewer than 2 dimensions")
319
+
320
+ num_input_fmaps = tensor.size(1)
321
+ num_output_fmaps = tensor.size(0)
322
+ receptive_field_size = 1
323
+ if tensor.dim() > 2:
324
+ # math.prod is not always available, accumulate the product manually
325
+ # we could use functools.reduce but that is not supported by TorchScript
326
+ for s in tensor.shape[2:]:
327
+ receptive_field_size *= s
328
+ fan_in = num_input_fmaps * receptive_field_size
329
+ fan_out = num_output_fmaps * receptive_field_size
330
+
331
+ return fan_in, fan_out
332
+
333
+
334
+ def xavier_uniform_(
335
+ tensor: Tensor, gain: float = 1.0, generator: _Optional[torch.Generator] = None
336
+ ) -> Tensor:
337
+ r"""Fill the input `Tensor` with values using a Xavier uniform distribution.
338
+
339
+ The method is described in `Understanding the difficulty of training
340
+ deep feedforward neural networks` - Glorot, X. & Bengio, Y. (2010).
341
+ The resulting tensor will have values sampled from
342
+ :math:`\mathcal{U}(-a, a)` where
343
+
344
+ .. math::
345
+ a = \text{gain} \times \sqrt{\frac{6}{\text{fan\_in} + \text{fan\_out}}}
346
+
347
+ Also known as Glorot initialization.
348
+
349
+ Args:
350
+ tensor: an n-dimensional `torch.Tensor`
351
+ gain: an optional scaling factor
352
+ generator: the torch Generator to sample from (default: None)
353
+
354
+ Examples:
355
+ >>> w = torch.empty(3, 5)
356
+ >>> nn.init.xavier_uniform_(w, gain=nn.init.calculate_gain('relu'))
357
+ """
358
+ fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
359
+ std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
360
+ a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
361
+
362
+ return _no_grad_uniform_(tensor, -a, a, generator)
363
+
364
+
365
+ def xavier_normal_(
366
+ tensor: Tensor,
367
+ gain: float = 1.0,
368
+ generator: _Optional[torch.Generator] = None,
369
+ ) -> Tensor:
370
+ r"""Fill the input `Tensor` with values using a Xavier normal distribution.
371
+
372
+ The method is described in `Understanding the difficulty of training deep feedforward
373
+ neural networks` - Glorot, X. & Bengio, Y. (2010). The resulting tensor
374
+ will have values sampled from :math:`\mathcal{N}(0, \text{std}^2)` where
375
+
376
+ .. math::
377
+ \text{std} = \text{gain} \times \sqrt{\frac{2}{\text{fan\_in} + \text{fan\_out}}}
378
+
379
+ Also known as Glorot initialization.
380
+
381
+ Args:
382
+ tensor: an n-dimensional `torch.Tensor`
383
+ gain: an optional scaling factor
384
+ generator: the torch Generator to sample from (default: None)
385
+
386
+ Examples:
387
+ >>> w = torch.empty(3, 5)
388
+ >>> nn.init.xavier_normal_(w)
389
+ """
390
+ fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
391
+ std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
392
+
393
+ return _no_grad_normal_(tensor, 0., std, generator)
394
+
395
+
396
+ def _calculate_correct_fan(tensor, mode):
397
+ mode = mode.lower()
398
+ valid_modes = ['fan_in', 'fan_out']
399
+ if mode not in valid_modes:
400
+ raise ValueError(f"Mode {mode} not supported, please use one of {valid_modes}")
401
+
402
+ fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
403
+ return fan_in if mode == 'fan_in' else fan_out
404
+
405
+
406
+ def kaiming_uniform_(
407
+ tensor: Tensor,
408
+ a: float = 0,
409
+ mode: str = "fan_in",
410
+ nonlinearity: str = "leaky_relu",
411
+ generator: _Optional[torch.Generator] = None,
412
+ ):
413
+ r"""Fill the input `Tensor` with values using a Kaiming uniform distribution.
414
+
415
+ The method is described in `Delving deep into rectifiers: Surpassing
416
+ human-level performance on ImageNet classification` - He, K. et al. (2015).
417
+ The resulting tensor will have values sampled from
418
+ :math:`\mathcal{U}(-\text{bound}, \text{bound})` where
419
+
420
+ .. math::
421
+ \text{bound} = \text{gain} \times \sqrt{\frac{3}{\text{fan\_mode}}}
422
+
423
+ Also known as He initialization.
424
+
425
+ Args:
426
+ tensor: an n-dimensional `torch.Tensor`
427
+ a: the negative slope of the rectifier used after this layer (only
428
+ used with ``'leaky_relu'``)
429
+ mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
430
+ preserves the magnitude of the variance of the weights in the
431
+ forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
432
+ backwards pass.
433
+ nonlinearity: the non-linear function (`nn.functional` name),
434
+ recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
435
+ generator: the torch Generator to sample from (default: None)
436
+
437
+ Examples:
438
+ >>> w = torch.empty(3, 5)
439
+ >>> nn.init.kaiming_uniform_(w, mode='fan_in', nonlinearity='relu')
440
+ """
441
+ if torch.overrides.has_torch_function_variadic(tensor):
442
+ return torch.overrides.handle_torch_function(
443
+ kaiming_uniform_,
444
+ (tensor,),
445
+ tensor=tensor,
446
+ a=a,
447
+ mode=mode,
448
+ nonlinearity=nonlinearity,
449
+ generator=generator)
450
+
451
+ if 0 in tensor.shape:
452
+ warnings.warn("Initializing zero-element tensors is a no-op")
453
+ return tensor
454
+ fan = _calculate_correct_fan(tensor, mode)
455
+ gain = calculate_gain(nonlinearity, a)
456
+ std = gain / math.sqrt(fan)
457
+ bound = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
458
+ with torch.no_grad():
459
+ return tensor.uniform_(-bound, bound, generator=generator)
460
+
461
+
462
+ def kaiming_normal_(
463
+ tensor: Tensor,
464
+ a: float = 0,
465
+ mode: str = "fan_in",
466
+ nonlinearity: str = "leaky_relu",
467
+ generator: _Optional[torch.Generator] = None,
468
+ ):
469
+ r"""Fill the input `Tensor` with values using a Kaiming normal distribution.
470
+
471
+ The method is described in `Delving deep into rectifiers: Surpassing
472
+ human-level performance on ImageNet classification` - He, K. et al. (2015).
473
+ The resulting tensor will have values sampled from
474
+ :math:`\mathcal{N}(0, \text{std}^2)` where
475
+
476
+ .. math::
477
+ \text{std} = \frac{\text{gain}}{\sqrt{\text{fan\_mode}}}
478
+
479
+ Also known as He initialization.
480
+
481
+ Args:
482
+ tensor: an n-dimensional `torch.Tensor`
483
+ a: the negative slope of the rectifier used after this layer (only
484
+ used with ``'leaky_relu'``)
485
+ mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
486
+ preserves the magnitude of the variance of the weights in the
487
+ forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
488
+ backwards pass.
489
+ nonlinearity: the non-linear function (`nn.functional` name),
490
+ recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
491
+ generator: the torch Generator to sample from (default: None)
492
+
493
+ Examples:
494
+ >>> w = torch.empty(3, 5)
495
+ >>> nn.init.kaiming_normal_(w, mode='fan_out', nonlinearity='relu')
496
+ """
497
+ if 0 in tensor.shape:
498
+ warnings.warn("Initializing zero-element tensors is a no-op")
499
+ return tensor
500
+ fan = _calculate_correct_fan(tensor, mode)
501
+ gain = calculate_gain(nonlinearity, a)
502
+ std = gain / math.sqrt(fan)
503
+ with torch.no_grad():
504
+ return tensor.normal_(0, std, generator=generator)
505
+
506
+
507
+ def orthogonal_(
508
+ tensor,
509
+ gain=1,
510
+ generator: _Optional[torch.Generator] = None,
511
+ ):
512
+ r"""Fill the input `Tensor` with a (semi) orthogonal matrix.
513
+
514
+ Described in `Exact solutions to the nonlinear dynamics of learning in deep
515
+ linear neural networks` - Saxe, A. et al. (2013). The input tensor must have
516
+ at least 2 dimensions, and for tensors with more than 2 dimensions the
517
+ trailing dimensions are flattened.
518
+
519
+ Args:
520
+ tensor: an n-dimensional `torch.Tensor`, where :math:`n \geq 2`
521
+ gain: optional scaling factor
522
+ generator: the torch Generator to sample from (default: None)
523
+
524
+ Examples:
525
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK)
526
+ >>> w = torch.empty(3, 5)
527
+ >>> nn.init.orthogonal_(w)
528
+ """
529
+ if tensor.ndimension() < 2:
530
+ raise ValueError("Only tensors with 2 or more dimensions are supported")
531
+
532
+ if tensor.numel() == 0:
533
+ # no-op
534
+ return tensor
535
+ rows = tensor.size(0)
536
+ cols = tensor.numel() // rows
537
+ flattened = tensor.new(rows, cols).normal_(0, 1, generator=generator)
538
+
539
+ if rows < cols:
540
+ flattened.t_()
541
+
542
+ # Compute the qr factorization
543
+ q, r = torch.linalg.qr(flattened)
544
+ # Make Q uniform according to https://arxiv.org/pdf/math-ph/0609050.pdf
545
+ d = torch.diag(r, 0)
546
+ ph = d.sign()
547
+ q *= ph
548
+
549
+ if rows < cols:
550
+ q.t_()
551
+
552
+ with torch.no_grad():
553
+ tensor.view_as(q).copy_(q)
554
+ tensor.mul_(gain)
555
+ return tensor
556
+
557
+
558
+ def sparse_(
559
+ tensor,
560
+ sparsity,
561
+ std=0.01,
562
+ generator: _Optional[torch.Generator] = None,
563
+ ):
564
+ r"""Fill the 2D input `Tensor` as a sparse matrix.
565
+
566
+ The non-zero elements will be drawn from the normal distribution
567
+ :math:`\mathcal{N}(0, 0.01)`, as described in `Deep learning via
568
+ Hessian-free optimization` - Martens, J. (2010).
569
+
570
+ Args:
571
+ tensor: an n-dimensional `torch.Tensor`
572
+ sparsity: The fraction of elements in each column to be set to zero
573
+ std: the standard deviation of the normal distribution used to generate
574
+ the non-zero values
575
+ generator: the torch Generator to sample from (default: None)
576
+
577
+ Examples:
578
+ >>> w = torch.empty(3, 5)
579
+ >>> nn.init.sparse_(w, sparsity=0.1)
580
+ """
581
+ if tensor.ndimension() != 2:
582
+ raise ValueError("Only tensors with 2 dimensions are supported")
583
+
584
+ rows, cols = tensor.shape
585
+ num_zeros = int(math.ceil(sparsity * rows))
586
+
587
+ with torch.no_grad():
588
+ tensor.normal_(0, std, generator=generator)
589
+ for col_idx in range(cols):
590
+ row_indices = torch.randperm(rows)
591
+ zero_indices = row_indices[:num_zeros]
592
+ tensor[zero_indices, col_idx] = 0
593
+ return tensor
594
+
595
+
596
+ # for backward compatibility
597
+ def _make_deprecate(meth):
598
+ new_name = meth.__name__
599
+ old_name = new_name[:-1]
600
+
601
+ def deprecated_init(*args, **kwargs):
602
+ warnings.warn(f"nn.init.{old_name} is now deprecated in favor of nn.init.{new_name}.", stacklevel=2)
603
+ return meth(*args, **kwargs)
604
+
605
+ deprecated_init.__doc__ = fr"""
606
+ {old_name}(...)
607
+
608
+ .. warning::
609
+ This method is now deprecated in favor of :func:`torch.nn.init.{new_name}`.
610
+
611
+ See :func:`~torch.nn.init.{new_name}` for details."""
612
+ deprecated_init.__name__ = old_name
613
+ return deprecated_init
614
+
615
+
616
+ uniform = _make_deprecate(uniform_)
617
+ normal = _make_deprecate(normal_)
618
+ constant = _make_deprecate(constant_)
619
+ eye = _make_deprecate(eye_)
620
+ dirac = _make_deprecate(dirac_)
621
+ xavier_uniform = _make_deprecate(xavier_uniform_)
622
+ xavier_normal = _make_deprecate(xavier_normal_)
623
+ kaiming_uniform = _make_deprecate(kaiming_uniform_)
624
+ kaiming_normal = _make_deprecate(kaiming_normal_)
625
+ orthogonal = _make_deprecate(orthogonal_)
626
+ sparse = _make_deprecate(sparse_)
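Note (illustrative, not part of init.py above): each Kaiming initializer reduces to a fan computation plus a gain; for example, `kaiming_uniform_` samples from U(-bound, bound) with bound = gain * sqrt(3 / fan). A small numerical sketch of that relationship:

    import math
    import torch
    import torch.nn.init as init

    w = torch.empty(256, 128)
    init.kaiming_uniform_(w, mode='fan_in', nonlinearity='relu')

    fan_in = w.size(1)  # for a 2D weight, fan_in is the second dimension
    bound = init.calculate_gain('relu') * math.sqrt(3.0 / fan_in)

    # All samples must lie inside the documented uniform bound ...
    assert w.abs().max().item() <= bound
    # ... and the empirical std should be close to bound / sqrt(3) (statistical check).
    assert abs(w.std().item() - bound / math.sqrt(3.0)) < 0.05 * bound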
llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__init__.py ADDED
@@ -0,0 +1,14 @@
1
+ from .parallel_apply import parallel_apply
2
+ from .replicate import replicate
3
+ from .data_parallel import DataParallel, data_parallel
4
+ from .scatter_gather import gather, scatter
5
+ from .distributed import DistributedDataParallel
6
+
7
+ __all__ = ['replicate', 'scatter', 'parallel_apply', 'gather', 'data_parallel',
8
+ 'DataParallel', 'DistributedDataParallel']
9
+
10
+ def DistributedDataParallelCPU(*args, **kwargs):
11
+ import warnings
12
+ warnings.warn("torch.nn.parallel.DistributedDataParallelCPU is deprecated, "
13
+ "please use torch.nn.parallel.DistributedDataParallel instead.")
14
+ return DistributedDataParallel(*args, **kwargs)
llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (811 Bytes).
 
llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/_functions.cpython-310.pyc ADDED
Binary file (5.76 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/comm.cpython-310.pyc ADDED
Binary file (10.3 kB).