applied-ai-018 committed
Commit 52931a7 · verified · 1 parent: 8acb2b4

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/torch/_C/__init__.pyi +0 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/_C/_cudnn.pyi +17 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/_C/_distributed_rpc.pyi +188 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/_C/_functions.pyi +11 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/_C/_nn.pyi +86 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/_C/_nvtx.pyi +6 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/_C/_onnx.pyi +38 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/_C/_verbose.pyi +3 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__init__.py +18 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/_subclasses/fake_tensor.py +1991 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/backends/cpu/__init__.py +19 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/backends/cpu/__pycache__/__init__.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/backends/cuda/__pycache__/__init__.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/backends/cudnn/__init__.py +205 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/backends/cudnn/__pycache__/__init__.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/backends/cudnn/__pycache__/rnn.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/backends/cudnn/rnn.py +62 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/bin/torch_shm_manager +0 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/onnx/__init__.py +177 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/__init__.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/_constants.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/_experimental.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/_exporter_states.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/_onnx_supported_ops.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/_type_utils.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/errors.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/operators.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_helper.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset11.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset12.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset16.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset18.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset7.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset8.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset9.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/onnx/_constants.py +25 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/onnx/_deprecation.py +64 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/onnx/_exporter_states.py +39 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/onnx/_globals.py +85 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__init__.py +0 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/__init__.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/_beartype.cpython-310.pyc +0 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/exporter.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/io_adapter.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/jit_utils.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/onnx_proto_utils.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/onnxruntime.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/registration.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/_beartype.py +131 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__init__.py +21 -0
env-llmeval/lib/python3.10/site-packages/torch/_C/__init__.pyi ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/torch/_C/_cudnn.pyi ADDED
@@ -0,0 +1,17 @@
+ from enum import Enum
+
+ from torch.types import _bool, Tuple
+
+ # Defined in torch/csrc/cuda/shared/cudnn.cpp
+ is_cuda: _bool
+
+ def getRuntimeVersion() -> Tuple[int, int, int]: ...
+ def getCompileVersion() -> Tuple[int, int, int]: ...
+ def getVersionInt() -> int: ...
+
+ class RNNMode(int, Enum):
+     value: int
+     rnn_relu = ...
+     rnn_tanh = ...
+     lstm = ...
+     gru = ...
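These bindings back the cuDNN version helpers surfaced on the public torch.backends.cudnn module; a minimal sketch of querying them (assuming a CUDA/cuDNN build of PyTorch):

import torch

# torch.backends.cudnn.version() returns the compiled cuDNN version as an int
# (e.g. 8902), or None when PyTorch was built without cuDNN support.
if torch.backends.cudnn.is_available():
    print("cuDNN version:", torch.backends.cudnn.version())
else:
    print("cuDNN is not available in this build")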
env-llmeval/lib/python3.10/site-packages/torch/_C/_distributed_rpc.pyi ADDED
@@ -0,0 +1,188 @@
+ # mypy: disable-error-code="type-arg"
+ from datetime import timedelta
+ from typing import Any, Dict, Generic, List, Optional, overload, Tuple, Type, TypeVar
+
+ import torch
+
+ from . import Future
+ from ._autograd import ProfilerEvent
+ from ._distributed_c10d import Store
+ from ._profiler import ProfilerConfig
+
+ # This module is defined in torch/csrc/distributed/rpc/init.cpp
+
+ _DEFAULT_INIT_METHOD: str
+ _DEFAULT_NUM_WORKER_THREADS: int
+ _UNSET_RPC_TIMEOUT: float
+ _DEFAULT_RPC_TIMEOUT_SEC: float
+
+ _T = TypeVar("_T")
+
+ class RpcBackendOptions:
+     rpc_timeout: float
+     init_method: str
+     def __init__(
+         self,
+         rpc_timeout: float = ...,
+         init_method: str = ...,
+     ): ...
+
+ class WorkerInfo:
+     def __init__(self, name: str, worker_id: int): ...
+     @property
+     def name(self) -> str: ...
+     @property
+     def id(self) -> int: ...
+     def __eq__(self, other: object) -> bool: ...
+
+ class RpcAgent:
+     def join(self, shutdown: bool = False, timeout: float = 0): ...
+     def sync(self): ...
+     def shutdown(self): ...
+     @overload
+     def get_worker_info(self) -> WorkerInfo: ...
+     @overload
+     def get_worker_info(self, workerName: str) -> WorkerInfo: ...
+     def get_worker_infos(self) -> List[WorkerInfo]: ...
+     def _get_device_map(self, dst: WorkerInfo) -> Dict[torch.device, torch.device]: ...
+     def get_debug_info(self) -> Dict[str, str]: ...
+     def get_metrics(self) -> Dict[str, str]: ...
+
+ class PyRRef(Generic[_T]):
+     def __init__(self, value: _T, type_hint: Any = None) -> None: ...
+     def is_owner(self) -> bool: ...
+     def confirmed_by_owner(self) -> bool: ...
+     def owner(self) -> WorkerInfo: ...
+     def owner_name(self) -> str: ...
+     def to_here(self, timeout: float = ...) -> _T: ...
+     def local_value(self) -> Any: ...
+     def rpc_sync(self, timeout: float = ...) -> Any: ...
+     def rpc_async(self, timeout: float = ...) -> Any: ...
+     def remote(self, timeout: float = ...) -> Any: ...
+     def _serialize(self) -> Tuple: ...
+     @staticmethod
+     def _deserialize(tp: Tuple) -> PyRRef: ...
+     def _get_type(self) -> Type[_T]: ...
+     def _get_future(self) -> Future[_T]: ...
+     def _get_profiling_future(self) -> Future[_T]: ...
+     def _set_profiling_future(self, profilingFuture: Future[_T]): ...
+
+ class _TensorPipeRpcBackendOptionsBase(RpcBackendOptions):
+     num_worker_threads: int
+     device_maps: Dict[str, Dict[torch.device, torch.device]]
+     devices: List[torch.device]
+     def __init__(
+         self,
+         num_worker_threads: int,
+         _transports: Optional[List],
+         _channels: Optional[List],
+         rpc_timeout: float = ...,
+         init_method: str = ...,
+         device_maps: Dict[str, Dict[torch.device, torch.device]] = {},  # noqa: B006
+         devices: List[torch.device] = [],  # noqa: B006
+     ): ...
+     def _set_device_map(
+         self,
+         to: str,
+         device_map: Dict[torch.device, torch.device],
+     ): ...
+
+ class TensorPipeAgent(RpcAgent):
+     def __init__(
+         self,
+         store: Store,
+         name: str,
+         worker_id: int,
+         world_size: Optional[int],
+         opts: _TensorPipeRpcBackendOptionsBase,
+         reverse_device_maps: Dict[str, Dict[torch.device, torch.device]],
+         devices: List[torch.device],
+     ): ...
+     def join(self, shutdown: bool = False, timeout: float = 0): ...
+     def shutdown(self): ...
+     @overload
+     def get_worker_info(self) -> WorkerInfo: ...
+     @overload
+     def get_worker_info(self, workerName: str) -> WorkerInfo: ...
+     @overload
+     def get_worker_info(self, id: int) -> WorkerInfo: ...
+     def get_worker_infos(self) -> List[WorkerInfo]: ...
+     def _get_device_map(self, dst: WorkerInfo) -> Dict[torch.device, torch.device]: ...
+     def _update_group_membership(
+         self,
+         worker_info: WorkerInfo,
+         my_devices: List[torch.device],
+         reverse_device_map: Dict[str, Dict[torch.device, torch.device]],
+         is_join: bool,
+     ): ...
+     def _get_backend_options(self) -> _TensorPipeRpcBackendOptionsBase: ...
+     @property
+     def is_static_group(self) -> bool: ...
+     @property
+     def store(self) -> Store: ...
+
+ def _is_current_rpc_agent_set() -> bool: ...
+ def _get_current_rpc_agent() -> RpcAgent: ...
+ def _set_and_start_rpc_agent(agent: RpcAgent): ...
+ def _reset_current_rpc_agent(): ...
+ def _delete_all_user_and_unforked_owner_rrefs(timeout: timedelta = ...): ...
+ def _destroy_rref_context(ignoreRRefLeak: bool): ...
+ def _rref_context_get_debug_info() -> Dict[str, str]: ...
+ def _cleanup_python_rpc_handler(): ...
+ def _invoke_rpc_builtin(
+     dst: WorkerInfo,
+     opName: str,
+     rpcTimeoutSeconds: float,
+     *args: Any,
+     **kwargs: Any,
+ ): ...
+ def _invoke_rpc_python_udf(
+     dst: WorkerInfo,
+     pickledPythonUDF: str,
+     tensors: List[torch.Tensor],
+     rpcTimeoutSeconds: float,
+     isAsyncExecution: bool,
+ ): ...
+ def _invoke_rpc_torchscript(
+     dstWorkerName: str,
+     qualifiedNameStr: str,
+     argsTuple: Tuple,
+     kwargsDict: Dict,
+     rpcTimeoutSeconds: float,
+     isAsyncExecution: bool,
+ ): ...
+ def _invoke_remote_builtin(
+     dst: WorkerInfo,
+     opName: str,
+     rpcTimeoutSeconds: float,
+     *args: Any,
+     **kwargs: Any,
+ ): ...
+ def _invoke_remote_python_udf(
+     dst: WorkerInfo,
+     pickledPythonUDF: str,
+     tensors: List[torch.Tensor],
+     rpcTimeoutSeconds: float,
+     isAsyncExecution: bool,
+ ): ...
+ def _invoke_remote_torchscript(
+     dstWorkerName: WorkerInfo,
+     qualifiedNameStr: str,
+     rpcTimeoutSeconds: float,
+     isAsyncExecution: bool,
+     *args: Any,
+     **kwargs: Any,
+ ): ...
+ def get_rpc_timeout() -> float: ...
+ def enable_gil_profiling(flag: bool): ...
+ def _set_rpc_timeout(rpcTimeoutSeconds: float): ...
+
+ class RemoteProfilerManager:
+     @staticmethod
+     def set_current_profiling_key(key: str): ...
+
+ def _enable_server_process_global_profiler(new_config: ProfilerConfig): ...
+ def _disable_server_process_global_profiler() -> List[List[List[ProfilerEvent]]]: ...
+ def _set_profiler_node_id(default_node_id: int): ...
+ def _enable_jit_rref_pickle(): ...
+ def _disable_jit_rref_pickle(): ...
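These are the private bindings underneath torch.distributed.rpc; a minimal sketch of driving the public API they support. The worker names, world size, and the add function are illustrative assumptions, and MASTER_ADDR/MASTER_PORT must be set for the rendezvous to succeed:

import torch
import torch.distributed.rpc as rpc

def add(a, b):
    # Illustrative function executed on the remote worker.
    return a + b

# Caller side of a hypothetical two-process group (this process is rank 0).
rpc.init_rpc("worker0", rank=0, world_size=2)
result = rpc.rpc_sync("worker1", add, args=(torch.ones(2), torch.ones(2)))
rref = rpc.remote("worker1", add, args=(1, 2))  # returns an RRef (PyRRef above)
print(result, rref.to_here())
rpc.shutdown()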
env-llmeval/lib/python3.10/site-packages/torch/_C/_functions.pyi ADDED
@@ -0,0 +1,11 @@
+ from typing import AnyStr, List
+
+ from torch import Tensor
+
+ class UndefinedGrad:
+     def __init__(self) -> None: ...
+     def __call__(self, *inputs: Tensor) -> List[Tensor]: ...
+
+ class DelayedError:
+     def __init__(self, msg: AnyStr, num_inputs: int) -> None: ...
+     def __call__(self, inputs: List[Tensor]) -> List[Tensor]: ...
env-llmeval/lib/python3.10/site-packages/torch/_C/_nn.pyi ADDED
@@ -0,0 +1,86 @@
+ # mypy: disable-error-code="type-arg"
+ from typing import List, Optional, overload, Sequence, Tuple, Union
+
+ from torch import memory_format, Tensor
+ from torch.types import _bool, _device, _dtype, _int, _size
+
+ # Defined in tools/autograd/templates/python_nn_functions.cpp
+
+ def adaptive_max_pool2d(input: Tensor, output_size: Union[_int, _size]) -> Tuple[Tensor, Tensor]: ...
+ def adaptive_max_pool3d(input: Tensor, output_size: Union[_int, _size]) -> Tuple[Tensor, Tensor]: ...
+ def avg_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> Tensor: ...
+ def avg_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> Tensor: ...
+ def elu_(input: Tensor, alpha: float = ...) -> Tensor: ...
+ def fractional_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], output_size: Union[_int, _size], _random_samples: Tensor) -> Tuple[Tensor, Tensor]: ...
+ def fractional_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], output_size: Union[_int, _size], _random_samples: Tensor) -> Tuple[Tensor, Tensor]: ...
+ def gelu(input: Tensor, approximate: str = ...) -> Tensor: ...
+ def hardsigmoid(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+ def hardtanh(input: Tensor, min_val: float = ..., max_val: float = ..., *, out: Optional[Tensor] = None) -> Tensor: ...
+ def hardtanh_(input: Tensor, min_val: float = ..., max_val: float = ...) -> Tensor: ...
+ def leaky_relu(input: Tensor, negative_slope: float = ..., *, out: Optional[Tensor] = None) -> Tensor: ...
+ def leaky_relu_(input: Tensor, negative_slope: float = ...) -> Tensor: ...
+ def linear(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None) -> Tensor: ...
+ def log_sigmoid(input: Tensor) -> Tensor: ...
+ def one_hot(tensor: Tensor, num_classes: int = ...) -> Tensor: ...
+ def pad(input: Tensor, pad: Sequence[int], mode: str = ..., value: Optional[float] = None) -> Tensor: ...
+ def scaled_dot_product_attention(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor] = None, dropout_p: float = 0.0, is_causal: bool = False, scale: Optional[float] = None) -> Tensor: ...
+ def softplus(input: Tensor, beta: int = ..., threshold: int = ...) -> Tensor: ...
+ def softshrink(input: Tensor, lambd: float = ...) -> Tensor: ...
+
+ # Defined in aten/src/ATen/native/mkldnn/Linear.cpp
+ def mkldnn_linear(input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor: ...
+
+ # Defined at aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp
+ def mkldnn_reorder_conv2d_weight(
+     self: Tensor,
+     padding: List,
+     stride: List,
+     dilatation: List,
+     groups: int,
+ ) -> Tensor: ...
+ def mkldnn_reorder_conv3d_weight(
+     self: Tensor,
+     padding: List,
+     stride: List,
+     dilatation: List,
+     groups: int,
+ ) -> Tensor: ...
+
+ # Defined in aten/src/ATen/native/mkldnn/Prelu.cpp
+ def mkldnn_prelu(input: Tensor, weight: Tensor) -> Tensor: ...
+
+ # Defined at tools/autograd/templates/python_nn_functions.cpp
+ @overload
+ def _parse_to(
+     device: _device,
+     dtype: _dtype,
+     non_blocking: _bool,
+     copy: _bool,
+     *,
+     memory_format: memory_format,
+ ) -> Tuple[_device, _dtype, _bool, memory_format]: ...
+ @overload
+ def _parse_to(
+     dtype: _dtype,
+     non_blocking: _bool,
+     copy: _bool,
+     *,
+     memory_format: memory_format,
+ ) -> Tuple[_device, _dtype, _bool, memory_format]: ...
+ @overload
+ def _parse_to(
+     tensor: Tensor,
+     non_blocking: _bool,
+     copy: _bool,
+     *,
+     memory_format: memory_format,
+ ) -> Tuple[_device, _dtype, _bool, memory_format]: ...
+
+ # Defined in aten/src/ATen/native/PadSequence.cpp
+ def pad_sequence(
+     sequences: List[Tensor],
+     batch_first: bool = False,
+     padding_value: float = ...,
+ ) -> Tensor: ...
+ def flatten_dense_tensors(tensors: List[Tensor]) -> Tensor: ...
+ def unflatten_dense_tensors(flat: Tensor, tensors: List[Tensor]) -> List[Tensor]: ...
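These stubs describe torch._C._nn functions that the documented torch.nn.functional wrappers dispatch to; a small sketch through the public wrappers (shapes are illustrative):

import torch
import torch.nn.functional as F

x = torch.randn(2, 4, 8)
w = torch.randn(3, 8)

y = F.linear(x, w)                                        # -> torch._C._nn.linear
onehot = F.one_hot(torch.tensor([0, 2]), num_classes=3)   # -> torch._C._nn.one_hot
q = k = v = torch.randn(2, 4, 8)
attn = F.scaled_dot_product_attention(q, k, v)            # -> scaled_dot_product_attention
print(y.shape, onehot.shape, attn.shape)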
env-llmeval/lib/python3.10/site-packages/torch/_C/_nvtx.pyi ADDED
@@ -0,0 +1,6 @@
+ # Defined in torch/csrc/cuda/shared/nvtx.cpp
+ def rangePushA(message: str) -> int: ...
+ def rangePop() -> int: ...
+ def rangeStartA(message: str) -> int: ...
+ def rangeEnd(int) -> None: ...
+ def markA(message: str) -> None: ...
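These correspond to the NVTX helpers exposed publicly as torch.cuda.nvtx; a minimal sketch (the ranges only become visible under a CUDA profiler such as Nsight Systems):

import torch

if torch.cuda.is_available():
    torch.cuda.nvtx.range_push("matmul")   # -> rangePushA
    a = torch.randn(512, 512, device="cuda")
    b = a @ a
    torch.cuda.nvtx.range_pop()            # -> rangePop
    torch.cuda.nvtx.mark("done")           # -> markA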
env-llmeval/lib/python3.10/site-packages/torch/_C/_onnx.pyi ADDED
@@ -0,0 +1,38 @@
+ # Defined in torch/csrc/onnx/init.cpp
+
+ from enum import Enum
+
+ _CAFFE2_ATEN_FALLBACK: bool
+ PRODUCER_VERSION: str
+
+ class TensorProtoDataType(Enum):
+     UNDEFINED = ...
+     FLOAT = ...
+     UINT8 = ...
+     INT8 = ...
+     UINT16 = ...
+     INT16 = ...
+     INT32 = ...
+     INT64 = ...
+     STRING = ...
+     BOOL = ...
+     FLOAT16 = ...
+     DOUBLE = ...
+     UINT32 = ...
+     UINT64 = ...
+     COMPLEX64 = ...
+     COMPLEX128 = ...
+     BFLOAT16 = ...
+     FLOAT8E5M2 = ...
+     FLOAT8E4M3FN = ...
+
+ class OperatorExportTypes(Enum):
+     ONNX = ...
+     ONNX_ATEN = ...
+     ONNX_ATEN_FALLBACK = ...
+     ONNX_FALLTHROUGH = ...
+
+ class TrainingMode(Enum):
+     EVAL = ...
+     PRESERVE = ...
+     TRAINING = ...
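torch.onnx re-exports these enums and passes them to the TorchScript-based exporter; a minimal sketch (the model and output path are illustrative):

import torch

model = torch.nn.Linear(4, 2)
dummy = torch.randn(1, 4)

# TrainingMode and OperatorExportTypes are the enums stubbed above.
torch.onnx.export(
    model,
    dummy,
    "linear.onnx",
    training=torch.onnx.TrainingMode.EVAL,
    operator_export_type=torch.onnx.OperatorExportTypes.ONNX,
)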
env-llmeval/lib/python3.10/site-packages/torch/_C/_verbose.pyi ADDED
@@ -0,0 +1,3 @@
+ # Defined in torch/csrc/utils/verbose.cpp
+ def mkl_set_verbose(enable: int) -> int: ...
+ def mkldnn_set_verbose(level: int) -> int: ...
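These setters are normally reached through the torch.backends verbose context managers rather than called directly; a minimal sketch, assuming a CPU build with oneDNN enabled:

import torch

conv = torch.nn.Conv2d(3, 8, 3)
# Logs any oneDNN (mkldnn) primitives created/executed inside the block.
with torch.backends.mkldnn.verbose(torch.backends.mkldnn.VERBOSE_ON):
    conv(torch.randn(1, 3, 32, 32))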
env-llmeval/lib/python3.10/site-packages/torch/_subclasses/__init__.py ADDED
@@ -0,0 +1,18 @@
+ import torch
+
+ from torch._subclasses.fake_tensor import (
+     DynamicOutputShapeException,
+     FakeTensor,
+     FakeTensorMode,
+     UnsupportedFakeTensorException,
+ )
+
+ from torch._subclasses.fake_utils import CrossRefFakeMode
+
+ __all__ = [
+     "FakeTensor",
+     "FakeTensorMode",
+     "UnsupportedFakeTensorException",
+     "DynamicOutputShapeException",
+     "CrossRefFakeMode",
+ ]
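These re-exports are the usual entry point for fake tensors; a minimal sketch of allocating tensors under FakeTensorMode and converting a real tensor (no real device memory is allocated and no real kernels run):

import torch
from torch._subclasses import FakeTensor, FakeTensorMode

mode = FakeTensorMode()
with mode:
    a = torch.empty(1024, 1024)   # a FakeTensor backed by a meta tensor
    b = a @ a                     # shapes/dtypes propagate without real compute
print(isinstance(b, FakeTensor), b.shape, b.device)

# Real tensors can be converted into fakes tracked by the same mode.
real = torch.randn(3)
fake = mode.from_tensor(real)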
env-llmeval/lib/python3.10/site-packages/torch/_subclasses/fake_tensor.py ADDED
@@ -0,0 +1,1991 @@
1
+ import contextlib
2
+ import functools
3
+ import itertools
4
+ import logging
5
+ import os
6
+ import sys
7
+ import traceback
8
+ import weakref
9
+ from dataclasses import dataclass
10
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Type, TypeVar, Union
11
+ from weakref import ReferenceType
12
+
13
+ import torch
14
+ import torch._custom_op
15
+ import torch._logging
16
+
17
+ from torch._guards import Source
18
+ from torch._ops import OpOverload
19
+ from torch._prims_common import (
20
+ elementwise_dtypes,
21
+ ELEMENTWISE_TYPE_PROMOTION_KIND,
22
+ is_boolean_dtype,
23
+ is_float_dtype,
24
+ is_integer_dtype,
25
+ )
26
+ from torch._subclasses.meta_utils import MetaConverter
27
+ from torch._utils import render_call
28
+ from torch.fx.operator_schemas import normalize_function
29
+ from torch.multiprocessing.reductions import StorageWeakRef
30
+ from torch.overrides import TorchFunctionMode
31
+ from torch.utils._mode_utils import no_dispatch
32
+ from torch.utils._python_dispatch import (
33
+ is_traceable_wrapper_subclass,
34
+ TorchDispatchMode,
35
+ )
36
+
37
+ from torch.utils._pytree import PyTree, tree_map
38
+ from torch.utils._stats import count, count_label
39
+ from torch.utils.weak import WeakIdRef
40
+
41
+ DimList = List
42
+
43
+ log = logging.getLogger(__name__)
44
+ not_implemented_log = torch._logging.getArtifactLogger(__name__, "not_implemented")
45
+
46
+ pytree = torch.utils._pytree
47
+ T = TypeVar("T")
48
+ TensorWeakRef = Any
49
+
50
+ aten = torch._ops.ops.aten
51
+
52
+ CONSTANT_NUMEL_LIMIT = 1
53
+
54
+ RECURSION_COUNT = 0
55
+
56
+
57
+ # Small helper that increments recursion count, and
58
+ # resets it when the object goes out of scope. Useful
59
+ # if you don't want to increase indentation which is
60
+ # what a context manager would do.
61
+ class IncrementRecursionCount:
62
+ def __init__(self):
63
+ global RECURSION_COUNT
64
+ RECURSION_COUNT += 1
65
+
66
+ def __del__(self):
67
+ global RECURSION_COUNT
68
+ RECURSION_COUNT -= 1
69
+
70
+
71
+ @dataclass
72
+ class UnsupportedFakeTensorException(RuntimeError):
73
+ reason: str
74
+
75
+
76
+ @dataclass
77
+ class DynamicOutputShapeException(RuntimeError):
78
+ func: OpOverload
79
+
80
+
81
+ @dataclass
82
+ class DataDependentOutputException(RuntimeError):
83
+ func: OpOverload
84
+
85
+
86
+ @dataclass
87
+ class UnsupportedOperatorException(RuntimeError):
88
+ func: OpOverload
89
+
90
+
91
+ _device_not_kwarg_ops = (
92
+ aten._resize_output_.default,
93
+ aten._nested_tensor_from_tensor_list.default,
94
+ aten._nested_tensor_from_tensor_list.out,
95
+ aten.pin_memory.default,
96
+ aten.is_pinned.default,
97
+ aten.to.device,
98
+ aten.to.prim_Device,
99
+ aten._pin_memory.default,
100
+ aten._pin_memory.out,
101
+ aten._resize_output.default,
102
+ aten._resize_output.out,
103
+ )
104
+
105
+ # this op is never actually used
106
+ _non_kwarg_device_constructors = (aten._list_to_tensor,)
107
+
108
+
109
+ # This function indicates if the backend device
110
+ # supports non-contiguous tensors
111
+ def is_noncontiguous_supported(device):
112
+ if device.type == "hpu":
113
+ return False
114
+ return True
115
+
116
+
117
+ def contains_tensor_types(type):
118
+ tensor_type = torch._C.TensorType.get()
119
+ return type.isSubtypeOf(tensor_type) or any(
120
+ contains_tensor_types(e) for e in type.containedTypes()
121
+ )
122
+
123
+
124
+ _like_tensor_constructors = (
125
+ aten.empty_like.default,
126
+ aten.empty_like.out,
127
+ aten.full_like.default,
128
+ aten.full_like.out,
129
+ aten.ones_like.default,
130
+ aten.ones_like.out,
131
+ aten.rand_like.default,
132
+ aten.rand_like.out,
133
+ aten.randn_like.default,
134
+ aten.randn_like.out,
135
+ aten.randint_like.default,
136
+ aten.randint_like.out,
137
+ aten.randint_like.low_dtype,
138
+ aten.randint_like.low_dtype_out,
139
+ aten.zeros_like.default,
140
+ aten.zeros_like.out,
141
+ aten.new_empty.default,
142
+ aten.new_empty.out,
143
+ aten.new_empty_strided.default,
144
+ aten.new_empty_strided.out,
145
+ aten.new_full.default,
146
+ aten.new_full.out,
147
+ aten.new_zeros.default,
148
+ aten.new_zeros.out,
149
+ aten.new_ones.default,
150
+ aten.new_ones.out,
151
+ )
152
+
153
+
154
+ @contextlib.contextmanager
155
+ def unset_fake_temporarily():
156
+ old = torch._C._unset_dispatch_mode(torch._C._TorchDispatchModeKey.FAKE)
157
+ try:
158
+ yield old
159
+ finally:
160
+ if old is not None:
161
+ torch._C._set_dispatch_mode(old)
162
+
163
+
164
+ @functools.lru_cache(None)
165
+ def _is_tensor_constructor(func: OpOverload):
166
+ assert isinstance(func, OpOverload)
167
+ schema = func._schema
168
+ if any(contains_tensor_types(arg.type) for arg in schema.arguments):
169
+ return False
170
+ # TODO: no real reason to restrict multiple outputs
171
+ return (
172
+ len(schema.returns) == 1 and schema.returns[0].type is torch._C.TensorType.get()
173
+ )
174
+
175
+
176
+ def is_fake(x):
177
+ if isinstance(x, FakeTensor):
178
+ return True
179
+ if is_traceable_wrapper_subclass(x):
180
+ attrs, _ = type(x).__tensor_flatten__(x)
181
+ flattened_tensors = [getattr(x, attr) for attr in attrs]
182
+ # need to recurse because we could have nested subclasses
183
+ all_fake = all(is_fake(x) for x in flattened_tensors)
184
+ any_fake = any(is_fake(x) for x in flattened_tensors)
185
+ assert all_fake == any_fake, "got mixed fake and real tensors!"
186
+ return all_fake
187
+ elif isinstance(x, torch.Tensor) and torch._is_functional_tensor(x):
188
+ reapply_views = torch._C._functionalization_reapply_views_tls()
189
+ unwrapped = torch._C._functorch._unwrap_functional_tensor(x, reapply_views)
190
+ return is_fake(unwrapped)
191
+ return False
192
+
193
+
194
+ def maybe_get_fake_mode(t):
195
+ if isinstance(t, FakeTensor):
196
+ return t.fake_mode
197
+ if is_traceable_wrapper_subclass(t):
198
+ inner_tensor_names, _ = t.__tensor_flatten__()
199
+ modes = [
200
+ maybe_get_fake_mode(getattr(t, t_name)) for t_name in inner_tensor_names
201
+ ]
202
+ m = modes[0]
203
+ assert all(m is x for x in modes)
204
+ return m
205
+ elif isinstance(t, torch.Tensor) and torch._is_functional_tensor(t):
206
+ reapply_views = torch._C._functionalization_reapply_views_tls()
207
+ unwrapped = torch._C._functorch._unwrap_functional_tensor(t, reapply_views)
208
+ return maybe_get_fake_mode(unwrapped)
209
+ return None
210
+
211
+
212
+ @functools.lru_cache(None)
213
+ def get_schema_info(func):
214
+ return torch._C._SchemaInfo(func._schema) # type: ignore[attr-defined]
215
+
216
+
217
+ # many of the decompositions registered to torch/_prims do not at the moment model
218
+ # aliasing or strides, so as an incremental step, just enable the decompositions in
219
+ # torch/_decomp/decompositions.py.
220
+ # decomps are used for aot autograd tracing so we would like to unify on their
221
+ # implementation and add additional testing to them
222
+ @functools.lru_cache(None)
223
+ def torch_decomp_decompositions(func):
224
+ from torch._decomp import decomposition_table
225
+
226
+ decompositions = torch._decomp.decompositions
227
+ decomp_attrs = [getattr(decompositions, attr) for attr in dir(decompositions)]
228
+ return decomposition_table[func] in decomp_attrs
229
+
230
+
231
+ def tree_flatten_only(ty: Type[T], tree: PyTree):
232
+ flat_vals = pytree.tree_leaves(tree)
233
+ return [elem for elem in flat_vals if isinstance(elem, ty)]
234
+
235
+
236
+ # Similar to `MetaConverter`, this is a class for converting
237
+ # multiple tensors into fake tensors which share the same view/storage
238
+ # structure. Like `MetaConverter`, it uses `WeakIdRef` to
239
+ # hold a weak reference for all memoized tensors.
240
+ class FakeTensorConverter:
241
+ @property
242
+ def tensor_memo(self):
243
+ return self.meta_converter.tensor_memo
244
+
245
+ meta_converter: MetaConverter
246
+ constant_storage_mapping: Dict[StorageWeakRef, List[ReferenceType]]
247
+
248
+ def __init__(self):
249
+ self.meta_converter = MetaConverter()
250
+
251
+ # map from to storage to corresponding constant tensors
252
+ self.constant_storage_mapping = {}
253
+
254
+ def add_constant_storage_mapping(self, fake_tensor):
255
+ # when you have a constant, aliased tensor:
256
+ # const_tensor.add_(torch.rand([1]))
257
+ # all aliases of it must become no longer const
258
+ assert isinstance(fake_tensor, FakeTensor) and fake_tensor.constant is not None
259
+ weak_st = StorageWeakRef(fake_tensor.constant._typed_storage())
260
+
261
+ # we need a map from a weak storage to all of its corresponding
262
+ # constant tensors. python doesn't have the weak value equivalent
263
+ # of defaultdict(list), so we are using a WeakValueDictionary as one
264
+ if weak_st not in self.constant_storage_mapping:
265
+ self.constant_storage_mapping[weak_st] = []
266
+ self.constant_storage_mapping[weak_st].append(weakref.ref(fake_tensor))
267
+
268
+ def invalidate_constant_aliases(self, tensor):
269
+ assert not isinstance(tensor, FakeTensor)
270
+
271
+ weak_st = StorageWeakRef(tensor._typed_storage())
272
+ if weak_st not in self.constant_storage_mapping:
273
+ return
274
+
275
+ for weak_tensor_ref in self.constant_storage_mapping[weak_st]:
276
+ ten = weak_tensor_ref()
277
+ if ten is not None:
278
+ ten._fix_weakref()
279
+ ten.constant = None
280
+
281
+ del self.constant_storage_mapping[weak_st]
282
+
283
+ def _get_memo(self, t):
284
+ if WeakIdRef(t) in self.tensor_memo:
285
+ out = self.tensor_memo[WeakIdRef(t)]
286
+ out._fix_weakref()
287
+ return out
288
+ return None
289
+
290
+ def set_tensor_memo(self, t, v):
291
+ th = WeakIdRef(t)
292
+
293
+ # hold a weak ref to self, otherwise it will be kept alive
294
+ # by the del_ten closure
295
+ self_weak_ref = weakref.ref(self)
296
+
297
+ def del_ten():
298
+ self_ref = self_weak_ref()
299
+ if self_ref is None:
300
+ return
301
+ # on shutdown, th may not be in memo
302
+ self_ref.tensor_memo.pop(th, None)
303
+
304
+ weakref.finalize(t, del_ten)
305
+ self.tensor_memo[th] = v
306
+
307
+ def from_real_tensor(
308
+ self,
309
+ fake_mode,
310
+ t,
311
+ make_constant=False,
312
+ shape_env=None,
313
+ *,
314
+ source=None,
315
+ symbolic_context=None,
316
+ memoized_only=False,
317
+ ):
318
+ # see note [Tensor Fakification and Symbol Caching]
319
+ if not symbolic_context and not source and shape_env:
320
+ if tracing_context := torch._guards.TracingContext.try_get():
321
+ if t in tracing_context.tensor_to_context:
322
+ symbolic_context = tracing_context.tensor_to_context[t]
323
+ source = symbolic_context.tensor_source
324
+
325
+ maybe_memo = self._get_memo(t)
326
+ if maybe_memo is not None:
327
+ return maybe_memo
328
+ if memoized_only:
329
+ return None
330
+ existing_device = t.device
331
+ # not yet supported in metatensors
332
+ if t.is_quantized:
333
+ raise UnsupportedFakeTensorException("quantized nyi in meta tensors")
334
+ if type(t) is torch.nn.Parameter:
335
+ assert not make_constant
336
+
337
+ def mk_fake_tensor(make_meta_t):
338
+ # NB: don't use in_kernel_invocation_manager. to
339
+ # ensure FakeTensor can internally do constant computation
340
+ # as necessary. Invocation manager is "more correct" as
341
+ # it works for more operators in make_meta_t, but
342
+ # invariant is that make_meta_t only calls factories
343
+ # for which it is not strictly necessary to use the
344
+ # invocation manager (I think!)
345
+ with no_dispatch():
346
+ return FakeTensor(
347
+ fake_mode,
348
+ make_meta_t(),
349
+ existing_device,
350
+ constant=t if make_constant else None,
351
+ )
352
+
353
+ out = self.meta_converter(
354
+ t,
355
+ shape_env=shape_env,
356
+ callback=mk_fake_tensor,
357
+ source=source,
358
+ symbolic_context=symbolic_context,
359
+ )
360
+ if out is NotImplemented:
361
+ raise UnsupportedFakeTensorException("meta converter nyi")
362
+ if make_constant:
363
+ self.add_constant_storage_mapping(out)
364
+ # NB: meta_converter set the memo
365
+ return out
366
+
367
+ # If you specify the device, it MUST be a meta tensor.
368
+ def from_meta_and_device(self, fake_mode, t, device):
369
+ assert (
370
+ t.device.type == "meta"
371
+ ), f"tensor's device must be `meta`, got {t.device.type} instead"
372
+ maybe_memo = self._get_memo(t)
373
+ if maybe_memo is not None:
374
+ return maybe_memo
375
+ out = FakeTensor(fake_mode, t, device)
376
+ self.set_tensor_memo(t, out)
377
+ return out
378
+
379
+ # You can have a real tensor that you need to convert into a fake tensor.
380
+ # If you have a meta tensor already, call from_meta_and_device.
381
+ #
382
+ # You're allowed to pass a meta tensor to be turned into a fake
383
+ # tensor; although an odd thing to do, this can occur if you're doing
384
+ # cross ref testing and the inner test is already operating on meta tensors.
385
+ def __call__(
386
+ self,
387
+ fake_mode,
388
+ t,
389
+ *,
390
+ make_constant=False,
391
+ shape_env=None,
392
+ source=None,
393
+ symbolic_context=None,
394
+ memoized_only=False,
395
+ ):
396
+ return self.from_real_tensor(
397
+ fake_mode,
398
+ t,
399
+ make_constant,
400
+ shape_env=shape_env,
401
+ source=source,
402
+ symbolic_context=symbolic_context,
403
+ memoized_only=memoized_only,
404
+ )
405
+
406
+
407
+ op_implementations = []
408
+
409
+
410
+ def register_op_impl(run_impl_check: Union[Callable[[OpOverload], bool], OpOverload]):
411
+ def impl_decorator(op_impl):
412
+ global op_implementations
413
+ if isinstance(run_impl_check, OpOverload):
414
+ op_implementations.append((lambda func: func == run_impl_check, op_impl))
415
+ else:
416
+ op_implementations.append((run_impl_check, op_impl))
417
+
418
+ return op_impl
419
+
420
+ return impl_decorator
421
+
422
+
423
+ @register_op_impl(
424
+ lambda func: (_is_tensor_constructor(func) or func in _like_tensor_constructors)
425
+ )
426
+ def constructors(fake_mode, func, *args, **kwargs):
427
+ assert func not in _non_kwarg_device_constructors
428
+ _, new_kwargs = normalize_function(
429
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
430
+ )
431
+ if func in _like_tensor_constructors:
432
+ default_device = new_kwargs["input"].device
433
+ # TODO: file issue
434
+ args = (new_kwargs.pop("input"),)
435
+ else:
436
+ # cpu is default device if none is specified
437
+ default_device = torch.device("cpu")
438
+ args = ()
439
+ out_device = new_kwargs.pop("device", None)
440
+ out_device = out_device if out_device is not None else default_device
441
+ new_kwargs["device"] = torch.device("meta")
442
+ # _like constructors have fake tensor inputs (maybe this causes the non-like
443
+ # to fail? hmmm)
444
+ with in_kernel_invocation_manager(fake_mode):
445
+ r = func(*args, **new_kwargs)
446
+ return FakeTensor(fake_mode, r, out_device)
447
+
448
+
449
+ @register_op_impl(lambda func: func in (aten.to.prim_Device, aten.to.device))
450
+ def non_kwarg_to(fake_mode, func, *args, **kwargs):
451
+ _, new_kwargs = normalize_function(
452
+ func, args, kwargs, normalize_to_only_use_kwargs=True
453
+ )
454
+ input_device = new_kwargs["device"]
455
+ out_device = input_device if input_device else new_kwargs["input"].device
456
+ new_kwargs["device"] = torch.device("meta")
457
+ inp = new_kwargs.pop("input")
458
+ with in_kernel_invocation_manager(fake_mode):
459
+ r = func(inp, **new_kwargs)
460
+ # TODO: I think this does the wrong thing if r is inp
461
+ return fake_mode.fake_tensor_converter.from_meta_and_device(
462
+ fake_mode, r, out_device
463
+ )
464
+
465
+
466
+ def stride_incorrect_op(op):
467
+ if op.namespace not in ("aten", "prims"):
468
+ return False
469
+ if op is aten._fft_c2c.default:
470
+ return False
471
+
472
+ op_name = op.name()
473
+ if "fft" in op_name:
474
+ return True
475
+ return False
476
+
477
+
478
+ # These operators have meta implementations with incorrect strides
479
+ @register_op_impl(stride_incorrect_op)
480
+ def wordaround_stride_incorrect_op(fake_mode, func, *args, **kwargs):
481
+ # This is a workaround for meta implmentations with incorrect strides
482
+
483
+ def is_symbolic(x):
484
+ if isinstance(x, FakeTensor):
485
+ return x._has_symbolic_sizes_strides
486
+ if isinstance(x, (torch.SymInt, torch.SymFloat, torch.SymBool)):
487
+ return True
488
+ return False
489
+
490
+ # For static shapes, we can fall back to eager for the real strides
491
+ if fake_mode.allow_fallback_kernels:
492
+ require_dynamic = any(
493
+ is_symbolic(x) for x in itertools.chain(args, kwargs.values())
494
+ )
495
+ if not require_dynamic:
496
+ flat_args, args_spec = pytree.tree_flatten((args, kwargs))
497
+ return run_fallback_kernel(fake_mode, func, flat_args, args_spec, None)
498
+
499
+ raise UnsupportedOperatorException(func)
500
+
501
+
502
+ # Dont default to default device handling,
503
+ # since the device of `the_template` is ignored
504
+ @register_op_impl(aten.resize_as_.default)
505
+ def resize_as_(fake_mode, func, *args, **kwargs):
506
+ with in_kernel_invocation_manager(fake_mode):
507
+ return func(*args, **kwargs)
508
+
509
+
510
+ @register_op_impl(aten._sparse_coo_tensor_with_dims_and_tensors.default)
511
+ def _sparse_coo_tensor_with_dims_and_tensors(fake_mode, func, *args, **kwargs):
512
+ # TODO: remove me
513
+ return constructors(fake_mode, func, *args, **kwargs)
514
+
515
+
516
+ # index.Tensor data-dependent in only some conditions
517
+ @register_op_impl(
518
+ lambda func: torch.Tag.dynamic_output_shape in func.tags
519
+ and func
520
+ not in [aten.index.Tensor, aten.nonzero.default, aten.repeat_interleave.Tensor]
521
+ )
522
+ def dyn_shape(fake_mode, func, *args, **kwargs):
523
+ raise DynamicOutputShapeException(func)
524
+
525
+
526
+ @register_op_impl(lambda func: func is aten.repeat_interleave.Tensor)
527
+ def repeat_interleave_tensor(fake_mode, func, repeats, output_size=None):
528
+ if output_size is None:
529
+ if (
530
+ fake_mode.shape_env is None
531
+ or not fake_mode.shape_env.allow_dynamic_output_shape_ops
532
+ ):
533
+ raise DynamicOutputShapeException(func)
534
+
535
+ output_size = fake_mode.shape_env.create_unbacked_symint()
536
+
537
+ # Avoid importing sympy at a module level
538
+ from torch.fx.experimental.symbolic_shapes import _constrain_range_for_size
539
+
540
+ _constrain_range_for_size(output_size)
541
+ # TODO: consider a memo
542
+ return repeats.new_empty(output_size)
543
+
544
+
545
+ @register_op_impl(lambda func: func is torch.ops.aten._local_scalar_dense.default)
546
+ def local_scalar_dense(fake_mode, func, arg):
547
+ if fake_mode.shape_env is None or not fake_mode.shape_env.allow_scalar_outputs:
548
+ # Without symints/symfloats, cannot handle this
549
+ raise DataDependentOutputException(func)
550
+ if is_float_dtype(arg.dtype):
551
+ return fake_mode.shape_env.create_unbacked_symfloat()
552
+ elif is_integer_dtype(arg.dtype):
553
+ return fake_mode.shape_env.create_unbacked_symint()
554
+ elif is_boolean_dtype(arg.dtype):
555
+ return fake_mode.shape_env.create_unbacked_symbool()
556
+ else:
557
+ raise NotImplementedError(f"local_scalar_dense/item NYI for {arg.dtype}")
558
+
559
+
560
+ @register_op_impl(lambda func: func is torch.ops.aten.nonzero.default)
561
+ def nonzero(fake_mode, func, arg):
562
+ if (
563
+ fake_mode.shape_env is None
564
+ or not fake_mode.shape_env.allow_dynamic_output_shape_ops
565
+ ):
566
+ # Without symints/symfloats, cannot handle this
567
+ raise DynamicOutputShapeException(func)
568
+
569
+ if arg.nonzero_memo is None:
570
+ nnz = fake_mode.shape_env.create_unbacked_symint()
571
+
572
+ # This is unsound, but it works well in practice
573
+ # See https://docs.google.com/document/d/1lFRYAJo5nrfxRhwIzGnfi2pbLpU6T4ytSRSuLJ5qebI/edit#
574
+ # TODO: Add a config knob to turn off this unsound behavior
575
+ #
576
+ # NB: If numel < 2, the bounds here might be COMPLETELY
577
+ # disjoint with what can actually occur. But this is fine:
578
+ # remember, the hypothesis is that if your later code works
579
+ # with N >= 2, it will work with N = 1 and N = 0.
580
+ maxval = sys.maxsize - 1
581
+
582
+ # Avoid importing sympy at a module level
583
+ from torch.fx.experimental.symbolic_shapes import (
584
+ _constrain_range_for_size,
585
+ has_free_symbols,
586
+ )
587
+
588
+ if not has_free_symbols(arg.numel()):
589
+ # Don't upgrade the range if numel is less than two, since we then
590
+ # have an empty range which makes things go explodey. We also
591
+ # don't allow for 2 because that would specialize the unbacked
592
+ # SymInt to 2, which is also likely to be buggy.
593
+ if arg.numel() > 2:
594
+ maxval = int(arg.numel())
595
+
596
+ _constrain_range_for_size(nnz, max=maxval)
597
+
598
+ arg._nonzero_memo = nnz
599
+ arg._nonzero_memo_vc = arg._version
600
+
601
+ return arg.new_empty((arg.nonzero_memo, arg.dim()), dtype=torch.int64)
602
+
603
+
604
+ @register_op_impl(lambda func: func is torch.ops.aten.masked_select.default)
605
+ def masked_select(fake_mode, func, self, mask):
606
+ if (
607
+ fake_mode.shape_env is None
608
+ or not fake_mode.shape_env.allow_dynamic_output_shape_ops
609
+ ):
610
+ # Without symints/symfloats, cannot handle this
611
+ raise DynamicOutputShapeException(func)
612
+
613
+ nnz = fake_mode.shape_env.create_unbacked_symint()
614
+
615
+ # see nonzero for commentary
616
+ maxval = sys.maxsize - 1
617
+
618
+ # Avoid importing sympy at a module level
619
+ from torch.fx.experimental.symbolic_shapes import (
620
+ _constrain_range_for_size,
621
+ has_free_symbols,
622
+ )
623
+
624
+ if not has_free_symbols(arg.numel()):
625
+ if arg.numel() >= 2:
626
+ maxval = int(arg.numel())
627
+
628
+ _constrain_range_for_size(nnz, max=maxval)
629
+
630
+ return self.new_empty((nnz,))
631
+
632
+
633
+ # NB: this must be ordered after local_scalar_dense
634
+ @register_op_impl(lambda func: torch.Tag.data_dependent_output in func.tags)
635
+ def data_dep(fake_mode, func, *args, **kwargs):
636
+ raise DataDependentOutputException(func)
637
+
638
+
639
+ # Bool Indices get Expanded as Masks
640
+ # See: IndexingUtils.h:expandTensors
641
+ def check_no_bool_index_tensors(func, self, indices):
642
+ for index in indices:
643
+ if index is not None and index.dtype in (torch.bool, torch.uint8):
644
+ raise DynamicOutputShapeException(func)
645
+
646
+
647
+ def run_and_return_new_tensor_of_input_device(fake_mode, func, args, kwargs):
648
+ _, new_kwargs = normalize_function(
649
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
650
+ )
651
+
652
+ out_device = new_kwargs["input"].device
653
+ with in_kernel_invocation_manager(fake_mode):
654
+ out = func(*args, **kwargs)
655
+ if not is_noncontiguous_supported(out_device):
656
+ out = out.new_empty(out.shape)
657
+
658
+ if out is new_kwargs["input"]:
659
+ return out # copy_
660
+ return FakeTensor(fake_mode, out, out_device)
661
+
662
+
663
+ # Dont default to default device handling,
664
+ # Since op can take in non-zero sized cpu
665
+ # index tensors with cuda self
666
+ @register_op_impl(aten.index.Tensor)
667
+ def index_tensor(fake_mode, func, *args, **kwargs):
668
+ from torch._meta_registrations import meta_index_Tensor
669
+
670
+ _, new_kwargs = normalize_function(
671
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
672
+ )
673
+
674
+ out_device = new_kwargs["input"].device
675
+ # ensure nonzero call goes to fake tensor
676
+ with fake_mode:
677
+ out = meta_index_Tensor(*args, **kwargs)
678
+ return out.to(out_device)
679
+
680
+
681
+ # Can take mixed meta/non-meta arguments; the meta registration
682
+ # will roughly do the right thing even when given real devices
683
+ @register_op_impl(aten._embedding_bag.default)
684
+ def embedding_bag(fake_mode, func, *args, **kwargs):
685
+ from torch._meta_registrations import meta_embedding_bag
686
+
687
+ with fake_mode:
688
+ return meta_embedding_bag(*args, **kwargs)
689
+
690
+
691
+ # takes in multiple-devices, dont default to default device handling
692
+ @register_op_impl(aten._unsafe_index_put.default)
693
+ @register_op_impl(aten.copy.default)
694
+ @register_op_impl(aten.copy_.default)
695
+ @register_op_impl(aten.slice_scatter.default)
696
+ def multi_device_op_default(fake_mode, func, *args, **kwargs):
697
+ return run_and_return_new_tensor_of_input_device(fake_mode, func, args, kwargs)
698
+
699
+
700
+ # same with multi_device_op_default, but return the input
701
+ @register_op_impl(aten.copy.out)
702
+ @register_op_impl(aten.slice_scatter.out)
703
+ def multi_device_op_out(fake_mode, func, *args, **kwargs):
704
+ with in_kernel_invocation_manager(fake_mode):
705
+ out = func(*args, **kwargs)
706
+
707
+ _, new_kwargs = normalize_function(
708
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
709
+ )
710
+
711
+ return new_kwargs["input"]
712
+
713
+
714
+ @register_op_impl(aten.index_put.default)
715
+ @register_op_impl(aten.index_put_.default)
716
+ def index_put_impl(fake_mode, func, *args, **kwargs):
717
+ _, new_kwargs = normalize_function(
718
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
719
+ )
720
+
721
+ values = new_kwargs["values"]
722
+ self_device = new_kwargs["input"].fake_device
723
+ torch._check(
724
+ self_device == values.fake_device or (values.ndim == 0 and values.numel() == 1),
725
+ lambda: f"Mismatching {func} device between self ({self_device}) and values ({values.device})",
726
+ )
727
+
728
+ out = run_and_return_new_tensor_of_input_device(fake_mode, func, args, kwargs)
729
+ if func is aten.index_put_.default:
730
+ return new_kwargs["input"]
731
+ else:
732
+ return out
733
+
734
+
735
+ @register_op_impl(lambda fn: fn in _device_not_kwarg_ops)
736
+ def nyi(fake_mode, func, *args, **kwargs):
737
+ assert func not in _device_not_kwarg_ops, f"NYI: {func}"
738
+
739
+
740
+ @register_op_impl(
741
+ lambda func: func in (aten.convolution.default, aten.convolution_backward.default)
742
+ )
743
+ def conv(fake_mode, func, *args, **kwargs):
744
+ _, kwargs = normalize_function(
745
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
746
+ )
747
+ device = kwargs["input"].fake_device
748
+ # need to re-enable mode so the tensors report fake device
749
+ with fake_mode:
750
+ # if the input is unsqueezed is done in Convolution.cpp we get segfault
751
+ k = kwargs["weight"].ndim
752
+ batch = kwargs["input"].shape[0]
753
+
754
+ # Avoid importing sympy at a module level
755
+ from torch.fx.experimental.symbolic_shapes import has_hint
756
+
757
+ if not has_hint(batch):
758
+ # TODO: We can make this a little more faithful with best effort
759
+ # channels last detection (but only if it's statically obvious!)
760
+ mem_fmt = None
761
+ elif k == 3 and not kwargs["input"].is_mkldnn and not kwargs["input"].is_xpu:
762
+ mem_fmt = None
763
+ else:
764
+ if func is aten.convolution.default:
765
+ conv_backend = torch._C._select_conv_backend(**kwargs)
766
+ else:
767
+ conv_backend = torch._C._select_conv_backend(
768
+ kwargs["input"],
769
+ kwargs["weight"],
770
+ bias=None,
771
+ stride=kwargs["stride"],
772
+ padding=kwargs["padding"],
773
+ dilation=kwargs["dilation"],
774
+ transposed=kwargs["transposed"],
775
+ output_padding=kwargs["output_padding"],
776
+ groups=kwargs["groups"],
777
+ bias_sizes=kwargs["bias_sizes"],
778
+ )
779
+ mem_fmt = torch._C._conv_determine_backend_memory_format(
780
+ kwargs["input"], kwargs["weight"], conv_backend
781
+ )
782
+
783
+ def convert(t, mem_fmt):
784
+ if t is None:
785
+ return t
786
+ if mem_fmt is not None:
787
+ t = t.to(memory_format=mem_fmt)
788
+ return FakeTensor(fake_mode, t, device)
789
+
790
+ with in_kernel_invocation_manager(fake_mode):
791
+ out = func(**kwargs)
792
+
793
+ if func is aten.convolution.default:
794
+ return convert(out, mem_fmt)
795
+ else:
796
+ return (
797
+ convert(out[0], mem_fmt),
798
+ convert(out[1], mem_fmt),
799
+ convert(out[2], None),
800
+ )
801
+
802
+
803
+ FAST_OP_IMPLEMENTATIONS = {}
804
+
805
+
806
+ # Unlike register_op_impl, these don't do the slow iteration for
807
+ # run_impl_check, and these run BEFORE decompositions
808
+ def register_fast_op_impl(func: OpOverload):
809
+ def impl_decorator(op_impl):
810
+ FAST_OP_IMPLEMENTATIONS[func] = op_impl
811
+ return op_impl
812
+
813
+ return impl_decorator
814
+
815
+
816
+ # infer_size_impl in ExpandUtils
817
+ def infer_size(a, b):
818
+ dimsA = len(a)
819
+ dimsB = len(b)
820
+ ndim = max(dimsA, dimsB)
821
+ expandedSizes = [0] * ndim
822
+ for i in range(ndim - 1, -1, -1):
823
+ offset = ndim - 1 - i
824
+ dimA = dimsA - 1 - offset
825
+ dimB = dimsB - 1 - offset
826
+ sizeA = a[dimA] if dimA >= 0 else 1
827
+ sizeB = b[dimB] if dimB >= 0 else 1
828
+
829
+ # NB: It is very important to test for broadcasting, before testing
830
+ # sizeA == sizeB. This is because the broadcasting tests are likely
831
+ # to be statically known (in particular, if sizeA/sizeB is unbacked
832
+ # but size-like, we will unsoundly assume they never equal 1), but
833
+ # the sizeA == sizeB test may not be statically known. However, once
834
+ # we have established that no broadcasting is happening, the
835
+ # sizeA == sizeB is now expect_true and we can defer it as a runtime
836
+ # assert (this works because Python will return the terminal
837
+ # expression of an or statement as-is, without bool()'ing it; if this
838
+ # were not the case, we'd need to write this using torch.sym_or() or
839
+ # something like that).
840
+ torch._check(
841
+ sizeA == 1 or sizeB == 1 or sizeA == sizeB,
842
+ lambda: f"The size of tensor a ({sizeA}) "
843
+ f"must match the size of tensor b ({sizeB}) "
844
+ f"at non-singleton dimension {i})",
845
+ )
846
+ expandedSizes[i] = sizeB if sizeA == 1 else sizeA
847
+ return tuple(expandedSizes)
848
+
849
+
850
+ def make_fast_binary_impl(slow_ref):
851
+ def fast_binary_impl(mode, *args, **kwargs):
852
+ def slow(msg):
853
+ count_label(f"slow {msg}")
854
+ with mode:
855
+ return slow_ref(*args, **kwargs)
856
+
857
+ count_label("attempt fast")
858
+
859
+ # Fast path (based off of TensorIterator fast path).
860
+ # Unfortunately, there is no way to easily deduplicate
861
+ # this with either the TensorIterator C++ implementation
862
+ # (which we don't want to SymIntify, and also the algorithm
863
+ # here is slightly different from TensorIterator to allow
864
+ # for broadcasting), nor the PrimTorch implementation
865
+ # (which does not actually implement a fast path.)
866
+
867
+ operands = args
868
+
869
+ # compute_shape
870
+ has_scalars = False
871
+ has_tensors = False
872
+ final_shape = None
873
+ for op in operands:
874
+ shape = op.shape if isinstance(op, torch.Tensor) else ()
875
+ if len(shape) == 0:
876
+ has_scalars = True
877
+ else:
878
+ has_tensors = True
879
+ if final_shape is None:
880
+ final_shape = shape
881
+ # TODO: Minor optimization: track if the shapes
882
+ # were equal so you can skip the equality check
883
+ # below if unnecessary
884
+ final_shape = infer_size(final_shape, shape)
885
+ assert final_shape is not None
886
+
887
+ # Do some extra safety checks to see if the output
888
+ # stride is obvious
889
+ for op in operands:
890
+ if isinstance(op, torch.Tensor) and op.shape == final_shape:
891
+ break
892
+ else:
893
+ return slow("both tensors nontrivially broadcast")
894
+
895
+ # compute_types
896
+ cpu = torch.device("cpu")
897
+ common_device = cpu
898
+ common_dtype = None
899
+ output_dtype = None
900
+ has_different_input_dtypes = False
901
+ for op in operands:
902
+ if not isinstance(op, torch.Tensor):
903
+ # Use elementwise_dtypes for the tricky case
904
+ has_different_input_dtypes = True
905
+ continue
906
+ if common_device == cpu and not op.device.type == "cpu":
907
+ common_device = op.device
908
+ # Slightly simplified here as target_dtype cannot vary
909
+ if common_dtype is None:
910
+ common_dtype = op.dtype
911
+ elif common_dtype != op.dtype:
912
+ has_different_input_dtypes = True
913
+
914
+ if has_different_input_dtypes:
915
+ # compute promotion
916
+ # TODO: we don't need the compute type
917
+ _, common_dtype = elementwise_dtypes(
918
+ *operands, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
919
+ )
920
+
921
+ # check all tensors on same device
922
+ # cpu scalars are assumed allow
923
+ current_cpu_scalars_on_non_cpu = 0
924
+ max_cpu_scalars_on_non_cpu = 1 # hard coded atm
925
+ for op in operands:
926
+ if not isinstance(op, torch.Tensor):
927
+ continue
928
+ if common_device != cpu and op.dim() == 0 and op.device == cpu:
929
+ if current_cpu_scalars_on_non_cpu >= max_cpu_scalars_on_non_cpu:
930
+ return slow("error")
931
+ current_cpu_scalars_on_non_cpu += 1
932
+ elif op.device != common_device:
933
+ return slow("error")
934
+
935
+ # compute_fast_setup_type
936
+ is_contiguous = True
937
+ is_channels_last = True
938
+ # TODO: is_non-overlapping_and_dense (not bound from Python
939
+ # no inplace, no out, everything defined
940
+
941
+ if is_noncontiguous_supported(common_device):
942
+ for op in operands:
943
+ if not isinstance(op, torch.Tensor):
944
+ continue
945
+ is_contiguous = is_contiguous and op.is_contiguous(
946
+ memory_format=torch.contiguous_format
947
+ )
948
+ is_channels_last = is_channels_last and op.is_contiguous(
949
+ memory_format=torch.channels_last
950
+ )
951
+ if is_contiguous:
952
+ # do contiguous
953
+ count_label("fast is_contiguous")
954
+ return FakeTensor(
955
+ mode,
956
+ torch.empty(
957
+ final_shape,
958
+ dtype=common_dtype,
959
+ device="meta",
960
+ memory_format=torch.contiguous_format,
961
+ ),
962
+ device=common_device,
963
+ )
964
+ if is_channels_last:
965
+ count_label("fast channels_last")
966
+ # do channels last
967
+ return FakeTensor(
968
+ mode,
969
+ torch.empty(
970
+ final_shape,
971
+ dtype=common_dtype,
972
+ device="meta",
973
+ memory_format=torch.channels_last,
974
+ ),
975
+ device=common_device,
976
+ )
977
+
978
+ return slow("no contiguity match")
979
+
980
+ return fast_binary_impl
981
+
982
+
983
+ @functools.lru_cache(None)
984
+ def get_fast_op_impls():
985
+ import torch._refs
986
+
987
+ register_fast_op_impl(torch.ops.aten.add.Tensor)(
988
+ make_fast_binary_impl(torch._refs.add)
989
+ )
990
+ register_fast_op_impl(torch.ops.aten.sub.Tensor)(
991
+ make_fast_binary_impl(torch._refs.sub)
992
+ )
993
+ register_fast_op_impl(torch.ops.aten.mul.Tensor)(make_fast_binary_impl(torch._refs.mul)) # type: ignore[has-type]
994
+ register_fast_op_impl(torch.ops.aten.div.Tensor)(
995
+ make_fast_binary_impl(torch._refs.div)
996
+ )
997
+ return FAST_OP_IMPLEMENTATIONS
998
+
999
+
1000
+ @functools.lru_cache(None)
1001
+ def init_cuda_context():
1002
+ # Backward will error with cuda Fake Tensors if no cuda tensors have been initialized first
1003
+ if torch.cuda.is_available():
1004
+ torch.empty(1, device="cuda") if torch.version.hip is None else torch.zeros(
1005
+ 1, device="cuda"
1006
+ )
1007
+
1008
+
1009
+ @contextlib.contextmanager
1010
+ def in_kernel_invocation_manager(fake_mode):
1011
+ # See: note [Fake Tensor Dispatch Keys]
1012
+ prev_in_kernel = fake_mode.in_kernel_invocation
1013
+ meta_in_tls = torch._C._meta_in_tls_dispatch_include()
1014
+ assert meta_in_tls == prev_in_kernel, f"{meta_in_tls}, {prev_in_kernel}"
1015
+
1016
+ guard = torch._C._DisableTorchDispatch() # type: ignore[attr-defined]
1017
+ fake_mode.in_kernel_invocation = True
1018
+ torch._C._set_meta_in_tls_dispatch_include(True)
1019
+ try:
1020
+ yield
1021
+ finally:
1022
+ fake_mode.in_kernel_invocation = prev_in_kernel
1023
+ torch._C._set_meta_in_tls_dispatch_include(prev_in_kernel)
1024
+ del guard
1025
+
1026
+
1027
+ # Return whether the function allows Python numbers to bind to Tensors
1028
+ def should_allow_numbers_as_tensors(func: OpOverload):
1029
+ return torch._C._should_allow_numbers_as_tensors(
1030
+ func.name().split("::")[-1].split(".")[0]
1031
+ )
1032
+
1033
+
1034
+ class FakeTensorConfig:
1035
+ debug = os.environ.get("TORCH_FAKE_TENSOR_DEBUG", False)
1036
+
1037
+
1038
+ class FakeTensor(torch.Tensor):
1039
+ """
1040
+ Meta tensors give you the ability to run PyTorch code without having to
1041
+ actually do computation through tensors allocated on a `meta` device.
1042
+ Because the device is `meta`, meta tensors do not model device propagation.
1043
+ FakeTensor extends MetaTensors to also carry an additional `fake_device`
1044
+ which tracks devices that would have been used.
1045
+ """
1046
+
1047
+ fake_device: torch.device
1048
+ fake_mode: "FakeTensorMode"
1049
+ constant: Optional[torch.Tensor]
1050
+
1051
+ # This memoizes the unbacked SymInt representing the number of nonzero
1052
+ # elements in this tensor. This is helpful if you do something like
1053
+ # x[mask] and y[mask]; mask.nonzero() gets repeatedly called and should
1054
+ # give a consistent unbacked SymInt. It needs to be invalidated in the
1055
+ # same way constant is.
1056
+ # TODO: Generalize this as needed, e.g., into a trie of memos
1057
+ _nonzero_memo: Optional[torch.SymInt]
1058
+ _nonzero_memo_vc: Optional[int]
1059
+
1060
+ # Indicates to our torch_dispatch dispatching infra that
1061
+ # this is an "infra" mode with lower dispatching precedence.
1062
+ _mode_key = torch._C._TorchDispatchModeKey.FAKE
1063
+
1064
+ @property
1065
+ def nonzero_memo(self):
1066
+ if self._nonzero_memo is None:
1067
+ return None
1068
+ # Version counter based tracking isn't 100% sound but it's close
1069
+ # enough
1070
+ if self._nonzero_memo_vc != self._version:
1071
+ self._nonzero_memo = None
1072
+ return None
1073
+ return self._nonzero_memo
1074
+
1075
+ @property
1076
+ def device(self):
1077
+ if self.fake_mode.in_kernel_invocation:
1078
+ return torch.device("meta")
1079
+ else:
1080
+ return self.fake_device
1081
+
1082
+ # Note: [Fake Tensor Dispatch Keys]
1083
+ # In order to model the behavior of device-specific autocast
1084
+ # and autograd logic, we update the dispatch keys of FakeTensors
1085
+ # to reflect their fake device. This includes the BackendComponent
1086
+ # (DispatchKey::Meta -> DispatchKey::CUDA), and also the BackendComponent
1087
+ # related Autocast and Autograd keys. __torch__dispatch__ sits below
1088
+ # Autocast and Autograd, and is only invoked when we are at the
1089
+ # kernel for the BackendComponent. Then, we add Meta to the
1090
+ # thread-local dispatch include set to hit the meta kernel
1091
+ # instead of the kernel of the BackendComponent for the fake device.
1092
+ # The `device_for_backend_keys` does that below
1093
+ # NOTE: this probably will not do the right thing for backends
1094
+ # that have dispatch keys which are higher than the "meta" key:
1095
+ # https://github.com/pytorch/pytorch/blob/main/c10/core/DispatchKey.h#L189
1096
+
1097
+ @staticmethod
1098
+ def __new__(cls, fake_mode, elem, device, constant=None):
1099
+ self = torch.Tensor._make_subclass(
1100
+ cls,
1101
+ elem,
1102
+ elem.requires_grad,
1103
+ dispatch_device=True,
1104
+ device_for_backend_keys=device,
1105
+ )
1106
+
1107
+ assert elem.device.type == "meta", elem.device.type
1108
+ device = device if isinstance(device, torch.device) else torch.device(device)
1109
+ # NB: it is fine, if a little confusing, for device to be meta
1110
+ # (we are faking a meta tensor in that case). However, it often
1111
+ # indicates some sort of confusion (e.g., you accidentally passed
1112
+ # in a meta tensor when you should have passed in the real tensor).
1113
+ # So by default we disallow meta, and if you are working in a situation
1114
+ # where it is helpful (e.g., crossref testing) you can turn it back
1115
+ # on
1116
+ if not fake_mode.allow_meta:
1117
+ assert device.type != "meta"
1118
+ # normalize device.
1119
+ if device.type == "cuda":
1120
+ init_cuda_context()
1121
+
1122
+ if (
1123
+ device.type
1124
+ in ["cuda", "hpu", "xpu", torch._C._get_privateuse1_backend_name()]
1125
+ and device.index is None
1126
+ ):
1127
+ device = torch.device(
1128
+ f"{device.type}:{getattr(torch, device.type).current_device()}"
1129
+ )
1130
+ self.fake_device = device # type: ignore[attr-defined]
1131
+ self.fake_mode = fake_mode # type: ignore[attr-defined]
1132
+ self.constant = constant # type: ignore[attr-defined]
1133
+ self._nonzero_memo = None # type: ignore[attr-defined]
1134
+ self._nonzero_memo_vc = None # type: ignore[attr-defined]
1135
+
1136
+ if FakeTensorConfig.debug:
1137
+ import traceback
1138
+
1139
+ self._debug_trace = traceback.extract_stack() # type: ignore[attr-defined]
1140
+ return self
1141
+
1142
+ # In some circumstances, a conventional torch.Tensor constructor
1143
+ # will get rewritten to call into FakeTensor. We must provide an
1144
+ # __init__ method that can accept the Python interpreter's initialization
1145
+ # in such a situation; we must also be able to handle direct fake
1146
+ # tensor construction via FakeTensor().
1147
+ #
1148
+ # In particular, the __init__ call will look funny in the following case:
1149
+ #
1150
+ # with FakeTensorMode():
1151
+ # x = torch.Tensor([1, 2, 3])
1152
+ #
1153
+ # this desugars into:
1154
+ #
1155
+ # with FakeTensorMode():
1156
+ # x = torch.Tensor.__new__([1, 2, 3])
1157
+ # # NB: x is a fake tensor, because of the mode!
1158
+ # x.__init__([1, 2, 3]) # not the normal fake tensor args!
1159
+ #
1160
+ def __init__(self, *args, **kwargs):
1161
+ super().__init__()
1162
+
1163
+ @staticmethod
1164
+ def from_tensor(t, fake_mode):
1165
+ return fake_mode.from_tensor(t)
1166
+
1167
+ @classmethod
1168
+ @count
1169
+ def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
1170
+ # need to handle here to avoid infinite recursion
1171
+ # see [in_kernel_invocation]
1172
+ if func == torch.ops.prim.device.default:
1173
+ assert len(args) == 1 and isinstance(args[0], FakeTensor)
1174
+ if args[0].fake_mode.in_kernel_invocation:
1175
+ return torch.device("meta")
1176
+ else:
1177
+ return args[0].fake_device
1178
+
1179
+ # Because fake mode can return NotImplemented (if it sees a subclass
1180
+ # it doesn't know how to deal with), this test here is important
1181
+ # because the next dispatch after a fake mode will attempt to use
1182
+ # subclasses of tensors to dispatch, and any FakeTensor arguments
1183
+ # will be considered eligible.
1184
+ unrecognized_types = [
1185
+ t for t in types if not issubclass(t, FakeTensor) and t is not torch.Tensor
1186
+ ]
1187
+ if unrecognized_types:
1188
+ not_implemented_log.debug(
1189
+ "FakeTensor unrecognized subclass(es): %s", unrecognized_types
1190
+ )
1191
+ return NotImplemented
1192
+
1193
+ fake_mode = None
1194
+ for arg in pytree.arg_tree_leaves(*args, **kwargs):
1195
+ if isinstance(arg, FakeTensor):
1196
+ fake_mode = arg.fake_mode
1197
+ break
1198
+
1199
+ assert fake_mode is not None
1200
+
1201
+ # If the fake mode is already active, don't try to reapply it!
1202
+ # NotImplemented is the right thing to return here, because the
1203
+ # typical situation this can occur is if ProxyTensorMode returned a
1204
+ # NotImplemented because of a not implemented subclass; we may have
1205
+ # unluckily attempted to hit FakeTensor's dispatch first,
1206
+ # NotImplemented lets us keep chaining until we find the actual
1207
+ # subclass
1208
+ maybe_cur_fake_mode = torch._C._get_dispatch_mode(
1209
+ torch._C._TorchDispatchModeKey.FAKE
1210
+ )
1211
+ if maybe_cur_fake_mode:
1212
+ not_implemented_log.debug(
1213
+ "FakeTensor mode already active: %s in %s",
1214
+ fake_mode,
1215
+ maybe_cur_fake_mode,
1216
+ )
1217
+ return NotImplemented
1218
+
1219
+ with fake_mode: # type: ignore[attr-defined]
1220
+ return func(*args, **kwargs)
1221
+
1222
+ @staticmethod
1223
+ def _find_common_device(func, flat_args) -> Tuple[torch.device, bool]:
1224
+ # Returns: (common_device, has_scalar_only_inputs)
1225
+
1226
+ # cpu - zero-dim tensors can be called in cuda kernels,
1227
+ # so overwrite the common_device if the only existing
1228
+ # device comes from a cpu zero-dim tensor
1229
+ common_device = None
1230
+ has_scalar_only_inputs = False
1231
+ is_cpu_zero_dim = None
1232
+
1233
+ def cpu_zero_dim(t):
1234
+ return t.device.type == "cpu" and t.dim() == 0
1235
+
1236
+ def merge_devices(t):
1237
+ nonlocal common_device
1238
+ nonlocal is_cpu_zero_dim
1239
+ if not isinstance(t, FakeTensor):
1240
+ return
1241
+
1242
+ if common_device is None:
1243
+ common_device = t.device
1244
+ is_cpu_zero_dim = cpu_zero_dim(t)
1245
+ return
1246
+
1247
+ t_is_cpu_zero_dim = cpu_zero_dim(t)
1248
+ if t.device == common_device:
1249
+ if is_cpu_zero_dim:
1250
+ is_cpu_zero_dim = t_is_cpu_zero_dim
1251
+ return
1252
+
1253
+ # mismatching devices !
1254
+ # if current tensor is cpu 0 dim, defer to existing device
1255
+ if t_is_cpu_zero_dim:
1256
+ return
1257
+
1258
+ # current device is from cpu 0 dim tensor, overwrite
1259
+ if is_cpu_zero_dim:
1260
+ common_device = t.device
1261
+ is_cpu_zero_dim = t_is_cpu_zero_dim
1262
+ return
1263
+
1264
+ # mismatching devices of non-zero dim tensors, throw
1265
+ # This might be valid behavior and may need to be explicitly modeled, e.g. reshape_as
1266
+ raise RuntimeError(
1267
+ f"Unhandled FakeTensor Device Propagation for {func}, found two different devices {common_device}, {t.device}"
1268
+ )
1269
+
1270
+ for arg in flat_args:
1271
+ merge_devices(arg)
1272
+
1273
+ # some functions that allow Python numbers to bind to Tensors
1274
+ # if we have failed to find a device, and we're running one of these operators,
1275
+ # we must have scalar only inputs
1276
+ if should_allow_numbers_as_tensors(func) and common_device is None:
1277
+ # ops with scalar only inputs always have result on cpu
1278
+ has_scalar_only_inputs = True
1279
+ common_device = torch.device("cpu")
1280
+
1281
+ assert common_device is not None, f"Could not find common device for {func}"
1282
+
1283
+ return common_device, has_scalar_only_inputs
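# A hedged illustration of the promotion rule above (illustrative only; the tensors
# are made up for the example and a CUDA build is needed to actually run it):
#
#   mode = FakeTensorMode()
#   gpu = mode.from_tensor(torch.randn(3, device="cuda"))
#   cpu_scalar = mode.from_tensor(torch.tensor(2.0))      # zero-dim CPU tensor
#   with mode:
#       out = gpu * cpu_scalar
#   out.fake_device                                        # -> device("cuda:0"); the zero-dim CPU operand defers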
1284
+
1285
+ # We must handle tolist in a special way for FakeTensors here in the case
1286
+ # where tolist is called from torch dispatch for tensor subclasses.
1287
+ # Ordinarily, if a program calls .tolist compiling still works because there is
1288
+ # special handling in dynamo, but for tensor subclasses if .tolist is called
1289
+ # inside torch dispatch, the .tolist call may be directly on a FakeTensor.
1290
+ # This would result in an error since wrapper subclasses don't have storage.
1291
+ # To avoid this, we handle the FakeTensor case by (1) specializing on the size
1292
+ # of the tensor to create the output Python list, and (2) creating unbacked
1293
+ # symints for each element of the list.
1294
+ def tolist(self):
1295
+ assert self.dim() == 1, "NYI for higher dims"
1296
+ shape_env = self.fake_mode.shape_env
1297
+ out = []
1298
+ # Specialize on the length of the list
1299
+ for _ in range(self.shape[0]):
1300
+ s = shape_env.create_unbacked_symint()
1301
+ # max value?
1302
+ torch._constrain_as_size(s, min=2)
1303
+ out.append(s)
1304
+ return out
1305
+
1306
+ __torch_function__ = torch._C._disabled_torch_function_impl
1307
+
1308
+
1309
+ # We keep one instantiation of `fake_tensor_converter` active
1310
+ # for the duration of `with FakeTensorMode()`.
1311
+ # This allows accurate storage aliasing across invocations of
1312
+ # different operators. While this will keep all freshly allocated
1313
+ # tensors alive during `FakeTensorMode`, there will be no
1314
+ # new allocations of Tensors which have non-meta storage so
1315
+ # memory should not significantly increase.
1316
+
1317
+
1318
+ class FakeTensorMode(TorchDispatchMode):
1319
+ def __init__(
1320
+ self,
1321
+ *,
1322
+ allow_fallback_kernels=True,
1323
+ allow_non_fake_inputs=False,
1324
+ shape_env=None,
1325
+ static_shapes=None,
1326
+ ):
1327
+ log.debug("create_mode 0x%x", id(self))
1328
+ self.allow_fallback_kernels = allow_fallback_kernels
1329
+ self.fake_tensor_converter = FakeTensorConverter()
1330
+ if static_shapes is not None:
1331
+ self.static_shapes = static_shapes
1332
+ else:
1333
+ self.static_shapes = shape_env is None
1334
+
1335
+ import torch._functorch.config
1336
+
1337
+ self.allow_meta = torch._functorch.config.fake_tensor_allow_meta
1338
+
1339
+ # A flag that controls whether we want to invoke ops on a mix of
1340
+ # real weights/global variables and fake inputs
1341
+ self.allow_non_fake_inputs = allow_non_fake_inputs
1342
+
1343
+ # [in_kernel_invocation]
1344
+ # when FakeTensor is invoked in user code, .device should return
1345
+ # the fake_device of the tensor so that code such as `if x.is_cuda`
1346
+ # or torch.zeros([10, 10], device=x.device) continues to execute as if
1347
+ # the FakeTensor were real. However, within kernel execution, we return
1348
+ # the `Meta` device because all computation within the kernels should
1349
+ # behave as if the Tensors are on meta devices. Kernels should allocate
1350
+ # new tensors on meta devices, and checks like `is_meta` should return true.
1351
+ # within python refs, we always return the real device by defining
1352
+ # the device property
1353
+ self.in_kernel_invocation = False
1354
+
1355
+ # True if we enter'ed and actually enabled fake tensor mode,
1356
+ # false if it was a no-op. Not thread safe but neither is
1357
+ # in_kernel_invocation
1358
+ # If another fake mode was already active when we enter, we also stash it here.
1359
+ # That way when we exit, we know to re-enable the previous fake mode.
1360
+ self.enter_stack: List[Tuple[bool, Optional[FakeTensorMode]]] = []
1361
+
1362
+ self.shape_env = shape_env
1363
+
1364
+ self.stack = "".join(traceback.format_stack())
1365
+
1366
+ # Indicates to our torch_dispatch dispatching infra that
1367
+ # this is an "infra" mode with lower dispatching precedence.
1368
+ self._mode_key = torch._C._TorchDispatchModeKey.FAKE
1369
+
1370
+ # Typically, there is only one fake tensor mode and you test for it by
1371
+ # doing an isinstance test. However, in some situations, there might be
1372
+ # TWO fake tensor modes. The canonical example of this is exporting
1373
+ # a fake model: there is an outer fake mode created by the user, and
1374
+ # an inner fake mode created by Dynamo. The two phase process is required
1375
+ # because the outer fake mode typically won't have a ShapeEnv, even if
1376
+ # the user is interested in exporting with dynamic shapes (so the inner
1377
+ # fake mode will actually have a ShapeEnv and swap in symbolic sizes.)
1378
+ #
1379
+ # In this case, it's insufficient to test only one FakeTensor: you need
1380
+ # to distinguish between our fake tensor and other fake tensors. That's
1381
+ # what this function does.
1382
+ def is_our_fake(self, t):
1383
+ return isinstance(t, FakeTensor) and t.fake_mode is self
1384
+
1385
+ @count
1386
+ def __torch_dispatch__(self, func, types, args=(), kwargs=None):
1387
+ # FakeTensorMode should not be set when we're inside of it.
1388
+ assert (
1389
+ torch._C._get_dispatch_mode(torch._C._TorchDispatchModeKey.FAKE) is None
1390
+ ), func
1391
+ try:
1392
+ return self.dispatch(func, types, args, kwargs)
1393
+ except TypeError:
1394
+ log.exception("fake tensor raised TypeError")
1395
+ raise
1396
+
1397
+ # No-op if FakeTensorMode is already in use
1398
+ def __enter__(self):
1399
+ maybe_prev_fake_mode = torch._C._unset_dispatch_mode(self._mode_key)
1400
+ if self is not maybe_prev_fake_mode:
1401
+ self.enter_stack.append((True, maybe_prev_fake_mode))
1402
+ return super().__enter__()
1403
+ else:
1404
+ # no-op (still need to re-set the fake mode though since we unset it)
1405
+ torch._C._set_dispatch_mode(self)
1406
+ self.enter_stack.append((False, None))
1407
+ return self
1408
+
1409
+ def __exit__(self, a, b, c):
1410
+ live, maybe_prev_fake_mode = self.enter_stack.pop()
1411
+ if live:
1412
+ out = super().__exit__(a, b, c)
1413
+ # Re-enable the previous fake mode, if there was one.
1414
+ if maybe_prev_fake_mode is not None:
1415
+ torch._C._set_dispatch_mode(maybe_prev_fake_mode)
1416
+
1417
+ def dispatch(self, func, types, args=(), kwargs=None):
1418
+ kwargs = kwargs if kwargs else {}
1419
+ log.debug("%s %s %s", func, args, kwargs)
1420
+
1421
+ if func == torch.ops.prim.device.default:
1422
+ # NB: Don't use is_our_fake, just serve the fake information
1423
+ # as is. Notice we don't use 'self'; we use args[0].fake_mode
1424
+ # because they may not be the same. It would also be possible
1425
+ # to return NotImplemented here, in which case the FakeTensor
1426
+ # handler on args[0] would handle it, but we're being nice and
1427
+ # short-circuiting quickly.
1428
+ assert len(args) == 1 and isinstance(args[0], FakeTensor)
1429
+ if args[0].fake_mode.in_kernel_invocation:
1430
+ return torch.device("meta")
1431
+ else:
1432
+ return args[0].fake_device
1433
+ elif func is torch.ops.aten.size.default:
1434
+ return tuple(int(s) for s in args[0].size())
1435
+ elif func is torch.ops.aten.stride.default:
1436
+ return tuple(int(s) for s in args[0].stride())
1437
+ elif func is torch.ops.aten.storage_offset.default:
1438
+ return int(args[0].storage_offset())
1439
+
1440
+ if log.getEffectiveLevel() <= logging.DEBUG:
1441
+ log.debug(
1442
+ "%sFakeTensorMode.__torch_dispatch__: %s", " " * RECURSION_COUNT, func
1443
+ )
1444
+ incr = IncrementRecursionCount()
1445
+
1446
+ # Some attribute queries that can be serviced directly
1447
+ # See Note [is_coalesced is dispatched]
1448
+ if func in {
1449
+ torch.ops.aten.is_coalesced.default,
1450
+ torch.ops.aten.dense_dim.default,
1451
+ torch.ops.aten.sparse_dim.default,
1452
+ }:
1453
+ # NB: no_dispatch is ok here too, this func is very simple
1454
+ with in_kernel_invocation_manager(self):
1455
+ return func(*args, **kwargs)
1456
+
1457
+ flat_args, args_spec = pytree.tree_flatten((args, kwargs))
1458
+
1459
+ flat_arg_fake_tensors = [
1460
+ t for t in flat_args if isinstance(t, FakeTensor) and self.is_our_fake(t)
1461
+ ]
1462
+ has_symbolic_sizes = any(
1463
+ i._has_symbolic_sizes_strides for i in flat_arg_fake_tensors
1464
+ ) or any(isinstance(a, torch.SymInt) for a in flat_args)
1465
+
1466
+ converter = self.fake_tensor_converter
1467
+
1468
+ def maybe_to_constant(t):
1469
+ if isinstance(t, FakeTensor) and self.is_our_fake(t):
1470
+ return t.constant
1471
+ else:
1472
+ return t
1473
+
1474
+ # To constant propagate through these functions:
1475
+ # 1, If this is a lift due to a torch.tensor call,
1476
+ # the input tensor is guaranteed to be a
1477
+ # constant, so we keep a copy of the original argument along so
1478
+ # we can query it if we're asked to item() it at some later point.
1479
+ # (Note that you can always call a lift fn manually, so we do
1480
+ # have to check if there are any fake tensors!)
1481
+ # 2, Some functions that allow Python numbers to bind to Tensors, e.g., torch.div
1482
+ if (func in self.lift_fns and not flat_arg_fake_tensors) or (
1483
+ should_allow_numbers_as_tensors(func)
1484
+ and not has_symbolic_sizes
1485
+ and not flat_arg_fake_tensors
1486
+ ):
1487
+ assert all(
1488
+ t.constant is not None for t in flat_arg_fake_tensors
1489
+ ), f"{func} should not have fake inputs without constants"
1490
+ const_flat_args = [maybe_to_constant(a) for a in flat_args]
1491
+ const_args, const_kwargs = pytree.tree_unflatten(const_flat_args, args_spec)
1492
+ out = func(*const_args, **const_kwargs)
1493
+ if type(out) is torch.Tensor and self.may_turn_const(out):
1494
+ # NB: not in_kernel_invocation_manager because we're doing real
1495
+ # compute here
1496
+ # NB: no_dispatch() here is VERY DANGEROUS (like, segfault
1497
+ # dangerous) if this is actually a wrapper subclass tensor,
1498
+ # therefore the exact type test above
1499
+ with no_dispatch():
1500
+ out = out.clone()
1501
+ return converter(self, out, make_constant=True)
1502
+
1503
+ # See [subclass inputs] below
1504
+ # NB: If you're seeing a mysterious infinite loop involving fake
1505
+ # tensor, it might be related to this line. Though I'm not sure
1506
+ # how you'll know to read this comment, as this line won't show up
1507
+ # in the stack trace.
1508
+ unrecognized_types = self.check_for_subclass(flat_args)
1509
+ if unrecognized_types:
1510
+ not_implemented_log.debug(
1511
+ "FakeTensorMode unrecognized subclass(es): %s", unrecognized_types
1512
+ )
1513
+ return NotImplemented
1514
+
1515
+ # if we are in the dispatch mode, we will enter this function even if the inputs
1516
+ # are not FakeTensors. For now, throw if there are any non-Fake Tensor inputs
1517
+ # and just support constructors.
1518
+
1519
+ # this is generated from torch.tensor(), which does not use the
1520
+ # dispatcher, to allow wrapper subclasses to wrap the new tensor
1521
+ if func in self.lift_fns:
1522
+ assert len(kwargs) == 0 and len(args) == 1, f"{args} {kwargs}"
1523
+
1524
+ if type(args[0]) is torch.Tensor:
1525
+ return converter(self, args[0])
1526
+
1527
+ # Recompute flat_arg_fake_tensors here again in case some of the inputs
1528
+ # were real tensors and fakified in validate_and_convert_non_fake_tensors
1529
+ (flat_args, flat_arg_fake_tensors) = self.validate_and_convert_non_fake_tensors(
1530
+ func, converter, flat_args, args_spec
1531
+ )
1532
+ del args, kwargs # Invalidated
1533
+
1534
+ # The current constant handling only supports tracing systems
1535
+ # (aot autograd, torchdynamo) where each operation is run consecutively.
1536
+ # Because each operation is run in order, we can trace out and support
1537
+ # sequences like: x = torch.tensor(0.); y = x.add_(1)
1538
+ # Whenever a constant is written to but with inputs that cannot be evaluated
1539
+ # statically, such as random_(), we invalidate all constants that alias the input
1540
+ # We will rely on functionalization for use of fake tensor constants as persistent
1541
+ # objects on an FX Graph.
1542
+
1543
+ # We dispatch size/stride/numel on the FakeTensor not its constant, so bail on inplace_view
1544
+ all_constant = all(e.constant is not None for e in flat_arg_fake_tensors)
1545
+ if (
1546
+ torch.Tag.nondeterministic_seeded not in func.tags
1547
+ and torch.Tag.inplace_view not in func.tags
1548
+ and all_constant
1549
+ and len(flat_arg_fake_tensors) != 0
1550
+ and not has_symbolic_sizes
1551
+ ):
1552
+ const_flat_args = [maybe_to_constant(a) for a in flat_args]
1553
+ const_args, const_kwargs = pytree.tree_unflatten(const_flat_args, args_spec)
1554
+
1555
+ # NB: not in_kernel_invocation_manager(self) as we want to do REAL
1556
+ # compute
1557
+ with no_dispatch():
1558
+ out = func(*const_args, **const_kwargs)
1559
+
1560
+ flat_out = pytree.tree_leaves(out)
1561
+ flat_out_tensors = [t for t in flat_out if isinstance(t, torch.Tensor)]
1562
+ all_constant = all(self.may_turn_const(t) for t in flat_out_tensors)
1563
+
1564
+ if all_constant:
1565
+ return pytree.tree_map_only(
1566
+ torch.Tensor,
1567
+ lambda t: converter(self, t, make_constant=True),
1568
+ out,
1569
+ )
1570
+
1571
+ # we weren't able to turn outputs to constants,
1572
+ # so invalidate all constants that might be aliases of the outputs
1573
+ for ten in flat_out_tensors:
1574
+ converter.invalidate_constant_aliases(ten)
1575
+
1576
+ # we are falling through to running non constant tensors, any input constant that
1577
+ # is written to must be invalidated
1578
+ args, kwargs = pytree.tree_unflatten(flat_args, args_spec)
1579
+ self.invalidate_written_to_constants(func, flat_arg_fake_tensors, args, kwargs)
1580
+
1581
+ # Try for fastpath
1582
+ if has_symbolic_sizes:
1583
+ fast_impl = get_fast_op_impls().get(func)
1584
+ if fast_impl is not None:
1585
+ return fast_impl(self, *args, **kwargs)
1586
+
1587
+ # If there's a Python meta, prefer that over the decomposition
1588
+ from torch._decomp import meta_table as meta_table
1589
+
1590
+ if func not in meta_table and not self.cpp_meta_supports_symint(func):
1591
+ from torch._decomp import decomposition_table
1592
+
1593
+ # Prefer Python decompositions over C++ ones
1594
+ if func in decomposition_table and (
1595
+ has_symbolic_sizes
1596
+ or (
1597
+ # TODO: Remove these exclusions, so that we can remove
1598
+ # this leg entirely
1599
+ torch_decomp_decompositions(func)
1600
+ and all(not e.is_sparse for e in flat_arg_fake_tensors)
1601
+ )
1602
+ ):
1603
+ with self:
1604
+ return decomposition_table[func](*args, **kwargs)
1605
+
1606
+ with self:
1607
+ # Decomposes CompositeImplicitAutograd ops
1608
+ r = func.decompose(*args, **kwargs)
1609
+ if r is not NotImplemented:
1610
+ return r
1611
+
1612
+ # prims already wrap FakeTensor inputs to FakeTensor outputs
1613
+ # and do device logic, we don't need to do anything but run them
1614
+ # and ensure that Meta kernels are dispatched to
1615
+ # (see Note [Fake Tensor Dispatch Keys])
1616
+ # TODO - we should use the prim aten impl
1617
+ # TODO - fix prims complex ops
1618
+ if (
1619
+ "prims::" in func._schema.name
1620
+ and hasattr(func, "prim_meta_impl")
1621
+ and not stride_incorrect_op(func)
1622
+ ):
1623
+ with self:
1624
+ return func.prim_meta_impl(*args, **kwargs)
1625
+
1626
+ # Users can register FakeTensor rules for custom operators
1627
+ # Call them if they exist.
1628
+ maybe_abstract_impl = torch._library.simple_registry.singleton.find(
1629
+ func.name()
1630
+ ).abstract_impl.kernel
1631
+ if maybe_abstract_impl:
1632
+ ctx = torch._library.abstract_impl.AbstractImplCtx(self.shape_env, func)
1633
+ with torch._library.abstract_impl.set_ctx_getter(lambda: ctx), self:
1634
+ result = maybe_abstract_impl(*args, **kwargs)
1635
+ return result
1636
+
1637
+ # special handling for funcs registered through `register_op_impl`,
1638
+ # e.g., manipulating args on constructor calls to construct meta tensors
1639
+ # and then afterwards wrapping them to a FakeTensor
1640
+ for run_impl_check, op_impl in op_implementations:
1641
+ if func in (
1642
+ aten._nested_tensor_from_tensor_list.default,
1643
+ aten._nested_tensor_from_tensor_list.out,
1644
+ ):
1645
+ raise UnsupportedOperatorException(
1646
+ "torch.compile does not support strided NestedTensor"
1647
+ )
1648
+ if run_impl_check(func):
1649
+ op_impl_out = op_impl(self, func, *args, **kwargs)
1650
+ if op_impl_out != NotImplemented:
1651
+ return op_impl_out
1652
+
1653
+ def can_run_unsafe_fallback(func: OpOverload):
1654
+ if not self.allow_fallback_kernels:
1655
+ return False
1656
+ # It's OK to try the fallback for built-in ops (e.g. aten, prims)
1657
+ # because we control and test these but the fallback leads to unexpected behavior
1658
+ # in user-defined custom ops
1659
+ #
1660
+ # WARNING: DO NOT add any additional namespaces/operators here if they refer to operators
1661
+ # outside of the pytorch/pytorch library! Any pre-existing things here
1662
+ # are either in the pytorch/pytorch library or have been grandfathered in.
1663
+ # The fallback does not always work and MAY CRASH and emit unreadable error messages
1664
+ # so it should not be allowed by default.
1665
+ allowed_namespaces = {
1666
+ "debugprims",
1667
+ "prims",
1668
+ "aten",
1669
+ "xla",
1670
+ "vision",
1671
+ "torchtext",
1672
+ "torchaudio",
1673
+ "quantized",
1674
+ }
1675
+ grandfathered_ops_FIXME = {
1676
+ "fbgemm::gmm",
1677
+ }
1678
+ return (
1679
+ func.namespace in allowed_namespaces
1680
+ or func.name() in grandfathered_ops_FIXME
1681
+ )
1682
+
1683
+ def maybe_run_unsafe_fallback(error=None):
1684
+ # We infer the meta of custom ops that return None to just
1685
+ # return None. custom ops are not allowed to mutate metadata
1686
+ # of their inputs, so this is safe.
1687
+ from torch._higher_order_ops.auto_functionalize import (
1688
+ can_auto_functionalize,
1689
+ )
1690
+
1691
+ if can_auto_functionalize(func):
1692
+ return None
1693
+ # no meta kernel registered, fallback to kernel for the device
1694
+ if has_symbolic_sizes or not can_run_unsafe_fallback(func):
1695
+ raise UnsupportedOperatorException(func)
1696
+ if error is None:
1697
+ error = UnsupportedOperatorException(func)
1698
+ return run_fallback_kernel(self, func, flat_args, args_spec, error)
1699
+
1700
+ # Optimization: If there is no Meta kernel, it takes a surprisingly long
1701
+ # amount of time to catch the NotImplementedError, so we check it here.
1702
+ if not torch._C._dispatch_has_computed_kernel_for_dispatch_key(
1703
+ func.name(), "Meta"
1704
+ ):
1705
+ return maybe_run_unsafe_fallback()
1706
+
1707
+ # run the kernel registered to Meta for func, which includes
1708
+ # python meta registrations, prims, decomps, and c++ meta fns (structured kernels)
1709
+ # It's possible that the kernel will raise NotImplementedError
1710
+ try:
1711
+ with in_kernel_invocation_manager(self):
1712
+ r = func(*args, **kwargs)
1713
+ except NotImplementedError as not_implemented_error:
1714
+ return maybe_run_unsafe_fallback(not_implemented_error)
1715
+
1716
+ return self.wrap_meta_outputs_with_default_device_logic(
1717
+ r, func, flat_args, device=kwargs.get("device")
1718
+ )
1719
+
1720
+ # [subclass inputs]
1721
+ # Suppose we enable fake tensor mode. This means that fake tensor
1722
+ # mode will run first. But what if we do an operation that
1723
+ # involves a tensor subclass that will desugar into normal tensor
1724
+ # operations? Without returning NotImplemented, fake tensor mode will run first,
1725
+ # decide that a conversion was made (since there was a non fake
1726
+ # tensor argument), and report an error that converting non
1727
+ # fake tensor is not supported. What we actually wanted to happen
1728
+ # was to give the subclass a chance to figure out what it wants to
1729
+ # before erroring out. Returning NotImplemented here allows this.
1730
+ def check_for_subclass(self, flat_args):
1731
+ def check(x):
1732
+ return (
1733
+ isinstance(x, torch.Tensor)
1734
+ and not isinstance(x, FakeTensor)
1735
+ and type(x) is not torch.Tensor
1736
+ and type(x) is not torch.nn.Parameter
1737
+ )
1738
+
1739
+ return [type(x) for x in flat_args if check(x)]
1740
+
1741
+ def validate_and_convert_non_fake_tensors(
1742
+ self, func, converter, flat_args, args_spec
1743
+ ):
1744
+ """
1745
+ Checks whether the tensors in the list are fake tensors.
1746
+ If not, try to convert them to fake tensors.
1747
+ Returns the original args, kwargs, and a flattened list of (args, kwargs) that are fake tensors.
1748
+ """
1749
+ flat_arg_fake_tensors = []
1750
+
1751
+ def validate(x):
1752
+ if not isinstance(x, torch.Tensor):
1753
+ return x
1754
+
1755
+ nonlocal flat_arg_fake_tensors
1756
+ if not self.is_our_fake(x):
1757
+ if torch.Tag.inplace_view in func.tags:
1758
+ args, kwargs = pytree.tree_unflatten(flat_args, args_spec)
1759
+ raise Exception(
1760
+ f"Can't call metadata mutating ops on non-Fake Tensor inputs. Found in {render_call(func, args, kwargs)}"
1761
+ )
1762
+ if not self.allow_non_fake_inputs:
1763
+ if isinstance(x, FakeTensor) and x.fake_mode is not self:
1764
+ raise AssertionError("Mixing fake modes NYI")
1765
+ args, kwargs = pytree.tree_unflatten(flat_args, args_spec)
1766
+ raise Exception(
1767
+ f"Please convert all Tensors to FakeTensors first or instantiate FakeTensorMode "
1768
+ f"with 'allow_non_fake_inputs'. Found in {render_call(func, args, kwargs)}"
1769
+ )
1770
+
1771
+ x = converter(self, x)
1772
+
1773
+ flat_arg_fake_tensors.append(x)
1774
+ return x
1775
+
1776
+ validated_args = [validate(a) for a in flat_args]
1777
+ return validated_args, flat_arg_fake_tensors
1778
+
1779
+ def wrap_meta_outputs_with_default_device_logic(self, r, func, flat_args, device):
1780
+ converter = self.fake_tensor_converter
1781
+
1782
+ # Lazily initialized, in case there are no tensor returns
1783
+ common_device = None
1784
+ has_scalar_only_inputs = False
1785
+
1786
+ def wrap(e):
1787
+ nonlocal common_device
1788
+ nonlocal has_scalar_only_inputs
1789
+
1790
+ if isinstance(e, torch.Tensor) and common_device is None:
1791
+ (
1792
+ common_device,
1793
+ has_scalar_only_inputs,
1794
+ ) = FakeTensor._find_common_device(func, flat_args)
1795
+
1796
+ if self.is_our_fake(e):
1797
+ torch._check(
1798
+ e.device == common_device,
1799
+ lambda: f"FakeTensor is wrapped to wrong device, found {e.device}, expected {common_device}",
1800
+ )
1801
+
1802
+ if (
1803
+ isinstance(e, torch.Tensor)
1804
+ and not self.is_our_fake(e)
1805
+ and converter is not None
1806
+ ):
1807
+ if has_scalar_only_inputs:
1808
+ # Under FakeTensorMode, op accepts scalar only inputs, such as aten.add/sub/mul/div,
1809
+ # returns a real scalar tensor on CPU. See TensorMeta() in _prims/__init__.py for details.
1810
+ # We thus directly convert real tensor to fake tensor.
1811
+ return converter(self, e)
1812
+ else:
1813
+ return converter.from_meta_and_device(
1814
+ self, e, device or common_device
1815
+ )
1816
+ else:
1817
+ return e
1818
+
1819
+ return tree_map(wrap, r)
1820
+
1821
+ def cpp_meta_supports_symint(self, func):
1822
+ if torch.Tag.view_copy in func.tags:
1823
+ return True
1824
+ return func in [
1825
+ aten.empty.memory_format,
1826
+ aten.empty_strided.default,
1827
+ aten.as_strided_scatter.default,
1828
+ aten.as_strided.default,
1829
+ aten.as_strided_.default,
1830
+ aten.zeros.default,
1831
+ aten.detach.default,
1832
+ aten.view_as_real.default,
1833
+ aten.view_as_complex.default,
1834
+ aten.set_.source_Storage_storage_offset,
1835
+ aten._sparse_coo_tensor_with_dims_and_tensors.default,
1836
+ ]
1837
+
1838
+ @property
1839
+ def lift_fns(self):
1840
+ return (aten.lift_fresh.default, aten.lift_fresh_copy.default)
1841
+
1842
+ def may_turn_const(self, t):
1843
+ return (
1844
+ t.numel() <= CONSTANT_NUMEL_LIMIT
1845
+ and not t.is_sparse
1846
+ and not self.is_our_fake(t)
1847
+ and not t.device.type == "meta"
1848
+ )
1849
+
1850
+ def invalidate_written_to_constants(
1851
+ self, func, flat_arg_fake_tensors, args, kwargs
1852
+ ):
1853
+ any_constant = any(e.constant is not None for e in flat_arg_fake_tensors)
1854
+ schema_info = get_schema_info(func)
1855
+ if any_constant and schema_info.is_mutable():
1856
+ _, new_kwargs = normalize_function(
1857
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
1858
+ )
1859
+ for k, v in new_kwargs.items():
1860
+ k = k if (k != "input" or schema_info.has_argument(k)) else "self"
1861
+ if (
1862
+ self.is_our_fake(v)
1863
+ and schema_info.is_mutable(k)
1864
+ and v.constant is not None
1865
+ ):
1866
+ self.fake_tensor_converter.invalidate_constant_aliases(v.constant)
1867
+
1868
+ def from_tensor(
1869
+ self,
1870
+ tensor,
1871
+ *,
1872
+ static_shapes=None,
1873
+ source: Optional[Source] = None,
1874
+ symbolic_context=None,
1875
+ # Setting this flag will force FakeTensorMode to return `None` if attempting to convert a tensor we have not
1876
+ # seen before.
1877
+ memoized_only=False,
1878
+ ):
1879
+ shape_env = self.shape_env
1880
+ if static_shapes is None:
1881
+ static_shapes = self.static_shapes
1882
+ if static_shapes:
1883
+ assert (
1884
+ symbolic_context is None
1885
+ ), "cannot set both static_shapes and symbolic_context"
1886
+ shape_env = None
1887
+ # see note [Tensor Fakification and Symbol Caching]
1888
+ if not symbolic_context and not source and not static_shapes:
1889
+ if tracing_context := torch._guards.TracingContext.try_get():
1890
+ if tensor in tracing_context.tensor_to_context:
1891
+ symbolic_context = tracing_context.tensor_to_context[tensor]
1892
+ source = symbolic_context.tensor_source
1893
+ return self.fake_tensor_converter(
1894
+ self,
1895
+ tensor,
1896
+ shape_env=shape_env,
1897
+ source=source,
1898
+ symbolic_context=symbolic_context,
1899
+ memoized_only=memoized_only,
1900
+ )
1901
+
1902
+
1903
+ # NB: returns fake tensors
1904
+ def run_fallback_kernel(
1905
+ fake_mode, func, flat_args, args_spec, orig_not_implemented_exception
1906
+ ):
1907
+ # these should all be supported, just to be safe
1908
+ # avoid fallback for operators which inplace modify metadata
1909
+ # because the input fake tensors would be unmodified
1910
+ if torch.Tag.inplace_view in func.tags:
1911
+ raise orig_not_implemented_exception
1912
+
1913
+ inp_impls = {}
1914
+
1915
+ # Don't use in_kernel_invocation_manager(fake_mode) as we want to do
1916
+ # REAL compute (not with meta device)
1917
+ with no_dispatch():
1918
+
1919
+ def to_real_tensor(e):
1920
+ if fake_mode.is_our_fake(e):
1921
+ out = torch.zeros_like(e, device=e.fake_device)
1922
+ if e.is_sparse:
1923
+ out._coalesced_(e.is_coalesced())
1924
+ inp_impls[id(out)] = e
1925
+ return out
1926
+ return e
1927
+
1928
+ flat_args = [to_real_tensor(a) for a in flat_args]
1929
+ args, kwargs = pytree.tree_unflatten(flat_args, args_spec)
1930
+
1931
+ r = func(*args, **kwargs)
1932
+
1933
+ tensor_impls = set()
1934
+ storages = set()
1935
+
1936
+ for e in flat_args:
1937
+ if isinstance(e, torch.Tensor):
1938
+ if not e.is_sparse:
1939
+ storages.add(e._typed_storage()._cdata)
1940
+
1941
+ # TODO: also check metadata change on inputs
1942
+ # proper aliasing/metadata relationship between outputs and inputs will
1943
+ # not be set up, bc of conversion to device, unless we can reuse an
1944
+ # input impl
1945
+
1946
+ def map_out(e):
1947
+ if id(e) not in inp_impls and (
1948
+ isinstance(e, torch.Tensor)
1949
+ and not e.is_sparse
1950
+ and e._typed_storage()._cdata in storages
1951
+ ):
1952
+ raise orig_not_implemented_exception
1953
+
1954
+ if isinstance(e, torch.Tensor):
1955
+ if id(e) in inp_impls:
1956
+ return inp_impls[id(e)]
1957
+ else:
1958
+ return fake_mode.fake_tensor_converter(fake_mode, e)
1959
+ else:
1960
+ return e
1961
+
1962
+ return pytree.tree_map(map_out, r)
1963
+
1964
+
1965
+ # Only used to allow copying a module to fake tensors;
1966
+ # does not apply elsewhere
1967
+ class FakeCopyMode(TorchFunctionMode):
1968
+ def __init__(self, fake_mode):
1969
+ self.fake_mode = fake_mode
1970
+
1971
+ def __torch_function__(self, func, types, args=(), kwargs=None):
1972
+ kwargs = kwargs if kwargs else {}
1973
+
1974
+ # clone will get called in Parameter deepcopy
1975
+ if func == torch._C.TensorBase.clone:
1976
+ return func(
1977
+ self.fake_mode.from_tensor(args[0], static_shapes=True), **kwargs
1978
+ )
1979
+ elif func == torch.Tensor.__deepcopy__:
1980
+ assert len(args) == 2 and len(kwargs) == 0
1981
+ tensor, memo = args
1982
+
1983
+ if id(tensor) in memo:
1984
+ return memo[id(tensor)]
1985
+
1986
+ out = self.fake_mode.from_tensor(tensor, static_shapes=True)
1987
+ memo[id(tensor)] = out
1988
+ return out
1989
+ else:
1990
+ with torch._C.DisableTorchFunctionSubclass():
1991
+ return func(*args, **kwargs)
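A minimal usage sketch for the two classes in this file (the module and shapes are made up for the example; only the APIs defined above are used):

import copy
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, FakeCopyMode

fake_mode = FakeTensorMode()

# Convert a real tensor and run an op without allocating real storage.
x = fake_mode.from_tensor(torch.randn(4, 8))
with fake_mode:
    y = x @ x.T            # dispatched to meta kernels; only shape/dtype are computed
print(y.shape, y.device)   # torch.Size([4, 4]) cpu  (the fake_device)

# Copy a module's parameters over to fake tensors via deepcopy.
module = torch.nn.Linear(8, 2)
with FakeCopyMode(fake_mode):
    fake_module = copy.deepcopy(module)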
env-llmeval/lib/python3.10/site-packages/torch/backends/cpu/__init__.py ADDED
@@ -0,0 +1,19 @@
1
+ import torch
2
+
3
+ __all__ = [
4
+ "get_cpu_capability",
5
+ ]
6
+
7
+
8
+ def get_cpu_capability() -> str:
9
+ r"""Return cpu capability as a string value.
10
+
11
+ Possible values:
12
+ - "DEFAULT"
13
+ - "VSX"
14
+ - "Z VECTOR"
15
+ - "NO AVX"
16
+ - "AVX2"
17
+ - "AVX512"
18
+ """
19
+ return torch._C._get_cpu_capability()
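Usage is a single call; the output is machine-dependent:

import torch
print(torch.backends.cpu.get_cpu_capability())  # e.g. "AVX2" or "AVX512"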
env-llmeval/lib/python3.10/site-packages/torch/backends/cpu/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (540 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/backends/cuda/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (12 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/backends/cudnn/__init__.py ADDED
@@ -0,0 +1,205 @@
1
+ import os
2
+ import sys
3
+ import warnings
4
+ from contextlib import contextmanager
5
+
6
+ import torch
7
+ from torch.backends import __allow_nonbracketed_mutation, ContextProp, PropModule
8
+
9
+ try:
10
+ from torch._C import _cudnn
11
+ except ImportError:
12
+ _cudnn = None # type: ignore[assignment]
13
+
14
+ # Write:
15
+ #
16
+ # torch.backends.cudnn.enabled = False
17
+ #
18
+ # to globally disable CuDNN/MIOpen
19
+
20
+ __cudnn_version = None
21
+
22
+ if _cudnn is not None:
23
+
24
+ def _init():
25
+ global __cudnn_version
26
+ if __cudnn_version is None:
27
+ __cudnn_version = _cudnn.getVersionInt()
28
+ runtime_version = _cudnn.getRuntimeVersion()
29
+ compile_version = _cudnn.getCompileVersion()
30
+ runtime_major, runtime_minor, _ = runtime_version
31
+ compile_major, compile_minor, _ = compile_version
32
+ # Different major versions are always incompatible
33
+ # Starting with cuDNN 7, minor versions are backwards-compatible
34
+ # Not sure about MIOpen (ROCm), so always do a strict check
35
+ if runtime_major != compile_major:
36
+ cudnn_compatible = False
37
+ elif runtime_major < 7 or not _cudnn.is_cuda:
38
+ cudnn_compatible = runtime_minor == compile_minor
39
+ else:
40
+ cudnn_compatible = runtime_minor >= compile_minor
41
+ if not cudnn_compatible:
42
+ if os.environ.get("PYTORCH_SKIP_CUDNN_COMPATIBILITY_CHECK", "0") == "1":
43
+ return True
44
+ base_error_msg = (
45
+ f"cuDNN version incompatibility: "
46
+ f"PyTorch was compiled against {compile_version} "
47
+ f"but found runtime version {runtime_version}. "
48
+ f"PyTorch already comes bundled with cuDNN. "
49
+ f"One option to resolving this error is to ensure PyTorch "
50
+ f"can find the bundled cuDNN. "
51
+ )
52
+
53
+ if "LD_LIBRARY_PATH" in os.environ:
54
+ ld_library_path = os.environ.get("LD_LIBRARY_PATH", "")
55
+ if any(
56
+ substring in ld_library_path for substring in ["cuda", "cudnn"]
57
+ ):
58
+ raise RuntimeError(
59
+ f"{base_error_msg}"
60
+ f"Looks like your LD_LIBRARY_PATH contains incompatible version of cudnn. "
61
+ f"Please either remove it from the path or install cudnn {compile_version}"
62
+ )
63
+ else:
64
+ raise RuntimeError(
65
+ f"{base_error_msg}"
66
+ f"one possibility is that there is a "
67
+ f"conflicting cuDNN in LD_LIBRARY_PATH."
68
+ )
69
+ else:
70
+ raise RuntimeError(base_error_msg)
71
+
72
+ return True
73
+
74
+ else:
75
+
76
+ def _init():
77
+ return False
78
+
79
+
80
+ def version():
81
+ """Return the version of cuDNN."""
82
+ if not _init():
83
+ return None
84
+ return __cudnn_version
85
+
86
+
87
+ CUDNN_TENSOR_DTYPES = {
88
+ torch.half,
89
+ torch.float,
90
+ torch.double,
91
+ }
92
+
93
+
94
+ def is_available():
95
+ r"""Return a bool indicating if CUDNN is currently available."""
96
+ return torch._C._has_cudnn
97
+
98
+
99
+ def is_acceptable(tensor):
100
+ if not torch._C._get_cudnn_enabled():
101
+ return False
102
+ if tensor.device.type != "cuda" or tensor.dtype not in CUDNN_TENSOR_DTYPES:
103
+ return False
104
+ if not is_available():
105
+ warnings.warn(
106
+ "PyTorch was compiled without cuDNN/MIOpen support. To use cuDNN/MIOpen, rebuild "
107
+ "PyTorch making sure the library is visible to the build system."
108
+ )
109
+ return False
110
+ if not _init():
111
+ warnings.warn(
112
+ "cuDNN/MIOpen library not found. Check your {libpath}".format(
113
+ libpath={"darwin": "DYLD_LIBRARY_PATH", "win32": "PATH"}.get(
114
+ sys.platform, "LD_LIBRARY_PATH"
115
+ )
116
+ )
117
+ )
118
+ return False
119
+ return True
120
+
121
+
122
+ def set_flags(
123
+ _enabled=None,
124
+ _benchmark=None,
125
+ _benchmark_limit=None,
126
+ _deterministic=None,
127
+ _allow_tf32=None,
128
+ ):
129
+ orig_flags = (
130
+ torch._C._get_cudnn_enabled(),
131
+ torch._C._get_cudnn_benchmark(),
132
+ None if not is_available() else torch._C._cuda_get_cudnn_benchmark_limit(),
133
+ torch._C._get_cudnn_deterministic(),
134
+ torch._C._get_cudnn_allow_tf32(),
135
+ )
136
+ if _enabled is not None:
137
+ torch._C._set_cudnn_enabled(_enabled)
138
+ if _benchmark is not None:
139
+ torch._C._set_cudnn_benchmark(_benchmark)
140
+ if _benchmark_limit is not None and is_available():
141
+ torch._C._cuda_set_cudnn_benchmark_limit(_benchmark_limit)
142
+ if _deterministic is not None:
143
+ torch._C._set_cudnn_deterministic(_deterministic)
144
+ if _allow_tf32 is not None:
145
+ torch._C._set_cudnn_allow_tf32(_allow_tf32)
146
+ return orig_flags
147
+
148
+
149
+ @contextmanager
150
+ def flags(
151
+ enabled=False,
152
+ benchmark=False,
153
+ benchmark_limit=10,
154
+ deterministic=False,
155
+ allow_tf32=True,
156
+ ):
157
+ with __allow_nonbracketed_mutation():
158
+ orig_flags = set_flags(
159
+ enabled, benchmark, benchmark_limit, deterministic, allow_tf32
160
+ )
161
+ try:
162
+ yield
163
+ finally:
164
+ # recover the previous values
165
+ with __allow_nonbracketed_mutation():
166
+ set_flags(*orig_flags)
167
+
168
+
169
+ # The magic here is to allow us to intercept code like this:
170
+ #
171
+ # torch.backends.<cudnn|mkldnn>.enabled = True
172
+
173
+
174
+ class CudnnModule(PropModule):
175
+ def __init__(self, m, name):
176
+ super().__init__(m, name)
177
+
178
+ enabled = ContextProp(torch._C._get_cudnn_enabled, torch._C._set_cudnn_enabled)
179
+ deterministic = ContextProp(
180
+ torch._C._get_cudnn_deterministic, torch._C._set_cudnn_deterministic
181
+ )
182
+ benchmark = ContextProp(
183
+ torch._C._get_cudnn_benchmark, torch._C._set_cudnn_benchmark
184
+ )
185
+ benchmark_limit = None
186
+ if is_available():
187
+ benchmark_limit = ContextProp(
188
+ torch._C._cuda_get_cudnn_benchmark_limit,
189
+ torch._C._cuda_set_cudnn_benchmark_limit,
190
+ )
191
+ allow_tf32 = ContextProp(
192
+ torch._C._get_cudnn_allow_tf32, torch._C._set_cudnn_allow_tf32
193
+ )
194
+
195
+
196
+ # This is the sys.modules replacement trick, see
197
+ # https://stackoverflow.com/questions/2447353/getattr-on-a-module/7668273#7668273
198
+ sys.modules[__name__] = CudnnModule(sys.modules[__name__], __name__)
199
+
200
+ # Add type annotation for the replaced module
201
+ enabled: bool
202
+ deterministic: bool
203
+ benchmark: bool
204
+ allow_tf32: bool
205
+ benchmark_limit: int
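A short usage sketch for this module: flags can be flipped globally via the intercepted attributes, or scoped with the flags() context manager defined above.

import torch

torch.backends.cudnn.enabled = True      # global toggles, intercepted by CudnnModule
torch.backends.cudnn.benchmark = True

# Scoped override; the previous values are restored on exit.
with torch.backends.cudnn.flags(enabled=True, benchmark=False, deterministic=True, allow_tf32=False):
    pass  # run cuDNN-backed ops here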
env-llmeval/lib/python3.10/site-packages/torch/backends/cudnn/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (4.73 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/backends/cudnn/__pycache__/rnn.cpython-310.pyc ADDED
Binary file (1.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/backends/cudnn/rnn.py ADDED
@@ -0,0 +1,62 @@
1
+ import torch.cuda
2
+
3
+ try:
4
+ from torch._C import _cudnn
5
+ except ImportError:
6
+ # Uses of all the functions below should be guarded by torch.backends.cudnn.is_available(),
7
+ # so it's safe to not emit any checks here.
8
+ _cudnn = None # type: ignore[assignment]
9
+
10
+
11
+ def get_cudnn_mode(mode):
12
+ if mode == "RNN_RELU":
13
+ return int(_cudnn.RNNMode.rnn_relu)
14
+ elif mode == "RNN_TANH":
15
+ return int(_cudnn.RNNMode.rnn_tanh)
16
+ elif mode == "LSTM":
17
+ return int(_cudnn.RNNMode.lstm)
18
+ elif mode == "GRU":
19
+ return int(_cudnn.RNNMode.gru)
20
+ else:
21
+ raise Exception(f"Unknown mode: {mode}")
22
+
23
+
24
+ # NB: We don't actually need this class anymore (in fact, we could serialize the
25
+ # dropout state for even better reproducibility), but it is kept for backwards
26
+ # compatibility for old models.
27
+ class Unserializable:
28
+ def __init__(self, inner):
29
+ self.inner = inner
30
+
31
+ def get(self):
32
+ return self.inner
33
+
34
+ def __getstate__(self):
35
+ # Note: can't return {}, because python2 won't call __setstate__
36
+ # if the value evaluates to False
37
+ return "<unserializable>"
38
+
39
+ def __setstate__(self, state):
40
+ self.inner = None
41
+
42
+
43
+ def init_dropout_state(dropout, train, dropout_seed, dropout_state):
44
+ dropout_desc_name = "desc_" + str(torch.cuda.current_device())
45
+ dropout_p = dropout if train else 0
46
+ if (dropout_desc_name not in dropout_state) or (
47
+ dropout_state[dropout_desc_name].get() is None
48
+ ):
49
+ if dropout_p == 0:
50
+ dropout_state[dropout_desc_name] = Unserializable(None)
51
+ else:
52
+ dropout_state[dropout_desc_name] = Unserializable(
53
+ torch._cudnn_init_dropout_state( # type: ignore[call-arg]
54
+ dropout_p,
55
+ train,
56
+ dropout_seed,
57
+ self_ty=torch.uint8,
58
+ device=torch.device("cuda"),
59
+ )
60
+ )
61
+ dropout_ts = dropout_state[dropout_desc_name].get()
62
+ return dropout_ts
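A tiny, guarded illustration of the mode mapping above (only meaningful on a cuDNN-enabled build; the import path mirrors this file's location):

import torch
from torch.backends.cudnn.rnn import get_cudnn_mode

if torch.backends.cudnn.is_available():
    print(get_cudnn_mode("LSTM"))  # integer RNNMode code consumed by the cuDNN bindings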
env-llmeval/lib/python3.10/site-packages/torch/bin/torch_shm_manager ADDED
Binary file (35.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/onnx/__init__.py ADDED
@@ -0,0 +1,177 @@
1
+ from torch import _C
2
+ from torch._C import _onnx as _C_onnx
3
+ from torch._C._onnx import (
4
+ _CAFFE2_ATEN_FALLBACK,
5
+ OperatorExportTypes,
6
+ TensorProtoDataType,
7
+ TrainingMode,
8
+ )
9
+
10
+ from . import ( # usort:skip. Keep the order instead of sorting lexicographically
11
+ _deprecation,
12
+ errors,
13
+ symbolic_caffe2,
14
+ symbolic_helper,
15
+ symbolic_opset7,
16
+ symbolic_opset8,
17
+ symbolic_opset9,
18
+ symbolic_opset10,
19
+ symbolic_opset11,
20
+ symbolic_opset12,
21
+ symbolic_opset13,
22
+ symbolic_opset14,
23
+ symbolic_opset15,
24
+ symbolic_opset16,
25
+ symbolic_opset17,
26
+ symbolic_opset18,
27
+ utils,
28
+ )
29
+
30
+ # TODO(After 1.13 release): Remove the deprecated SymbolicContext
31
+ from ._exporter_states import ExportTypes, SymbolicContext
32
+ from ._type_utils import JitScalarType
33
+ from .errors import CheckerError # Backwards compatibility
34
+ from .utils import (
35
+ _optimize_graph,
36
+ _run_symbolic_function,
37
+ _run_symbolic_method,
38
+ export,
39
+ export_to_pretty_string,
40
+ is_in_onnx_export,
41
+ register_custom_op_symbolic,
42
+ select_model_mode_for_export,
43
+ unregister_custom_op_symbolic,
44
+ )
45
+
46
+ from ._internal.exporter import ( # usort:skip. needs to be last to avoid circular import
47
+ DiagnosticOptions,
48
+ ExportOptions,
49
+ ONNXProgram,
50
+     ONNXProgramSerializer,
+     ONNXRuntimeOptions,
+     InvalidExportOptionsError,
+     OnnxExporterError,
+     OnnxRegistry,
+     dynamo_export,
+     enable_fake_mode,
+ )
+
+ from ._internal.onnxruntime import (
+     is_onnxrt_backend_supported,
+     OrtBackend as _OrtBackend,
+     OrtBackendOptions as _OrtBackendOptions,
+     OrtExecutionProvider as _OrtExecutionProvider,
+ )
+
+ __all__ = [
+     # Modules
+     "symbolic_helper",
+     "utils",
+     "errors",
+     # All opsets
+     "symbolic_caffe2",
+     "symbolic_opset7",
+     "symbolic_opset8",
+     "symbolic_opset9",
+     "symbolic_opset10",
+     "symbolic_opset11",
+     "symbolic_opset12",
+     "symbolic_opset13",
+     "symbolic_opset14",
+     "symbolic_opset15",
+     "symbolic_opset16",
+     "symbolic_opset17",
+     "symbolic_opset18",
+     # Enums
+     "ExportTypes",
+     "OperatorExportTypes",
+     "TrainingMode",
+     "TensorProtoDataType",
+     "JitScalarType",
+     # Public functions
+     "export",
+     "export_to_pretty_string",
+     "is_in_onnx_export",
+     "select_model_mode_for_export",
+     "register_custom_op_symbolic",
+     "unregister_custom_op_symbolic",
+     "disable_log",
+     "enable_log",
+     # Errors
+     "CheckerError",  # Backwards compatibility
+     # Dynamo Exporter
+     "DiagnosticOptions",
+     "ExportOptions",
+     "ONNXProgram",
+     "ONNXProgramSerializer",
+     "ONNXRuntimeOptions",
+     "InvalidExportOptionsError",
+     "OnnxExporterError",
+     "OnnxRegistry",
+     "dynamo_export",
+     "enable_fake_mode",
+     # DORT / torch.compile
+     "is_onnxrt_backend_supported",
+ ]
+
+ # Set namespace for exposed private names
+ ExportTypes.__module__ = "torch.onnx"
+ JitScalarType.__module__ = "torch.onnx"
+ ExportOptions.__module__ = "torch.onnx"
+ ONNXProgram.__module__ = "torch.onnx"
+ ONNXProgramSerializer.__module__ = "torch.onnx"
+ ONNXRuntimeOptions.__module__ = "torch.onnx"
+ dynamo_export.__module__ = "torch.onnx"
+ InvalidExportOptionsError.__module__ = "torch.onnx"
+ OnnxExporterError.__module__ = "torch.onnx"
+ enable_fake_mode.__module__ = "torch.onnx"
+ OnnxRegistry.__module__ = "torch.onnx"
+ DiagnosticOptions.__module__ = "torch.onnx"
+ is_onnxrt_backend_supported.__module__ = "torch.onnx"
+ _OrtExecutionProvider.__module__ = "torch.onnx"
+ _OrtBackendOptions.__module__ = "torch.onnx"
+ _OrtBackend.__module__ = "torch.onnx"
+
+ producer_name = "pytorch"
+ producer_version = _C_onnx.PRODUCER_VERSION
+
+
+ @_deprecation.deprecated(
+     since="1.12.0", removed_in="2.0", instructions="use `torch.onnx.export` instead"
+ )
+ def _export(*args, **kwargs):
+     return utils._export(*args, **kwargs)
+
+
+ # TODO(justinchuby): Deprecate these logging functions in favor of the new diagnostic module.
+
+ # Returns True iff ONNX logging is turned on.
+ is_onnx_log_enabled = _C._jit_is_onnx_log_enabled
+
+
+ def enable_log() -> None:
+     r"""Enables ONNX logging."""
+     _C._jit_set_onnx_log_enabled(True)
+
+
+ def disable_log() -> None:
+     r"""Disables ONNX logging."""
+     _C._jit_set_onnx_log_enabled(False)
+
+
+ """Sets output stream for ONNX logging.
+
+ Args:
+     stream_name (str, default "stdout"): Only 'stdout' and 'stderr' are supported
+         as ``stream_name``.
+ """
+ set_log_stream = _C._jit_set_onnx_log_output_stream
+
+
+ """A simple logging facility for ONNX exporter.
+
+ Args:
+     args: Arguments are converted to string, concatenated together with a newline
+         character appended to the end, and flushed to output stream.
+ """
+ log = _C._jit_onnx_log
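The logging helpers at the end of this module are thin bindings over the TorchScript exporter's C++ logging (`_C._jit_*`). A minimal usage sketch around an export, assuming only the public `torch.onnx` API shown above; the model class and output filename are illustrative:

import torch

class TinyModel(torch.nn.Module):
    def forward(self, x):
        return x.relu()

torch.onnx.enable_log()              # turn exporter logging on
torch.onnx.set_log_stream("stderr")  # only "stdout" and "stderr" are accepted
torch.onnx.log("starting export")    # arguments are stringified and flushed to the log
torch.onnx.export(TinyModel(), (torch.randn(2, 3),), "tiny.onnx")
assert torch.onnx.is_onnx_log_enabled()
torch.onnx.disable_log()

Per the TODO above, these helpers are expected to give way to the diagnostics module.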
env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.98 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/_constants.cpython-310.pyc ADDED
Binary file (779 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/_experimental.cpython-310.pyc ADDED
Binary file (1.46 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/_exporter_states.cpython-310.pyc ADDED
Binary file (1.69 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/_onnx_supported_ops.cpython-310.pyc ADDED
Binary file (3.78 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/_type_utils.cpython-310.pyc ADDED
Binary file (9.54 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/errors.cpython-310.pyc ADDED
Binary file (3.41 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/operators.cpython-310.pyc ADDED
Binary file (900 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_helper.cpython-310.pyc ADDED
Binary file (42.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset11.cpython-310.pyc ADDED
Binary file (34 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset12.cpython-310.pyc ADDED
Binary file (10.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset16.cpython-310.pyc ADDED
Binary file (4.45 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset18.cpython-310.pyc ADDED
Binary file (1.58 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset7.cpython-310.pyc ADDED
Binary file (1.76 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset8.cpython-310.pyc ADDED
Binary file (10.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset9.cpython-310.pyc ADDED
Binary file (143 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/onnx/_constants.py ADDED
@@ -0,0 +1,25 @@
+ """Constant values used in ONNX."""
+
+ ONNX_ARCHIVE_MODEL_PROTO_NAME = "__MODEL_PROTO"
+
+ ONNX_BASE_OPSET = 9
+ ONNX_MIN_OPSET = 7
+ ONNX_MAX_OPSET = 19
+ ONNX_TORCHSCRIPT_EXPORTER_MAX_OPSET = 17
+ # ONNX_DEFAULT_OPSET generated by tools/onnx/update_default_opset_version.py
+ ONNX_DEFAULT_OPSET = 17
+ ONNX_CONSTANT_FOLDING_MIN_OPSET = 9
+
+ PYTORCH_GITHUB_ISSUES_URL = "https://github.com/pytorch/pytorch/issues"
+
+ INT64_MAX = 9223372036854775807
+ INT32_MAX = 2147483647
+ INT16_MAX = 32767
+ INT8_MAX = 127
+ UINT8_MAX = 255
+
+ INT64_MIN = -9223372036854775808
+ INT32_MIN = -2147483648
+ INT16_MIN = -32768
+ INT8_MIN = -128
+ UINT8_MIN = 0
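These constants define the opset range the exporter accepts (7 through 19 here, with 17 as the default and the TorchScript exporter's ceiling). A small illustrative check against those bounds; `check_opset` is a hypothetical helper, not part of this module:

from torch.onnx import _constants

def check_opset(opset_version: int) -> int:
    # Hypothetical helper: reject opset versions outside the supported range.
    if not (_constants.ONNX_MIN_OPSET <= opset_version <= _constants.ONNX_MAX_OPSET):
        raise ValueError(
            f"Unsupported ONNX opset version: {opset_version}; "
            f"supported range is [{_constants.ONNX_MIN_OPSET}, {_constants.ONNX_MAX_OPSET}]."
        )
    return opset_version

check_opset(_constants.ONNX_DEFAULT_OPSET)  # 17 -> accepted
# check_opset(20)                           # would raise ValueError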
env-llmeval/lib/python3.10/site-packages/torch/onnx/_deprecation.py ADDED
@@ -0,0 +1,64 @@
+ """Utility for deprecating functions."""
+
+ import functools
+ import textwrap
+ import warnings
+
+
+ def deprecated(since: str, removed_in: str, instructions: str):
+     """Marks functions as deprecated.
+
+     It will result in a warning when the function is called and a note in the
+     docstring.
+
+     Args:
+         since: The version when the function was first deprecated.
+         removed_in: The version when the function will be removed.
+         instructions: The action users should take.
+     """
+
+     def decorator(function):
+         @functools.wraps(function)
+         def wrapper(*args, **kwargs):
+             warnings.warn(
+                 f"'{function.__module__}.{function.__name__}' "
+                 f"is deprecated in version {since} and will be "
+                 f"removed in {removed_in}. Please {instructions}.",
+                 category=FutureWarning,
+                 stacklevel=2,
+             )
+             return function(*args, **kwargs)
+
+         # Add a deprecation note to the docstring.
+         docstring = function.__doc__ or ""
+
+         # Add a note to the docstring.
+         deprecation_note = textwrap.dedent(
+             f"""\
+             .. deprecated:: {since}
+                 Deprecated and will be removed in version {removed_in}.
+                 Please {instructions}.
+             """
+         )
+
+         # Split docstring at first occurrence of newline
+         summary_and_body = docstring.split("\n\n", 1)
+
+         if len(summary_and_body) > 1:
+             summary, body = summary_and_body
+
+             # Dedent the body. We cannot do this with the presence of the summary because
+             # the body contains leading whitespaces when the summary does not.
+             body = textwrap.dedent(body)
+
+             new_docstring_parts = [deprecation_note, "\n\n", summary, body]
+         else:
+             summary = summary_and_body[0]
+
+             new_docstring_parts = [deprecation_note, "\n\n", summary]
+
+         wrapper.__doc__ = "".join(new_docstring_parts)
+
+         return wrapper
+
+     return decorator
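A minimal sketch of how this decorator is meant to be used; `legacy_export` is a made-up function, and the versions/instructions mirror the `_export` wrapper in `torch/onnx/__init__.py` above:

import warnings

from torch.onnx import _deprecation

@_deprecation.deprecated(
    since="1.12.0", removed_in="2.0", instructions="use `torch.onnx.export` instead"
)
def legacy_export(model, args):
    """Exports a model (legacy entry point)."""
    ...

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    legacy_export(None, ())
    assert any(issubclass(w.category, FutureWarning) for w in caught)

Besides emitting the FutureWarning at call time, the decorator prepends a `.. deprecated::` note to the wrapped function's docstring.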
env-llmeval/lib/python3.10/site-packages/torch/onnx/_exporter_states.py ADDED
@@ -0,0 +1,39 @@
+ from __future__ import annotations
+
+ from typing import Dict
+
+ from torch import _C
+
+
+ class ExportTypes:
+     r"""Specifies how the ONNX model is stored."""
+
+     PROTOBUF_FILE = "Saves model in the specified protobuf file."
+     ZIP_ARCHIVE = "Saves model in the specified ZIP file (uncompressed)."
+     COMPRESSED_ZIP_ARCHIVE = "Saves model in the specified ZIP file (compressed)."
+     DIRECTORY = "Saves model in the specified folder."
+
+
+ class SymbolicContext:
+     """Extra context for symbolic functions.
+
+     Args:
+         params_dict (Dict[str, _C.IValue]): Mapping from graph initializer name to IValue.
+         env (Dict[_C.Value, _C.Value]): Mapping from Torch domain graph Value to ONNX domain graph Value.
+         cur_node (_C.Node): Current node being converted to ONNX domain.
+         onnx_block (_C.Block): Current ONNX block that converted nodes are being appended to.
+     """
+
+     def __init__(
+         self,
+         params_dict: Dict[str, _C.IValue],
+         env: dict,
+         cur_node: _C.Node,
+         onnx_block: _C.Block,
+     ):
+         self.params_dict: Dict[str, _C.IValue] = params_dict
+         self.env: Dict[_C.Value, _C.Value] = env
+         # Current node that is being converted.
+         self.cur_node: _C.Node = cur_node
+         # Current onnx block that converted nodes are being appended to.
+         self.onnx_block: _C.Block = onnx_block
env-llmeval/lib/python3.10/site-packages/torch/onnx/_globals.py ADDED
@@ -0,0 +1,85 @@
+ """Globals used internally by the ONNX exporter.
+
+ Do not use this module outside of `torch.onnx` and its tests.
+
+ Be very judicious when adding any new global variables. Do not create new global
+ variables unless they are absolutely necessary.
+ """
+ import torch._C._onnx as _C_onnx
+
+ # This module should only depend on _constants and nothing else in torch.onnx to keep
+ # dependency direction clean.
+ from torch.onnx import _constants
+
+
+ class _InternalGlobals:
+     """Globals used internally by ONNX exporter.
+
+     NOTE: Be very judicious when adding any new variables. Do not create new
+     global variables unless they are absolutely necessary.
+     """
+
+     def __init__(self):
+         self._export_onnx_opset_version = _constants.ONNX_DEFAULT_OPSET
+         self._training_mode: _C_onnx.TrainingMode = _C_onnx.TrainingMode.EVAL
+         self._in_onnx_export: bool = False
+         # Whether the user's model is training during export
+         self.export_training: bool = False
+         self.operator_export_type: _C_onnx.OperatorExportTypes = (
+             _C_onnx.OperatorExportTypes.ONNX
+         )
+         self.onnx_shape_inference: bool = True
+         self._autograd_inlining: bool = True
+
+     @property
+     def training_mode(self):
+         """The training mode for the exporter."""
+         return self._training_mode
+
+     @training_mode.setter
+     def training_mode(self, training_mode: _C_onnx.TrainingMode):
+         if not isinstance(training_mode, _C_onnx.TrainingMode):
+             raise TypeError(
+                 "training_mode must be of type 'torch.onnx.TrainingMode'. This is "
+                 "likely a bug in torch.onnx."
+             )
+         self._training_mode = training_mode
+
+     @property
+     def export_onnx_opset_version(self) -> int:
+         """Opset version used during export."""
+         return self._export_onnx_opset_version
+
+     @export_onnx_opset_version.setter
+     def export_onnx_opset_version(self, value: int):
+         supported_versions = range(
+             _constants.ONNX_MIN_OPSET, _constants.ONNX_MAX_OPSET + 1
+         )
+         if value not in supported_versions:
+             raise ValueError(f"Unsupported ONNX opset version: {value}")
+         self._export_onnx_opset_version = value
+
+     @property
+     def in_onnx_export(self) -> bool:
+         """Whether it is in the middle of ONNX export."""
+         return self._in_onnx_export
+
+     @in_onnx_export.setter
+     def in_onnx_export(self, value: bool):
+         if type(value) is not bool:
+             raise TypeError("in_onnx_export must be a boolean")
+         self._in_onnx_export = value
+
+     @property
+     def autograd_inlining(self) -> bool:
+         """Whether Autograd must be inlined."""
+         return self._autograd_inlining
+
+     @autograd_inlining.setter
+     def autograd_inlining(self, value: bool):
+         if type(value) is not bool:
+             raise TypeError("autograd_inlining must be a boolean")
+         self._autograd_inlining = value
+
+
+ GLOBALS = _InternalGlobals()
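Because `GLOBALS` is a process-wide singleton, the property setters above are the only guardrails against invalid values. A short sketch of that behavior (the module is internal, so this is purely illustrative):

from torch.onnx import _constants
from torch.onnx._globals import GLOBALS

assert GLOBALS.export_onnx_opset_version == _constants.ONNX_DEFAULT_OPSET

GLOBALS.export_onnx_opset_version = 16  # accepted: within [ONNX_MIN_OPSET, ONNX_MAX_OPSET]

try:
    GLOBALS.export_onnx_opset_version = 99  # outside the supported range
except ValueError as e:
    print(e)  # Unsupported ONNX opset version: 99

try:
    GLOBALS.in_onnx_export = 1  # not a bool
except TypeError as e:
    print(e)  # in_onnx_export must be a boolean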
env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (185 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/_beartype.cpython-310.pyc ADDED
Binary file (2.92 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/exporter.cpython-310.pyc ADDED
Binary file (49.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/io_adapter.cpython-310.pyc ADDED
Binary file (22.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/jit_utils.cpython-310.pyc ADDED
Binary file (14.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/onnx_proto_utils.cpython-310.pyc ADDED
Binary file (7.92 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/onnxruntime.cpython-310.pyc ADDED
Binary file (24.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/__pycache__/registration.cpython-310.pyc ADDED
Binary file (11.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/_beartype.py ADDED
@@ -0,0 +1,131 @@
+ """An internal wrapper for the beartype library.
+
+ The module returns a no-op decorator when the beartype library is not installed.
+ """
+ import enum
+ import functools
+ import os
+ import traceback
+ import typing
+ import warnings
+ from types import ModuleType
+
+ try:
+     import beartype as _beartype_lib  # type: ignore[import]
+     from beartype import roar as _roar  # type: ignore[import]
+
+     # Beartype warns when we import from typing because the types are deprecated
+     # in Python 3.9. But there will be a long time until we can move to using
+     # the native container types for type annotations (when 3.9 is the lowest
+     # supported version). So we silence the warning.
+     warnings.filterwarnings(
+         "ignore",
+         category=_roar.BeartypeDecorHintPep585DeprecationWarning,
+     )
+
+     if _beartype_lib.__version__ == "0.16.0":
+         # beartype 0.16.0 has a bug that causes it to crash when used with
+         # PyTorch. See https://github.com/beartype/beartype/issues/282
+         warnings.warn("beartype 0.16.0 is not supported. Please upgrade to 0.16.1+.")
+         _beartype_lib = None  # type: ignore[assignment]
+ except ImportError:
+     _beartype_lib = None  # type: ignore[assignment]
+ except Exception as e:
+     # Warn errors that are not import errors (unexpected).
+     warnings.warn(f"{e}")
+     _beartype_lib = None  # type: ignore[assignment]
+
+
+ @enum.unique
+ class RuntimeTypeCheckState(enum.Enum):
+     """Runtime type check state."""
+
+     # Runtime type checking is disabled.
+     DISABLED = enum.auto()
+     # Runtime type checking is enabled but warnings are shown only.
+     WARNINGS = enum.auto()
+     # Runtime type checking is enabled.
+     ERRORS = enum.auto()
+
+
+ class CallHintViolationWarning(UserWarning):
+     """Warning raised when a type hint is violated during a function call."""
+
+     pass
+
+
+ def _no_op_decorator(func):
+     return func
+
+
+ def _create_beartype_decorator(
+     runtime_check_state: RuntimeTypeCheckState,
+ ):
+     # beartype needs to be imported outside of the function and aliased because
+     # this module overwrites the name "beartype".
+
+     if runtime_check_state == RuntimeTypeCheckState.DISABLED:
+         return _no_op_decorator
+     if _beartype_lib is None:
+         # If the beartype library is not installed, return a no-op decorator
+         return _no_op_decorator
+
+     assert isinstance(_beartype_lib, ModuleType)
+
+     if runtime_check_state == RuntimeTypeCheckState.ERRORS:
+         # Enable runtime type checking which errors on any type hint violation.
+         return _beartype_lib.beartype
+
+     # Warnings only
+     def beartype(func):
+         """Warn on type hint violation."""
+
+         if "return" in func.__annotations__:
+             # Remove the return type from the func function's
+             # annotations so that the beartype decorator does not complain
+             # about the return type.
+             return_type = func.__annotations__["return"]
+             del func.__annotations__["return"]
+             beartyped = _beartype_lib.beartype(func)
+             # Restore the return type to the func function's annotations
+             func.__annotations__["return"] = return_type
+         else:
+             beartyped = _beartype_lib.beartype(func)
+
+         @functools.wraps(func)
+         def _coerce_beartype_exceptions_to_warnings(*args, **kwargs):
+             try:
+                 return beartyped(*args, **kwargs)
+             except _roar.BeartypeCallHintParamViolation:
+                 # Fall back to the original function if the beartype hint is violated.
+                 warnings.warn(
+                     traceback.format_exc(),
+                     category=CallHintViolationWarning,
+                     stacklevel=2,
+                 )
+
+             return func(*args, **kwargs)  # noqa: B012
+
+         return _coerce_beartype_exceptions_to_warnings
+
+     return beartype
+
+
+ if typing.TYPE_CHECKING:
+     # This is a hack to make mypy play nicely with the beartype decorator.
+     def beartype(func):
+         return func
+
+ else:
+     _TORCH_ONNX_EXPERIMENTAL_RUNTIME_TYPE_CHECK = os.getenv(
+         "TORCH_ONNX_EXPERIMENTAL_RUNTIME_TYPE_CHECK"
+     )
+     if _TORCH_ONNX_EXPERIMENTAL_RUNTIME_TYPE_CHECK == "WARNINGS":
+         _runtime_type_check_state = RuntimeTypeCheckState.WARNINGS
+     elif _TORCH_ONNX_EXPERIMENTAL_RUNTIME_TYPE_CHECK == "DISABLED":
+         _runtime_type_check_state = RuntimeTypeCheckState.DISABLED
+     else:
+         _runtime_type_check_state = RuntimeTypeCheckState.ERRORS
+     beartype = _create_beartype_decorator(_runtime_type_check_state)
+     # Make sure that the beartype decorator is enabled whichever path we took.
+     assert beartype is not None
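In the WARNINGS state, the decorator returned by `_create_beartype_decorator` runs the beartype-checked function, converts a parameter-hint violation into a `CallHintViolationWarning`, and falls back to the original function. A sketch of that path, assuming the `beartype` package is installed (otherwise the decorator is a no-op and no warning is emitted):

import warnings

from torch.onnx._internal import _beartype

check = _beartype._create_beartype_decorator(_beartype.RuntimeTypeCheckState.WARNINGS)

@check
def describe(x: int) -> str:
    return f"value: {x}"

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    result = describe("not an int")  # violates the `int` hint
    # The call still succeeds through the fallback path, but a warning is recorded.
    print(result, [w.category.__name__ for w in caught])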
env-llmeval/lib/python3.10/site-packages/torch/onnx/_internal/diagnostics/__init__.py ADDED
@@ -0,0 +1,21 @@
+ from ._diagnostic import (
+     create_export_diagnostic_context,
+     diagnose,
+     engine,
+     export_context,
+     ExportDiagnosticEngine,
+     TorchScriptOnnxExportDiagnostic,
+ )
+ from ._rules import rules
+ from .infra import levels
+
+ __all__ = [
+     "TorchScriptOnnxExportDiagnostic",
+     "ExportDiagnosticEngine",
+     "rules",
+     "levels",
+     "engine",
+     "export_context",
+     "create_export_diagnostic_context",
+     "diagnose",
+ ]