applied-ai-018 committed on
Commit 590f3ca · verified · 1 Parent(s): 8ab7384

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. ckpts/universal/global_step120/zero/15.attention.query_key_value.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step120/zero/15.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step120/zero/15.attention.query_key_value.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step120/zero/16.attention.dense.weight/exp_avg.pt +3 -0
  5. venv/lib/python3.10/site-packages/torch/_refs/__init__.py +0 -0
  6. venv/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/torch/_refs/__pycache__/_conversions.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/torch/_refs/__pycache__/fft.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/torch/_refs/_conversions.py +118 -0
  10. venv/lib/python3.10/site-packages/torch/_refs/fft.py +590 -0
  11. venv/lib/python3.10/site-packages/torch/_refs/linalg/__init__.py +308 -0
  12. venv/lib/python3.10/site-packages/torch/_refs/linalg/__pycache__/__init__.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/torch/_refs/nn/__init__.py +3 -0
  14. venv/lib/python3.10/site-packages/torch/_refs/nn/__pycache__/__init__.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/torch/_refs/nn/functional/__init__.py +1230 -0
  16. venv/lib/python3.10/site-packages/torch/_refs/nn/functional/__pycache__/__init__.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/torch/_refs/special/__init__.py +236 -0
  18. venv/lib/python3.10/site-packages/torch/_refs/special/__pycache__/__init__.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/torch/_subclasses/__init__.py +18 -0
  20. venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/__init__.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/fake_impls.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/fake_tensor.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/fake_utils.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/functional_tensor.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/meta_utils.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/schema_check_mode.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/torch/_subclasses/fake_impls.py +1061 -0
  28. venv/lib/python3.10/site-packages/torch/_subclasses/fake_tensor.py +1819 -0
  29. venv/lib/python3.10/site-packages/torch/_subclasses/fake_utils.py +190 -0
  30. venv/lib/python3.10/site-packages/torch/_subclasses/functional_tensor.py +653 -0
  31. venv/lib/python3.10/site-packages/torch/_subclasses/meta_utils.py +987 -0
  32. venv/lib/python3.10/site-packages/torch/_subclasses/schema_check_mode.py +198 -0
  33. venv/lib/python3.10/site-packages/torch/export/__init__.py +344 -0
  34. venv/lib/python3.10/site-packages/torch/export/__pycache__/__init__.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/torch/export/__pycache__/_remove_auto_functionalized_pass.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/torch/export/__pycache__/_remove_effect_tokens_pass.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/torch/export/__pycache__/_safeguard.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/torch/export/__pycache__/_trace.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/torch/export/__pycache__/_tree_utils.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/torch/export/__pycache__/_unlift.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/torch/export/__pycache__/custom_obj.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/torch/export/__pycache__/dynamic_shapes.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/torch/export/__pycache__/exported_program.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/torch/export/__pycache__/graph_signature.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/torch/export/__pycache__/unflatten.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/torch/export/_remove_auto_functionalized_pass.py +93 -0
  47. venv/lib/python3.10/site-packages/torch/export/_remove_effect_tokens_pass.py +126 -0
  48. venv/lib/python3.10/site-packages/torch/export/_safeguard.py +42 -0
  49. venv/lib/python3.10/site-packages/torch/export/_trace.py +1060 -0
  50. venv/lib/python3.10/site-packages/torch/export/_tree_utils.py +64 -0
ckpts/universal/global_step120/zero/15.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c54f008b0420ce9a6543fa4182978fce98614d47bacc66e120dc1cd698570c2e
+ size 50332828
ckpts/universal/global_step120/zero/15.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa64a7e8c84a4aa24d5606e708ef41c553e667c8d9d564cf1c782ed0b68b00b8
+ size 50332843
ckpts/universal/global_step120/zero/15.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81dc8161845dd2f7ae322f702606ddce30ba8d1c358ee6db16a32a758a3a5917
+ size 50332749
ckpts/universal/global_step120/zero/16.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab35ef16672e672908d6ea232361665000eeae86cdeea20c355c84dc5b2e0498
+ size 16778396
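The four entries above are Git LFS pointers rather than the tensors themselves; the real payloads (roughly 50 MB and 16 MB here) live in LFS storage and are fetched by `git lfs pull`. The directory layout (`exp_avg`, `exp_avg_sq`, `fp32` per parameter) looks like DeepSpeed-style universal-checkpoint optimizer state, i.e. Adam first/second moments plus fp32 master weights, though the commit itself does not say so. A minimal inspection sketch, assuming each `.pt` shard holds a single serialized tensor:

# Sketch: inspect one optimizer-state shard after the LFS objects have been fetched.
# Assumes the file holds a single tensor; adjust if it unpickles to a dict instead.
import torch

path = "ckpts/universal/global_step120/zero/15.attention.query_key_value.weight/exp_avg.pt"
state = torch.load(path, map_location="cpu")

if isinstance(state, torch.Tensor):
    print(state.shape, state.dtype)  # moment estimate for the QKV weight
else:
    print(type(state))               # fall back if the shard is not a bare tensor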
venv/lib/python3.10/site-packages/torch/_refs/__init__.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (141 kB).
 
venv/lib/python3.10/site-packages/torch/_refs/__pycache__/_conversions.cpython-310.pyc ADDED
Binary file (2.58 kB).
 
venv/lib/python3.10/site-packages/torch/_refs/__pycache__/fft.cpython-310.pyc ADDED
Binary file (16 kB).
 
venv/lib/python3.10/site-packages/torch/_refs/_conversions.py ADDED
@@ -0,0 +1,118 @@
+ import torch
+ import torch._prims_common as utils
+
+ # Utilities should come BEFORE this import
+ from torch._decomp import register_decomposition
+
+ from torch._prims_common import TensorLikeType
+ from torch._prims_common.wrappers import out_wrapper
+ from torch._refs import _broadcast_shapes
+
+ # Data conversion references.
+ #
+ # Note: this module breaks the usual _refs to torch naming scheme where
+ # _refs.foo.bar is a ref for torch.foo.bar. The following definitions are not
+ # part of _refs/__init__.py to avoid name clashes with Python builtin types
+ # (like int).
+
+ __all__ = [
+     # dtypes
+     "bfloat16",
+     "bool",
+     "byte",
+     "cdouble",
+     "cfloat",
+     "chalf",
+     "char",
+     "double",
+     "float",
+     "half",
+     "int",
+     "long",
+     "short",
+     # misc
+     "complex",
+     "polar",
+ ]
+
+
+ def _make_conversion_method(name: str, dtype: torch.dtype):
+     def fn(
+         self: TensorLikeType, memory_format: torch.memory_format = torch.preserve_format
+     ) -> TensorLikeType:
+         return self.to(dtype, memory_format=memory_format)  # type: ignore[call-overload]
+
+     fn.__name__ = name
+     return fn
+
+
+ bfloat16 = _make_conversion_method("bfloat16", torch.bfloat16)
+
+ bool = _make_conversion_method("bool", torch.bool)
+
+ byte = _make_conversion_method("byte", torch.uint8)
+
+ cdouble = _make_conversion_method("cdouble", torch.cdouble)
+
+ cfloat = _make_conversion_method("cfloat", torch.cfloat)
+
+ chalf = _make_conversion_method("chalf", torch.complex32)
+
+ char = _make_conversion_method("char", torch.int8)
+
+ double = _make_conversion_method("double", torch.double)
+
+ float = _make_conversion_method("float", torch.float)
+
+ half = _make_conversion_method("half", torch.half)
+
+ int = _make_conversion_method("int", torch.int)
+
+ long = _make_conversion_method("long", torch.long)
+
+ short = _make_conversion_method("short", torch.short)
+
+
+ @register_decomposition(torch._ops.ops.aten.complex)
+ # Note: complex has type promotion tests disabled due to different semantics.
+ # exact_dtype is for compat with complex_check_dtype from core.
+ @out_wrapper(exact_dtype=True)
+ def complex(real: TensorLikeType, imag: TensorLikeType) -> TensorLikeType:
+     allowed_dtypes = (torch.float32, torch.float64, torch.float16)
+     torch._check(
+         real.dtype in allowed_dtypes and imag.dtype in allowed_dtypes,
+         lambda: (
+             f"Expected both inputs to be Half, Float or Double tensors but got "
+             f"{real.dtype} and {imag.dtype}"
+         ),
+     )
+     torch._check(
+         real.dtype == imag.dtype,
+         lambda: (
+             f"Expected object of scalar type {real.dtype} but got "
+             f"scalar type {imag.dtype} for second argument"
+         ),
+     )
+     result_dtype = utils.corresponding_complex_dtype(real.dtype)  # type: ignore[arg-type]
+     common_shape = _broadcast_shapes(real.shape, imag.shape)
+     result = real.new_empty(
+         common_shape,
+         dtype=result_dtype,
+         layout=real.layout,
+         device=real.device,
+         # pin_memory=real.is_pinned(),  # NYI
+     )
+     result.real = real
+     result.imag = imag
+     return result
+
+
+ @register_decomposition(torch._ops.ops.aten.polar)
+ # Note: polar has type promotion tests disabled due to different semantics.
+ # exact_dtype is for compat with complex_check_dtype from core.
+ @out_wrapper(exact_dtype=True)
+ def polar(abs: TensorLikeType, angle: TensorLikeType) -> TensorLikeType:
+     result = torch.complex(abs, angle)
+     result.real = abs * torch.cos(angle)
+     result.imag = abs * torch.sin(angle)
+     return result
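The module above mirrors the `Tensor.half()`/`Tensor.float()`-style conversion methods and the `torch.complex`/`torch.polar` constructors as composite references. A small sanity-check sketch, assuming a PyTorch build where the private `torch._refs._conversions` module is importable (its layout may change between releases):

# Sketch: the private _refs conversions should agree with the public ops.
import torch
import torch._refs._conversions as ref_conv

# dtype-conversion references mirror the Tensor.* methods
x = torch.arange(4)
assert ref_conv.half(x).dtype == x.half().dtype == torch.float16
assert ref_conv.bool(x).dtype == torch.bool

# the complex reference agrees with the eager torch.complex kernel
real = torch.tensor([1.0, 2.0])
imag = torch.tensor([3.0, 4.0])
assert torch.allclose(ref_conv.complex(real, imag), torch.complex(real, imag))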
venv/lib/python3.10/site-packages/torch/_refs/fft.py ADDED
@@ -0,0 +1,590 @@
1
+ import math
2
+
3
+ from typing import Iterable, List, Literal, NamedTuple, Optional, Sequence, Tuple, Union
4
+
5
+ import torch
6
+ import torch._prims as prims
7
+ import torch._prims_common as utils
8
+ from torch._decomp import register_decomposition
9
+ from torch._prims_common import DimsType, ShapeType, TensorLikeType
10
+ from torch._prims_common.wrappers import _maybe_convert_to_dtype, out_wrapper
11
+
12
+ __all__ = [
13
+ # Transforms
14
+ "fft",
15
+ "fft2",
16
+ "fftn",
17
+ "hfft",
18
+ "hfft2",
19
+ "hfftn",
20
+ "rfft",
21
+ "rfft2",
22
+ "rfftn",
23
+ "ifft",
24
+ "ifft2",
25
+ "ifftn",
26
+ "ihfft",
27
+ "ihfft2",
28
+ "ihfftn",
29
+ "irfft",
30
+ "irfft2",
31
+ "irfftn",
32
+ # Helpers
33
+ "fftshift",
34
+ "ifftshift",
35
+ ]
36
+
37
+ NormType = Union[None, Literal["forward", "backward", "ortho"]]
38
+ _NORM_VALUES = {None, "forward", "backward", "ortho"}
39
+ aten = torch._ops.ops.aten
40
+
41
+
42
+ def _apply_norm(
43
+ x: TensorLikeType, norm: NormType, signal_numel: int, forward: bool
44
+ ) -> TensorLikeType:
45
+ """Apply normalization to the un-normalized FFT result"""
46
+ torch._check(norm in _NORM_VALUES, lambda: f"Invalid normalization mode: {norm}")
47
+
48
+ if norm == "ortho":
49
+ return x * (1 / math.sqrt(signal_numel))
50
+
51
+ normalize = (not forward and (norm is None or norm == "backward")) or (
52
+ forward and norm == "forward"
53
+ )
54
+ return x * (1 / signal_numel) if normalize else x
55
+
56
+
57
+ def _promote_type_fft(
58
+ dtype: torch.dtype, require_complex: bool, device: torch.device
59
+ ) -> torch.dtype:
60
+ """Helper to promote a dtype to one supported by the FFT primitives"""
61
+ if dtype.is_complex:
62
+ return dtype
63
+
64
+ # Promote integral to default float type
65
+ if not dtype.is_floating_point:
66
+ dtype = torch.get_default_dtype()
67
+
68
+ allowed_types = [torch.float32, torch.float64]
69
+ maybe_support_half = device.type in ["cuda", "meta"]
70
+
71
+ if maybe_support_half:
72
+ allowed_types.append(torch.float16)
73
+ torch._check(dtype in allowed_types, lambda: f"Unsupported dtype {dtype}")
74
+
75
+ if require_complex:
76
+ dtype = utils.corresponding_complex_dtype(dtype)
77
+
78
+ return dtype
79
+
80
+
81
+ def _maybe_promote_tensor_fft(
82
+ t: TensorLikeType, require_complex: bool = False
83
+ ) -> TensorLikeType:
84
+ """Helper to promote a tensor to a dtype supported by the FFT primitives"""
85
+ cur_type = t.dtype
86
+ new_type = _promote_type_fft(cur_type, require_complex, t.device)
87
+ return _maybe_convert_to_dtype(t, new_type) # type: ignore[return-value]
88
+
89
+
90
+ def _resize_fft_input(
91
+ x: TensorLikeType, dims: Tuple[int, ...], sizes: Tuple[int, ...]
92
+ ) -> TensorLikeType:
93
+ """
94
+ Fixes the shape of x such that x.size(dims[i]) == sizes[i],
95
+ either by zero-padding, or by slicing x starting from 0.
96
+ """
97
+ assert len(dims) == len(sizes)
98
+ must_copy = False
99
+ x_sizes = x.shape
100
+ pad_amount = [0] * len(x_sizes) * 2
101
+ for i in range(len(dims)):
102
+ if sizes[i] == -1:
103
+ continue
104
+
105
+ if x_sizes[dims[i]] < sizes[i]:
106
+ must_copy = True
107
+ pad_idx = len(pad_amount) - 2 * dims[i] - 1
108
+ pad_amount[pad_idx] = sizes[i] - x_sizes[dims[i]]
109
+
110
+ if x_sizes[dims[i]] > sizes[i]:
111
+ x = x.narrow(dims[i], 0, sizes[i])
112
+
113
+ return torch.constant_pad_nd(x, pad_amount) if must_copy else x
114
+
115
+
116
+ def _fft_c2r(
117
+ func_name: str,
118
+ input: TensorLikeType,
119
+ n: Optional[int],
120
+ dim: int,
121
+ norm: NormType,
122
+ forward: bool,
123
+ ) -> TensorLikeType:
124
+ """Common code for performing any complex to real FFT (irfft or hfft)"""
125
+ input = _maybe_promote_tensor_fft(input, require_complex=True)
126
+ dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),)
127
+ last_dim_size = n if n is not None else 2 * (input.shape[dim] - 1)
128
+ torch._check(
129
+ last_dim_size >= 1,
130
+ lambda: f"Invalid number of data points ({last_dim_size}) specified",
131
+ )
132
+
133
+ if n is not None:
134
+ input = _resize_fft_input(input, dims=dims, sizes=(last_dim_size // 2 + 1,))
135
+
136
+ if forward:
137
+ input = torch.conj(input)
138
+
139
+ output = prims.fft_c2r(input, dim=dims, last_dim_size=last_dim_size)
140
+ return _apply_norm(output, norm=norm, signal_numel=last_dim_size, forward=forward)
141
+
142
+
143
+ def _fft_r2c(
144
+ func_name: str,
145
+ input: TensorLikeType,
146
+ n: Optional[int],
147
+ dim: int,
148
+ norm: NormType,
149
+ forward: bool,
150
+ onesided: bool,
151
+ ) -> TensorLikeType:
152
+ """Common code for performing any real to complex FFT (rfft or ihfft)"""
153
+ torch._check(
154
+ not input.dtype.is_complex,
155
+ lambda: f"{func_name} expects a floating point input tensor, but got {input.dtype}",
156
+ )
157
+ input = _maybe_promote_tensor_fft(input)
158
+ dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),)
159
+ dim_size = n if n is not None else input.shape[dim]
160
+ torch._check(
161
+ dim_size >= 1, lambda: f"Invalid number of data points ({dim_size}) specified"
162
+ )
163
+
164
+ if n is not None:
165
+ input = _resize_fft_input(input, dims, (n,))
166
+
167
+ ret = prims.fft_r2c(input, dim=dims, onesided=onesided)
168
+ ret = _apply_norm(ret, norm, dim_size, forward)
169
+ return ret if forward else torch.conj(ret)
170
+
171
+
172
+ def _fft_c2c(
173
+ func_name: str,
174
+ input: TensorLikeType,
175
+ n: Optional[int],
176
+ dim: int,
177
+ norm: NormType,
178
+ forward: bool,
179
+ ) -> TensorLikeType:
180
+ """Common code for performing any complex to complex FFT (fft or ifft)"""
181
+ torch._check(
182
+ input.dtype.is_complex,
183
+ lambda: f"{func_name} expects a complex input tensor, but got {input.dtype}",
184
+ )
185
+ dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),)
186
+ dim_size = n if n is not None else input.shape[dim]
187
+ torch._check(
188
+ dim_size >= 1, lambda: f"Invalid number of data points ({dim_size}) specified"
189
+ )
190
+
191
+ if n is not None:
192
+ input = _resize_fft_input(input, dims, (n,))
193
+
194
+ ret = prims.fft_c2c(input, dim=dims, forward=forward)
195
+ return _apply_norm(ret, norm, dim_size, forward)
196
+
197
+
198
+ @register_decomposition(aten.fft_fft)
199
+ @out_wrapper()
200
+ def fft(
201
+ input: TensorLikeType,
202
+ n: Optional[int] = None,
203
+ dim: int = -1,
204
+ norm: NormType = None,
205
+ ) -> TensorLikeType:
206
+ if input.dtype.is_complex:
207
+ return _fft_c2c("fft", input, n, dim, norm, forward=True)
208
+ else:
209
+ return _fft_r2c("fft", input, n, dim, norm, forward=True, onesided=False)
210
+
211
+
212
+ @register_decomposition(aten.fft_ifft)
213
+ @out_wrapper()
214
+ def ifft(
215
+ input: TensorLikeType,
216
+ n: Optional[int] = None,
217
+ dim: int = -1,
218
+ norm: NormType = None,
219
+ ) -> TensorLikeType:
220
+ if input.dtype.is_complex:
221
+ return _fft_c2c("ifft", input, n, dim, norm, forward=False)
222
+ else:
223
+ return _fft_r2c("ifft", input, n, dim, norm, forward=False, onesided=False)
224
+
225
+
226
+ @register_decomposition(aten.fft_rfft)
227
+ @out_wrapper()
228
+ def rfft(
229
+ input: TensorLikeType,
230
+ n: Optional[int] = None,
231
+ dim: int = -1,
232
+ norm: NormType = None,
233
+ ) -> TensorLikeType:
234
+ return _fft_r2c("rfft", input, n, dim, norm, forward=True, onesided=True)
235
+
236
+
237
+ @register_decomposition(aten.fft_irfft)
238
+ @out_wrapper()
239
+ def irfft(
240
+ input: TensorLikeType,
241
+ n: Optional[int] = None,
242
+ dim: int = -1,
243
+ norm: NormType = None,
244
+ ) -> TensorLikeType:
245
+ return _fft_c2r("irfft", input, n, dim, norm, forward=False)
246
+
247
+
248
+ @register_decomposition(aten.fft_hfft)
249
+ @out_wrapper()
250
+ def hfft(
251
+ input: TensorLikeType,
252
+ n: Optional[int] = None,
253
+ dim: int = -1,
254
+ norm: NormType = None,
255
+ ) -> TensorLikeType:
256
+ return _fft_c2r("hfft", input, n, dim, norm, forward=True)
257
+
258
+
259
+ @register_decomposition(aten.fft_ihfft)
260
+ @out_wrapper()
261
+ def ihfft(
262
+ input: TensorLikeType,
263
+ n: Optional[int] = None,
264
+ dim: int = -1,
265
+ norm: NormType = None,
266
+ ) -> TensorLikeType:
267
+ return _fft_r2c("ihfft", input, n, dim, norm, forward=False, onesided=True)
268
+
269
+
270
+ class _ShapeAndDims(NamedTuple):
271
+ shape: Tuple[int, ...]
272
+ dims: Tuple[int, ...]
273
+
274
+
275
+ def _canonicalize_fft_shape_and_dim_args(
276
+ input: TensorLikeType, shape: Optional[ShapeType], dim: Optional[DimsType]
277
+ ) -> _ShapeAndDims:
278
+ """Convert the shape and dim arguments into a canonical form where neither are optional"""
279
+ input_dim = input.ndim
280
+ input_sizes = input.shape
281
+
282
+ if dim is not None:
283
+ if not isinstance(dim, Sequence):
284
+ dim = (dim,)
285
+ ret_dims = utils.canonicalize_dims(input_dim, dim, wrap_scalar=False)
286
+
287
+ # Check dims are unique
288
+ torch._check(
289
+ len(set(ret_dims)) == len(ret_dims), lambda: "FFT dims must be unique"
290
+ )
291
+
292
+ if shape is not None:
293
+ if not isinstance(shape, Sequence):
294
+ shape = (shape,)
295
+
296
+ # Has shape, might have dim
297
+ torch._check(
298
+ dim is None or len(dim) == len(shape),
299
+ lambda: "When given, dim and shape arguments must have the same length",
300
+ )
301
+ transform_ndim = len(shape)
302
+
303
+ torch._check(
304
+ transform_ndim <= input_dim,
305
+ lambda: f"Got shape with {transform_ndim} values but input tensor "
306
+ f"only has {input_dim} dimensions.",
307
+ )
308
+
309
+ # If shape is given, dims defaults to the last len(shape) dimensions
310
+ if dim is None:
311
+ ret_dims = tuple(range(input_dim - transform_ndim, input_dim))
312
+
313
+ # Translate any -1 values in shape to the default length
314
+ ret_shape = tuple(
315
+ s if s != -1 else input_sizes[d] for (s, d) in zip(shape, ret_dims) # type: ignore[possibly-undefined]
316
+ )
317
+ elif dim is None:
318
+ # No shape, no dim
319
+ ret_dims = tuple(range(input_dim))
320
+ ret_shape = tuple(input_sizes)
321
+ else:
322
+ # No shape, has dim
323
+ ret_shape = tuple(input_sizes[d] for d in ret_dims) # type: ignore[possibly-undefined]
324
+
325
+ for n in ret_shape:
326
+ torch._check(n > 0, lambda: f"Invalid number of data points ({n}) specified")
327
+
328
+ return _ShapeAndDims(shape=ret_shape, dims=ret_dims) # type: ignore[possibly-undefined]
329
+
330
+
331
+ def _prod(xs: Iterable[int]) -> int:
332
+ """Compute product of a list"""
333
+ prod = 1
334
+ for x in xs:
335
+ prod *= x
336
+ return prod
337
+
338
+
339
+ def _fftn_c2c(
340
+ function_name: str,
341
+ input: TensorLikeType,
342
+ shape: Tuple[int, ...],
343
+ dim: Tuple[int, ...],
344
+ norm: NormType,
345
+ forward: bool,
346
+ ) -> TensorLikeType:
347
+ """Common code for n-dimensional complex to complex FFTs (fftn or ifftn)"""
348
+ torch._check(
349
+ input.dtype.is_complex,
350
+ lambda: f"{function_name} expects a complex input tensor, "
351
+ f"but got {input.dtype}",
352
+ )
353
+ x = _resize_fft_input(input, dim, shape)
354
+ output = prims.fft_c2c(x, dim=dim, forward=forward)
355
+ return _apply_norm(output, norm=norm, signal_numel=_prod(shape), forward=forward)
356
+
357
+
358
+ @register_decomposition(aten.fft_fftn)
359
+ @out_wrapper()
360
+ def fftn(
361
+ input: TensorLikeType,
362
+ s: Optional[ShapeType] = None,
363
+ dim: Optional[DimsType] = None,
364
+ norm: NormType = None,
365
+ ) -> TensorLikeType:
366
+ (shape, dim) = _canonicalize_fft_shape_and_dim_args(input, s, dim)
367
+ x = _maybe_promote_tensor_fft(input, require_complex=True)
368
+ return _fftn_c2c("fftn", x, shape, dim, norm, forward=True)
369
+
370
+
371
+ @register_decomposition(aten.fft_ifftn)
372
+ @out_wrapper()
373
+ def ifftn(
374
+ input: TensorLikeType,
375
+ s: Optional[ShapeType] = None,
376
+ dim: Optional[DimsType] = None,
377
+ norm: NormType = None,
378
+ ) -> TensorLikeType:
379
+ (shape, dim) = _canonicalize_fft_shape_and_dim_args(input, s, dim)
380
+ x = _maybe_promote_tensor_fft(input, require_complex=True)
381
+ return _fftn_c2c("ifftn", x, shape, dim, norm, forward=False)
382
+
383
+
384
+ @register_decomposition(aten.fft_rfftn)
385
+ @out_wrapper()
386
+ def rfftn(
387
+ input: TensorLikeType,
388
+ s: Optional[ShapeType] = None,
389
+ dim: Optional[DimsType] = None,
390
+ norm: NormType = None,
391
+ ) -> TensorLikeType:
392
+ torch._check(
393
+ not input.dtype.is_complex,
394
+ lambda: f"rfftn expects a real-valued input tensor, but got {input.dtype}",
395
+ )
396
+ shape, dim = _canonicalize_fft_shape_and_dim_args(input, s, dim)
397
+ input = _maybe_promote_tensor_fft(input, require_complex=False)
398
+ input = _resize_fft_input(input, dim, shape)
399
+ out = prims.fft_r2c(input, dim=dim, onesided=True)
400
+ return _apply_norm(out, norm=norm, signal_numel=_prod(shape), forward=True)
401
+
402
+
403
+ @register_decomposition(aten.fft_ihfftn)
404
+ @out_wrapper()
405
+ def ihfftn(
406
+ input: TensorLikeType,
407
+ s: Optional[ShapeType] = None,
408
+ dim: Optional[DimsType] = None,
409
+ norm: NormType = None,
410
+ ) -> TensorLikeType:
411
+ torch._check(
412
+ not input.dtype.is_complex,
413
+ lambda: f"ihfftn expects a real-valued input tensor, but got {input.dtype}",
414
+ )
415
+ shape, dim = _canonicalize_fft_shape_and_dim_args(input, s, dim)
416
+ torch._check(len(shape) > 0, lambda: "ihfftn must transform at least one axis")
417
+ input = _maybe_promote_tensor_fft(input, require_complex=False)
418
+ input = _resize_fft_input(input, dim, shape)
419
+
420
+ tmp = prims.fft_r2c(input, dim=dim[-1:], onesided=True)
421
+
422
+ if len(dim) == 1:
423
+ tmp = _apply_norm(tmp, norm=norm, signal_numel=shape[0], forward=False)
424
+ return prims.conj(tmp)
425
+
426
+ tmp = prims.conj_physical(tmp)
427
+ tmp = prims.fft_c2c(tmp, dim=dim[:-1], forward=False)
428
+ return _apply_norm(tmp, norm=norm, signal_numel=_prod(shape), forward=False)
429
+
430
+
431
+ class _CanonicalizeC2rReturn(NamedTuple):
432
+ shape: Tuple[int, ...]
433
+ dim: Tuple[int, ...]
434
+ last_dim_size: int
435
+
436
+
437
+ def _canonicalize_fft_c2r_shape_and_dim_args(
438
+ fname: str,
439
+ input: TensorLikeType,
440
+ s: Optional[ShapeType],
441
+ dim: Optional[DimsType],
442
+ ) -> _CanonicalizeC2rReturn:
443
+ """Canonicalize shape and dim arguments for n-dimensional c2r transforms,
444
+ as well as calculating the last_dim_size which is shape[dim[-1]] for the output"""
445
+ (shape, dim) = _canonicalize_fft_shape_and_dim_args(input, s, dim)
446
+ torch._check(len(shape) > 0, lambda: f"{fname} must transform at least one axis")
447
+
448
+ if s is None or s[-1] == -1:
449
+ last_dim_size = 2 * (input.shape[dim[-1]] - 1)
450
+ else:
451
+ last_dim_size = shape[-1]
452
+
453
+ torch._check(
454
+ last_dim_size >= 1,
455
+ lambda: f"Invalid number of data points ({last_dim_size}) specified",
456
+ )
457
+
458
+ shape_list = list(shape)
459
+ shape_list[-1] = last_dim_size // 2 + 1
460
+ return _CanonicalizeC2rReturn(
461
+ shape=tuple(shape_list), dim=dim, last_dim_size=last_dim_size
462
+ )
463
+
464
+
465
+ @register_decomposition(aten.fft_irfftn)
466
+ @out_wrapper()
467
+ def irfftn(
468
+ input: TensorLikeType,
469
+ s: Optional[ShapeType] = None,
470
+ dim: Optional[DimsType] = None,
471
+ norm: NormType = None,
472
+ ) -> TensorLikeType:
473
+ shape, dim, last_dim_size = _canonicalize_fft_c2r_shape_and_dim_args(
474
+ "irfftn", input, s, dim
475
+ )
476
+ input = _maybe_promote_tensor_fft(input, require_complex=True)
477
+ input = _resize_fft_input(input, dim, shape)
478
+ out = prims.fft_c2r(input, dim=dim, last_dim_size=last_dim_size)
479
+ return _apply_norm(out, norm, _prod(out.shape[d] for d in dim), forward=False)
480
+
481
+
482
+ @register_decomposition(aten.fft_hfftn)
483
+ @out_wrapper()
484
+ def hfftn(
485
+ input: TensorLikeType,
486
+ s: Optional[ShapeType] = None,
487
+ dim: Optional[DimsType] = None,
488
+ norm: NormType = None,
489
+ ) -> TensorLikeType:
490
+ shape, dim, last_dim_size = _canonicalize_fft_c2r_shape_and_dim_args(
491
+ "hfftn", input, s, dim
492
+ )
493
+ input = _maybe_promote_tensor_fft(input, require_complex=True)
494
+ input = _resize_fft_input(input, dim, shape)
495
+
496
+ tmp = prims.fft_c2c(input, dim=dim[:-1], forward=True) if len(dim) > 1 else input
497
+ tmp = _apply_norm(tmp, norm, _prod(shape[:-1]), forward=True)
498
+ tmp = prims.conj_physical(tmp)
499
+ out = prims.fft_c2r(tmp, dim=dim[-1:], last_dim_size=last_dim_size)
500
+ return _apply_norm(out, norm, last_dim_size, forward=True)
501
+
502
+
503
+ @register_decomposition(aten.fft_fft2)
504
+ @out_wrapper()
505
+ def fft2(
506
+ input: TensorLikeType,
507
+ s: Optional[ShapeType] = None,
508
+ dim: Optional[DimsType] = (-2, -1),
509
+ norm: NormType = None,
510
+ ) -> TensorLikeType:
511
+ return torch.fft.fftn(input, s=s, dim=dim, norm=norm)
512
+
513
+
514
+ @register_decomposition(aten.fft_ifft2)
515
+ @out_wrapper()
516
+ def ifft2(
517
+ input: TensorLikeType,
518
+ s: Optional[ShapeType] = None,
519
+ dim: Optional[DimsType] = (-2, -1),
520
+ norm: NormType = None,
521
+ ) -> TensorLikeType:
522
+ return torch.fft.ifftn(input, s=s, dim=dim, norm=norm)
523
+
524
+
525
+ @register_decomposition(aten.fft_rfft2)
526
+ @out_wrapper()
527
+ def rfft2(
528
+ input: TensorLikeType,
529
+ s: Optional[ShapeType] = None,
530
+ dim: Optional[DimsType] = (-2, -1),
531
+ norm: NormType = None,
532
+ ) -> TensorLikeType:
533
+ return torch.fft.rfftn(input, s=s, dim=dim, norm=norm)
534
+
535
+
536
+ @register_decomposition(aten.fft_irfft2)
537
+ @out_wrapper()
538
+ def irfft2(
539
+ input: TensorLikeType,
540
+ s: Optional[ShapeType] = None,
541
+ dim: Optional[DimsType] = (-2, -1),
542
+ norm: NormType = None,
543
+ ) -> TensorLikeType:
544
+ return torch.fft.irfftn(input, s=s, dim=dim, norm=norm)
545
+
546
+
547
+ @register_decomposition(aten.fft_hfft2)
548
+ @out_wrapper()
549
+ def hfft2(
550
+ input: TensorLikeType,
551
+ s: Optional[ShapeType] = None,
552
+ dim: Optional[DimsType] = (-2, -1),
553
+ norm: NormType = None,
554
+ ) -> TensorLikeType:
555
+ return torch.fft.hfftn(input, s=s, dim=dim, norm=norm)
556
+
557
+
558
+ @register_decomposition(aten.fft_ihfft2)
559
+ @out_wrapper()
560
+ def ihfft2(
561
+ input: TensorLikeType,
562
+ s: Optional[ShapeType] = None,
563
+ dim: Optional[DimsType] = (-2, -1),
564
+ norm: NormType = None,
565
+ ) -> TensorLikeType:
566
+ return torch.fft.ihfftn(input, s=s, dim=dim, norm=norm)
567
+
568
+
569
+ def _default_alldims(dim: Optional[DimsType], x: TensorLikeType) -> List[int]:
570
+ """Convert Optional[DimsType] to a simple list, defaulting to all dimensions"""
571
+ if dim is None:
572
+ return list(range(x.ndim))
573
+ elif not isinstance(dim, Sequence):
574
+ return [dim]
575
+ else:
576
+ return list(dim)
577
+
578
+
579
+ @register_decomposition(aten.fft_fftshift)
580
+ def fftshift(input: TensorLikeType, dim: Optional[DimsType] = None) -> TensorLikeType:
581
+ dims = _default_alldims(dim, input)
582
+ shift = [input.shape[d] // 2 for d in dims]
583
+ return torch.roll(input, shift, dims)
584
+
585
+
586
+ @register_decomposition(aten.fft_ifftshift)
587
+ def ifftshift(input: TensorLikeType, dim: Optional[DimsType] = None) -> TensorLikeType:
588
+ dims = _default_alldims(dim, input)
589
+ shift = [(input.shape[d] + 1) // 2 for d in dims]
590
+ return torch.roll(input, shift, dims)
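The helpers above follow the standard FFT normalization convention: the default "backward" mode scales only the inverse transform by 1/n, "forward" scales only the forward transform, and "ortho" applies 1/sqrt(n) to both, with `fftshift`/`ifftshift` implemented as plain `torch.roll` calls. A short sketch against the public `torch.fft` namespace, which these refs are registered to decompose (behaviour assumed identical up to floating-point error):

# Sketch: the normalization modes and fftshift behaviour implemented above.
import math

import torch

x = torch.randn(8)

# "ortho" scales the forward transform by 1/sqrt(n); the default "backward"
# convention leaves the forward transform unscaled.
ortho = torch.fft.fft(x, norm="ortho")
backward = torch.fft.fft(x)  # norm=None is the "backward" convention
assert torch.allclose(ortho, backward / math.sqrt(x.numel()))

# fftshift on a 1-D tensor is just a roll by n // 2, as in the reference above.
assert torch.equal(torch.fft.fftshift(x), torch.roll(x, x.numel() // 2))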
venv/lib/python3.10/site-packages/torch/_refs/linalg/__init__.py ADDED
@@ -0,0 +1,308 @@
1
+ from functools import partial
2
+
3
+ from typing import List, Optional, Tuple, Union
4
+
5
+ import torch
6
+
7
+ import torch._prims as prims
8
+
9
+ import torch._prims_common as utils
10
+ import torch._refs as refs
11
+ import torch._refs.linalg as linalg
12
+ from torch import Tensor
13
+ from torch._prims_common import (
14
+ check_fp_or_complex,
15
+ check_is_matrix,
16
+ Dim,
17
+ DimsType,
18
+ ELEMENTWISE_TYPE_PROMOTION_KIND,
19
+ IntLike,
20
+ NumberType,
21
+ TensorLikeType,
22
+ )
23
+ from torch._prims_common.wrappers import (
24
+ _maybe_convert_to_dtype,
25
+ elementwise_type_promotion_wrapper,
26
+ out_wrapper,
27
+ )
28
+
29
+
30
+ __all__ = [
31
+ "diagonal",
32
+ "matrix_norm",
33
+ "norm",
34
+ "svd",
35
+ "svdvals",
36
+ "vector_norm",
37
+ "vecdot",
38
+ "cross",
39
+ ]
40
+
41
+
42
+ def _check_norm_dtype(dtype: Optional[torch.dtype], x_dtype: torch.dtype, fn_name: str):
43
+ """
44
+ Checks related to the dtype kwarg in `linalg.*norm` functions
45
+ """
46
+ if dtype is not None:
47
+ torch._check(
48
+ utils.is_float_dtype(dtype) or utils.is_complex_dtype(dtype),
49
+ lambda: f"{fn_name}: dtype should be floating point or complex. Got {dtype}",
50
+ )
51
+ torch._check(
52
+ utils.is_complex_dtype(dtype) == utils.is_complex_dtype(x_dtype),
53
+ lambda: "{fn_name}: dtype should be {d} for {d} inputs. Got {dtype}".format(
54
+ fn_name=fn_name,
55
+ d="complex" if utils.is_complex_dtype(x_dtype) else "real",
56
+ dtype=dtype,
57
+ ),
58
+ )
59
+ torch._check(
60
+ utils.get_higher_dtype(dtype, x_dtype) == dtype,
61
+ lambda: f"{fn_name}: the dtype of the input ({x_dtype}) should be convertible "
62
+ "without narrowing to the specified dtype ({dtype})",
63
+ )
64
+
65
+
66
+ # Utilities should come BEFORE this import
67
+ from torch._decomp import register_decomposition
68
+ from torch._decomp.decompositions import pw_cast_for_opmath
69
+
70
+
71
+ @register_decomposition(torch._ops.ops.aten.linalg_cross)
72
+ @out_wrapper()
73
+ @pw_cast_for_opmath
74
+ def cross(a: Tensor, b: Tensor, dim: int = -1):
75
+ torch._check(
76
+ a.ndim == b.ndim,
77
+ lambda: "linalg.cross: inputs must have the same number of dimensions.",
78
+ )
79
+ torch._check(
80
+ a.size(dim) == 3 and b.size(dim) == 3,
81
+ lambda: f"linalg.cross: inputs dim {dim} must have length 3, got {a.size(dim)} and {b.size(dim)}",
82
+ )
83
+ a, b = torch.broadcast_tensors(a, b)
84
+ dim = utils.canonicalize_dim(a.ndim, dim)
85
+ idx = torch.arange(3, device=a.device)
86
+ return a.index_select(dim, (idx + 1) % 3) * b.index_select(
87
+ dim, (idx + 2) % 3
88
+ ) - a.index_select(dim, (idx + 2) % 3) * b.index_select(dim, (idx + 1) % 3)
89
+
90
+
91
+ def diagonal(
92
+ input: TensorLikeType,
93
+ *,
94
+ offset: int = 0,
95
+ dim1: int = -2,
96
+ dim2: int = -1,
97
+ ) -> TensorLikeType:
98
+ return torch.diagonal(input, offset=offset, dim1=dim1, dim2=dim2)
99
+
100
+
101
+ @register_decomposition(torch._ops.ops.aten.linalg_vector_norm)
102
+ @out_wrapper(exact_dtype=True)
103
+ def vector_norm(
104
+ x: TensorLikeType,
105
+ ord: Union[float, int] = 2,
106
+ dim: Optional[DimsType] = None,
107
+ keepdim: bool = False,
108
+ *,
109
+ dtype: Optional[torch.dtype] = None,
110
+ ) -> Tensor:
111
+ # Checks
112
+ check_fp_or_complex(x.dtype, "linalg.vector_norm")
113
+
114
+ if isinstance(dim, Dim):
115
+ dim = [dim] # type: ignore[assignment]
116
+
117
+ if x.numel() == 0 and (ord < 0.0 or ord == float("inf")):
118
+ torch._check(
119
+ dim is not None and len(dim) != 0,
120
+ lambda: f"linalg.vector_norm cannot compute the {ord} norm on an empty tensor "
121
+ "because the operation does not have an identity",
122
+ )
123
+ shape = x.shape
124
+ assert dim is not None # mypy does not seem to be able to see through check?
125
+ for d in dim:
126
+ torch._check(
127
+ shape[d] != 0,
128
+ lambda: f"linalg.vector_norm cannot compute the {ord} norm on the "
129
+ f"dimension {d} because this dimension is empty and the "
130
+ "operation does not have an identity",
131
+ )
132
+ _check_norm_dtype(dtype, x.dtype, "linalg.vector_norm")
133
+
134
+ computation_dtype, result_dtype = utils.reduction_dtypes(
135
+ x, utils.REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT, dtype
136
+ )
137
+
138
+ to_result_dtype = partial(_maybe_convert_to_dtype, dtype=result_dtype)
139
+
140
+ # Implementation
141
+ if ord == 0.0:
142
+ return torch.sum(torch.ne(x, 0.0), dim=dim, keepdim=keepdim, dtype=result_dtype)
143
+ elif ord == float("inf"):
144
+ return to_result_dtype(torch.amax(torch.abs(x), dim=dim, keepdim=keepdim)) # type: ignore[return-value,arg-type]
145
+ elif ord == float("-inf"):
146
+ return to_result_dtype(torch.amin(torch.abs(x), dim=dim, keepdim=keepdim)) # type: ignore[return-value,arg-type]
147
+ else:
148
+ # From here on the computation dtype is important as the reduction is non-trivial
149
+ x = _maybe_convert_to_dtype(x, computation_dtype) # type: ignore[assignment]
150
+ reduce_sum = partial(torch.sum, dim=dim, keepdim=keepdim)
151
+
152
+ is_ord_even = ord % 2 == 0 if isinstance(ord, IntLike) else ord % 2.0 == 0.0
153
+ if not (is_ord_even and utils.is_float_dtype(x.dtype)):
154
+ x = torch.abs(x)
155
+ return to_result_dtype(torch.pow(reduce_sum(torch.pow(x, ord)), 1.0 / ord)) # type: ignore[return-value]
156
+
157
+
158
+ def _backshift_permutation(dim0, dim1, ndim):
159
+ # Auxiliary function for matrix_norm
160
+ # Computes the permutation that moves the two given dimensions to the back
161
+ ret = [i for i in range(ndim) if i != dim0 and i != dim1]
162
+ ret.extend((dim0, dim1))
163
+ return ret
164
+
165
+
166
+ def _inverse_permutation(perm):
167
+ # Given a permutation, returns its inverse. It's equivalent to argsort on an array
168
+ return [i for i, j in sorted(enumerate(perm), key=lambda i_j: i_j[1])]
169
+
170
+
171
+ # CompositeImplicitAutograd
172
+ @out_wrapper(exact_dtype=True)
173
+ def matrix_norm(
174
+ A: TensorLikeType,
175
+ ord: Union[float, str] = "fro",
176
+ dim: DimsType = (-2, -1),
177
+ keepdim: bool = False,
178
+ *,
179
+ dtype: Optional[torch.dtype] = None,
180
+ ) -> TensorLikeType:
181
+ # shape
182
+ check_is_matrix(A, "linalg.matrix_norm")
183
+ # dim
184
+ dim = utils.canonicalize_dims(A.ndim, dim)
185
+ if isinstance(dim, Dim):
186
+ dim = (dim,) # type: ignore[assignment]
187
+ torch._check(
188
+ len(dim) == 2, lambda: "linalg.matrix_norm: dim must be a 2-tuple. Got {dim}"
189
+ )
190
+ torch._check(
191
+ dim[0] != dim[1],
192
+ lambda: "linalg.matrix_norm: dims must be different. Got ({dim[0]}, {dim[1]})",
193
+ )
194
+ # dtype arg
195
+ _check_norm_dtype(dtype, A.dtype, "linalg.matrix_norm")
196
+
197
+ if isinstance(ord, str):
198
+ # ord
199
+ torch._check(
200
+ ord in ("fro", "nuc"),
201
+ lambda: "linalg.matrix_norm: Order {ord} not supported.",
202
+ )
203
+ # dtype
204
+ check_fp_or_complex(
205
+ A.dtype, "linalg.matrix_norm", allow_low_precision_dtypes=ord != "nuc"
206
+ )
207
+
208
+ if ord == "fro":
209
+ return vector_norm(A, 2, dim, keepdim, dtype=dtype)
210
+ else: # ord == "nuc"
211
+ if dtype is not None:
212
+ A = _maybe_convert_to_dtype(A, dtype) # type: ignore[assignment]
213
+ perm = _backshift_permutation(dim[0], dim[1], A.ndim)
214
+ result = torch.sum(svdvals(prims.transpose(A, perm)), -1, keepdim)
215
+ if keepdim:
216
+ inv_perm = _inverse_permutation(perm)
217
+ result = prims.transpose(torch.unsqueeze(result, -1), inv_perm)
218
+ return result
219
+ else:
220
+ # ord
221
+ abs_ord = abs(ord)
222
+ torch._check(
223
+ abs_ord in (2, 1, float("inf")),
224
+ lambda: "linalg.matrix_norm: Order {ord} not supported.",
225
+ )
226
+ # dtype
227
+ check_fp_or_complex(
228
+ A.dtype, "linalg.matrix_norm", allow_low_precision_dtypes=ord != 2
229
+ )
230
+
231
+ max_min = partial(torch.amax if ord > 0.0 else torch.amin, keepdim=keepdim)
232
+
233
+ if abs_ord == 2.0:
234
+ if dtype is not None:
235
+ A = _maybe_convert_to_dtype(A, dtype) # type: ignore[assignment]
236
+ perm = _backshift_permutation(dim[0], dim[1], A.ndim)
237
+ result = max_min(svdvals(prims.transpose(A, perm)), dim=-1)
238
+ if keepdim:
239
+ inv_perm = _inverse_permutation(perm)
240
+ result = prims.transpose(torch.unsqueeze(result, -1), inv_perm)
241
+ return result
242
+ else: # 1, -1, inf, -inf
243
+ dim0, dim1 = dim
244
+ if abs_ord == float("inf"):
245
+ dim0, dim1 = dim1, dim0
246
+ if not keepdim and (dim0 < dim1):
247
+ dim1 -= 1
248
+ return max_min(
249
+ vector_norm(A, 1.0, dim=dim0, keepdim=keepdim, dtype=dtype), dim1
250
+ )
251
+
252
+
253
+ # CompositeImplicitAutograd
254
+ @out_wrapper(exact_dtype=True)
255
+ def norm(
256
+ A: TensorLikeType,
257
+ ord: Optional[Union[float, str]] = None,
258
+ dim: Optional[DimsType] = None,
259
+ keepdim: bool = False,
260
+ *,
261
+ dtype: Optional[torch.dtype] = None,
262
+ ) -> TensorLikeType:
263
+ if dim is not None:
264
+ if isinstance(dim, Dim):
265
+ dim = (dim,) # type: ignore[assignment]
266
+ torch._check(
267
+ len(dim) in (1, 2),
268
+ lambda: "linalg.norm: If dim is specified, it must be of length 1 or 2. Got {dim}",
269
+ )
270
+ elif ord is not None:
271
+ torch._check(
272
+ A.ndim in (1, 2),
273
+ lambda: "linalg.norm: If dim is not specified but ord is, the input must be 1D or 2D. Got {A.ndim}D",
274
+ )
275
+
276
+ if ord is not None and (
277
+ (dim is not None and len(dim) == 2) or (dim is None and A.ndim == 2)
278
+ ):
279
+ if dim is None:
280
+ dim = (0, 1)
281
+ return matrix_norm(A, ord, dim, keepdim, dtype=dtype)
282
+ else:
283
+ if ord is None:
284
+ ord = 2.0
285
+ return vector_norm(A, ord, dim, keepdim, dtype=dtype)
286
+
287
+
288
+ # CompositeImplicitAutograd
289
+ @out_wrapper("U", "S", "Vh", exact_dtype=True)
290
+ def svd(A: TensorLikeType, full_matrices: bool = True) -> Tuple[Tensor, Tensor, Tensor]:
291
+ return prims.svd(A, full_matrices=full_matrices)
292
+
293
+
294
+ # CompositeImplicitAutograd
295
+ @out_wrapper(exact_dtype=True)
296
+ def svdvals(A: TensorLikeType) -> Tensor:
297
+ return svd(A, full_matrices=False)[1]
298
+
299
+
300
+ # CompositeImplicitAutograd
301
+ @out_wrapper()
302
+ @elementwise_type_promotion_wrapper(
303
+ type_promoting_args=("x", "y"),
304
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
305
+ )
306
+ def vecdot(x: Tensor, y: Tensor, dim: int = -1) -> Tensor:
307
+ check_fp_or_complex(x.dtype, "linalg.vecdot")
308
+ return (x.conj() * y).sum(dim=dim)
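`vector_norm` above special-cases the degenerate orders: `ord=0` counts non-zero entries, `ord=±inf` reduces with `amax`/`amin` of the absolute value, and every other order goes through the pow/sum/pow path in a higher-precision computation dtype. A quick illustration through the public entry point these refs decompose (assumed equivalent):

# Sketch: the special-cased orders of linalg.vector_norm shown above.
import torch

x = torch.tensor([3.0, 0.0, -4.0])

assert torch.linalg.vector_norm(x, ord=0) == 2                # number of non-zeros
assert torch.linalg.vector_norm(x, ord=float("inf")) == 4.0   # amax(|x|)
assert torch.linalg.vector_norm(x, ord=2) == 5.0              # usual Euclidean norm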
venv/lib/python3.10/site-packages/torch/_refs/linalg/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (9.08 kB).
 
venv/lib/python3.10/site-packages/torch/_refs/nn/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from typing import List
+
+ __all__: List[str] = []
venv/lib/python3.10/site-packages/torch/_refs/nn/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (273 Bytes).
 
venv/lib/python3.10/site-packages/torch/_refs/nn/functional/__init__.py ADDED
@@ -0,0 +1,1230 @@
1
+ import math
2
+ from functools import wraps
3
+ from typing import Callable, Optional, Union
4
+
5
+ import torch
6
+ import torch._prims as prims
7
+ import torch._prims_common as utils
8
+ import torch._refs as refs
9
+ from torch._decomp import register_decomposition
10
+ from torch._prims_common import (
11
+ ELEMENTWISE_TYPE_PROMOTION_KIND,
12
+ NumberType,
13
+ ShapeType,
14
+ TensorLike,
15
+ TensorLikeType,
16
+ )
17
+ from torch._prims_common.wrappers import (
18
+ elementwise_type_promotion_wrapper,
19
+ elementwise_unary_scalar_wrapper,
20
+ out_wrapper,
21
+ )
22
+ from torch._refs import _make_inplace
23
+
24
+ __all__ = [
25
+ "alpha_dropout",
26
+ "celu",
27
+ "celu_",
28
+ "dropout",
29
+ "elu",
30
+ "elu_",
31
+ "gelu",
32
+ "glu",
33
+ "group_norm",
34
+ "hardshrink",
35
+ "hardtanh",
36
+ "hinge_embedding_loss",
37
+ "huber_loss",
38
+ "l1_loss",
39
+ "layer_norm",
40
+ "leaky_relu",
41
+ "log_softmax",
42
+ "margin_ranking_loss",
43
+ "mish",
44
+ "mish_",
45
+ "mse_loss",
46
+ "nll_loss",
47
+ "pairwise_distance",
48
+ "pdist",
49
+ "poisson_nll_loss",
50
+ "prelu",
51
+ "relu",
52
+ "relu6",
53
+ "selu",
54
+ "selu_",
55
+ "smooth_l1_loss",
56
+ "softmax",
57
+ "softmin",
58
+ "softplus",
59
+ "softshrink",
60
+ "tanhshrink",
61
+ "threshold",
62
+ "threshold_",
63
+ "triplet_margin_loss",
64
+ ]
65
+
66
+ Tensor = torch.Tensor
67
+ aten = torch._ops.ops.aten
68
+ DispatchKey = torch._C.DispatchKey # type: ignore[attr-defined]
69
+
70
+
71
+ def _dropout_helper(
72
+ self: TensorLikeType,
73
+ val: float,
74
+ ) -> TensorLikeType:
75
+ """
76
+ Helper function for all dropout-type operators. During training,
77
+ some of the elements of the input tensor are randomly masked.
78
+
79
+ Returns the masked tensor of the boolean values.
80
+
81
+ """
82
+
83
+ return (
84
+ refs._uniform_helper(
85
+ self.shape, low=0.0, high=1.0, dtype=torch.float32, device=self.device
86
+ )
87
+ < val
88
+ )
89
+
90
+
91
+ @register_decomposition(aten.alpha_dropout)
92
+ def alpha_dropout(
93
+ self: TensorLikeType, p: float = 0.5, training: bool = False, inplace: bool = False
94
+ ) -> TensorLikeType:
95
+ if inplace:
96
+ raise NotImplementedError
97
+
98
+ if not training:
99
+ return self
100
+
101
+ torch._check(
102
+ p <= 1 and p >= 0,
103
+ lambda: f"dropout probability has to be between 0 and 1, but got, {p}",
104
+ )
105
+
106
+ if p == 1:
107
+ return torch.zeros_like(self)
108
+
109
+ if p == 0:
110
+ return self
111
+
112
+ dropout_mask = _dropout_helper(self, 1 - p)
113
+
114
+ # From paper: Self-Normalizing Neural Networks (https://arxiv.org/pdf/1706.02515.pdf)
115
+ # alpha = - SELU.alpha * SELU.scale, here
116
+ # SELU.alpha = 1.6732632423543772848170429916717 and
117
+ # SELU.scale = 1.0507009873554804934193349852946
118
+ alpha = -1.7580993408473766
119
+
120
+ a = 1.0 / math.sqrt((alpha * alpha * p + 1) * (1 - p))
121
+ b = torch.logical_not(dropout_mask)
122
+ b = b * (alpha * a) + alpha * a * p
123
+ dropout_mask = a * dropout_mask
124
+
125
+ return self * dropout_mask + b
126
+
127
+
128
+ def _inplace_wrapper(fn):
129
+ """
130
+ Given a nn.functional non-linearity, implements its `inplace: bool` argument
131
+ """
132
+
133
+ # nb. We use the name of the first argument used in the unary references
134
+ @wraps(fn)
135
+ def _fn(a, *args, inplace=False, **kwargs):
136
+ if inplace:
137
+ torch._check(
138
+ "out" not in kwargs,
139
+ lambda: "Cannot set inplace=True and pass out= at the same time",
140
+ )
141
+ return fn(a, *args, inplace=False, out=a, **kwargs)
142
+ else:
143
+ return fn(a, *args, inplace=False, **kwargs)
144
+
145
+ return _fn
146
+
147
+
148
+ # celu is implemented specially because it has an alpha argument
149
+ # celu is very similar to elu
150
+ @register_decomposition(aten.celu)
151
+ @_inplace_wrapper
152
+ @out_wrapper()
153
+ @elementwise_type_promotion_wrapper(
154
+ type_promoting_args=("a",),
155
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
156
+ )
157
+ def celu(
158
+ a: TensorLikeType, alpha: Optional[NumberType] = None, inplace: bool = False
159
+ ) -> TensorLikeType:
160
+ """
161
+ Reference implementation of torch.nn.functional.celu
162
+ """
163
+
164
+ if inplace:
165
+ raise NotImplementedError
166
+
167
+ rhs: TensorLikeType
168
+ if alpha is not None:
169
+ python_type = utils.dtype_to_type(a.dtype)
170
+ if not utils.is_weakly_lesser_type(type(alpha), python_type):
171
+ msg = f"alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!"
172
+ raise ValueError(msg)
173
+ rhs = alpha * torch.expm1(torch.true_divide(a, alpha)) # type: ignore[arg-type]
174
+ else:
175
+ rhs = torch.expm1(a)
176
+
177
+ return torch.where(a > 0, a, rhs)
178
+
179
+
180
+ @_inplace_wrapper
181
+ @out_wrapper()
182
+ def dropout(
183
+ a: TensorLikeType, p: float = 0.5, training: bool = True, inplace: bool = False
184
+ ) -> TensorLikeType:
185
+ if inplace:
186
+ raise NotImplementedError
187
+
188
+ if not training:
189
+ return a
190
+
191
+ torch._check(
192
+ p <= 1 and p >= 0,
193
+ lambda: f"dropout probability has to be between 0 and 1, but got, {p}",
194
+ )
195
+
196
+ if p == 1:
197
+ return torch.zeros_like(a)
198
+
199
+ if p == 0:
200
+ return a
201
+
202
+ scale = 1 / (1 - p)
203
+ dropout_mask = _dropout_helper(a, 1 - p)
204
+
205
+ return a * dropout_mask * scale
206
+
207
+
208
+ @register_decomposition(aten.elu)
209
+ @_inplace_wrapper
210
+ @out_wrapper()
211
+ @elementwise_type_promotion_wrapper(
212
+ type_promoting_args=("a",),
213
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
214
+ )
215
+ def elu(
216
+ a: TensorLikeType,
217
+ alpha: NumberType = 1.0,
218
+ scale: NumberType = 1.0,
219
+ input_scale: NumberType = 1.0,
220
+ inplace: bool = False,
221
+ ) -> TensorLikeType:
222
+ """
223
+ Reference implementation of torch.nn.functional.elu
224
+ """
225
+ if inplace:
226
+ raise NotImplementedError
227
+
228
+ # nb. This should be factored out into a can_cast aux function
229
+ python_type = utils.dtype_to_type(a.dtype)
230
+ torch._check(
231
+ utils.is_weakly_lesser_type(type(input_scale), python_type),
232
+ lambda: f"input_scale argument of type {type(input_scale)} cannot be safely cast to type {python_type}!",
233
+ )
234
+ torch._check(
235
+ utils.is_weakly_lesser_type(type(scale), python_type),
236
+ lambda: f"scale argument of type {type(scale)} cannot be safely cast to type {python_type}!",
237
+ )
238
+ torch._check(
239
+ utils.is_weakly_lesser_type(type(alpha), python_type),
240
+ lambda: f"alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!",
241
+ )
242
+
243
+ return torch.where(a > 0, scale * a, (alpha * scale) * torch.expm1(a * input_scale))
244
+
245
+
246
+ @register_decomposition(aten.relu)
247
+ @_inplace_wrapper
248
+ @out_wrapper()
249
+ @elementwise_type_promotion_wrapper(
250
+ type_promoting_args=("a",),
251
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
252
+ )
253
+ def relu(a: TensorLikeType, inplace: bool = False) -> TensorLikeType:
254
+ """
255
+ Reference implementation of torch.nn.functional.relu
256
+ """
257
+
258
+ if inplace:
259
+ raise NotImplementedError
260
+
261
+ return torch.where(torch.le(a, 0), 0, a)
262
+
263
+
264
+ def group_norm(
265
+ input: Tensor,
266
+ num_groups: int,
267
+ weight: Optional[Tensor] = None,
268
+ bias: Optional[Tensor] = None,
269
+ eps: float = 1e-5,
270
+ ) -> Tensor:
271
+ """
272
+ Reference implementation of :func:`torch.nn.functional.group_norm`.
273
+ """
274
+ torch._check(
275
+ input.ndim >= 2,
276
+ lambda: f"Expected at least 2 dimensions for input tensor but received {input.ndim}",
277
+ )
278
+
279
+ batch_size = input.shape[0]
280
+ num_channels = input.shape[1]
281
+ torch._check(
282
+ num_channels % num_groups == 0,
283
+ lambda: "Expected number of channels in input to be divisible by num_groups, "
284
+ + f"but got input of shape {input.shape} and num_groups = {num_groups}",
285
+ )
286
+
287
+ # input shape is (N, C, *), so we flatten all inner dimensions except (N, C)
288
+ flattened_inner_size = 1
289
+ for dim_length in input.shape[2:]:
290
+ flattened_inner_size *= dim_length
291
+
292
+ return torch.native_group_norm(
293
+ input,
294
+ weight,
295
+ bias,
296
+ batch_size,
297
+ num_channels,
298
+ flattened_inner_size,
299
+ num_groups,
300
+ eps,
301
+ )[0]
302
+
303
+
304
+ def layer_norm(
305
+ input: Tensor,
306
+ normalized_shape: ShapeType,
307
+ weight: Optional[Tensor] = None,
308
+ bias: Optional[Tensor] = None,
309
+ eps: float = 1e-5,
310
+ ) -> Tensor:
311
+ """
312
+ Reference implementation of :func:`torch.nn.functional.layer_norm`.
313
+ """
314
+ return torch.native_layer_norm(input, normalized_shape, weight, bias, eps)[0]
315
+
316
+
317
+ @register_decomposition(aten.leaky_relu)
318
+ @_inplace_wrapper
319
+ @out_wrapper()
320
+ @elementwise_type_promotion_wrapper(
321
+ type_promoting_args=("a",),
322
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
323
+ )
324
+ def leaky_relu(
325
+ a: TensorLikeType, negative_slope: float = 0.01, inplace: bool = False
326
+ ) -> TensorLikeType:
327
+ """
328
+ Reference implementation of torch.nn.functional.leaky_relu
329
+ """
330
+
331
+ if inplace:
332
+ raise NotImplementedError
333
+
334
+ python_type = utils.dtype_to_type(a.dtype)
335
+ if not utils.is_weakly_lesser_type(type(negative_slope), python_type):
336
+ msg = f"negative_slope argument of type {type(negative_slope)} cannot be safely cast to type {python_type}!"
337
+ raise ValueError(msg)
338
+ return torch.where(torch.gt(a, 0), a, torch.mul(a, negative_slope))
339
+
340
+
341
+ @register_decomposition(aten.mish)
342
+ @_inplace_wrapper
343
+ @out_wrapper()
344
+ @elementwise_type_promotion_wrapper(
345
+ type_promoting_args=("a",),
346
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
347
+ )
348
+ def mish(a: TensorLikeType, inplace: bool = False) -> TensorLikeType:
349
+ """
350
+ Reference implementation of torch.nn.functional.mish
351
+ """
352
+
353
+ if inplace:
354
+ raise NotImplementedError
355
+ return a * torch.tanh(torch.nn.functional.softplus(a))
356
+
357
+
358
+ @register_decomposition(aten.selu)
359
+ @_inplace_wrapper
360
+ @out_wrapper()
361
+ @elementwise_type_promotion_wrapper(
362
+ type_promoting_args=("a",),
363
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
364
+ )
365
+ def selu(a: TensorLikeType, inplace: bool = False) -> TensorLikeType:
366
+ """
367
+ Reference implementation of torch.nn.functional.selu
368
+ """
369
+ if inplace:
370
+ raise NotImplementedError
371
+
372
+ alpha = 1.6732632423543772848170429916717
373
+ scale = 1.0507009873554804934193349852946
374
+
375
+ rhs = alpha * torch.expm1(a)
376
+
377
+ return scale * torch.where(a > 0, a, rhs)
378
+
379
+
380
+ # Forwarding alias: the functional variant doesn't support the out kwarg
381
+ # CompositeImplicitAutograd - don't register decomp
382
+ def softmax(
383
+ a: TensorLikeType,
384
+ dim: Optional[int] = None,
385
+ _stacklevel: int = 3, # for compat when using TorchRefsMode(strict=True)
386
+ dtype: Optional[torch.dtype] = None,
387
+ ) -> TensorLikeType:
388
+ # The error is for compat with regular PyTorch, which has this behavior
389
+ # deprecated. For PrimTorch, it's fine to drop support for deprecated
390
+ # behavior because it requires explicit opt in. This error is to inform
391
+ # users how to update their calls.
392
+ torch._check(dim is not None, lambda: "implicit dim not supported, use dim=X")
393
+ return torch.softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload]
394
+
395
+
396
+ # CompositeImplicitAutograd - don't register decomp
397
+ def softmin(
398
+ a: TensorLikeType,
399
+ dim: Optional[int] = None,
400
+ _stacklevel: int = 3, # for compat when using TorchRefsMode(strict=True)
401
+ dtype: Optional[torch.dtype] = None,
402
+ ) -> TensorLikeType:
403
+ # The error is for compat with regular PyTorch, which has this behavior
404
+ # deprecated. For PrimTorch, it's fine to drop support for deprecated
405
+ # behavior because it requires explicit opt in. This error is to inform
406
+ # users how to update their calls.
407
+ torch._check(dim is not None, lambda: "implicit dim not supported, use dim=X")
408
+ return torch.softmax(a=-a, dim=dim, dtype=dtype) # type: ignore[call-overload]
409
+
410
+
411
+ # softplus is implemented specially because it has beta and threshold arguments
412
+ @register_decomposition(aten.softplus)
413
+ @_inplace_wrapper
414
+ @out_wrapper()
415
+ @elementwise_type_promotion_wrapper(
416
+ type_promoting_args=("a",),
417
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
418
+ )
419
+ def softplus(
420
+ a: TensorLikeType,
421
+ beta: Optional[NumberType] = None,
422
+ threshold: NumberType = 20,
423
+ inplace: bool = False,
424
+ ) -> TensorLikeType:
425
+ """
426
+ Reference implementation of torch.nn.functional.softplus
427
+ """
428
+
429
+ if inplace:
430
+ raise NotImplementedError
431
+
432
+ rhs: TensorLikeType
433
+ if beta is not None:
434
+ python_type = utils.dtype_to_type(a.dtype)
435
+ if not utils.is_weakly_lesser_type(type(beta), python_type):
436
+ msg = f"beta argument of type {type(beta)} cannot be safely cast to type {python_type}!"
437
+ raise ValueError(msg)
438
+ scaled_input = a * beta
439
+ rhs = torch.true_divide(torch.log1p(torch.exp(scaled_input)), beta) # type: ignore[arg-type]
440
+
441
+ else:
442
+ scaled_input = a
443
+ rhs = torch.log1p(torch.exp(scaled_input))
444
+
445
+ return torch.where(scaled_input > threshold, a, rhs)
446
+
447
+
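Why the final torch.where matters (an editorial sketch, not part of the diff): for inputs beyond the threshold, log1p(exp(x)) would overflow, so the reference returns the input directly, which matches log(1 + e^x) to machine precision.

import torch

x = torch.tensor([-2.0, 0.0, 25.0])
# Roughly [0.1269, 0.6931, 25.0]; the last entry exceeds the default threshold of 20,
# so it is passed through unchanged instead of going through log1p(exp(.)).
print(torch.nn.functional.softplus(x))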
448
+ @aten.hardshrink.default.py_impl(DispatchKey.Autograd)
449
+ @register_decomposition(aten.hardshrink)
450
+ @out_wrapper()
451
+ def hardshrink(a: TensorLikeType, lambd: float = 0.5):
452
+ # Formula for reference,
453
+ # hardshrink(x) = x if x > lambd
454
+ # = x if x < -lambd
455
+ # = 0 otherwise
456
+ return torch.where(torch.abs(a) <= lambd, 0, a)
457
+
458
+
459
+ @aten.softshrink.default.py_impl(DispatchKey.Autograd)
460
+ @register_decomposition(aten.softshrink)
461
+ @out_wrapper()
462
+ def softshrink(a: TensorLikeType, lambd: float = 0.5):
463
+ # Formula for reference,
464
+ # softshrink(x) = x - lambd if x > lambd
465
+ # = x + lambd if x < -lambd
466
+ # = 0 otherwise
467
+ torch._check(
468
+ lambd >= 0,
469
+ lambda: f"lambda must be greater or equal to 0, but found to be {lambd}",
470
+ )
471
+ # We implement this in one torch.where to generate better code in the backward
472
+ # see https://github.com/pytorch/pytorch/pull/107052#discussion_r1293748211
473
+ return torch.where(torch.abs(a) > lambd, a - torch.sign(a) * lambd, 0)
474
+
475
+
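To contrast the two formulas above (an editorial example using the public API): hardshrink zeroes the band |x| <= lambd and leaves everything else untouched, while softshrink additionally pulls the surviving values toward zero by lambd.

import torch

x = torch.tensor([-1.0, -0.3, 0.0, 0.3, 1.0])
print(torch.nn.functional.hardshrink(x))  # tensor([-1.0, 0.0, 0.0, 0.0, 1.0])
print(torch.nn.functional.softshrink(x))  # tensor([-0.5, 0.0, 0.0, 0.0, 0.5])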
476
+ # Losses
477
+ def _reduction_int_to_str(reduction: int) -> str:
478
+ from torch._decomp.decompositions import Reduction
479
+
480
+ if reduction == Reduction.NONE.value:
481
+ return "none"
482
+ elif reduction == Reduction.MEAN.value:
483
+ return "mean"
484
+ elif reduction == Reduction.SUM.value:
485
+ return "sum"
486
+ else:
487
+ raise ValueError(f"{reduction} is not a valid value for reduction")
488
+
489
+
490
+ def _apply_loss_reduction(loss: TensorLikeType, reduction: str) -> TensorLikeType:
491
+ if reduction == "sum":
492
+ return torch.sum(loss)
493
+ elif reduction == "mean":
494
+ return torch.mean(loss)
495
+ else: # reduction == "none"
496
+ return loss
497
+
498
+
499
+ def _check_reduction_value(reduction: str):
500
+ if reduction not in ("mean", "sum", "none"):
501
+ raise ValueError(f"{reduction} is not a valid value for reduction")
502
+
503
+
504
+ # This helper function maps the deprecated arguments "size_average" and "reduce"
505
+ # to their corresponding "reduction" string argument
506
+ def _get_string_reduction_arg(
507
+ *, size_average: Optional[bool], reduce: Optional[bool]
508
+ ) -> str:
509
+ if size_average is None:
510
+ size_average = True
511
+ if reduce is None:
512
+ reduce = True
513
+ if size_average and reduce:
514
+ ret = "mean"
515
+ elif reduce:
516
+ ret = "sum"
517
+ else:
518
+ ret = "none"
519
+ return ret
520
+
521
+
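The mapping implemented by this helper, spelled out (illustrative asserts only, assuming the helper is in scope; None is treated as True):

# (size_average, reduce) -> reduction
assert _get_string_reduction_arg(size_average=None, reduce=None) == "mean"
assert _get_string_reduction_arg(size_average=False, reduce=True) == "sum"
assert _get_string_reduction_arg(size_average=True, reduce=False) == "none"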
522
+ # CompositeImplicitAutograd - don't register decomp
523
+ @elementwise_type_promotion_wrapper(
524
+ type_promoting_args=("input", "target"),
525
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT,
526
+ )
527
+ def l1_loss(
528
+ input: TensorLikeType,
529
+ target: TensorLikeType,
530
+ size_average: Optional[bool] = None,
531
+ reduce: Optional[bool] = None,
532
+ reduction: str = "mean",
533
+ ) -> TensorLikeType:
534
+ """
535
+ Reference implementation of torch.nn.functional.l1_loss
536
+ """
537
+ if size_average is not None or reduce is not None:
538
+ # TODO: Raise exception instead of converting value. This is only for
539
+ # primTorch since it can drop support for deprecated arguments.
540
+ # msg = "size_average and reduce args are deprecated, please use reduction argument."
541
+ reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce)
542
+ _check_reduction_value(reduction)
543
+ loss = torch.abs(input - target)
544
+ return _apply_loss_reduction(loss, reduction)
545
+
546
+
547
+ @elementwise_type_promotion_wrapper(
548
+ type_promoting_args=("input", "target"),
549
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT,
550
+ )
551
+ def smooth_l1_loss(
552
+ input: TensorLikeType,
553
+ target: TensorLikeType,
554
+ size_average: Optional[bool] = None,
555
+ reduce: Optional[bool] = None,
556
+ reduction: str = "mean",
557
+ beta: float = 1.0,
558
+ ) -> TensorLikeType:
559
+ """
560
+ Reference implementation of torch.nn.functional.smooth_l1_loss
561
+ """
562
+ if size_average is not None or reduce is not None:
563
+ # TODO: Raise exception instead of converting value. This is only for
564
+ # primTorch since it can drop support for deprecated arguments.
565
+ # msg = "size_average and reduce args are deprecated, please use reduction argument."
566
+ reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce)
567
+ _check_reduction_value(reduction)
568
+
569
+ if beta == 0.0:
570
+ return torch.nn.functional.l1_loss(
571
+ input, target, size_average=size_average, reduce=reduce, reduction=reduction
572
+ )
573
+ else:
574
+ loss = torch.abs(input - target)
575
+ loss = torch.where(loss < beta, 0.5 * loss**2 / beta, loss - 0.5 * beta)
576
+ return _apply_loss_reduction(loss, reduction)
577
+
578
+
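A worked numerical check of the piecewise formula (illustrative only): with beta = 1, a residual of 0.5 falls in the quadratic branch (0.5 * 0.5**2 / 1 = 0.125) while a residual of 2 falls in the linear branch (2 - 0.5 = 1.5).

import torch

inp = torch.tensor([0.0, 0.0])
tgt = torch.tensor([0.5, 2.0])
print(torch.nn.functional.smooth_l1_loss(inp, tgt, reduction="none", beta=1.0))
# tensor([0.1250, 1.5000])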
579
+ # Forwarding alias: the functional variant doesn't support the out kwarg
580
+ # CompositeImplicitAutograd - don't register decomp
581
+ def log_softmax(
582
+ a: TensorLikeType,
583
+ dim: Optional[int] = None,
584
+ _stacklevel: int = 3, # for compat when using TorchRefsMode(strict=True)
585
+ dtype: Optional[torch.dtype] = None,
586
+ ) -> TensorLikeType:
587
+ # The error is for compat with regular PyTorch, which has this behavior
588
+ # deprecated. For PrimTorch, it's fine to drop support for deprecated
589
+ # behavior because it requires explicit opt in. This error is to inform
590
+ # users how to update their calls.
591
+ torch._check(dim is not None, lambda: "implicit dim not supported, use dim=X")
592
+ return torch.log_softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload]
593
+
594
+
595
+ @register_decomposition(aten.margin_ranking_loss)
596
+ def margin_ranking_loss(
597
+ input1: TensorLikeType,
598
+ input2: TensorLikeType,
599
+ target: TensorLikeType,
600
+ margin: float = 0.0,
601
+ reduction: str = "mean",
602
+ ) -> TensorLikeType:
603
+ # loss_without_reduction = max(0, -target * (input1 - input2) + margin)
604
+ if input1.ndim != input2.ndim or input1.ndim != target.ndim:
605
+ raise RuntimeError(
606
+ "margin_ranking_loss : All input tensors should have same dimension but got sizes: "
607
+ f"input1: {input1.shape}, input2: {input2.shape}, target: {target.shape} "
608
+ )
609
+ _check_reduction_value(reduction)
610
+ loss = torch.clamp_min(-target * (input1 - input2) + margin, 0)
611
+ return _apply_loss_reduction(loss, reduction)
612
+
613
+
614
+ @elementwise_type_promotion_wrapper(
615
+ type_promoting_args=("input", "target"),
616
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT,
617
+ )
618
+ def mse_loss(
619
+ input: TensorLikeType,
620
+ target: TensorLikeType,
621
+ size_average: Optional[bool] = None,
622
+ reduce: Optional[bool] = None,
623
+ reduction: str = "mean",
624
+ ) -> TensorLikeType:
625
+ if size_average is not None or reduce is not None:
626
+ # TODO: Raise exception instead of converting value. This is only for
627
+ # primTorch since it can drop support for deprecated arguments.
628
+ # msg = "size_average and reduce args are deprecated, please use reduction argument."
629
+ reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce)
630
+ _check_reduction_value(reduction)
631
+ loss = torch.pow(input - target, 2)
632
+ return _apply_loss_reduction(loss, reduction)
633
+
634
+
635
+ @register_decomposition(aten.hinge_embedding_loss)
636
+ def hinge_embedding_loss(
637
+ input: TensorLikeType,
638
+ target: TensorLikeType,
639
+ margin: float = 1.0,
640
+ reduction: str = "mean",
641
+ ) -> TensorLikeType:
642
+ # loss_without_reduction = input if y == 1
643
+ # = max(0, margin - input) if y == -1
644
+ _check_reduction_value(reduction)
645
+ margin_clamp = torch.clamp_min(margin - input, 0)
646
+ output_margin = torch.where(target != 1, margin_clamp, 0)
647
+ output_self = torch.where(target != -1, input, 0)
648
+ loss = output_margin + output_self
649
+ return _apply_loss_reduction(loss, reduction)
650
+
651
+
652
+ def _nll_loss_nd(
653
+ input: TensorLikeType,
654
+ target: TensorLikeType,
655
+ weight: Optional[TensorLikeType],
656
+ reduction: str,
657
+ ignore_index: int,
658
+ ) -> TensorLikeType:
659
+ torch._check(
660
+ input.ndim > 0 and input.ndim <= 3,
661
+ lambda: f"Expected input dimension to be either [1, 2, 3] but received {input.ndim}.",
662
+ )
663
+
664
+ torch._check(
665
+ (input.ndim == 1) or (input.shape[0] == target.shape[0]),
666
+ lambda: f"Expected input batch size {input.shape[0]} to match target batch size {target.shape[0]}.",
667
+ )
668
+
669
+ _check_reduction_value(reduction)
670
+
671
+ flat_target = torch.flatten(target)
672
+ ignore_classes_mask = torch.eq(flat_target, ignore_index)
673
+
674
+ # TODO: Enable data-dependent checks with debug mode
675
+ # TODO: This check does not work with FakeTensor inputs; See Issue #85834
676
+ # Explicit cast for class_check to bool; See Issue #78071
677
+ """
678
+ from torch._subclasses.fake_tensor import FakeTensor
679
+ num_classes = input.shape[1] if input.ndim > 1 else input.shape[0]
680
+ valid_classes_mask = torch.logical_and(
681
+ (flat_target >= 0), (flat_target < num_classes)
682
+ )
683
+ class_check = torch.all(torch.logical_or(ignore_classes_mask, valid_classes_mask))
684
+ torch._check(
685
+ isinstance(target, FakeTensor) or bool(class_check.item()),
686
+ lambda: "A target class is out-of-bounds and not the ignore index.",
687
+ )
688
+ """
689
+
690
+ ignore_class_weight = torch.scalar_tensor(0, dtype=input.dtype, device=input.device)
691
+ class_weight = (
692
+ torch.scalar_tensor(1, dtype=input.dtype, device=input.device)
693
+ if weight is None
694
+ else weight[flat_target]
695
+ )
696
+ current_weight = torch.where(
697
+ ignore_classes_mask,
698
+ ignore_class_weight,
699
+ class_weight,
700
+ )
701
+
702
+ if input.ndim == 1:
703
+ # implicit batch size = 1
704
+ # input (1 batch size, C classes)
705
+ loss = -input[target] * current_weight
706
+ elif input.ndim == 2:
707
+ # input (N batch size, C classes)
708
+ batch_size = input.shape[0]
709
+ loss = -input[torch.arange(batch_size), target] * current_weight
710
+ else:
711
+ # 3D case (N batch size, C classes, K dimensions)
712
+ # input (N batch size, C classes, K)
713
+ batch_size = input.shape[0]
714
+ extent = input.shape[2]
715
+ numel = batch_size * extent
716
+ indices = torch.arange(numel)
717
+ bdx = indices // extent
718
+ kdx = indices % extent
719
+ loss = -input[bdx, flat_target, kdx] * current_weight
720
+ loss = torch.reshape(loss, target.shape)
721
+
722
+ if reduction == "none":
723
+ return loss
724
+ elif reduction == "sum":
725
+ return torch.sum(loss)
726
+ else:
727
+ # calculate weighted mean of the loss function
728
+ return torch.sum(loss) / torch.sum(current_weight)
729
+
730
+
731
+ @register_decomposition(aten.nll_loss)
732
+ @out_wrapper()
733
+ @elementwise_type_promotion_wrapper(
734
+ type_promoting_args=("input",),
735
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
736
+ )
737
+ def nll_loss(
738
+ input: TensorLikeType,
739
+ target: TensorLikeType,
740
+ weight: Optional[TensorLikeType] = None,
741
+ size_average: Optional[bool] = None,
742
+ ignore_index: int = -100,
743
+ reduce: Optional[bool] = None,
744
+ reduction: str = "mean",
745
+ ) -> TensorLikeType:
746
+ """
747
+ Reference implementation of torch.nn.functional.nll_loss
748
+ """
749
+ torch._check(
750
+ input.ndim > 0,
751
+ lambda: f"Expected input tensor to have 1 or more dimensions (got {input.ndim})",
752
+ )
753
+
754
+ # TODO: raise exception instead of converting value
755
+ # msg = "size_average and reduce args are deprecated, please use reduction argument."
756
+ # Convert these options for consistency with the eager mode
757
+ if size_average is not None or reduce is not None:
758
+ reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce)
759
+
760
+ # The expected behavior when the target and input have zero elements:
761
+ # reduction = 'none' --- tensor([])
762
+ # reduction = 'sum' --- tensor(0.)
763
+ # reduction = 'mean' --- tensor(nan)
764
+ # Mean reduction on empty tensors produces NaN. See the discussion in
765
+ # https://github.com/pytorch/pytorch/pull/64572#issuecomment-926504162
766
+ if input.numel() == 0 and target.numel() == 0:
767
+ if reduction == "none":
768
+ return torch.zeros_like(target)
769
+ elif reduction == "sum":
770
+ return torch.empty_like(target)
771
+ else:
772
+ return torch.full_like(target, float("nan"))
773
+
774
+ # The _nll_loss_nd helper function handles the most common cases.
775
+ # ndim == 1 (Single Example)
776
+ # => Batch Size: 1, Input: (C), Target: ()
777
+ # ndim == 2 (k = 1)
778
+ # => Batch Size: N, Input: (N, C), Target: (N)
779
+ # ndim == 3 (k > 1)
780
+ # => Batch Size: N, Input: (N, C, K), Target: (N, K)
781
+ if input.ndim <= 3:
782
+ return _nll_loss_nd(input, target, weight, reduction, ignore_index)
783
+
784
+ # For ndim > 3, we reshape the input and target to 3-D case.
785
+ # Input (N batch-size, C classes, k-dimensions)
786
+ # Target (N batch-size, k-dimensions)
787
+ torch._check(
788
+ input.ndim > 0 and target.ndim > 0 and target.shape[1:] == input.shape[2:],
789
+ lambda: (
790
+ "Expected input and target to both have ndim > 0 and "
791
+ "target.shape[1:] == input.shape[2:], but got "
792
+ f"target.shape {target.shape} and input.shape {input.shape}"
793
+ ),
794
+ )
795
+
796
+ batch_size = input.shape[0]
797
+ num_classes = input.shape[1]
798
+ out_size = [batch_size] + list(target.shape[1:])
799
+
800
+ input = torch.reshape(input, [batch_size, num_classes, -1])
801
+ target = torch.reshape(target, [batch_size, -1])
802
+ if reduction != "none":
803
+ return _nll_loss_nd(input, target, weight, reduction, ignore_index)
804
+ else:
805
+ result = _nll_loss_nd(input, target, weight, reduction, ignore_index)
806
+ # reshape flattened inner-dim to original k-dimensions
807
+ return torch.reshape(result, out_size)
808
+
809
+
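Typical usage of the ndim == 2 fast path handled by _nll_loss_nd (an illustrative sketch, not part of the diff): the input is expected to contain log-probabilities, one row per example.

import torch

logits = torch.randn(4, 3)                               # (N, C)
log_probs = torch.log_softmax(logits, dim=1)
target = torch.tensor([0, 2, 1, 2])                      # (N,)
loss = torch.nn.functional.nll_loss(log_probs, target)   # scalar, "mean" reduction
print(loss)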
810
+ # TODO: This ref supports int reduction and out kwarg to be compatible with ATen:
811
+ # https://github.com/pytorch/pytorch/issues/83931
812
+ # TODO: Could be rewritten to support complex:
813
+ # https://github.com/pytorch/pytorch/pull/85041
814
+ @register_decomposition(aten.huber_loss)
815
+ @out_wrapper()
816
+ @elementwise_type_promotion_wrapper(
817
+ type_promoting_args=("input", "target"),
818
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
819
+ )
820
+ def huber_loss(
821
+ input: TensorLikeType,
822
+ target: TensorLikeType,
823
+ reduction: Union[str, int] = "mean",
824
+ delta: float = 1.0,
825
+ ) -> TensorLikeType:
826
+ """
827
+ Reference implementation of torch.nn.functional.huber_loss
828
+ """
829
+ if type(reduction) is int:
830
+ reduction = _reduction_int_to_str(reduction)
831
+ _check_reduction_value(reduction) # type: ignore[arg-type]
832
+ torch._check(
833
+ delta > 0,
834
+ lambda: "huber_loss does not support non-positive values for delta.",
835
+ )
836
+ z = (input - target).abs()
837
+ loss = torch.where(z < delta, 0.5 * z * z, delta * (z - 0.5 * delta))
838
+ return _apply_loss_reduction(loss, reduction) # type: ignore[arg-type]
839
+
840
+
841
+ # tanhshrink does not use _make_elementwise_unary_reference because it does not support out
842
+ @elementwise_unary_scalar_wrapper
843
+ @elementwise_type_promotion_wrapper(
844
+ type_promoting_args=("a",),
845
+ type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
846
+ )
847
+ def tanhshrink(a: TensorLikeType) -> TensorLikeType:
848
+ """
849
+ Reference implementation of torch.nn.functional.tanhshrink
850
+ """
851
+ if not isinstance(a, TensorLike):
852
+ raise RuntimeError(
853
+ "Expected a tensor input for an elementwise unary operation!"
854
+ )
855
+ return a - torch.tanh(a)
856
+
857
+
858
+ @register_decomposition(aten.threshold)
859
+ @_inplace_wrapper
860
+ @out_wrapper()
861
+ @elementwise_type_promotion_wrapper(
862
+ type_promoting_args=("a",),
863
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
864
+ )
865
+ def threshold(
866
+ a: TensorLikeType,
867
+ threshold: NumberType,
868
+ value: Union[bool, int, float],
869
+ inplace: bool = False,
870
+ ) -> TensorLikeType:
871
+ """
872
+ Reference implementation of torch.nn.functional.threshold
873
+ """
874
+
875
+ if inplace:
876
+ raise NotImplementedError
877
+
878
+ return torch.where(a <= threshold, value, a)
879
+
880
+
881
+ # CompositeImplicitAutograd - don't register decomp
882
+ # No elementwise type promotion - core op doesn't explicitly type promote
883
+ def triplet_margin_loss(
884
+ anchor: TensorLikeType,
885
+ positive: TensorLikeType,
886
+ negative: TensorLikeType,
887
+ margin: float = 1.0,
888
+ p: float = 2,
889
+ eps: float = 1e-6,
890
+ swap: bool = False,
891
+ size_average: Optional[bool] = None,
892
+ reduce: Optional[bool] = None,
893
+ reduction: str = "mean",
894
+ ) -> TensorLikeType:
895
+ if size_average is not None or reduce is not None:
896
+ # TODO: Raise exception instead of converting value. This is only for
897
+ # primTorch since it can drop support for deprecated arguments.
898
+ # msg = "size_average and reduce args are deprecated, please use reduction argument."
899
+ reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce)
900
+
901
+ # torch.nn.functional.triplet_margin_with_distance_loss has no ref defined
902
+ # since it's a pure Python implementation. Use this helper instead.
903
+ return _triplet_margin_with_distance_loss(
904
+ anchor=anchor,
905
+ positive=positive,
906
+ negative=negative,
907
+ distance_function=lambda x, y: torch.pairwise_distance(x, y, p, eps),
908
+ margin=margin,
909
+ swap=swap,
910
+ reduction=reduction,
911
+ )
912
+
913
+
914
+ # Pure Python impl - don't register decomp and don't add a ref. Defined as a
915
+ # helper here since triplet_margin_loss can be nicely implemented with it.
916
+ def _triplet_margin_with_distance_loss(
917
+ anchor: TensorLikeType,
918
+ positive: TensorLikeType,
919
+ negative: TensorLikeType,
920
+ *,
921
+ distance_function: Optional[
922
+ Callable[[TensorLikeType, TensorLikeType], TensorLikeType]
923
+ ] = None,
924
+ margin: float = 1.0,
925
+ swap: bool = False,
926
+ reduction: str = "mean",
927
+ ) -> TensorLikeType:
928
+ _check_reduction_value(reduction)
929
+
930
+ a_dim = anchor.ndim
931
+ p_dim = positive.ndim
932
+ n_dim = negative.ndim
933
+ torch._check(
934
+ a_dim == p_dim and p_dim == n_dim,
935
+ lambda: (
936
+ f"The anchor, positive, and negative tensors are expected to have "
937
+ f"the same number of dimensions, but got: anchor {a_dim}D, "
938
+ f"positive {p_dim}D, and negative {n_dim}D inputs"
939
+ ),
940
+ )
941
+
942
+ if distance_function is None:
943
+ distance_function = torch.pairwise_distance
944
+
945
+ dist_pos = distance_function(anchor, positive)
946
+ dist_neg = distance_function(anchor, negative)
947
+ # The distance swap is described in the paper "Learning shallow
948
+ # convolutional feature descriptors with triplet losses" by V. Balntas, E.
949
+ # Riba et al. If True, and if the positive example is closer to the
950
+ # negative example than the anchor is, swaps the positive example and the
951
+ # anchor in the loss computation.
952
+ if swap:
953
+ dist_swap = distance_function(positive, negative)
954
+ dist_neg = torch.minimum(dist_neg, dist_swap)
955
+ loss = torch.clamp_min(margin + dist_pos - dist_neg, 0)
956
+ return _apply_loss_reduction(loss, reduction)
957
+
958
+
959
+ @register_decomposition(aten.hardtanh)
960
+ @_inplace_wrapper
961
+ @out_wrapper()
962
+ @elementwise_unary_scalar_wrapper
963
+ @elementwise_type_promotion_wrapper(
964
+ type_promoting_args=("a"),
965
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
966
+ )
967
+ def hardtanh(
968
+ a: TensorLikeType,
969
+ min_val: NumberType = -1,
970
+ max_val: NumberType = 1,
971
+ inplace: bool = False,
972
+ ) -> TensorLikeType:
973
+ """
974
+ Reference implementation of torch.nn.functional.hardtanh
975
+ """
976
+ if inplace:
977
+ raise NotImplementedError
978
+ if utils.is_boolean_dtype(a.dtype):
979
+ raise RuntimeError("Bool inputs not supported for hardtanh")
980
+
981
+ # preserve legacy behavior of boundaries not causing type promotion
982
+ if utils.is_integer_dtype(a.dtype):
983
+ min_val = int(min_val) # type: ignore[arg-type]
984
+ max_val = int(max_val) # type: ignore[arg-type]
985
+ if not (a.dtype != torch.uint8 or (min_val >= 0 and max_val >= 0)):
986
+ raise RuntimeError(
987
+ "Cannot do hardtanh on an unsigned type with negative limits"
988
+ )
989
+ return torch.clamp(a, min_val, max_val) # type: ignore[arg-type]
990
+
991
+
992
+ @register_decomposition(aten.gelu)
993
+ @out_wrapper()
994
+ @elementwise_unary_scalar_wrapper
995
+ @elementwise_type_promotion_wrapper(
996
+ type_promoting_args=("a",),
997
+ type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
998
+ )
999
+ def gelu(a: TensorLikeType, approximate: str = "none") -> TensorLikeType:
1000
+ """
1001
+ Reference implementation of torch.nn.functional.gelu
1002
+ """
1003
+ if not isinstance(a, TensorLike):
1004
+ raise RuntimeError(
1005
+ "Expected a tensor input for an elementwise unary operation!"
1006
+ )
1007
+ M_SQRT2 = 1.41421356237309504880
1008
+ M_SQRT1_2 = 0.70710678118654752440
1009
+ M_2_SQRTPI = 1.12837916709551257390
1010
+ if approximate == "tanh":
1011
+ kBeta = M_SQRT2 * M_2_SQRTPI * 0.5
1012
+ kKappa = 0.044715
1013
+ a_cube = a * a * a
1014
+ inner = kBeta * (a + kKappa * a_cube)
1015
+ return 0.5 * a * (1 + torch.tanh(inner))
1016
+ elif approximate == "none":
1017
+ kAlpha = M_SQRT1_2
1018
+ return a * 0.5 * (1 + torch.erf(a * kAlpha))
1019
+ else:
1020
+ raise RuntimeError("approximate argument must be either none or tanh.")
1021
+
1022
+
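How close the two branches are in practice (illustrative, using the public API): the tanh approximation tracks the exact erf form to roughly 1e-3 for moderate inputs, e.g. both give about 0.841 at x = 1.

import torch

x = torch.tensor([-1.0, 0.0, 1.0])
exact = torch.nn.functional.gelu(x)                       # erf formulation
approx = torch.nn.functional.gelu(x, approximate="tanh")  # tanh formulation above
print(exact, approx)
print((exact - approx).abs().max())  # small, on the order of 1e-3 or less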
1023
+ # CompositeImplicitAutograd - don't register decomp
1024
+ @elementwise_type_promotion_wrapper(
1025
+ type_promoting_args=("input", "target"),
1026
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
1027
+ )
1028
+ def poisson_nll_loss(
1029
+ input: TensorLikeType,
1030
+ target: TensorLikeType,
1031
+ log_input: bool = True,
1032
+ full: bool = False,
1033
+ size_average: Optional[bool] = None,
1034
+ eps: float = 1e-8,
1035
+ reduce: Optional[bool] = None,
1036
+ reduction: str = "mean",
1037
+ ) -> TensorLikeType:
1038
+ """
1039
+ Reference implementation of torch.nn.functional.poisson_nll_loss
1040
+ """
1041
+ if size_average is not None or reduce is not None:
1042
+ # TODO: Raise exception instead of converting value. This is only for
1043
+ # primTorch since it can drop support for deprecated arguments.
1044
+ # msg = "size_average and reduce args are deprecated, please use reduction argument."
1045
+ reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce)
1046
+ _check_reduction_value(reduction)
1047
+ if log_input:
1048
+ loss = torch.exp(input) - target * input
1049
+ else:
1050
+ loss = input - target * torch.log(input + eps)
1051
+
1052
+ if full:
1053
+ stirling_term = (
1054
+ target * torch.log(target) - target + 0.5 * torch.log(2 * torch.pi * target)
1055
+ )
1056
+ # avoid inplace add
1057
+ loss = loss + stirling_term.masked_fill(target <= 1, 0)
1058
+ return _apply_loss_reduction(loss, reduction)
1059
+
1060
+
1061
+ @register_decomposition(aten.prelu)
1062
+ @elementwise_type_promotion_wrapper(
1063
+ type_promoting_args=("a", "weight"),
1064
+ type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
1065
+ )
1066
+ def prelu(a: TensorLikeType, weight: TensorLikeType) -> TensorLikeType:
1067
+ """
1068
+ Reference implementation of torch.nn.functional.prelu
1069
+ """
1070
+ torch._check(
1071
+ isinstance(a, TensorLike),
1072
+ lambda: f"prelu: Expected `a` to be tensor, but got: {type(a)}",
1073
+ )
1074
+ torch._check(
1075
+ isinstance(weight, TensorLike),
1076
+ lambda: f"prelu: Expected `weight` to be tensor, but got: {type(weight)}",
1077
+ )
1078
+
1079
+ if weight.numel() != 1:
1080
+ torch._check(a.ndim > 0, lambda: "Not allow zero-dim input tensor.")
1081
+ channel_size = a.shape[1] if a.ndim >= 2 else 1
1082
+ torch._check(
1083
+ weight.numel() == channel_size,
1084
+ lambda: f"Mismatch of parameter numbers and input channel size. Found parameter numbers ="
1085
+ f" {weight.numel()} and channel size = {channel_size}.",
1086
+ )
1087
+
1088
+ torch._check(
1089
+ weight.ndim == 0 or weight.ndim == 1,
1090
+ lambda: f"prelu: Expected `weight` to be a scalar or 1D tensor, but got: "
1091
+ f"ndim = {weight.ndim}",
1092
+ )
1093
+ if a.ndim == 0:
1094
+ weight = weight[0] if weight.ndim == 1 else weight
1095
+ else:
1096
+ weight = prims.broadcast_in_dim(
1097
+ weight, a.shape, tuple() if weight.ndim == 0 else (0 if a.ndim == 1 else 1,)
1098
+ )
1099
+
1100
+ return torch.where(a > 0, a, a * weight)
1101
+
1102
+
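The broadcast performed by prims.broadcast_in_dim above corresponds to per-channel slopes in the common conv layout (a sketch with made-up shapes):

import torch

x = torch.randn(2, 3, 4, 4)         # (N, C, H, W)
w = torch.tensor([0.1, 0.2, 0.3])   # one learnable slope per channel, shape (C,)
out = torch.nn.functional.prelu(x, w)
# Negative entries in channel c are scaled by w[c]; positive entries pass through.
print(out.shape)  # torch.Size([2, 3, 4, 4])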
1103
+ @register_decomposition(aten.relu6)
1104
+ @_inplace_wrapper
1105
+ @out_wrapper()
1106
+ def relu6(a: TensorLikeType, inplace: bool = False) -> TensorLikeType:
1107
+ """
1108
+ Reference implementation of torch.nn.functional.relu6
1109
+ """
1110
+ if inplace:
1111
+ raise NotImplementedError
1112
+
1113
+ # See https://github.com/pytorch/pytorch/pull/81142#discussion_r918220126
1114
+ # It may be better to use clamp here, but we use hardtanh to replicate
1115
+ # the behavior of the existing implementation
1116
+ return torch.nn.functional.hardtanh(a, 0, 6)
1117
+
1118
+
1119
+ @register_decomposition(aten.glu)
1120
+ @out_wrapper()
1121
+ @elementwise_type_promotion_wrapper(
1122
+ type_promoting_args=("a",),
1123
+ type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
1124
+ )
1125
+ def glu(a: TensorLikeType, dim: int = -1) -> TensorLikeType:
1126
+ dim = utils.canonicalize_dims(a.ndim, dim)
1127
+ torch._check(
1128
+ a.shape[dim] % 2 == 0,
1129
+ lambda: f"Halving dimension must be even, but dimension {dim} is size {a.shape[dim]}",
1130
+ )
1131
+ b, c = torch.tensor_split(a, 2, dim)
1132
+
1133
+ return b * torch.sigmoid(c)
1134
+
1135
+
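The halving-and-gating behavior, shown explicitly (illustrative only): the selected dimension is split in two, and the first half is gated by the sigmoid of the second.

import torch

x = torch.randn(4, 6)
out = torch.nn.functional.glu(x, dim=-1)  # shape (4, 3)
b, c = x.chunk(2, dim=-1)
assert torch.allclose(out, b * torch.sigmoid(c))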
1136
+ @register_decomposition(aten.pairwise_distance)
1137
+ @out_wrapper()
1138
+ def pairwise_distance(
1139
+ x1: TensorLikeType,
1140
+ x2: TensorLikeType,
1141
+ p: NumberType = 2.0,
1142
+ eps: NumberType = 1e-6,
1143
+ keepdim=False,
1144
+ ) -> TensorLikeType:
1145
+ return torch.linalg.vector_norm(x1 - x2 + eps, ord=p, dim=-1, keepdim=keepdim)
1146
+
1147
+
1148
+ @register_decomposition(aten.pdist)
1149
+ @out_wrapper()
1150
+ @elementwise_type_promotion_wrapper(
1151
+ type_promoting_args=("a",),
1152
+ type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
1153
+ )
1154
+ def pdist(a: TensorLikeType, p: float = 2) -> TensorLikeType:
1155
+ torch._check(a.ndim == 2, lambda: f"pdist only supports 2D tensors, got: {a.ndim}D")
1156
+ torch._check(p >= 0, lambda: "pdist only supports non-negative p values")
1157
+ # For p == 2 we can use an efficient implementation, but other values of p
1158
+ # require creating a much bigger tensor for an intermediate step
1159
+ if p == 2:
1160
+ aTa = torch.mm(a, a.T)
1161
+ aTa_diag = torch.diag(aTa)
1162
+ t = torch.sqrt(torch.clamp(aTa_diag + aTa_diag.unsqueeze(-1) - 2 * aTa, min=0))
1163
+ else:
1164
+ t = torch.linalg.vector_norm(a.unsqueeze(1) - a, ord=p, dim=2)
1165
+ i = torch.triu_indices(t.shape[0], t.shape[1], offset=1, device=a.device)
1166
+ return t.flatten().index_select(0, i[0] * t.shape[0] + i[1])
1167
+
1168
+
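What the final index_select extracts (an editorial check, not part of the file): pdist returns the strict upper triangle of the full pairwise-distance matrix, flattened to n*(n-1)/2 entries.

import torch

a = torch.randn(5, 3)
d = torch.nn.functional.pdist(a)        # shape (10,), i.e. 5*4/2 distances
full = torch.cdist(a, a)                # full 5x5 distance matrix (p=2)
i, j = torch.triu_indices(5, 5, offset=1)
assert torch.allclose(d, full[i, j], atol=1e-5)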
1169
+ @register_decomposition(aten.pixel_shuffle)
1170
+ @out_wrapper()
1171
+ def pixel_shuffle(self: Tensor, upscale_factor: int):
1172
+ torch._check(
1173
+ self.dim() >= 3,
1174
+ lambda: f"pixel_shuffle expects input to have at least 3 dimensions, but got input with {self.dim} dimension(s)",
1175
+ )
1176
+ batch = self.shape[:-3]
1177
+ C_out = self.shape[-3] // upscale_factor**2
1178
+ HW_out = (self.shape[-2] * upscale_factor, self.shape[-1] * upscale_factor)
1179
+ n = len(batch)
1180
+ B_dims = range(n)
1181
+ C_dim, r1_dim, r2_dim, H_dim, W_dim = range(n, n + 5)
1182
+ return (
1183
+ self.view(
1184
+ *batch,
1185
+ C_out,
1186
+ upscale_factor,
1187
+ upscale_factor,
1188
+ self.shape[-2],
1189
+ self.shape[-1],
1190
+ )
1191
+ .permute(*B_dims, C_dim, H_dim, r1_dim, W_dim, r2_dim)
1192
+ .reshape(*batch, C_out, *HW_out)
1193
+ .clone(memory_format=utils.suggest_memory_format(self))
1194
+ )
1195
+
1196
+
1197
+ @register_decomposition(aten.pixel_unshuffle)
1198
+ @out_wrapper()
1199
+ def pixel_unshuffle(self: Tensor, downscale_factor: int):
1200
+ torch._check(
1201
+ self.dim() >= 3,
1202
+ lambda: f"pixel_unshuffle expects input to have at least 3 dimensions, but got input with {self.dim} dimension(s)",
1203
+ )
1204
+ batch = self.shape[:-3]
1205
+ C_out = self.shape[-3] * downscale_factor**2
1206
+ HW_out = (self.shape[-2] // downscale_factor, self.shape[-1] // downscale_factor)
1207
+ n = len(batch)
1208
+ B_dims = range(n)
1209
+ C_dim, H_dim, r1_dim, W_dim, r2_dim = range(n, n + 5)
1210
+ return (
1211
+ self.view(
1212
+ *batch,
1213
+ self.shape[-3],
1214
+ HW_out[0],
1215
+ downscale_factor,
1216
+ HW_out[1],
1217
+ downscale_factor,
1218
+ )
1219
+ .permute(*B_dims, C_dim, r1_dim, r2_dim, H_dim, W_dim)
1220
+ .reshape(*batch, C_out, *HW_out)
1221
+ .clone(memory_format=utils.suggest_memory_format(self))
1222
+ )
1223
+
1224
+
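Shape-wise, the two decompositions above are exact inverses (a quick sketch): pixel_shuffle moves an r**2 factor from channels into spatial resolution, and pixel_unshuffle moves it back.

import torch

x = torch.randn(1, 8, 4, 4)                    # C * r**2 = 8 with r = 2
y = torch.nn.functional.pixel_shuffle(x, 2)    # -> (1, 2, 8, 8)
z = torch.nn.functional.pixel_unshuffle(y, 2)  # -> (1, 8, 4, 4)
assert torch.equal(x, z)                       # a pure permutation, so exact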
1225
+ # Needed as aten.{celu_,elu_...} exist (even if they don't have the in-place kwarg)
1226
+ celu_ = _make_inplace(celu)
1227
+ elu_ = _make_inplace(elu)
1228
+ mish_ = _make_inplace(mish)
1229
+ selu_ = _make_inplace(selu)
1230
+ threshold_ = _make_inplace(threshold)
venv/lib/python3.10/site-packages/torch/_refs/nn/functional/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (26.1 kB). View file
 
venv/lib/python3.10/site-packages/torch/_refs/special/__init__.py ADDED
@@ -0,0 +1,236 @@
1
+ import math
2
+ from typing import Optional, Union
3
+
4
+ import torch
5
+ import torch._prims as prims
6
+ import torch._prims_common as utils
7
+ import torch._refs as refs
8
+
9
+ from torch import Tensor
10
+ from torch._decomp import register_decomposition
11
+ from torch._prims_common import (
12
+ ELEMENTWISE_TYPE_PROMOTION_KIND,
13
+ Number,
14
+ NumberType,
15
+ TensorLike,
16
+ TensorLikeType,
17
+ )
18
+ from torch._prims_common.wrappers import elementwise_type_promotion_wrapper, out_wrapper
19
+ from torch._refs import (
20
+ _make_alias,
21
+ _make_elementwise_binary_reference,
22
+ _make_elementwise_unary_reference,
23
+ )
24
+
25
+
26
+ __all__ = [
27
+ "bessel_j0",
28
+ "bessel_j1",
29
+ "entr",
30
+ "erfcx",
31
+ "expit",
32
+ "i0e",
33
+ "i1",
34
+ "i1e",
35
+ "log_ndtr",
36
+ "logit",
37
+ "log_softmax",
38
+ "multigammaln",
39
+ "ndtr",
40
+ "ndtri",
41
+ "softmax",
42
+ "spherical_bessel_j0",
43
+ "xlog1py",
44
+ "zeta",
45
+ ]
46
+ aten = torch._ops.ops.aten
47
+
48
+
49
+ @_make_elementwise_unary_reference(
50
+ ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
51
+ )
52
+ def bessel_j0(a: TensorLikeType) -> TensorLikeType:
53
+ return prims.bessel_j0(a)
54
+
55
+
56
+ @_make_elementwise_unary_reference(
57
+ ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
58
+ )
59
+ def bessel_j1(a: TensorLikeType) -> TensorLikeType:
60
+ return prims.bessel_j1(a)
61
+
62
+
63
+ @register_decomposition(aten.special_entr)
64
+ @out_wrapper()
65
+ @elementwise_type_promotion_wrapper(
66
+ type_promoting_args=("a",),
67
+ type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
68
+ )
69
+ def entr(a: TensorLikeType) -> TensorLikeType:
70
+ return torch.where(
71
+ torch.isnan(a),
72
+ a,
73
+ torch.where(a > 0, -a * torch.log(a), torch.where(a == 0, 0, -torch.inf)),
74
+ )
75
+
76
+
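The branches of the nested torch.where, evaluated on sample points (illustrative only): -x*log(x) for positive x, 0 at x = 0, -inf for negative x, and NaN propagates.

import torch

x = torch.tensor([0.5, 1.0, 0.0, -1.0])
# Approximately [0.3466, 0.0, 0.0, -inf]
print(torch.special.entr(x))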
77
+ @register_decomposition(aten.special_erfcx)
78
+ @out_wrapper()
79
+ @elementwise_type_promotion_wrapper(
80
+ type_promoting_args=("a",),
81
+ type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
82
+ )
83
+ def erfcx(a: TensorLikeType) -> TensorLikeType:
84
+ return prims.erfcx(a)
85
+
86
+
87
+ # alias for sigmoid
88
+ expit = _make_alias(torch.sigmoid, "expit")
89
+
90
+
91
+ @_make_elementwise_unary_reference(
92
+ ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
93
+ )
94
+ def i0e(a: TensorLikeType) -> TensorLikeType:
95
+ return prims.bessel_i0e(a)
96
+
97
+
98
+ @_make_elementwise_unary_reference(
99
+ ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
100
+ )
101
+ def i1(a: TensorLikeType) -> TensorLikeType:
102
+ return prims.bessel_i1(a)
103
+
104
+
105
+ @_make_elementwise_unary_reference(
106
+ ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
107
+ )
108
+ def i1e(a: TensorLikeType) -> TensorLikeType:
109
+ return prims.bessel_i1e(a)
110
+
111
+
112
+ @register_decomposition(aten.special_log_ndtr)
113
+ @out_wrapper()
114
+ @elementwise_type_promotion_wrapper(
115
+ type_promoting_args=("a",),
116
+ type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
117
+ )
118
+ def log_ndtr(a: TensorLikeType) -> TensorLikeType:
119
+ # Note: M_SQRT1_2 is the value of 1 / √2
120
+ M_SQRT1_2 = 0.707106781186547524400844362104849039
121
+ t = a * M_SQRT1_2
122
+ return torch.where(
123
+ a < 1.0,
124
+ torch.log(torch.special.erfcx(-t) / 2) - t * t,
125
+ torch.log1p(-torch.erfc(t) / 2),
126
+ )
127
+
128
+
129
+ @register_decomposition(aten.logit)
130
+ @out_wrapper()
131
+ @elementwise_type_promotion_wrapper(
132
+ type_promoting_args=("self",),
133
+ type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
134
+ )
135
+ def logit(self: TensorLikeType, eps: Optional[float] = None) -> TensorLikeType:
136
+ if eps is None:
137
+ eps = -1.0
138
+ lo = eps
139
+ hi = 1 - eps
140
+ self = torch.clamp(self, lo, hi)
141
+ return torch.log(torch.true_divide(self, torch.sub(1, self)))
142
+
143
+
144
+ @register_decomposition(aten.special_xlog1py)
145
+ @out_wrapper()
146
+ @elementwise_type_promotion_wrapper(
147
+ type_promoting_args=("a", "b"),
148
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
149
+ )
150
+ def xlog1py(a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType]):
151
+ torch._check(
152
+ isinstance(a, TensorLike) or isinstance(b, TensorLike),
153
+ lambda: 'Expected either argument a or b to be a Tensor"',
154
+ )
155
+
156
+ # Operations like eq and log do not handle scalar values, so we convert them to scalar_tensors.
157
+ if isinstance(a, TensorLike) and isinstance(b, Number):
158
+ b = refs.scalar_tensor(b, dtype=a.dtype, device=a.device)
159
+ elif isinstance(b, TensorLike) and isinstance(a, Number):
160
+ a = refs.scalar_tensor(a, dtype=b.dtype, device=b.device)
161
+
162
+ # mypy: expected "Tensor"
163
+ assert isinstance(a, TensorLike)
164
+ assert isinstance(b, TensorLike)
165
+ rhs = torch.where(torch.eq(a, 0), 0, torch.mul(a, torch.log1p(b)))
166
+ return torch.where(torch.isnan(b), float("nan"), rhs)
167
+
168
+
169
+ @register_decomposition(aten.mvlgamma)
170
+ @out_wrapper()
171
+ @elementwise_type_promotion_wrapper(
172
+ type_promoting_args=("a",),
173
+ type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
174
+ )
175
+ def multigammaln(a: TensorLikeType, p: int) -> TensorLikeType:
176
+ c = 0.25 * p * (p - 1) * math.log(math.pi)
177
+ b = 0.5 * torch.arange(start=(1 - p), end=1, step=1, dtype=a.dtype, device=a.device)
178
+ return torch.sum(torch.lgamma(a.unsqueeze(-1) + b), dim=-1) + c
179
+
180
+
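A sanity check on the decomposition (illustrative only): for p = 1 the constant term vanishes and the sum has a single lgamma term, so multigammaln reduces to lgamma.

import torch

a = torch.tensor([2.5, 4.0])
assert torch.allclose(torch.special.multigammaln(a, 1), torch.lgamma(a))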
181
+ @register_decomposition(aten.special_ndtr)
182
+ @out_wrapper()
183
+ @elementwise_type_promotion_wrapper(
184
+ type_promoting_args=("a",),
185
+ type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
186
+ )
187
+ def ndtr(a: TensorLikeType) -> TensorLikeType:
188
+ # Note: M_SQRT1_2 is the value of 1 / √2
189
+ M_SQRT1_2 = 0.707106781186547524400844362104849039
190
+ a_sqrt_2 = a * M_SQRT1_2
191
+ return (1 + torch.erf(a_sqrt_2)) * 0.5
192
+
193
+
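ndtr is the standard normal CDF written via erf; a couple of familiar quantiles (illustrative, values approximate):

import torch

x = torch.tensor([0.0, 1.6449, -1.6449])
# Approximately [0.5000, 0.9500, 0.0500]
print(torch.special.ndtr(x))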
194
+ @register_decomposition(aten.special_ndtri)
195
+ @out_wrapper()
196
+ @elementwise_type_promotion_wrapper(
197
+ type_promoting_args=("a",),
198
+ type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
199
+ )
200
+ def ndtri(a: TensorLikeType) -> TensorLikeType:
201
+ return prims.ndtri(a)
202
+
203
+
204
+ # Forwarding alias: the special variant doesn't support the out kwarg
205
+ # CompositeImplicitAutograd - don't register decomp
206
+ def log_softmax(
207
+ a: TensorLikeType,
208
+ dim: int,
209
+ dtype: Optional[torch.dtype] = None,
210
+ ) -> TensorLikeType:
211
+ return torch.log_softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload]
212
+
213
+
214
+ # Forwarding alias: the special variant doesn't support the out kwarg
215
+ # CompositeImplicitAutograd - don't register decomp
216
+ def softmax(
217
+ a: TensorLikeType,
218
+ dim: int,
219
+ dtype: Optional[torch.dtype] = None,
220
+ ) -> TensorLikeType:
221
+ return torch.softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload]
222
+
223
+
224
+ @_make_elementwise_unary_reference(
225
+ ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
226
+ )
227
+ def spherical_bessel_j0(a: TensorLikeType) -> TensorLikeType:
228
+ return prims.spherical_bessel_j0(a)
229
+
230
+
231
+ # TODO: add docstring
232
+ @_make_elementwise_binary_reference(
233
+ type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
234
+ )
235
+ def zeta(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType:
236
+ return prims.zeta(a, b)
venv/lib/python3.10/site-packages/torch/_refs/special/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (5.09 kB). View file
 
venv/lib/python3.10/site-packages/torch/_subclasses/__init__.py ADDED
@@ -0,0 +1,18 @@
1
+ import torch
2
+
3
+ from torch._subclasses.fake_tensor import (
4
+ DynamicOutputShapeException,
5
+ FakeTensor,
6
+ FakeTensorMode,
7
+ UnsupportedFakeTensorException,
8
+ )
9
+
10
+ from torch._subclasses.fake_utils import CrossRefFakeMode
11
+
12
+ __all__ = [
13
+ "FakeTensor",
14
+ "FakeTensorMode",
15
+ "UnsupportedFakeTensorException",
16
+ "DynamicOutputShapeException",
17
+ "CrossRefFakeMode",
18
+ ]
venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (485 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/fake_impls.cpython-310.pyc ADDED
Binary file (21.6 kB). View file
 
venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/fake_tensor.cpython-310.pyc ADDED
Binary file (42.5 kB). View file
 
venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/fake_utils.cpython-310.pyc ADDED
Binary file (5.57 kB). View file
 
venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/functional_tensor.cpython-310.pyc ADDED
Binary file (18.7 kB). View file
 
venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/meta_utils.cpython-310.pyc ADDED
Binary file (17.5 kB). View file
 
venv/lib/python3.10/site-packages/torch/_subclasses/__pycache__/schema_check_mode.cpython-310.pyc ADDED
Binary file (6.09 kB). View file
 
venv/lib/python3.10/site-packages/torch/_subclasses/fake_impls.py ADDED
@@ -0,0 +1,1061 @@
1
+ # mypy: ignore-errors
2
+
3
+ import functools
4
+ import itertools
5
+ import math
6
+ import sys
7
+ from typing import Callable, Union
8
+
9
+ import torch
10
+ import torch._custom_op
11
+ import torch._logging
12
+
13
+ from torch._ops import OpOverload
14
+ from torch._prims_common import (
15
+ elementwise_dtypes,
16
+ ELEMENTWISE_TYPE_PROMOTION_KIND,
17
+ is_boolean_dtype,
18
+ is_float_dtype,
19
+ is_integer_dtype,
20
+ )
21
+
22
+ from torch._subclasses.fake_tensor import (
23
+ DataDependentOutputException,
24
+ DynamicOutputShapeException,
25
+ FakeTensor,
26
+ in_kernel_invocation_manager,
27
+ run_fallback_kernel,
28
+ UnsupportedOperatorException,
29
+ )
30
+ from torch.fx.operator_schemas import normalize_function
31
+
32
+ from torch.utils._stats import count_label
33
+
34
+ pytree = torch.utils._pytree
35
+
36
+ __all__ = [
37
+ "op_implementations_checks",
38
+ "get_fast_op_impls",
39
+ "stride_incorrect_op",
40
+ "has_meta",
41
+ ]
42
+
43
+ op_implementations_dict = {}
44
+ op_implementations_checks = []
45
+
46
+
47
+ aten = torch._ops.ops.aten
48
+
49
+
50
+ def ordered_set(*items):
51
+ return dict.fromkeys(items, True)
52
+
53
+
54
+ # This function indicates if the backend device
55
+ # supports non-contiguous tensors
56
+ def is_noncontiguous_supported(device):
57
+ if device.type == "hpu":
58
+ return False
59
+ return True
60
+
61
+
62
+ _like_tensor_constructors = ordered_set(
63
+ aten.empty_like.default,
64
+ aten.empty_like.out,
65
+ aten.full_like.default,
66
+ aten.full_like.out,
67
+ aten.ones_like.default,
68
+ aten.ones_like.out,
69
+ aten.rand_like.default,
70
+ aten.rand_like.out,
71
+ aten.randn_like.default,
72
+ aten.randn_like.out,
73
+ aten.randint_like.default,
74
+ aten.randint_like.out,
75
+ aten.randint_like.low_dtype,
76
+ aten.randint_like.low_dtype_out,
77
+ aten.zeros_like.default,
78
+ aten.zeros_like.out,
79
+ aten.new_empty.default,
80
+ aten.new_empty.out,
81
+ aten.new_empty_strided.default,
82
+ aten.new_empty_strided.out,
83
+ aten.new_full.default,
84
+ aten.new_full.out,
85
+ aten.new_zeros.default,
86
+ aten.new_zeros.out,
87
+ aten.new_ones.default,
88
+ aten.new_ones.out,
89
+ )
90
+
91
+
92
+ _device_not_kwarg_ops = ordered_set(
93
+ aten._resize_output_.default,
94
+ aten._nested_tensor_from_tensor_list.default,
95
+ aten._nested_tensor_from_tensor_list.out,
96
+ aten.pin_memory.default,
97
+ aten.is_pinned.default,
98
+ aten.to.device,
99
+ aten.to.prim_Device,
100
+ aten._pin_memory.default,
101
+ aten._pin_memory.out,
102
+ aten._resize_output.default,
103
+ aten._resize_output.out,
104
+ )
105
+
106
+ # this op is never actually used
107
+ _non_kwarg_device_constructors = (aten._list_to_tensor,)
108
+
109
+
110
+ def contains_tensor_types(type):
111
+ tensor_type = torch._C.TensorType.get()
112
+ return type.isSubtypeOf(tensor_type) or any(
113
+ contains_tensor_types(e) for e in type.containedTypes()
114
+ )
115
+
116
+
117
+ @functools.lru_cache(None)
118
+ def _is_tensor_constructor(func: OpOverload):
119
+ assert isinstance(func, OpOverload)
120
+ schema = func._schema
121
+ if any(contains_tensor_types(arg.type) for arg in schema.arguments):
122
+ return False
123
+ # TODO: no real reason to restrict multiple outputs
124
+ return (
125
+ len(schema.returns) == 1 and schema.returns[0].type is torch._C.TensorType.get()
126
+ )
127
+
128
+
129
+ def register_op_impl(run_impl_check: Union[Callable[[OpOverload], bool], OpOverload]):
130
+ def impl_decorator(op_impl):
131
+ if isinstance(run_impl_check, OpOverload):
132
+ assert (
133
+ run_impl_check not in op_implementations_dict
134
+ ), f"duplicate registration: {run_impl_check}"
135
+ op_implementations_dict[run_impl_check] = op_impl
136
+ elif isinstance(run_impl_check, (list, tuple)):
137
+ for op in run_impl_check:
138
+ register_op_impl(op)(op_impl)
139
+ else:
140
+ assert callable(run_impl_check)
141
+ op_implementations_checks.append((run_impl_check, op_impl))
142
+
143
+ return op_impl
144
+
145
+ return impl_decorator
146
+
147
+
148
+ @register_op_impl(op_implementations_dict.__contains__)
149
+ def dispatch_to_op_implementations_dict(fake_mode, func, *args, **kwargs):
150
+ return op_implementations_dict[func](fake_mode, func, *args, **kwargs)
151
+
152
+
153
+ @register_op_impl(_is_tensor_constructor)
154
+ @register_op_impl([*_like_tensor_constructors])
155
+ def constructors(fake_mode, func, *args, **kwargs):
156
+ assert func not in _non_kwarg_device_constructors
157
+ _, new_kwargs = normalize_function(
158
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
159
+ )
160
+ if "names" in kwargs:
161
+ raise UnsupportedOperatorException(
162
+ "torch.compile doesn't support named tensors"
163
+ )
164
+
165
+ if func in _like_tensor_constructors:
166
+ default_device = new_kwargs["input"].device
167
+ # TODO: file issue
168
+ args = (new_kwargs.pop("input"),)
169
+ else:
170
+ # cpu is default device if none is specified
171
+ default_device = torch.device("cpu")
172
+ args = ()
173
+ out_device = new_kwargs.pop("device", None)
174
+ out_device = out_device if out_device is not None else default_device
175
+ new_kwargs["device"] = torch.device("meta")
176
+ # _like constructors have fake tensor inputs (maybe this causes the non-like
177
+ # to fail? hmmm)
178
+ with in_kernel_invocation_manager(fake_mode):
179
+ r = func(*args, **new_kwargs)
180
+ return FakeTensor(fake_mode, r, out_device)
181
+
182
+
183
+ @register_op_impl(aten.to.prim_Device)
184
+ @register_op_impl(aten.to.device)
185
+ def non_kwarg_to(fake_mode, func, *args, **kwargs):
186
+ _, new_kwargs = normalize_function(
187
+ func, args, kwargs, normalize_to_only_use_kwargs=True
188
+ )
189
+ input_device = new_kwargs["device"]
190
+ out_device = input_device if input_device else new_kwargs["input"].device
191
+ new_kwargs["device"] = torch.device("meta")
192
+ inp = new_kwargs.pop("input")
193
+ with in_kernel_invocation_manager(fake_mode):
194
+ r = func(inp, **new_kwargs)
195
+ # TODO: I think this does the wrong thing if r is inp
196
+ return fake_mode.fake_tensor_converter.from_meta_and_device(
197
+ fake_mode, r, out_device
198
+ )
199
+
200
+
201
+ def stride_incorrect_op(op):
202
+ if op.namespace not in ("aten", "prims"):
203
+ return False
204
+ if op is aten._fft_c2c.default:
205
+ return False
206
+
207
+ op_name = op.name()
208
+ if "fft" in op_name:
209
+ return True
210
+ return False
211
+
212
+
213
+ # These operators have meta implementations with incorrect strides
214
+ @register_op_impl(stride_incorrect_op)
215
+ def wordaround_stride_incorrect_op(fake_mode, func, *args, **kwargs):
216
+ # This is a workaround for meta implmentations with incorrect strides
217
+
218
+ def is_symbolic(x):
219
+ if isinstance(x, FakeTensor):
220
+ return x._has_symbolic_sizes_strides
221
+ if isinstance(x, (torch.SymInt, torch.SymFloat, torch.SymBool)):
222
+ return True
223
+ return False
224
+
225
+ # For static shapes, we can fall back to eager for the real strides
226
+ if fake_mode.allow_fallback_kernels:
227
+ require_dynamic = any(
228
+ is_symbolic(x) for x in itertools.chain(args, kwargs.values())
229
+ )
230
+ if not require_dynamic:
231
+ flat_args, args_spec = pytree.tree_flatten((args, kwargs))
232
+ return run_fallback_kernel(fake_mode, func, flat_args, args_spec, None)
233
+
234
+ raise UnsupportedOperatorException(func)
235
+
236
+
237
+ # Don't default to default device handling,
238
+ # since the device of `the_template` is ignored
239
+ @register_op_impl(aten.resize_as_.default)
240
+ def resize_as_(fake_mode, func, *args, **kwargs):
241
+ with in_kernel_invocation_manager(fake_mode):
242
+ return func(*args, **kwargs)
243
+
244
+
245
+ @register_op_impl(aten._sparse_coo_tensor_with_dims_and_tensors.default)
246
+ def _sparse_coo_tensor_with_dims_and_tensors(fake_mode, func, *args, **kwargs):
247
+ # TODO: remove me
248
+ return constructors(fake_mode, func, *args, **kwargs)
249
+
250
+
251
+ # index.Tensor is data-dependent only in some conditions
252
+ @register_op_impl(
253
+ lambda func: torch.Tag.dynamic_output_shape in func.tags
254
+ and func
255
+ not in [aten.index.Tensor, aten.nonzero.default, aten.repeat_interleave.Tensor]
256
+ )
257
+ def dyn_shape(fake_mode, func, *args, **kwargs):
258
+ raise DynamicOutputShapeException(func)
259
+
260
+
261
+ @register_op_impl(aten.repeat_interleave.Tensor)
262
+ def repeat_interleave_tensor(fake_mode, func, repeats, output_size=None):
263
+ if output_size is None:
264
+ if (
265
+ fake_mode.shape_env is None
266
+ or not fake_mode.shape_env.allow_dynamic_output_shape_ops
267
+ ):
268
+ raise DynamicOutputShapeException(func)
269
+
270
+ output_size = fake_mode.shape_env.create_unbacked_symint()
271
+
272
+ # Avoid importing sympy at a module level
273
+ from torch.fx.experimental.symbolic_shapes import _constrain_range_for_size
274
+
275
+ _constrain_range_for_size(output_size)
276
+ # TODO: consider a memo
277
+ return repeats.new_empty(output_size)
278
+
279
+
280
+ @register_op_impl(torch.ops.aten._local_scalar_dense.default)
281
+ def local_scalar_dense(fake_mode, func, arg):
282
+ if fake_mode.shape_env is None or not fake_mode.shape_env.allow_scalar_outputs:
283
+ # Without symints/symfloats, cannot handle this
284
+ raise DataDependentOutputException(func)
285
+ if is_float_dtype(arg.dtype):
286
+ return fake_mode.shape_env.create_unbacked_symfloat()
287
+ elif is_integer_dtype(arg.dtype):
288
+ return fake_mode.shape_env.create_unbacked_symint()
289
+ elif is_boolean_dtype(arg.dtype):
290
+ return fake_mode.shape_env.create_unbacked_symbool()
291
+ else:
292
+ raise NotImplementedError(f"local_scalar_dense/item NYI for {arg.dtype}")
293
+
294
+
295
+ @register_op_impl(torch.ops.aten.nonzero.default)
296
+ def nonzero(fake_mode, func, arg):
297
+ if (
298
+ fake_mode.shape_env is None
299
+ or not fake_mode.shape_env.allow_dynamic_output_shape_ops
300
+ ):
301
+ # Without symints/symfloats, cannot handle this
302
+ raise DynamicOutputShapeException(func)
303
+
304
+ if arg.nonzero_memo is None:
305
+ nnz = fake_mode.shape_env.create_unbacked_symint()
306
+
307
+ # This is unsound, but it works well in practice
308
+ # See https://docs.google.com/document/d/1lFRYAJo5nrfxRhwIzGnfi2pbLpU6T4ytSRSuLJ5qebI/edit#
309
+ # TODO: Add a config knob to turn off this unsound behavior
310
+ #
311
+ # NB: If numel < 2, the bounds here might be COMPLETELY
312
+ # disjoint with what can actually occur. But this is fine:
313
+ # remember, the hypothesis is that if your later code works
314
+ # with N >= 2, it will work with N = 1 and N = 0.
315
+ maxval = sys.maxsize - 1
316
+
317
+ # Avoid importing sympy at a module level
318
+ from torch.fx.experimental.symbolic_shapes import (
319
+ _constrain_range_for_size,
320
+ has_free_symbols,
321
+ )
322
+
323
+ if not has_free_symbols(arg.numel()):
324
+ # Don't upgrade the range if numel is less than two, since we then
325
+ # have an empty range which makes things go explodey. We also
326
+ # don't allow for 2 because that would specialize the unbacked
327
+ # SymInt to 2, which is also likely to be buggy.
328
+ if arg.numel() > 2:
329
+ maxval = int(arg.numel())
330
+
331
+ _constrain_range_for_size(nnz, max=maxval)
332
+
333
+ arg._nonzero_memo = nnz
334
+ arg._nonzero_memo_vc = arg._version
335
+
336
+ return arg.new_empty((arg.nonzero_memo, arg.dim()), dtype=torch.int64)
337
+
338
+
339
+ @register_op_impl(torch.ops.aten.masked_select.default)
340
+ def masked_select(fake_mode, func, self, mask):
341
+ if (
342
+ fake_mode.shape_env is None
343
+ or not fake_mode.shape_env.allow_dynamic_output_shape_ops
344
+ ):
345
+ # Without symints/symfloats, cannot handle this
346
+ raise DynamicOutputShapeException(func)
347
+
348
+ nnz = fake_mode.shape_env.create_unbacked_symint()
349
+
350
+ # see nonzero for commentary
351
+ maxval = sys.maxsize - 1
352
+
353
+ # Avoid importing sympy at a module level
354
+ from torch.fx.experimental.symbolic_shapes import (
355
+ _constrain_range_for_size,
356
+ has_free_symbols,
357
+ )
358
+
359
+ if not has_free_symbols(self.numel()):
360
+ if self.numel() > 2:
361
+ maxval = int(self.numel())
362
+
363
+ _constrain_range_for_size(nnz, max=maxval)
364
+
365
+ return self.new_empty((nnz,))
366
+
367
+
368
+ # NB: this must be ordered after local_scalar_dense
369
+ @register_op_impl(lambda func: torch.Tag.data_dependent_output in func.tags)
370
+ def data_dep(fake_mode, func, *args, **kwargs):
371
+ raise DataDependentOutputException(func)
372
+
373
+
374
+ # Bool Indices get Expanded as Masks
375
+ # See: IndexingUtils.h:expandTensors
376
+ def check_no_bool_index_tensors(func, self, indices):
377
+ for index in indices:
378
+ if index is not None and index.dtype in (torch.bool, torch.uint8):
379
+ raise DynamicOutputShapeException(func)
380
+
381
+
382
+ def run_and_return_new_tensor_of_input_device(fake_mode, func, args, kwargs):
383
+ _, new_kwargs = normalize_function(
384
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
385
+ )
386
+
387
+ out_device = new_kwargs["input"].device
388
+ with in_kernel_invocation_manager(fake_mode):
389
+ out = func(*args, **kwargs)
390
+ if not is_noncontiguous_supported(out_device):
391
+ out = out.new_empty(out.shape)
392
+
393
+ if out is new_kwargs["input"]:
394
+ return out # copy_
395
+ return FakeTensor(fake_mode, out, out_device)
396
+
397
+
398
+ _is_builtin_namespaces = ordered_set("aten", "prims", "prim")
399
+
400
+
401
+ def is_builtin(op):
402
+ return op.namespace in _is_builtin_namespaces
403
+
404
+
405
+ def has_meta(func):
406
+ return torch._C._dispatch_has_computed_kernel_for_dispatch_key(func.name(), "Meta")
407
+
408
+
409
+ @register_op_impl(
410
+ lambda func: is_builtin(func) and "foreach" in func.name() and has_meta(func)
411
+ )
412
+ def foreach_run_and_map_input_device(fake_mode, func, *args, **kwargs):
413
+ tensor_lists = []
414
+ for arg in itertools.chain(args, kwargs.values()):
415
+ if (
416
+ isinstance(arg, (list, tuple))
417
+ and len(arg)
418
+ and isinstance(arg[0], torch.Tensor)
419
+ ):
420
+ tensor_lists.append(arg)
421
+
422
+ try:
423
+ with in_kernel_invocation_manager(fake_mode):
424
+ out_meta = func(*args, **kwargs)
425
+ except NotImplementedError as not_implemented_error:
426
+ return NotImplemented
427
+
428
+ if not out_meta:
429
+ return out_meta
430
+
431
+ assert tensor_lists
432
+ out_fake = []
433
+
434
+ for i, meta_t in enumerate(out_meta):
435
+ device, _ = FakeTensor._find_common_device(func, [tl[i] for tl in tensor_lists])
436
+ out_fake.append(
437
+ fake_mode.fake_tensor_converter.from_meta_and_device(
438
+ fake_mode, meta_t, device
439
+ )
440
+ )
441
+
442
+ return out_fake
443
+
444
+
445
+ # Don't default to default device handling,
446
+ # Since op can take in non-zero sized cpu
447
+ # index tensors with cuda self
448
+ @register_op_impl(aten.index.Tensor)
449
+ def index_tensor(fake_mode, func, *args, **kwargs):
450
+ from torch._meta_registrations import meta_index_Tensor
451
+
452
+ _, new_kwargs = normalize_function(
453
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
454
+ )
455
+
456
+ out_device = new_kwargs["input"].device
457
+ # ensure nonzero call goes to fake tensor
458
+ with fake_mode:
459
+ out = meta_index_Tensor(*args, **kwargs)
460
+ return out.to(out_device)
461
+
462
+
463
+ # Can take mixed meta/non-meta arguments; the meta registration
464
+ # will roughly do the right thing even when given real devices
465
+ @register_op_impl(aten._embedding_bag.default)
466
+ def embedding_bag(fake_mode, func, *args, **kwargs):
467
+ from torch._meta_registrations import meta_embedding_bag
468
+
469
+ with fake_mode:
470
+ return meta_embedding_bag(*args, **kwargs)
471
+
472
+
473
+ # takes in multiple devices, don't default to default device handling
474
+ @register_op_impl(aten._unsafe_index_put.default)
475
+ @register_op_impl(aten.copy.default)
476
+ @register_op_impl(aten.copy_.default)
477
+ @register_op_impl(aten.slice_scatter.default)
478
+ def multi_device_op_default(fake_mode, func, *args, **kwargs):
479
+ return run_and_return_new_tensor_of_input_device(fake_mode, func, args, kwargs)
480
+
481
+
482
+ # same as multi_device_op_default, but returns the input
483
+ @register_op_impl(aten.copy.out)
484
+ @register_op_impl(aten.slice_scatter.out)
485
+ def multi_device_op_out(fake_mode, func, *args, **kwargs):
486
+ with in_kernel_invocation_manager(fake_mode):
487
+ out = func(*args, **kwargs)
488
+
489
+ _, new_kwargs = normalize_function(
490
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
491
+ )
492
+
493
+ return new_kwargs["input"]
494
+
495
+
496
+ @register_op_impl(aten.index_put.default)
497
+ @register_op_impl(aten.index_put_.default)
498
+ def index_put_impl(fake_mode, func, *args, **kwargs):
499
+ _, new_kwargs = normalize_function(
500
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
501
+ )
502
+
503
+ values = new_kwargs["values"]
504
+ self_device = new_kwargs["input"].fake_device
505
+ torch._check(
506
+ self_device == values.fake_device or (values.ndim == 0 and values.numel() == 1),
507
+ lambda: f"Mismatching {func} device between self ({self_device}) and values ({values.device})",
508
+ )
509
+
510
+ out = run_and_return_new_tensor_of_input_device(fake_mode, func, args, kwargs)
511
+ if func is aten.index_put_.default:
512
+ return new_kwargs["input"]
513
+ else:
514
+ return out
515
+
516
+
517
+ @register_op_impl(aten._nested_tensor_from_tensor_list.default)
518
+ @register_op_impl(aten._nested_tensor_from_tensor_list.out)
519
+ def nested_tensors_unsupported(fake_mode, func, *args, **kwargs):
520
+ raise UnsupportedOperatorException(
521
+ "torch.compile does not support strided NestedTensor"
522
+ )
523
+
524
+
525
+ @register_op_impl(
526
+ [
527
+ x
528
+ for x in _device_not_kwarg_ops
529
+ if x
530
+ not in (
531
+ # these are already registered elsewhere
532
+ aten.to.device,
533
+ aten.to.prim_Device,
534
+ aten._nested_tensor_from_tensor_list.default,
535
+ aten._nested_tensor_from_tensor_list.out,
536
+ )
537
+ ]
538
+ )
539
+ def nyi(fake_mode, func, *args, **kwargs):
540
+ assert func not in _device_not_kwarg_ops, f"NYI: {func}"
541
+
542
+
543
+ @register_op_impl([aten.convolution.default, aten.convolution_backward.default])
544
+ def conv(fake_mode, func, *args, **kwargs):
545
+ _, kwargs = normalize_function(
546
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
547
+ )
548
+ device = kwargs["input"].fake_device
549
+ # need to re-enable mode so the tensors report fake device
550
+ with fake_mode:
551
+ # if the unsqueezing of the input is done in Convolution.cpp we get a segfault
552
+ k = kwargs["weight"].ndim
553
+ batch = kwargs["input"].shape[0]
554
+
555
+ # Avoid importing sympy at a module level
556
+ from torch.fx.experimental.symbolic_shapes import has_hint
557
+
558
+ if not has_hint(batch):
559
+ # TODO: We can make this a little more faithful with best effort
560
+ # channels last detection (but only if it's statically obvious!)
561
+ mem_fmt = None
562
+ elif k == 3 and not kwargs["input"].is_mkldnn and not kwargs["input"].is_xpu:
563
+ mem_fmt = None
564
+ else:
565
+ if func is aten.convolution.default:
566
+ conv_backend = torch._C._select_conv_backend(**kwargs)
567
+ else:
568
+ conv_backend = torch._C._select_conv_backend(
569
+ kwargs["input"],
570
+ kwargs["weight"],
571
+ bias=None,
572
+ stride=kwargs["stride"],
573
+ padding=kwargs["padding"],
574
+ dilation=kwargs["dilation"],
575
+ transposed=kwargs["transposed"],
576
+ output_padding=kwargs["output_padding"],
577
+ groups=kwargs["groups"],
578
+ bias_sizes=kwargs["bias_sizes"],
579
+ )
580
+ mem_fmt = torch._C._conv_determine_backend_memory_format(
581
+ kwargs["input"], kwargs["weight"], conv_backend
582
+ )
583
+
584
+ def convert(t, mem_fmt):
585
+ if t is None:
586
+ return t
587
+ if mem_fmt is not None:
588
+ t = t.to(memory_format=mem_fmt)
589
+ return FakeTensor(fake_mode, t, device)
590
+
591
+ with in_kernel_invocation_manager(fake_mode):
592
+ out = func(**kwargs)
593
+
594
+ if func is aten.convolution.default:
595
+ return convert(out, mem_fmt)
596
+ else:
597
+ return (
598
+ convert(out[0], mem_fmt),
599
+ convert(out[1], mem_fmt),
600
+ convert(out[2], None),
601
+ )
602
+
603
+
604
+ @register_op_impl(aten._scaled_dot_product_flash_attention.default)
605
+ def meta__scaled_dot_product_flash(fake_mode, func, *args, **kwargs):
606
+ _, kwargs = normalize_function(
607
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
608
+ )
609
+
610
+ query = kwargs["query"]
611
+ key = kwargs["key"]
612
+ return_debug_mask = kwargs["return_debug_mask"]
613
+ # unused: value, dropout_p, is_causal, scale
614
+
615
+ def convert_tensor(t, device):
616
+ return FakeTensor(fake_mode, t, device)
617
+
618
+ batch_size = query.size(0)
619
+ num_heads = query.size(1)
620
+ max_seqlen_batch_q = query.size(2)
621
+ head_dim = query.size(3)
622
+ max_seqlen_batch_k = key.size(2)
623
+
624
+ query_t = query.transpose(1, 2)
625
+ # empty_like already returns a fake tensor so we don't need to convert it
626
+ attention = torch.empty_like(query_t).transpose(1, 2)
627
+ logsumexp = convert_tensor(
628
+ torch.empty(
629
+ (batch_size, num_heads, max_seqlen_batch_q),
630
+ dtype=torch.float,
631
+ device="meta",
632
+ ),
633
+ device=query.device,
634
+ )
635
+
636
+ if return_debug_mask:
637
+ blocksize_c = 128 if head_dim > 64 else 256
638
+ max_seqlen_k = math.ceil(max_seqlen_batch_q / blocksize_c)
639
+ if max_seqlen_batch_k <= 128:
640
+ max_seqlen_k = 128
641
+ elif max_seqlen_batch_k <= 256:
642
+ max_seqlen_k = 256
643
+ debug_mask = convert_tensor(
644
+ torch.empty(
645
+ (batch_size, num_heads, max_seqlen_batch_q, max_seqlen_k),
646
+ dtype=query.dtype,
647
+ device="meta",
648
+ ),
649
+ device=query.device,
650
+ )
651
+ else:
652
+ debug_mask = convert_tensor(
653
+ torch.empty(0, dtype=query.dtype, device="meta"),
654
+ query.device,
655
+ )
656
+
657
+ # Note [Seed and Offset]: device for seed and offset below depends on whether we are
658
+ # capturing or not, but at the time of tracing we don't know if we
659
+ # are going to use cudagraphs or not, so we return meta tensors here
660
+ # it's possible we'll need to have some special handling in inductor for sdpa
661
+
662
+ return (
663
+ attention,
664
+ logsumexp,
665
+ None,
666
+ None,
667
+ max_seqlen_batch_q,
668
+ max_seqlen_batch_k,
669
+ convert_tensor(torch.empty((), dtype=torch.long, device="meta"), query.device),
670
+ convert_tensor(torch.empty((), dtype=torch.long, device="meta"), query.device),
671
+ debug_mask,
672
+ )
673
+
674
+
675
+ @register_op_impl(aten._scaled_dot_product_efficient_attention.default)
676
+ def meta__scaled_dot_product_efficient(fake_mode, func, *args, **kwargs):
677
+ _, kwargs = normalize_function(
678
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
679
+ )
680
+
681
+ query = kwargs["query"]
682
+ key = kwargs["key"]
683
+ value = kwargs["value"]
684
+ compute_log_sumexp = kwargs["compute_log_sumexp"]
685
+ # unused: attn_bias, dropout_p, is_causal, scale
686
+
687
+ def convert_tensor(t, device):
688
+ return FakeTensor(fake_mode, t, device)
689
+
690
+ query = query.transpose(1, 2)
691
+ key = key.transpose(1, 2)
692
+ value = value.transpose(1, 2)
693
+
694
+ B = query.size(0)
695
+ M = query.size(1)
696
+ N = key.size(1)
697
+ num_heads = query.size(-2)
698
+ K = query.size(-1)
699
+ Kv = value.size(-1)
700
+
701
+ res = convert_tensor(
702
+ torch.empty(B, M, num_heads, Kv, dtype=query.dtype, device="meta"),
703
+ query.device,
704
+ )
705
+
706
+ logsumexp_dim = math.ceil(M / 32) * 32 if compute_log_sumexp else 0
707
+ logsum_exp = convert_tensor(
708
+ torch.empty(
709
+ (B, num_heads, logsumexp_dim),
710
+ dtype=torch.float,
711
+ device="meta",
712
+ ),
713
+ query.device,
714
+ )
715
+
716
+ res = res.transpose(1, 2)
717
+
718
+ # See Note [Seed and Offset]:
719
+ seed = convert_tensor(
720
+ torch.empty((), dtype=torch.long, device="meta"), query.device
721
+ )
722
+ offset = convert_tensor(
723
+ torch.empty((), dtype=torch.long, device="meta"), query.device
724
+ )
725
+
726
+ return res, logsum_exp, seed, offset
727
+
728
+
729
+ @register_op_impl(aten._flash_attention_forward.default)
730
+ def meta__flash_attention_forward(fake_mode, func, *args, **kwargs):
731
+ _, kwargs = normalize_function(
732
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
733
+ )
734
+
735
+ query = kwargs["query"]
736
+ key = kwargs["key"]
737
+ cum_seq_q = kwargs["cum_seq_q"]
738
+ cum_seq_k = kwargs["cum_seq_k"]
739
+ max_q = kwargs["max_q"]
740
+ max_k = kwargs["max_k"]
741
+ return_debug_mask = kwargs["return_debug_mask"]
742
+ # unused: value, dropout_p, is_causal, scale
743
+
744
+ def convert_tensor(t, device):
745
+ return FakeTensor(fake_mode, t, device)
746
+
747
+ # NB: there are two underlying paths:
748
+ # 1. normal dense path; expect 4D inputs of shape (batch_size, seqlen, num_heads, head_dim)
749
+ # 2. varseqlen path; expect 3D inputs of shape (total, num_heads, head_dim) where total
750
+ # includes all batch item sequences. cum_seq_q / cum_seq_k contain offsets into total
751
+ batch_size = query.size(0) if cum_seq_q is None else cum_seq_q.numel() - 1
752
+ max_seqlen_batch_q = query.size(1) if cum_seq_q is None else max_q
753
+ max_seqlen_batch_k = key.size(1) if cum_seq_k is None else max_k
754
+ num_heads = query.size(-2)
755
+ head_dim = query.size(-1)
756
+
757
+ # Cuda Path
758
+ # note: empty_like already returns a fake tensor, we don't need to wrap it
759
+ attention = torch.empty_like(query)
760
+ logsumexp = convert_tensor(
761
+ torch.empty(
762
+ (batch_size, num_heads, max_seqlen_batch_q),
763
+ dtype=torch.float,
764
+ device="meta",
765
+ ),
766
+ device=query.device,
767
+ )
768
+
769
+ if return_debug_mask:
770
+ blocksize_c = 128 if head_dim > 64 else 256
771
+ max_seqlen_k = math.ceil(max_seqlen_batch_q / blocksize_c)
772
+ if max_seqlen_batch_k <= 128:
773
+ max_seqlen_k = 128
774
+ elif max_seqlen_batch_k <= 256:
775
+ max_seqlen_k = 256
776
+ debug_mask = convert_tensor(
777
+ torch.empty(
778
+ (batch_size, num_heads, max_seqlen_batch_q, max_seqlen_k),
779
+ dtype=query.dtype,
780
+ device="meta",
781
+ ),
782
+ query.device,
783
+ )
784
+ else:
785
+ debug_mask = convert_tensor(
786
+ torch.empty(0, dtype=query.dtype, device="meta"),
787
+ query.device,
788
+ )
789
+
790
+ # See Note [Seed and Offset]:
791
+ return (
792
+ attention,
793
+ logsumexp,
794
+ convert_tensor(torch.empty((), dtype=torch.long, device="meta"), query.device),
795
+ convert_tensor(torch.empty((), dtype=torch.long, device="meta"), query.device),
796
+ debug_mask,
797
+ )
798
+
799
+
800
+ @register_op_impl(aten._efficient_attention_forward.default)
801
+ def meta__efficient_attention_forward(fake_mode, func, *args, **kwargs):
802
+ _, kwargs = normalize_function(
803
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
804
+ )
805
+
806
+ query = kwargs["query"]
807
+ key = kwargs["key"]
808
+ value = kwargs["value"]
809
+ cu_seqlens_q = kwargs["cu_seqlens_q"]
810
+ max_seqlen_q = kwargs["max_seqlen_q"]
811
+ max_seqlen_k = kwargs["max_seqlen_k"]
812
+ compute_log_sumexp = kwargs["compute_log_sumexp"]
813
+ # unused: bias, cu_seqlens_k, dropout_p, custom_mask_type, scale, causal_diagonal, seqlen_k
814
+
815
+ def convert_tensor(t, device):
816
+ return FakeTensor(fake_mode, t, device)
817
+
818
+ B = query.size(0)
819
+ M = query.size(1)
820
+ N = key.size(1)
821
+ num_heads = query.size(-2)
822
+ K = query.size(-1)
823
+ Kv = value.size(-1)
824
+
825
+ res = convert_tensor(
826
+ torch.empty(B, M, num_heads, Kv, dtype=query.dtype, device="meta"),
827
+ query.device,
828
+ )
829
+
830
+ logsumexp_batch_dim = cu_seqlens_q.size(0) - 1 if (cu_seqlens_q is not None) else B
831
+ actual_max_seqlen_q = M
832
+ if cu_seqlens_q is not None:
833
+ assert max_seqlen_q is not None
834
+ actual_max_seqlen_q = max_seqlen_q
835
+ actual_max_seqlen_k = max_seqlen_k if max_seqlen_k is not None else N
836
+ logsumexp_dim = (
837
+ math.ceil(actual_max_seqlen_q / 32) * 32 if compute_log_sumexp else 0
838
+ )
839
+ logsum_exp = convert_tensor(
840
+ torch.empty(
841
+ (logsumexp_batch_dim, num_heads, logsumexp_dim),
842
+ dtype=torch.float,
843
+ device="meta",
844
+ ),
845
+ query.device,
846
+ )
847
+
848
+ # See Note [Seed and Offset]:
849
+ seed = convert_tensor(
850
+ torch.empty((), dtype=torch.long, device="meta"), query.device
851
+ )
852
+ offset = convert_tensor(
853
+ torch.empty((), dtype=torch.long, device="meta"), query.device
854
+ )
855
+
856
+ return res, logsum_exp, seed, offset, actual_max_seqlen_q, actual_max_seqlen_k
857
+
858
+
859
+ FAST_OP_IMPLEMENTATIONS = {}
860
+
861
+
862
+ # Unlike register_op_impl, these don't do the slow iteration for
863
+ # run_impl_check, and these run BEFORE decompositions
864
+ def register_fast_op_impl(func: OpOverload):
865
+ def impl_decorator(op_impl):
866
+ FAST_OP_IMPLEMENTATIONS[func] = op_impl
867
+ return op_impl
868
+
869
+ return impl_decorator
870
+
871
+
872
+ # infer_size_impl in ExpandUtils
873
+ def infer_size(a, b):
874
+ from torch.fx.experimental.symbolic_shapes import guard_size_oblivious
875
+
876
+ dimsA = len(a)
877
+ dimsB = len(b)
878
+ ndim = max(dimsA, dimsB)
879
+ expandedSizes = [0] * ndim
880
+ for i in range(ndim - 1, -1, -1):
881
+ offset = ndim - 1 - i
882
+ dimA = dimsA - 1 - offset
883
+ dimB = dimsB - 1 - offset
884
+ sizeA = a[dimA] if dimA >= 0 else 1
885
+ sizeB = b[dimB] if dimB >= 0 else 1
886
+
887
+ # NB: It is very important to test for broadcasting, before testing
888
+ # sizeA == sizeB. This is because the broadcasting tests are likely
889
+ # to be statically known (in particular, if sizeA/sizeB is unbacked
890
+ # but size-like, we will unsoundly assume they never equal 1), but
891
+ # the sizeA == sizeB test may not be statically known. However, once
892
+ # we have established that no broadcasting is happening, the
893
+ # sizeA == sizeB is now expect_true and we can defer it as a runtime
894
+ # assert (this works because Python will return the terminal
895
+ # expression of an or statement as-is, without bool()'ing it; if this
896
+ # were not the case, we'd need to write this using torch.sym_or() or
897
+ # something like that).
898
+ torch._check(
899
+ guard_size_oblivious(sizeA == 1)
900
+ or guard_size_oblivious(sizeB == 1)
901
+ or sizeA == sizeB,
902
+ lambda: f"The size of tensor a ({sizeA}) "
903
+ f"must match the size of tensor b ({sizeB}) "
904
+ f"at non-singleton dimension {i})",
905
+ )
906
+ expandedSizes[i] = sizeB if guard_size_oblivious(sizeA == 1) else sizeA
907
+ return tuple(expandedSizes)
908
+
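+ # Illustrative note (not part of the upstream file): with plain Python ints the
+ # helper above reproduces ordinary broadcasting semantics, e.g.
+ #
+ #   infer_size((2, 1, 4), (3, 1))   # -> (2, 3, 4)
+ #   infer_size((5,), ())            # -> (5,)
+ #
+ # and raises (via torch._check) when a non-singleton dimension mismatches,
+ # e.g. infer_size((2, 3), (4,)).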
909
+
910
+ def make_fast_binary_impl(slow_ref):
911
+ def fast_binary_impl(mode, *args, **kwargs):
912
+ def slow(msg):
913
+ count_label(f"slow {msg}")
914
+ with mode:
915
+ return slow_ref(*args, **kwargs)
916
+
917
+ count_label("attempt fast")
918
+
919
+ # Fast path (based off of TensorIterator fast path).
920
+ # Unfortunately, there is no way to easily deduplicate
921
+ # this with either the TensorIterator C++ implementation
922
+ # (which we don't want to SymIntify, and also the algorithm
923
+ # here is slightly different from TensorIterator to allow
924
+ # for broadcasting), nor the PrimTorch implementation
925
+ # (which does not actually implement a fast path.)
926
+
927
+ operands = args
928
+
929
+ # compute_shape
930
+ has_scalars = False
931
+ has_tensors = False
932
+ final_shape = None
933
+ for op in operands:
934
+ shape = op.shape if isinstance(op, torch.Tensor) else ()
935
+ if len(shape) == 0:
936
+ has_scalars = True
937
+ else:
938
+ has_tensors = True
939
+ if final_shape is None:
940
+ final_shape = shape
941
+ # TODO: Minor optimization: track if the shapes
942
+ # were equal so you can skip the equality check
943
+ # below if unnecessary
944
+ final_shape = infer_size(final_shape, shape)
945
+ assert final_shape is not None
946
+
947
+ # Do some extra safety checks to see if the output
948
+ # stride is obvious
949
+ for op in operands:
950
+ if (
951
+ isinstance(op, torch.Tensor)
952
+ and len(op.shape) == len(final_shape)
953
+ and op.shape == final_shape
954
+ ):
955
+ break
956
+ else:
957
+ return slow("both tensors nontrivially broadcast")
958
+
959
+ # compute_types
960
+ cpu = torch.device("cpu")
961
+ common_device = cpu
962
+ common_dtype = None
963
+ output_dtype = None
964
+ has_different_input_dtypes = False
965
+ for op in operands:
966
+ if not isinstance(op, torch.Tensor):
967
+ # Use elementwise_dtypes for the tricky case
968
+ has_different_input_dtypes = True
969
+ continue
970
+ if common_device == cpu and not op.device.type == "cpu":
971
+ common_device = op.device
972
+ # Slightly simplified here as target_dtype cannot vary
973
+ if common_dtype is None:
974
+ common_dtype = op.dtype
975
+ elif common_dtype != op.dtype:
976
+ has_different_input_dtypes = True
977
+
978
+ if has_different_input_dtypes:
979
+ # compute promotion
980
+ # TODO: we don't need the compute type
981
+ _, common_dtype = elementwise_dtypes(
982
+ *operands, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
983
+ )
984
+
985
+ # check all tensors on same device
986
+ # cpu scalars are assumed to be allowed
987
+ current_cpu_scalars_on_non_cpu = 0
988
+ max_cpu_scalars_on_non_cpu = 1 # hard coded atm
989
+ for op in operands:
990
+ if not isinstance(op, torch.Tensor):
991
+ continue
992
+ if common_device != cpu and op.dim() == 0 and op.device == cpu:
993
+ if current_cpu_scalars_on_non_cpu >= max_cpu_scalars_on_non_cpu:
994
+ return slow("error")
995
+ current_cpu_scalars_on_non_cpu += 1
996
+ elif op.device != common_device:
997
+ return slow("error")
998
+
999
+ # compute_fast_setup_type
1000
+ is_contiguous = True
1001
+ is_channels_last = True
1002
+ # TODO: is_non-overlapping_and_dense (not bound from Python)
1003
+ # no inplace, no out, everything defined
1004
+
1005
+ if is_noncontiguous_supported(common_device):
1006
+ for op in operands:
1007
+ if not isinstance(op, torch.Tensor):
1008
+ continue
1009
+ is_contiguous = is_contiguous and op.is_contiguous(
1010
+ memory_format=torch.contiguous_format
1011
+ )
1012
+ is_channels_last = is_channels_last and op.is_contiguous(
1013
+ memory_format=torch.channels_last
1014
+ )
1015
+ if is_contiguous:
1016
+ # do contiguous
1017
+ count_label("fast is_contiguous")
1018
+ return FakeTensor(
1019
+ mode,
1020
+ torch.empty(
1021
+ final_shape,
1022
+ dtype=common_dtype,
1023
+ device="meta",
1024
+ memory_format=torch.contiguous_format,
1025
+ ),
1026
+ device=common_device,
1027
+ )
1028
+ if is_channels_last:
1029
+ count_label("fast channels_last")
1030
+ # do channels last
1031
+ return FakeTensor(
1032
+ mode,
1033
+ torch.empty(
1034
+ final_shape,
1035
+ dtype=common_dtype,
1036
+ device="meta",
1037
+ memory_format=torch.channels_last,
1038
+ ),
1039
+ device=common_device,
1040
+ )
1041
+
1042
+ return slow("no contiguity match")
1043
+
1044
+ return fast_binary_impl
1045
+
1046
+
1047
+ @functools.lru_cache(None)
1048
+ def get_fast_op_impls():
1049
+ import torch._refs
1050
+
1051
+ register_fast_op_impl(torch.ops.aten.add.Tensor)(
1052
+ make_fast_binary_impl(torch._refs.add)
1053
+ )
1054
+ register_fast_op_impl(torch.ops.aten.sub.Tensor)(
1055
+ make_fast_binary_impl(torch._refs.sub)
1056
+ )
1057
+ register_fast_op_impl(torch.ops.aten.mul.Tensor)(make_fast_binary_impl(torch._refs.mul)) # type: ignore[has-type]
1058
+ register_fast_op_impl(torch.ops.aten.div.Tensor)(
1059
+ make_fast_binary_impl(torch._refs.div)
1060
+ )
1061
+ return FAST_OP_IMPLEMENTATIONS
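+
+ # Illustrative sketch (not part of the upstream file): with the fast impls
+ # registered above, a broadcasting elementwise op on fake tensors is given a
+ # shape, dtype and device without invoking a meta kernel, e.g.
+ #
+ #   with FakeTensorMode() as mode:
+ #       a = torch.empty(3, 4)
+ #       b = torch.empty(4)
+ #       out = a + b        # fake tensor of shape (3, 4) on cpu, no real math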
venv/lib/python3.10/site-packages/torch/_subclasses/fake_tensor.py ADDED
@@ -0,0 +1,1819 @@
1
+ # mypy: ignore-errors
2
+
3
+ import contextlib
4
+ import functools
5
+ import logging
6
+ import os
7
+ import traceback
8
+ import weakref
9
+ from collections import defaultdict
10
+ from dataclasses import dataclass
11
+ from typing import Any, Dict, List, Optional, Tuple, Type, TYPE_CHECKING, TypeVar
12
+ from weakref import ReferenceType
13
+
14
+ import torch
15
+ import torch._custom_op
16
+ import torch._logging
17
+ from torch._C._functorch import is_functorch_wrapped_tensor
18
+
19
+ from torch._guards import Source
20
+ from torch._ops import OpOverload
21
+ from torch._prims_common import suggest_memory_format
22
+ from torch._subclasses.meta_utils import (
23
+ assert_eq,
24
+ assert_metadata_eq,
25
+ is_sparse_any,
26
+ is_sparse_compressed,
27
+ MetaConverter,
28
+ )
29
+ from torch._utils import render_call
30
+ from torch.fx.operator_schemas import normalize_function
31
+ from torch.multiprocessing.reductions import StorageWeakRef
32
+ from torch.overrides import TorchFunctionMode
33
+ from torch.utils._mode_utils import no_dispatch
34
+ from torch.utils._python_dispatch import (
35
+ is_traceable_wrapper_subclass,
36
+ TorchDispatchMode,
37
+ )
38
+
39
+ from torch.utils._pytree import PyTree, tree_map
40
+ from torch.utils._stats import count
41
+ from torch.utils.weak import WeakIdRef
42
+
43
+ if TYPE_CHECKING:
44
+ from torch.fx.experimental.symbolic_shapes import ShapeEnv
45
+
46
+ DimList = List
47
+
48
+ log = logging.getLogger(__name__)
49
+
50
+ # TODO: Hack to unblock https://github.com/pytorch/pytorch/pull/108186
51
+ # Proper fix tracked by https://github.com/pytorch/pytorch/issues/120105
52
+ try:
53
+ not_implemented_log = torch._logging.getArtifactLogger(__name__, "not_implemented")
54
+ except ValueError as e:
55
+ if "'not_implemented' not registered" in str(e):
56
+ import logging as not_implemented_log
57
+ else:
58
+ raise e
59
+
60
+ pytree = torch.utils._pytree
61
+ T = TypeVar("T")
62
+ TensorWeakRef = Any
63
+
64
+ aten = torch._ops.ops.aten
65
+
66
+ CONSTANT_NUMEL_LIMIT = 1
67
+
68
+ RECURSION_COUNT = 0
69
+
70
+
71
+ # Small helper that increments recursion count, and
72
+ # decrements it when the object goes out of scope. Useful
73
+ # if you don't want to increase indentation which is
74
+ # what a context manager would do.
75
+ class IncrementRecursionCount:
76
+ def __init__(self):
77
+ global RECURSION_COUNT
78
+ RECURSION_COUNT += 1
79
+
80
+ def __del__(self):
81
+ global RECURSION_COUNT
82
+ RECURSION_COUNT -= 1
83
+
84
+
85
+ @dataclass
86
+ class UnsupportedFakeTensorException(RuntimeError):
87
+ reason: str
88
+
89
+
90
+ @dataclass
91
+ class DynamicOutputShapeException(RuntimeError):
92
+ func: OpOverload
93
+
94
+
95
+ @dataclass
96
+ class DataDependentOutputException(RuntimeError):
97
+ func: OpOverload
98
+
99
+
100
+ @dataclass
101
+ class UnsupportedOperatorException(RuntimeError):
102
+ func: OpOverload
103
+
104
+
105
+ def ordered_set(*items):
106
+ return dict.fromkeys(items, True)
107
+
108
+
109
+ @contextlib.contextmanager
110
+ def unset_fake_temporarily():
111
+ old = torch._C._unset_dispatch_mode(torch._C._TorchDispatchModeKey.FAKE)
112
+ try:
113
+ yield old
114
+ finally:
115
+ if old is not None:
116
+ torch._C._set_dispatch_mode(old)
117
+
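+ # For example (illustrative, not part of the upstream file), this lets code
+ # running under a fake mode temporarily construct real tensors:
+ #
+ #   with FakeTensorMode():
+ #       fake = torch.ones(2)            # FakeTensor
+ #       with unset_fake_temporarily():
+ #           real = torch.ones(2)        # ordinary CPU tensor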
118
+
119
+ def is_fake(x):
120
+ if isinstance(x, FakeTensor):
121
+ return True
122
+ if is_traceable_wrapper_subclass(x):
123
+ attrs, _ = type(x).__tensor_flatten__(x)
124
+ flattened_tensors = [getattr(x, attr) for attr in attrs]
125
+ # need to recurse because we could have nested subclasses
126
+ all_fake = all(is_fake(x) for x in flattened_tensors)
127
+ any_fake = any(is_fake(x) for x in flattened_tensors)
128
+ assert all_fake == any_fake, "got mixed fake and real tensors!"
129
+ return all_fake
130
+ elif isinstance(x, torch.Tensor) and torch._is_functional_tensor(x):
131
+ reapply_views = torch._C._functionalization_reapply_views_tls()
132
+ unwrapped = torch._C._functorch._unwrap_functional_tensor(x, reapply_views)
133
+ return is_fake(unwrapped)
134
+ elif isinstance(x, torch.Tensor) and is_functorch_wrapped_tensor(x):
135
+ unwrapped = torch._C._functorch.get_unwrapped(x)
136
+ return is_fake(unwrapped)
137
+ return False
138
+
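+ # Example (illustrative, not part of the upstream file):
+ #
+ #   mode = FakeTensorMode()
+ #   is_fake(torch.ones(2))                    # False
+ #   is_fake(mode.from_tensor(torch.ones(2)))  # True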
139
+
140
+ def maybe_get_fake_mode(t):
141
+ if isinstance(t, FakeTensor):
142
+ return t.fake_mode
143
+ if is_traceable_wrapper_subclass(t):
144
+ inner_tensor_names, _ = t.__tensor_flatten__()
145
+ modes = [
146
+ maybe_get_fake_mode(getattr(t, t_name)) for t_name in inner_tensor_names
147
+ ]
148
+ m = modes[0]
149
+ assert all(m is x for x in modes)
150
+ return m
151
+ elif isinstance(t, torch.Tensor) and torch._is_functional_tensor(t):
152
+ reapply_views = torch._C._functionalization_reapply_views_tls()
153
+ unwrapped = torch._C._functorch._unwrap_functional_tensor(t, reapply_views)
154
+ return maybe_get_fake_mode(unwrapped)
155
+ elif isinstance(t, torch.Tensor) and is_functorch_wrapped_tensor(t):
156
+ unwrapped = torch._C._functorch.get_unwrapped(t)
157
+ return maybe_get_fake_mode(unwrapped)
158
+ return None
159
+
160
+
161
+ @functools.lru_cache(None)
162
+ def get_schema_info(func):
163
+ return torch._C._SchemaInfo(func._schema) # type: ignore[attr-defined]
164
+
165
+
166
+ # many of the decompositions registered to torch/_prims do not at the moment model
167
+ # aliasing or strides, so as an incremental step, just enable the decompositions in
168
+ # torch/_decomp/decompositions.py.
169
+ # decomps are used for aot autograd tracing so we would like to unify on their
170
+ # implementation and add additional testing to them
171
+ @functools.lru_cache(None)
172
+ def torch_decomp_decompositions(func):
173
+ from torch._decomp import decomposition_table
174
+
175
+ decompositions = torch._decomp.decompositions
176
+ # Note that the function in the decomposition table might be
177
+ # different from the one in the module because of the difference
178
+ # in out handling in aten API and torch public API
179
+ return decomposition_table[func].__module__.startswith(
180
+ "torch._decomp"
181
+ ) and decomposition_table[func].__name__ in dir(decompositions)
182
+
183
+
184
+ def tree_flatten_only(ty: Type[T], tree: PyTree):
185
+ flat_vals = pytree.tree_leaves(tree)
186
+ return [elem for elem in flat_vals if isinstance(elem, ty)]
187
+
188
+
189
+ # Similar to `MetaConverter`, this is a class for converting
190
+ # multiple tensors into fake tensors which share the same view/storage
191
+ # structure. Like `MetaConverter`, it uses `WeakIdRef` to
192
+ # hold a weak reference for all memoized tensors.
193
+ class FakeTensorConverter:
194
+ @property
195
+ def tensor_memo(self):
196
+ return self.meta_converter.tensor_memo
197
+
198
+ meta_converter: MetaConverter
199
+ constant_storage_mapping: Dict[StorageWeakRef, List[ReferenceType]]
200
+
201
+ def __init__(self):
202
+ self.meta_converter = MetaConverter()
203
+
204
+ # map from storage to corresponding constant tensors
205
+ self.constant_storage_mapping = {}
206
+
207
+ def add_constant_storage_mapping(self, fake_tensor):
208
+ # when you have a constant, aliased tensor:
209
+ # const_tensor.add_(torch.rand([1]))
210
+ # all aliases of it must become no longer const
211
+ assert isinstance(fake_tensor, FakeTensor) and fake_tensor.constant is not None
212
+ weak_st = StorageWeakRef(fake_tensor.constant._typed_storage())
213
+
214
+ # we need a map from a weak storage to all of its corresponding
215
+ # constant tensors. python doesn't have the weak value equivalent
216
+ # of defaultdict(list), so we are using a WeakValueDictionary as one
217
+ if weak_st not in self.constant_storage_mapping:
218
+ self.constant_storage_mapping[weak_st] = []
219
+ self.constant_storage_mapping[weak_st].append(weakref.ref(fake_tensor))
220
+
221
+ def invalidate_constant_aliases(self, tensor):
222
+ assert not isinstance(tensor, FakeTensor)
223
+
224
+ weak_st = StorageWeakRef(tensor._typed_storage())
225
+ if weak_st not in self.constant_storage_mapping:
226
+ return
227
+
228
+ for weak_tensor_ref in self.constant_storage_mapping[weak_st]:
229
+ ten = weak_tensor_ref()
230
+ if ten is not None:
231
+ ten._fix_weakref()
232
+ ten.constant = None
233
+
234
+ del self.constant_storage_mapping[weak_st]
235
+
236
+ def _get_memo(self, t):
237
+ if WeakIdRef(t) in self.tensor_memo:
238
+ out = self.tensor_memo[WeakIdRef(t)]
239
+ out._fix_weakref()
240
+ return out
241
+ return None
242
+
243
+ def set_tensor_memo(self, t, v):
244
+ th = WeakIdRef(t)
245
+
246
+ # hold a weak ref to self, otherwise it will be kept alive
247
+ # by the del_ten closure
248
+ self_weak_ref = weakref.ref(self)
249
+
250
+ def del_ten():
251
+ self_ref = self_weak_ref()
252
+ if self_ref is None:
253
+ return
254
+ # on shutdown, th may not be in memo
255
+ self_ref.tensor_memo.pop(th, None)
256
+
257
+ weakref.finalize(t, del_ten)
258
+ self.tensor_memo[th] = v
259
+
260
+ def from_real_tensor(
261
+ self,
262
+ fake_mode,
263
+ t,
264
+ make_constant=False,
265
+ shape_env=None,
266
+ *,
267
+ source=None,
268
+ symbolic_context=None,
269
+ memoized_only=False,
270
+ ):
271
+ # see note [Tensor Fakification and Symbol Caching]
272
+ if not symbolic_context and not source and shape_env:
273
+ if tracing_context := torch._guards.TracingContext.try_get():
274
+ if t in tracing_context.tensor_to_context:
275
+ symbolic_context = tracing_context.tensor_to_context[t]
276
+ source = symbolic_context.tensor_source
277
+
278
+ maybe_memo = self._get_memo(t)
279
+ if maybe_memo is not None:
280
+ return maybe_memo
281
+ if memoized_only:
282
+ return None
283
+ existing_device = t.device
284
+ # not yet supported in metatensors
285
+ if t.is_quantized:
286
+ raise UnsupportedFakeTensorException("quantized nyi in meta tensors")
287
+ if type(t) is torch.nn.Parameter:
288
+ assert not make_constant
289
+
290
+ def mk_fake_tensor(make_meta_t):
291
+ # NB: don't use in_kernel_invocation_manager, to
292
+ # ensure FakeTensor can internally do constant computation
293
+ # as necessary. Invocation manager is "more correct" as
294
+ # it works for more operators in make_meta_t, but
295
+ # invariant is that make_meta_t only calls factories
296
+ # for which it is not strictly necessary to use the
297
+ # invocation manager (I think!)
298
+ with no_dispatch():
299
+ return FakeTensor(
300
+ fake_mode,
301
+ make_meta_t(),
302
+ existing_device,
303
+ constant=t if make_constant else None,
304
+ )
305
+
306
+ out = self.meta_converter(
307
+ t,
308
+ shape_env=shape_env,
309
+ callback=mk_fake_tensor,
310
+ source=source,
311
+ symbolic_context=symbolic_context,
312
+ )
313
+ if out is NotImplemented:
314
+ raise UnsupportedFakeTensorException("meta converter nyi")
315
+ if make_constant:
316
+ self.add_constant_storage_mapping(out)
317
+ # NB: meta_converter set the memo
318
+ return out
319
+
320
+ # If you specify the device, it MUST be a meta tensor.
321
+ def from_meta_and_device(self, fake_mode, t, device):
322
+ assert (
323
+ t.device.type == "meta"
324
+ ), f"tensor's device must be `meta`, got {t.device.type} instead"
325
+ maybe_memo = self._get_memo(t)
326
+ if maybe_memo is not None:
327
+ return maybe_memo
328
+ out = FakeTensor(fake_mode, t, device)
329
+ self.set_tensor_memo(t, out)
330
+ return out
331
+
332
+ # You can have a real tensor that you need to convert into a fake tensor.
333
+ # If you have a meta tensor already, call from_meta_and_device.
334
+ #
335
+ # You're allowed to pass a meta tensor to be turned into a fake
336
+ # tensor; although an odd thing to do, this can occur if you're doing
337
+ # cross ref testing and the inner test is already operating on meta tensors.
338
+ def __call__(
339
+ self,
340
+ fake_mode,
341
+ t,
342
+ *,
343
+ make_constant=False,
344
+ shape_env=None,
345
+ source=None,
346
+ symbolic_context=None,
347
+ memoized_only=False,
348
+ ):
349
+ return self.from_real_tensor(
350
+ fake_mode,
351
+ t,
352
+ make_constant,
353
+ shape_env=shape_env,
354
+ source=source,
355
+ symbolic_context=symbolic_context,
356
+ memoized_only=memoized_only,
357
+ )
358
+
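+ # Example (illustrative, not part of the upstream file): the converter is
+ # normally reached through FakeTensorMode.from_tensor, and memoization means
+ # the same real tensor maps to the same fake tensor across calls:
+ #
+ #   mode = FakeTensorMode()
+ #   t = torch.randn(2, 3)
+ #   assert mode.from_tensor(t) is mode.from_tensor(t)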
359
+
360
+ @functools.lru_cache(None)
361
+ def init_cuda_context():
362
+ # Backward will error with cuda Fake Tensors if no cuda tensors have been initialized first
363
+ if torch.cuda.is_available():
364
+ torch.empty(1, device="cuda") if torch.version.hip is None else torch.zeros(
365
+ 1, device="cuda"
366
+ )
367
+
368
+
369
+ @contextlib.contextmanager
370
+ def in_kernel_invocation_manager(fake_mode):
371
+ # See: note [Fake Tensor Dispatch Keys]
372
+ prev_in_kernel = fake_mode.in_kernel_invocation
373
+ meta_in_tls = torch._C._meta_in_tls_dispatch_include()
374
+ assert meta_in_tls == prev_in_kernel, f"{meta_in_tls}, {prev_in_kernel}"
375
+
376
+ guard = torch._C._DisableTorchDispatch() # type: ignore[attr-defined]
377
+ fake_mode.in_kernel_invocation = True
378
+ torch._C._set_meta_in_tls_dispatch_include(True)
379
+ try:
380
+ yield
381
+ finally:
382
+ fake_mode.in_kernel_invocation = prev_in_kernel
383
+ torch._C._set_meta_in_tls_dispatch_include(prev_in_kernel)
384
+ del guard
385
+
386
+
387
+ # Return if the function allows Python numbers to bind to Tensors
388
+ def should_allow_numbers_as_tensors(func: OpOverload):
389
+ return torch._C._should_allow_numbers_as_tensors(
390
+ func.name().split("::")[-1].split(".")[0]
391
+ )
392
+
393
+
394
+ class FakeTensorConfig:
395
+ debug = os.environ.get("TORCH_FAKE_TENSOR_DEBUG", "0") == "1"
396
+
397
+
398
+ class FakeTensor(torch.Tensor):
399
+ """
400
+ Meta tensors give you the ability to run PyTorch code without having to
401
+ actually do computation through tensors allocated on a `meta` device.
402
+ Because the device is `meta`, meta tensors do not model device propagation.
403
+ FakeTensor extends MetaTensors to also carry an additional `fake_device`
404
+ which tracks devices that would have been used.
405
+ """
406
+
407
+ fake_device: torch.device
408
+ fake_mode: "FakeTensorMode"
409
+ constant: Optional[torch.Tensor]
410
+
411
+ # This memorizes the unbacked SymInt representing the number of nonzero
412
+ # elements in this tensor. This is helpful if you do something like
413
+ # x[mask] and y[mask]; mask.nonzero() gets repeatedly called and should
414
+ # give a consistent unbacked SymInt. It needs to be invalidated in the
415
+ # same way constant is.
416
+ # TODO: Generalize this as needed, e.g., into a trie of memos
417
+ _nonzero_memo: Optional[torch.SymInt]
418
+ _nonzero_memo_vc: Optional[int]
419
+
420
+ # Indicates to our torch_dispatch dispatching infra that
421
+ # this is an "infra" mode with lower dispatching precedence.
422
+ _mode_key = torch._C._TorchDispatchModeKey.FAKE
423
+
424
+ @property
425
+ def nonzero_memo(self):
426
+ if self._nonzero_memo is None:
427
+ return None
428
+ # Version counter based tracking isn't 100% sound but it's close
429
+ # enough
430
+ if self._nonzero_memo_vc != self._version:
431
+ self._nonzero_memo = None
432
+ return None
433
+ return self._nonzero_memo
434
+
435
+ @property
436
+ def device(self):
437
+ if self.fake_mode.in_kernel_invocation:
438
+ return torch.device("meta")
439
+ else:
440
+ return self.fake_device
441
+
442
+ # Note: [Fake Tensor Dispatch Keys]
443
+ # In order to model the behavior of device-specific autocast
444
+ # and autograd logic, we update the dispatch keys of FakeTensors
445
+ # to reflect their fake device. This includes the BackendComponent
446
+ # (DispatchKey::Meta -> DispatchKey::CUDA), and also the BackendComponent
447
+ # related Autocast and Autograd keys. __torch__dispatch__ sits below
448
+ # Autocast and Autograd, and is only invoked when we are at the
449
+ # kernel for the BackendComponent. Then, we add Meta to the
450
+ # thread-local dispatch include set to hit the meta kernel
451
+ # instead of the kernel of the BackendComponent for the fake device.
452
+ # The `device_for_backend_keys` does that below
453
+ # NOTE: this probably will not do the right thing for backends
454
+ # that have dispatch keys which are higher than the "meta" key:
455
+ # https://github.com/pytorch/pytorch/blob/main/c10/core/DispatchKey.h#L189
456
+
457
+ # We don't support named tensors; graph break
458
+ @property
459
+ def names(self):
460
+ raise UnsupportedFakeTensorException(
461
+ "torch.compile doesn't support named tensors"
462
+ )
463
+
464
+ @staticmethod
465
+ def __new__(cls, fake_mode, elem, device, constant=None):
466
+ self = torch.Tensor._make_subclass(
467
+ cls,
468
+ elem,
469
+ elem.requires_grad,
470
+ dispatch_device=True,
471
+ device_for_backend_keys=device,
472
+ )
473
+
474
+ assert elem.device.type == "meta", elem.device.type
475
+ device = device if isinstance(device, torch.device) else torch.device(device)
476
+ # NB: it is fine, if a little confusing, for device to be meta
477
+ # (we are faking a meta tensor in that case). However, it often
478
+ # indicates some sort of confusion (e.g., you accidentally passed
479
+ # in a meta tensor when you should have passed in the real tensor).
480
+ # So by default we disallow meta, and if you are working in a situation
481
+ # where it is helpful (e.g., crossref testing) you can turn it back
482
+ # on
483
+ if not fake_mode.allow_meta:
484
+ assert device.type != "meta"
485
+ # normalize device.
486
+ if device.type == "cuda":
487
+ init_cuda_context()
488
+
489
+ if (
490
+ device.type
491
+ in ["cuda", "hpu", "xpu", torch._C._get_privateuse1_backend_name()]
492
+ and device.index is None
493
+ ):
494
+ device = torch.device(
495
+ f"{device.type}:{getattr(torch, device.type).current_device()}"
496
+ )
497
+ self.fake_device = device # type: ignore[attr-defined]
498
+ self.fake_mode = fake_mode # type: ignore[attr-defined]
499
+ self.constant = constant # type: ignore[attr-defined]
500
+ self._nonzero_memo = None # type: ignore[attr-defined]
501
+ self._nonzero_memo_vc = None # type: ignore[attr-defined]
502
+
503
+ if FakeTensorConfig.debug:
504
+ import traceback
505
+
506
+ self._debug_trace = traceback.extract_stack() # type: ignore[attr-defined]
507
+ return self
508
+
509
+ # In some circumstances, a conventional torch.Tensor constructor
510
+ # will get rewritten to call into FakeTensor. We must provide an
511
+ # __init__ method that can accept the Python interpreter's initialization
512
+ # in such a situation; we must also be able to handle direct fake
513
+ # tensor construction via FakeTensor().
514
+ #
515
+ # In particular, the __init__ call will look funny in the following case:
516
+ #
517
+ # with FakeTensorMode():
518
+ # x = torch.Tensor([1, 2, 3])
519
+ #
520
+ # this desugars into:
521
+ #
522
+ # with FakeTensorMode():
523
+ # x = torch.Tensor.__new__([1, 2, 3])
524
+ # # NB: x is a fake tensor, because of the mode!
525
+ # x.__init__([1, 2, 3]) # not the normal fake tensor args!
526
+ #
527
+ def __init__(self, *args, **kwargs):
528
+ super().__init__()
529
+
530
+ @staticmethod
531
+ def from_tensor(t, fake_mode):
532
+ return fake_mode.from_tensor(t)
533
+
534
+ @classmethod
535
+ @count
536
+ def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
537
+ # need to handle here to avoid infinite recursion
538
+ # see [in_kernel_invocation]
539
+ if func == torch.ops.prim.device.default:
540
+ assert len(args) == 1 and isinstance(args[0], FakeTensor)
541
+ if args[0].fake_mode.in_kernel_invocation:
542
+ return torch.device("meta")
543
+ else:
544
+ return args[0].fake_device
545
+
546
+ # Because fake mode can return NotImplemented (if it sees a subclass
547
+ # it doesn't know how to deal with), this test here is important
548
+ # because the next dispatch after a fake mode will attempt to use
549
+ # subclasses of tensors to dispatch, and any FakeTensor arguments
550
+ # will be considered eligible.
551
+ unrecognized_types = [
552
+ t for t in types if not issubclass(t, FakeTensor) and t is not torch.Tensor
553
+ ]
554
+ if unrecognized_types:
555
+ not_implemented_log.debug(
556
+ "FakeTensor unrecognized subclass(es): %s", unrecognized_types
557
+ )
558
+ return NotImplemented
559
+
560
+ fake_mode = None
561
+ for arg in pytree.arg_tree_leaves(*args, **kwargs):
562
+ if isinstance(arg, FakeTensor):
563
+ fake_mode = arg.fake_mode
564
+ break
565
+
566
+ assert fake_mode is not None
567
+
568
+ # If the fake mode is already active, don't try to reapply it!
569
+ # NotImplemented is the right thing to return here, because the
570
+ # typical situation this can occur is if ProxyTensorMode returned a
571
+ # NotImplemented because of a not implemented subclass; we may have
572
+ # unluckily attempted to hit FakeTensor's dispatch first,
573
+ # NotImplemented lets us keep chaining until we find the actual
574
+ # subclass
575
+ maybe_cur_fake_mode = torch._C._get_dispatch_mode(
576
+ torch._C._TorchDispatchModeKey.FAKE
577
+ )
578
+ if maybe_cur_fake_mode:
579
+ not_implemented_log.debug(
580
+ "FakeTensor mode already active: %s in %s",
581
+ fake_mode,
582
+ maybe_cur_fake_mode,
583
+ )
584
+ return NotImplemented
585
+
586
+ with fake_mode: # type: ignore[attr-defined]
587
+ return func(*args, **kwargs)
588
+
589
+ @staticmethod
590
+ def _find_common_device(func, flat_args) -> Tuple[torch.device, bool]:
591
+ # Returns: (common_device, has_scalar_only_inputs)
592
+
593
+ # cpu - zero-dim tensors can be called in cuda kernels,
594
+ # so overwrite the common_device if the only existing
595
+ # device comes from a cpu zero-dim tensor
596
+ common_device = None
597
+ has_scalar_only_inputs = False
598
+ is_cpu_zero_dim = None
599
+
600
+ def cpu_zero_dim(t):
601
+ return t.device.type == "cpu" and t.dim() == 0
602
+
603
+ def merge_devices(t):
604
+ nonlocal common_device
605
+ nonlocal is_cpu_zero_dim
606
+ if not isinstance(t, FakeTensor):
607
+ return
608
+
609
+ if common_device is None:
610
+ common_device = t.device
611
+ is_cpu_zero_dim = cpu_zero_dim(t)
612
+ return
613
+
614
+ t_is_cpu_zero_dim = cpu_zero_dim(t)
615
+ if t.device == common_device:
616
+ if is_cpu_zero_dim:
617
+ is_cpu_zero_dim = t_is_cpu_zero_dim
618
+ return
619
+
620
+ # mismatching devices !
621
+ # if current tensor is cpu 0 dim, defer to existing device
622
+ if t_is_cpu_zero_dim:
623
+ return
624
+
625
+ # current device is from cpu 0 dim tensor, overwrite
626
+ if is_cpu_zero_dim:
627
+ common_device = t.device
628
+ is_cpu_zero_dim = t_is_cpu_zero_dim
629
+ return
630
+
631
+ # mismatching devices of non-zero dim tensors, throw
632
+ # This might be valid behavior and need to be explicitly modeled, e.g. reshape_as
633
+ raise RuntimeError(
634
+ f"Unhandled FakeTensor Device Propagation for {func}, found two different devices {common_device}, {t.device}"
635
+ )
636
+
637
+ for arg in flat_args:
638
+ merge_devices(arg)
639
+
640
+ # some functions that allow Python numbers to bind to Tensors
641
+ # if we have failed to find a device, and we're running one of these operators,
642
+ # we must have scalar only inputs
643
+ if should_allow_numbers_as_tensors(func) and common_device is None:
644
+ # ops with scalar only inputs always have result on cpu
645
+ has_scalar_only_inputs = True
646
+ common_device = torch.device("cpu")
647
+
648
+ assert common_device is not None, f"Could not find common device for {func}"
649
+
650
+ return common_device, has_scalar_only_inputs
651
+
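+ # For example (illustrative, not part of the upstream file, assumes a CUDA
+ # build and a FakeTensorMode `mode`): a cpu zero-dim fake tensor does not
+ # conflict with a cuda fake tensor, so the result keeps the cuda device:
+ #
+ #   x = mode.from_tensor(torch.empty(3, device="cuda"))
+ #   y = mode.from_tensor(torch.tensor(2.0))   # cpu, zero-dim
+ #   (x * y).fake_device                       # -> device(type='cuda', index=0)
+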
652
+ # We must handle tolist in a special way for FakeTensors here in the case
653
+ # where tolist is called from torch dispatch for tensor subclasses.
654
+ # Ordinarily, if a program calls .tolist compiling still works because there is
655
+ # special handling in dynamo, but for tensor subclasses if .tolist is called
656
+ # inside torch dispatch, the .tolist call may be directly on a FakeTensor.
657
+ # This would result in an error since wrapper subclasses don't have storage.
658
+ # To avoid this, we handle the FakeTensor case by (1) specializing on the size
659
+ # of the tensor to create the output Python list, and (2) creating unbacked
660
+ # symints for each element of the list.
661
+ def tolist(self):
662
+ assert self.dim() == 1, "NYI for higher dims"
663
+ shape_env = self.fake_mode.shape_env
664
+ out = []
665
+ # Specialize on the length of the list
666
+ for _ in range(self.shape[0]):
667
+ s = shape_env.create_unbacked_symint()
668
+ # max value?
669
+ torch._constrain_as_size(s, min=2)
670
+ out.append(s)
671
+ return out
672
+
673
+
674
+ @dataclass(frozen=True)
675
+ class TensorMetadata:
676
+ """
677
+ The Tensor metadata relevant to hashing FakeTensors when caching.
678
+ """
679
+
680
+ dtype: torch.dtype
681
+ shape: torch.Size
682
+ stride: Tuple[Any, ...]
683
+ device: torch.device
684
+ layout: torch.layout
685
+ memory_format: Optional[torch.memory_format]
686
+ storage_offset: int
687
+ requires_grad: bool
688
+ is_quantized: bool
689
+ is_conj: bool
690
+ is_neg: bool
691
+ is_inference: bool
692
+ is_sparse: bool # read: is sparse COO
693
+ is_coalesced: Optional[bool]
694
+ dense_dim: Optional[int]
695
+ sparse_dim: Optional[int]
696
+
697
+
698
+ def extract_tensor_metadata(t: torch.Tensor) -> "TensorMetadata":
699
+ """
700
+ Extract the TensorMetadata of a tensor.
701
+ """
702
+ memory_format = suggest_memory_format(t)
703
+ if is_sparse_any(t) or not t.is_contiguous(memory_format=memory_format):
704
+ memory_format = None
705
+
706
+ return TensorMetadata(
707
+ dtype=t.dtype,
708
+ shape=t.shape,
709
+ stride=t.stride() if t.layout == torch.strided else (),
710
+ device=t.device,
711
+ layout=t.layout,
712
+ memory_format=memory_format,
713
+ storage_offset=t.storage_offset(),
714
+ requires_grad=t.requires_grad,
715
+ is_quantized=t.is_quantized,
716
+ is_conj=t.is_conj(),
717
+ is_neg=t.is_neg(),
718
+ is_inference=t.is_inference(),
719
+ is_sparse=t.is_sparse,
720
+ is_coalesced=t.is_coalesced() if t.is_sparse else None,
721
+ dense_dim=t.dense_dim() if t.is_sparse else None,
722
+ sparse_dim=t.sparse_dim() if t.is_sparse else None,
723
+ )
724
+
725
+
726
+ @dataclass(frozen=True)
727
+ class _ShapeEnvSettings:
728
+ """
729
+ Encapsulates all shape env settings that could potentially affect
730
+ FakeTensor dispatch. Used when creating dispatch cache keys.
731
+ """
732
+
733
+ allow_scalar_outputs: bool
734
+ allow_dynamic_output_shape_ops: bool
735
+ assume_static_by_default: bool
736
+ specialize_zero_one: bool
737
+ duck_shape: bool
738
+
739
+ def __init__(self, env: "ShapeEnv"):
740
+ # Initialize this way because the class is frozen (to enable hashing):
741
+ object.__setattr__(self, "allow_scalar_outputs", env.allow_scalar_outputs)
742
+ object.__setattr__(
743
+ self, "allow_dynamic_output_shape_ops", env.allow_dynamic_output_shape_ops
744
+ )
745
+ object.__setattr__(
746
+ self, "assume_static_by_default", env.assume_static_by_default
747
+ )
748
+ object.__setattr__(self, "specialize_zero_one", env.specialize_zero_one)
749
+ object.__setattr__(self, "duck_shape", env.duck_shape)
750
+
751
+
752
+ class _DispatchCacheKey(list):
753
+ """
754
+ Key for the FakeTensor dispatch cache. Inspired by (copied from)
755
+ _HashedSeq from the functools.lru_cache implementation.
756
+ """
757
+
758
+ __slots__ = "hashvalue" # noqa: PLC0205
759
+
760
+ def __init__(self, tup, hash=hash):
761
+ self[:] = tup
762
+ self.hashvalue = hash(tup)
763
+
764
+ def __hash__(self):
765
+ return self.hashvalue
766
+
767
+
768
+ @dataclass(frozen=True)
769
+ class _DispatchCacheEntry:
770
+ """
771
+ Entry type for the FakeTensor dispatch cache. Accounts for two possibilities:
772
+ 1) The op is inplace, and a hit means we need to alias the argument at a given
773
+ index. 2) We need to synthesize a new FakeTensor given tensor metadata. For view
774
+ ops, we further capture the index of the arg to alias.
775
+ """
776
+
777
+ inplace_idx: Optional[int] = None
778
+ metadata: Optional[TensorMetadata] = None
779
+ view_idx: Optional[int] = None
780
+
781
+
782
+ @dataclass(frozen=True)
783
+ class _BypassDispatchCache(Exception):
784
+ """
785
+ Signals cases that should skip FakeTensor caching.
786
+ """
787
+
788
+ reason: str
789
+
790
+
791
+ @dataclass(frozen=True)
792
+ class DispatchCacheInfo:
793
+ """
794
+ Information about the state of the FakeTensor dispatch cache.
795
+ """
796
+
797
+ hits: int
798
+ misses: int
799
+ bypasses: Dict[str, int]
800
+ size: int
801
+
802
+
803
+ # We keep one instantiation of `fake_tensor_converter` active
804
+ # for the duration of `with FakeTensorMode()`.
805
+ # This allows accurate storage aliasing across invocation of
806
+ # different operators. While this will keep all freshly allocated
807
+ # tensors alive during `FakeTensorMode`, there will be no
808
+ # new allocations of Tensors which have non-meta storage so
809
+ # memory should not significantly increase.
810
+
811
+
812
+ class FakeTensorMode(TorchDispatchMode):
813
+ cache: Dict[_DispatchCacheKey, _DispatchCacheEntry] = {}
814
+ cache_hits: int = 0
815
+ cache_misses: int = 0
816
+ cache_bypasses = defaultdict(int)
817
+
818
+ def __init__(
819
+ self,
820
+ *,
821
+ allow_fallback_kernels=True,
822
+ allow_non_fake_inputs=False,
823
+ shape_env=None,
824
+ static_shapes=None,
825
+ ):
826
+ log.debug("create_mode 0x%x", id(self))
827
+ self.allow_fallback_kernels = allow_fallback_kernels
828
+ self.fake_tensor_converter = FakeTensorConverter()
829
+ if static_shapes is not None:
830
+ self.static_shapes = static_shapes
831
+ else:
832
+ self.static_shapes = shape_env is None
833
+
834
+ import torch._dynamo.config
835
+ import torch._functorch.config
836
+
837
+ self.allow_meta = torch._functorch.config.fake_tensor_allow_meta
838
+ self.cache_enabled = torch._dynamo.config.fake_tensor_cache_enabled
839
+ self.cache_crosscheck_enabled = (
840
+ torch._dynamo.config.fake_tensor_cache_crosscheck_enabled
841
+ )
842
+
843
+ # A flag that controls whether we want to invoke ops on a mix of
844
+ # real weights/global variables and fake inputs
845
+ self.allow_non_fake_inputs = allow_non_fake_inputs
846
+
847
+ # [in_kernel_invocation]
848
+ # when FakeTensor is invoked in user code, .device should return
849
+ # the fake_device of the tensor so that code such as `if x.is_cuda`
850
+ # or torch.zeros([10, 10], device=x.device) continues to execute as if
851
+ # the FakeTensor were real. However, within kernel execution, we return
852
+ # the `Meta` device because all computation within the kernels should
853
+ # behave as if the Tensors are on meta devices. Kernels should allocate
854
+ # new tensors on meta devices, and checks like `is_meta` should return true.
855
+ # within python refs, we always return the real device by defining
856
+ # the device property
857
+ self.in_kernel_invocation = False
858
+
859
+ # True if we entered and actually enabled fake tensor mode,
860
+ # false if it was a no-op. Not thread safe but neither is
861
+ # in_kernel_invocation
862
+ # If another fake mode was already active when we enter, we also stash it here.
863
+ # That way when we exit, we know to re-enable the previous fake mode.
864
+ self.enter_stack: List[Tuple[bool, Optional[FakeTensorMode]]] = []
865
+
866
+ self.shape_env = shape_env
867
+
868
+ self.stack = "".join(traceback.format_stack())
869
+
870
+ # Indicates to our torch_dispatch dispatching infra that
871
+ # this is an "infra" mode with lower dispatching precedence.
872
+ self._mode_key = torch._C._TorchDispatchModeKey.FAKE
873
+
874
+ # Typically, there is only one fake tensor mode and you test for it by
875
+ # doing an isinstance test. However, in some situations, there might be
876
+ # TWO fake tensor modes. The canonical example of this is exporting
877
+ # a fake model: there is an outer fake mode created by the user, and
878
+ # an inner fake mode created by Dynamo. The two phase process is required
879
+ # because the outer fake mode typically won't have a ShapeEnv, even if
880
+ # the user is interested in exporting with dynamic shapes (so the inner
881
+ # fake mode will actually have a ShapeEnv and swap in symbolic sizes.)
882
+ #
883
+ # In this case, it's insufficient to test only one FakeTensor: you need
884
+ # to distinguish between our fake tensor and other fake tensors. That's
885
+ # what this function does.
886
+ def is_our_fake(self, t):
887
+ return isinstance(t, FakeTensor) and t.fake_mode is self
888
+
889
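+ # Illustrative sketch (editor's addition, not part of the original source):
+ # with two fake modes active, as in the export scenario described above,
+ # is_our_fake distinguishes "our" fakes from fakes owned by another mode.
+ #
+ #     outer = FakeTensorMode()
+ #     inner = FakeTensorMode()
+ #     t = outer.from_tensor(torch.randn(2))
+ #     assert outer.is_our_fake(t)
+ #     assert not inner.is_our_fake(t)   # fake, but owned by a different mode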
+ @count
890
+ def __torch_dispatch__(self, func, types, args=(), kwargs=None):
891
+ # FakeTensorMode should not be set when we're inside of it.
892
+ assert (
893
+ torch._C._get_dispatch_mode(torch._C._TorchDispatchModeKey.FAKE) is None
894
+ ), func
895
+ try:
896
+ return self.dispatch(func, types, args, kwargs)
897
+ except TypeError:
898
+ log.exception("fake tensor raised TypeError")
899
+ raise
900
+
901
+ # No-op if FakeTensorMode is already in use
902
+ def __enter__(self):
903
+ maybe_prev_fake_mode = torch._C._unset_dispatch_mode(self._mode_key)
904
+ if self is not maybe_prev_fake_mode:
905
+ self.enter_stack.append((True, maybe_prev_fake_mode))
906
+ return super().__enter__()
907
+ else:
908
+ # no-op (still need to re-set the fake mode though since we unset it)
909
+ torch._C._set_dispatch_mode(self)
910
+ self.enter_stack.append((False, None))
911
+ return self
912
+
913
+ def __exit__(self, a, b, c):
914
+ live, maybe_prev_fake_mode = self.enter_stack.pop()
915
+ if live:
916
+ out = super().__exit__(a, b, c)
917
+ # Re-enable the previous fake mode, if there was one.
918
+ if maybe_prev_fake_mode is not None:
919
+ torch._C._set_dispatch_mode(maybe_prev_fake_mode)
920
+
921
+ @classmethod
922
+ def cache_info(cls) -> DispatchCacheInfo:
923
+ """
924
+ Query the state of the dispatch cache.
925
+ """
926
+ return DispatchCacheInfo(
927
+ FakeTensorMode.cache_hits,
928
+ FakeTensorMode.cache_misses,
929
+ dict(FakeTensorMode.cache_bypasses),
930
+ len(FakeTensorMode.cache),
931
+ )
932
+
933
+ @classmethod
934
+ def cache_clear(cls):
935
+ """
936
+ Clear the dispatch cache.
937
+ """
938
+ cls.cache_hits = 0
939
+ cls.cache_misses = 0
940
+ cls.cache_bypasses.clear()
941
+ cls.cache.clear()
942
+
943
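+     # Illustrative sketch (editor's addition, not part of the original source):
+     # the two classmethods above expose the class-level dispatch cache counters.
+     # Assuming caching is enabled via torch._dynamo.config.fake_tensor_cache_enabled,
+     # usage might look like:
+     #
+     #     with FakeTensorMode() as mode:
+     #         x = mode.from_tensor(torch.ones(4))
+     #         y = x + x                       # populates the dispatch cache
+     #         y2 = x + x                      # likely a cache hit now
+     #     info = FakeTensorMode.cache_info()  # hits / misses / bypasses / size
+     #     FakeTensorMode.cache_clear()        # reset counters and entries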
+ def _cached_dispatch_impl(
944
+ self,
945
+ func: OpOverload,
946
+ types: Tuple[Any, ...],
947
+ args: Tuple[Any, ...],
948
+ kwargs: Dict[str, Any],
949
+ ):
950
+ """
951
+ Look up a cache entry for the given arguments. If none exists, dispatch
952
+ and cache the result (if the result is eligible for caching).
953
+ """
954
+ output = unassigned = object()
955
+ try:
956
+ key = self._cache_key(func, args, kwargs)
957
+ entry = FakeTensorMode.cache.get(key, None)
958
+ if entry is not None:
959
+ output = self._output_from_cache_entry(entry, func, args)
960
+ FakeTensorMode.cache_hits += 1
961
+ if self.cache_crosscheck_enabled:
962
+ # For debugging / testing: Validate that the output synthesized
963
+ # from the cache matches the output created by normal dispatch.
964
+ self._crosscheck_cache_output(output, func, types, args, kwargs)
965
+ else:
966
+ output = self._dispatch_impl(func, types, args, kwargs)
967
+ entry = self._make_cache_entry(key, func, args, kwargs, output)
968
+ FakeTensorMode.cache[key] = entry
969
+ FakeTensorMode.cache_misses += 1
970
+ except _BypassDispatchCache as e:
971
+ FakeTensorMode.cache_bypasses[e.reason] += 1
972
+
973
+ if output is unassigned:
974
+ output = self._dispatch_impl(func, types, args, kwargs)
975
+
976
+ return output
977
+
978
+ def _cache_key(
979
+ self,
980
+ func: OpOverload,
981
+ args: Tuple[Any, ...],
982
+ kwargs: Dict[str, Any],
983
+ ) -> _DispatchCacheKey:
984
+ """
985
+ Create a cache key given the dispatch args. Raises _BypassDispatchCache
986
+ for any situation that precludes caching.
987
+ """
988
+ # Avoid caching for any ops that would require a more sophisticated
989
+ # caching implementation, e.g., data dependent ops or ops that modify
990
+ # the inputs.
991
+ if torch.Tag.data_dependent_output in func.tags:
992
+ raise _BypassDispatchCache("data dependent output")
993
+
994
+ if torch.Tag.dynamic_output_shape in func.tags:
995
+ raise _BypassDispatchCache("dynamic output shape")
996
+
997
+ if torch.Tag.inplace_view in func.tags:
998
+ raise _BypassDispatchCache("inplace view")
999
+
1000
+ if func == aten._unsafe_view.default:
1001
+ raise _BypassDispatchCache("unsafe view")
1002
+
1003
+ if func in self.lift_fns:
1004
+ raise _BypassDispatchCache("lift")
1005
+
1006
+ if not torch._library.utils.is_builtin(func):
1007
+ raise _BypassDispatchCache("non-builtin")
1008
+
1009
+ # In order to handle storage aliasing, we need to establish the alias
1010
+ # for any view op on a cache hit. But CompositeImplicitAutograd ops may
1011
+ # or may not alias the input, so just punt on caching these.
1012
+ if func.is_view and torch._C._dispatch_has_kernel_for_dispatch_key(
1013
+ func.name(), torch._C.DispatchKey.CompositeImplicitAutograd
1014
+ ):
1015
+ raise _BypassDispatchCache("CompositeImplicitAutograd")
1016
+
1017
+ key_values = (
1018
+ func,
1019
+ # Translate any FakeTensor args to metadata.
1020
+ self._prep_args_for_hash(args) if args else (),
1021
+ self._prep_args_for_hash(kwargs) if kwargs else (),
1022
+ # Capture the default_dtype mode since that can affect the output tensor,
1023
+ # e.g., when operating on constant float values.
1024
+ torch.get_default_dtype(),
1025
+ # Capture the current device to support, e.g., cache tensor creation,
1026
+ # where there isn't necessarily a tensor to take the device from.
1027
+ torch._C._get_default_device(),
1028
+ # We want to create tensors from cached metadata only when the inference
1029
+ # mode is the same.
1030
+ torch.is_inference_mode_enabled(),
1031
+ # Shape env settings could affect behavior. One example seen in the wild:
1032
+ # Disallowing dynamic shapes can introduce a DynamicOutputShapeException
1033
+ # where it wasn't seen on a previous instance of the same op.
1034
+ _ShapeEnvSettings(self.shape_env) if self.shape_env else None,
1035
+ )
1036
+ return _DispatchCacheKey(key_values)
1037
+
1038
+ def _prep_args_for_hash(self, args: Any) -> Any:
1039
+ """
1040
+ Translate the provided args into a form suitable for caching at FakeTensor
1041
+ dispatch, i.e., convert unhashable types like lists & dicts into tuples and
1042
+ convert FakeTensors into metadata. Raises _BypassDispatchCache to signal
1043
+ unsupported cases that should bypass caching.
1044
+ """
1045
+ if isinstance(args, dict):
1046
+ args = list(args.keys()) + list(args.values())
1047
+
1048
+ result = []
1049
+ for arg in args:
1050
+ if isinstance(arg, FakeTensor):
1051
+ if not self.is_our_fake(arg):
1052
+ raise _BypassDispatchCache("not our fake")
1053
+ if arg._has_symbolic_sizes_strides:
1054
+ raise _BypassDispatchCache("symbolic shape")
1055
+ if arg.constant is not None:
1056
+ raise _BypassDispatchCache("constant attribute")
1057
+ if arg.is_sparse:
1058
+ raise _BypassDispatchCache("sparse tensor")
1059
+ if is_sparse_compressed(arg):
1060
+ raise _BypassDispatchCache("sparse compressed tensor")
1061
+ result.append(extract_tensor_metadata(arg))
1062
+ elif isinstance(arg, torch.Tensor):
1063
+ raise _BypassDispatchCache("non-fake tensor")
1064
+ elif isinstance(arg, (torch.SymBool, torch.SymInt, torch.SymFloat)):
1065
+ raise _BypassDispatchCache("symbolic shape")
1066
+ elif isinstance(arg, (list, tuple, dict)):
1067
+ result.extend(self._prep_args_for_hash(arg))
1068
+ else:
1069
+ # It's important to capture the type of the arg since, e.g., 1 and 1.0
1070
+ # hash to the same value, but can produce different dtypes for the
1071
+ # output tensor.
1072
+ result.append((type(arg), arg))
1073
+
1074
+ return tuple(result)
1075
+
1076
+ def _make_cache_entry(
1077
+ self,
1078
+ key: _DispatchCacheKey,
1079
+ func: OpOverload,
1080
+ args: Tuple[Any, ...],
1081
+ kwargs: Dict[str, Any],
1082
+ output: FakeTensor,
1083
+ ) -> _DispatchCacheEntry:
1084
+ """
1085
+ Make a cache entry object for the given 'output' Tensor. Raises
1086
+ _BypassDispatchCache if the output tensor has characteristics that
1087
+ prevent caching it.
1088
+ """
1089
+ # Some ops return tuples of Tensors, but it's rare, so avoid
1090
+ # the complexity of caching other types.
1091
+ if not isinstance(output, FakeTensor):
1092
+ raise _BypassDispatchCache("non-FakeTensor output")
1093
+
1094
+ # Avoid caching FakeTensors with constants attached since those
1095
+ # can be invalidated.
1096
+ if output.constant is not None:
1097
+ raise _BypassDispatchCache("constant attribute")
1098
+
1099
+ # TODO: support caching sparse outputs?
1100
+ if output.is_sparse:
1101
+ raise _BypassDispatchCache("sparse output")
1102
+
1103
+ if is_sparse_compressed(output):
1104
+ raise _BypassDispatchCache("sparse compressed output")
1105
+
1106
+ # Can an in-place op really reference a kwarg? If so, then we need
1107
+ # to extend the implementation to handle it.
1108
+ for kval in kwargs.values():
1109
+ if id(kval) == id(output):
1110
+ raise _BypassDispatchCache("kwarg aliases output")
1111
+
1112
+ # If this is an in-place op, the entry records which input arg is aliased.
1113
+ for idx in range(len(args)):
1114
+ if id(args[idx]) == id(output):
1115
+ return _DispatchCacheEntry(
1116
+ inplace_idx=idx, metadata=None, view_idx=None
1117
+ )
1118
+
1119
+ # Otherwise, create an entry that records the output tensor's metadata.
1120
+ view_idx = None
1121
+ if func.is_view:
1122
+ idxs = [i for i, t in enumerate(args) if isinstance(t, torch.Tensor)]
1123
+ assert len(idxs) == 1
1124
+ view_idx = idxs[0]
1125
+
1126
+ metadata = extract_tensor_metadata(output)
1127
+ entry = _DispatchCacheEntry(
1128
+ inplace_idx=None, metadata=metadata, view_idx=view_idx
1129
+ )
1130
+
1131
+ # N.B.: Some checks for bypassing the cache would be performed on the
1132
+ # output tensor synthesized from the cached metadata. As an optimization,
1133
+ # we can synthesize a tensor here and do the checks on that instance.
1134
+ # This approach keeps the (more frequent) cache-hit path as lightweight
1135
+ # as possible.
1136
+ synth_output = self._output_from_cache_entry(entry, func, args)
1137
+
1138
+ # Make sure the dispatch_key_set from the synthesized output tensor will
1139
+ # be the same.
1140
+ synth_key_set = torch._C._dispatch_key_set(synth_output)
1141
+ key_set = torch._C._dispatch_key_set(output)
1142
+ if synth_key_set != key_set:
1143
+ raise _BypassDispatchCache("dispatch_key_set mismatch")
1144
+
1145
+ return entry
1146
+
1147
+ def _output_from_cache_entry(
1148
+ self, entry: _DispatchCacheEntry, func: OpOverload, args: Tuple[Any, ...]
1149
+ ) -> FakeTensor:
1150
+ """
1151
+ Create a new FakeTensor from the cache entry.
1152
+ """
1153
+ if entry.inplace_idx is not None:
1154
+ # This is an in-place op; return the aliased arg.
1155
+ return args[entry.inplace_idx]
1156
+
1157
+ # Synthesize a new FakeTensor with the cached metadata.
1158
+ metadata = entry.metadata
1159
+ assert not metadata.is_sparse
1160
+
1161
+ empty = torch.empty_strided(
1162
+ metadata.shape,
1163
+ metadata.stride,
1164
+ dtype=metadata.dtype,
1165
+ layout=metadata.layout,
1166
+ device="meta",
1167
+ requires_grad=metadata.requires_grad,
1168
+ )
1169
+
1170
+ if metadata.is_conj:
1171
+ torch._C._set_conj(empty, True)
1172
+ if metadata.is_neg:
1173
+ torch._C._set_neg(empty, True)
1174
+
1175
+ if func.is_view:
1176
+ # For view ops, the storage should be the same as the tensor input.
1177
+ storage = args[entry.view_idx].untyped_storage()
1178
+ with in_kernel_invocation_manager(self):
1179
+ empty.set_(
1180
+ storage, metadata.storage_offset, metadata.shape, metadata.stride
1181
+ )
1182
+ elif metadata.storage_offset != 0:
1183
+ storage = empty.untyped_storage()
1184
+ with in_kernel_invocation_manager(self):
1185
+ empty.set_(
1186
+ storage, metadata.storage_offset, metadata.shape, metadata.stride
1187
+ )
1188
+
1189
+ return FakeTensor(self, empty, metadata.device)
1190
+
1191
+ def _crosscheck_cache_output(
1192
+ self,
1193
+ output: FakeTensor,
1194
+ func: OpOverload,
1195
+ types: Tuple[Any, ...],
1196
+ args: Tuple[Any, ...],
1197
+ kwargs: Dict[str, Any],
1198
+ ):
1199
+ """
1200
+ Helper to validate that the output synthesized from the cache matches
1201
+ the output created by normal dispatch.
1202
+ """
1203
+ try:
1204
+ true_output = self._dispatch_impl(func, types, args, kwargs)
1205
+ except Exception as e:
1206
+ raise RuntimeError(
1207
+ f"FakeTensor cache crosscheck failure: func={func}, "
1208
+ f"args={args}, kwargs={kwargs}: Dispatch raised={e}"
1209
+ ) from e
1210
+ try:
1211
+ assert_metadata_eq(assert_eq, true_output, output)
1212
+ except Exception as e:
1213
+ raise RuntimeError(
1214
+ f"FakeTensor cache crosscheck failure: func={func}, "
1215
+ f"args={args}, kwargs={kwargs}"
1216
+ ) from e
1217
+
1218
+ def dispatch(self, func, types, args=(), kwargs=None):
1219
+ kwargs = kwargs or {}
1220
+ with no_dispatch():
1221
+ log.debug("%s %s %s", func, args, kwargs)
1222
+
1223
+ if func in _DISPATCH_META_HANDLERS:
1224
+ return _DISPATCH_META_HANDLERS[func](args)
1225
+
1226
+ if log.getEffectiveLevel() <= logging.DEBUG:
1227
+ log.debug(
1228
+ "%sFakeTensorMode.__torch_dispatch__: %s", " " * RECURSION_COUNT, func
1229
+ )
1230
+ # NOTE: incr is intentionally unused for a RAII pattern
1231
+ incr = IncrementRecursionCount()
1232
+
1233
+ # Some attribute queries that can be serviced directly
1234
+ # See Note [is_coalesced is dispatched]
1235
+ if func in _DISPATCH_HANDLE_DIRECTLY:
1236
+ # NB: no_dispatch is ok here too, this func is very simple
1237
+ with in_kernel_invocation_manager(self):
1238
+ return func(*args, **kwargs)
1239
+
1240
+ if self.cache_enabled:
1241
+ return self._cached_dispatch_impl(func, types, args, kwargs)
1242
+ else:
1243
+ return self._dispatch_impl(func, types, args, kwargs)
1244
+
1245
+ def _dispatch_impl(self, func, types, args, kwargs):
1246
+ flat_args, args_spec = pytree.tree_flatten((args, kwargs))
1247
+
1248
+ flat_arg_fake_tensors = [
1249
+ t for t in flat_args if isinstance(t, FakeTensor) and self.is_our_fake(t)
1250
+ ]
1251
+ has_symbolic_sizes = any(
1252
+ i._has_symbolic_sizes_strides for i in flat_arg_fake_tensors
1253
+ ) or any(isinstance(a, torch.SymInt) for a in flat_args)
1254
+
1255
+ converter = self.fake_tensor_converter
1256
+
1257
+ def maybe_to_constant(t):
1258
+ if isinstance(t, FakeTensor) and self.is_our_fake(t):
1259
+ return t.constant
1260
+ else:
1261
+ return t
1262
+
1263
+ # To constant propagate through these functions:
1264
+ # 1. If this is a lift due to a torch.tensor call,
1265
+ # the input tensor is guaranteed to be a
1266
+ # constant, so we keep a copy of the original argument so that
1267
+ # we can query it if we're asked to item() it at some later point.
1268
+ # (Note that you can always call a lift fn manually, so we do
1269
+ # have to check if there are any fake tensors!)
1270
+ # 2. Some functions that allow Python numbers to bind to Tensors, e.g., torch.div
1271
+ if (func in self.lift_fns and not flat_arg_fake_tensors) or (
1272
+ should_allow_numbers_as_tensors(func)
1273
+ and not has_symbolic_sizes
1274
+ and not flat_arg_fake_tensors
1275
+ ):
1276
+ assert all(
1277
+ t.constant is not None for t in flat_arg_fake_tensors
1278
+ ), f"{func} should not have fake inputs without constants"
1279
+ const_flat_args = [maybe_to_constant(a) for a in flat_args]
1280
+ const_args, const_kwargs = pytree.tree_unflatten(const_flat_args, args_spec)
1281
+ out = func(*const_args, **const_kwargs)
1282
+ if type(out) is torch.Tensor and self.may_turn_const(out):
1283
+ # NB: not in_kernel_invocation_manager because we're doing real
1284
+ # compute here
1285
+ # NB: no_dispatch() here is VERY DANGEROUS (like, segfault
1286
+ # dangerous) if this is actually a wrapper subclass tensor,
1287
+ # therefore the exact type test above
1288
+ with no_dispatch():
1289
+ out = out.clone()
1290
+ return converter(self, out, make_constant=True)
1291
+
1292
+ # See [subclass inputs] below
1293
+ # NB: If you're seeing a mysterious infinite loop involving fake
1294
+ # tensor, it might be related to this line. Though I'm not sure
1295
+ # how you'll know to read this comment, as this line won't show up
1296
+ # in the stack trace.
1297
+ unrecognized_types = self.check_for_subclass(flat_args)
1298
+ if unrecognized_types:
1299
+ not_implemented_log.debug(
1300
+ "FakeTensorMode unrecognized subclass(es): %s", unrecognized_types
1301
+ )
1302
+ return NotImplemented
1303
+
1304
+ # if we are in the dispatch mode, we will enter this function even if the inputs
1305
+ # are not FakeTensors. For now, throw if there are any non-Fake Tensor inputs
1306
+ # and just support constructors.
1307
+
1308
+ # this is generated from torch.tensor(), which does not use the
1309
+ # dispatcher, to allow wrapper subclasses to wrap the new tensor
1310
+ if func in self.lift_fns:
1311
+ assert len(kwargs) == 0 and len(args) == 1, f"{args} {kwargs}"
1312
+
1313
+ if type(args[0]) is torch.Tensor:
1314
+ return converter(self, args[0])
1315
+
1316
+ # Recompute flat_arg_fake_tensors here again in case some of the inputs
1317
+ # were real tensors and fakified in validate_and_convert_non_fake_tensors
1318
+ (flat_args, flat_arg_fake_tensors) = self.validate_and_convert_non_fake_tensors(
1319
+ func, converter, flat_args, args_spec
1320
+ )
1321
+ del args, kwargs # Invalidated
1322
+
1323
+ # The current constant handling only supports tracing systems
1324
+ # (aot autograd, torchdynamo) where each operation is run consecutively.
1325
+ # Because each operation is run in order, we can trace out and support
1326
+ # sequences like: x = torch.tensor(0.); y = x.add_(1)
1327
+ # Whenever a constant is written to but with inputs that cannot be evaluated
1328
+ # statically, such as random_(), we invalidate all constants that alias the input
1329
+ # We will rely on functionalization for use of fake tensor constants as persistent
1330
+ # objects on an FX Graph.
1331
+
1332
+ # We dispatch size/stride/numel on the FakeTensor not its constant, so bail on inplace_view
1333
+ all_constant = all(e.constant is not None for e in flat_arg_fake_tensors)
1334
+ if (
1335
+ torch.Tag.nondeterministic_seeded not in func.tags
1336
+ and torch.Tag.inplace_view not in func.tags
1337
+ and all_constant
1338
+ and len(flat_arg_fake_tensors) != 0
1339
+ and not has_symbolic_sizes
1340
+ ):
1341
+ const_flat_args = [maybe_to_constant(a) for a in flat_args]
1342
+ const_args, const_kwargs = pytree.tree_unflatten(const_flat_args, args_spec)
1343
+
1344
+ # NB: not in_kernel_invocation_manager(self) as we want to do REAL
1345
+ # compute
1346
+ with no_dispatch():
1347
+ out = func(*const_args, **const_kwargs)
1348
+
1349
+ flat_out = pytree.tree_leaves(out)
1350
+ flat_out_tensors = [t for t in flat_out if isinstance(t, torch.Tensor)]
1351
+ all_constant = all(self.may_turn_const(t) for t in flat_out_tensors)
1352
+
1353
+ if all_constant:
1354
+ return pytree.tree_map_only(
1355
+ torch.Tensor,
1356
+ lambda t: converter(self, t, make_constant=True),
1357
+ out,
1358
+ )
1359
+
1360
+ # we weren't able to turn outputs to constants,
1361
+ # so invalidate all constants that might be aliases of the outputs
1362
+ for ten in flat_out_tensors:
1363
+ converter.invalidate_constant_aliases(ten)
1364
+
1365
+ # we are falling through to running non constant tensors, any input constant that
1366
+ # is written to must be invalidated
1367
+ args, kwargs = pytree.tree_unflatten(flat_args, args_spec)
1368
+ self.invalidate_written_to_constants(func, flat_arg_fake_tensors, args, kwargs)
1369
+
1370
+ # Try for fastpath
1371
+ if has_symbolic_sizes:
1372
+ fast_impl = get_fast_op_impls().get(func)
1373
+ if fast_impl is not None:
1374
+ return fast_impl(self, *args, **kwargs)
1375
+
1376
+ # If there's a Python meta, prefer that over the decomposition
1377
+ from torch._decomp import meta_table as meta_table
1378
+
1379
+ if func not in meta_table and not self.cpp_meta_supports_symint(func):
1380
+ from torch._decomp import decomposition_table
1381
+
1382
+ # Prefer Python decompositions over C++ ones
1383
+ if func in decomposition_table and (
1384
+ has_symbolic_sizes
1385
+ or (
1386
+ # TODO: Remove these exclusions, so that we can remove
1387
+ # this leg entirely
1388
+ torch_decomp_decompositions(func)
1389
+ and all(not e.is_sparse for e in flat_arg_fake_tensors)
1390
+ )
1391
+ ):
1392
+ with self:
1393
+ return decomposition_table[func](*args, **kwargs)
1394
+
1395
+ with self:
1396
+ # Decomposes CompositeImplicitAutograd ops
1397
+ r = func.decompose(*args, **kwargs)
1398
+ if r is not NotImplemented:
1399
+ return r
1400
+
1401
+ # prims already wrap FakeTensor inputs to FakeTensor outputs
1402
+ # and do device logic, we don't need to do anything but run them
1403
+ # and ensure that Meta kernels are dispatched to (see the note on
1404
+ # Fake Tensor Dispatch Keys)
1405
+ # TODO - we should use the prim aten impl
1406
+ # TODO - fix prims complex ops
1407
+ if (
1408
+ "prims::" in func._schema.name
1409
+ and hasattr(func, "prim_meta_impl")
1410
+ and not stride_incorrect_op(func)
1411
+ ):
1412
+ with self:
1413
+ return func.prim_meta_impl(*args, **kwargs)
1414
+
1415
+ # Users can register FakeTensor rules for custom operators
1416
+ # Call them if they exist.
1417
+ maybe_abstract_impl = torch._library.simple_registry.singleton.find(
1418
+ func.name()
1419
+ ).abstract_impl.kernel
1420
+ if maybe_abstract_impl:
1421
+ ctx = torch._library.abstract_impl.AbstractImplCtx(self.shape_env, func)
1422
+ with torch._library.abstract_impl.set_ctx_getter(lambda: ctx), self:
1423
+ result = maybe_abstract_impl(*args, **kwargs)
1424
+ return result
1425
+
1426
+ # special handling for funcs registered through `register_op_impl`,
1427
+ # e.g., manipulating args on constructor calls to construct meta tensors
1428
+ # and then afterwards wrapping them to a FakeTensor
1429
+ for run_impl_check, op_impl in op_implementations_checks:
1430
+ if run_impl_check(func):
1431
+ op_impl_out = op_impl(self, func, *args, **kwargs)
1432
+ if op_impl_out != NotImplemented:
1433
+ return op_impl_out
1434
+
1435
+ def maybe_run_unsafe_fallback(error=None):
1436
+ # We infer the meta of custom ops that return None to just
1437
+ # return None. Custom ops are not allowed to mutate metadata
1438
+ # of their inputs, so this is safe.
1439
+ if can_generate_trivial_abstract_impl(func):
1440
+ return None
1441
+ # no meta kernel registered, fallback to kernel for the device
1442
+ if has_symbolic_sizes or not self.can_run_unsafe_fallback(func):
1443
+ raise UnsupportedOperatorException(func)
1444
+ if error is None:
1445
+ error = UnsupportedOperatorException(func)
1446
+ return run_fallback_kernel(self, func, flat_args, args_spec, error)
1447
+
1448
+ # Optimization: If there is no Meta kernel, it takes a surprisingly long
1449
+ # amount of time to catch the NotImplementedError, so we check it here.
1450
+ if not has_meta(func):
1451
+ return maybe_run_unsafe_fallback()
1452
+
1453
+ # run kernels registered to meta for func, which include
1454
+ # python meta registrations, prims, decomps, and c++ meta fns (structured kernels)
1455
+ # It's possible that the kernel will return NotImplementedError
1456
+ try:
1457
+ with in_kernel_invocation_manager(self):
1458
+ r = func(*args, **kwargs)
1459
+ except NotImplementedError as not_implemented_error:
1460
+ return maybe_run_unsafe_fallback(not_implemented_error)
1461
+
1462
+ return self.wrap_meta_outputs_with_default_device_logic(
1463
+ r, func, flat_args, device=kwargs.get("device")
1464
+ )
1465
+
1466
+ # WARNING: DO NOT add any additional namespaces/operators here if they refer to operators
1467
+ # outside of the pytorch/pytorch library! Any pre-existing things here
1468
+ # are either in the pytorch/pytorch library or have been grandfathered in.
1469
+ # The fallback does not always work and MAY CRASH and emit unreadable error messages
1470
+ # so it should not be allowed by default.
1471
+ _can_run_unsafe_fallback_allowed_namespaces = ordered_set(
1472
+ "debugprims",
1473
+ "prims",
1474
+ "aten",
1475
+ "xla",
1476
+ "vision",
1477
+ "torchtext",
1478
+ "torchaudio",
1479
+ "quantized",
1480
+ )
1481
+
1482
+ def can_run_unsafe_fallback(self, func: OpOverload):
1483
+ if not self.allow_fallback_kernels:
1484
+ return False
1485
+ # It's OK to try the fallback for built-in ops (e.g. aten, prims)
1486
+ # because we control and test these but the fallback leads to unexpected behavior
1487
+ # in user-defined custom ops
1488
+ return (
1489
+ func.namespace in self._can_run_unsafe_fallback_allowed_namespaces
1490
+ or func.name() == "fbgemm::gmm"
1491
+ )
1492
+
1493
+ # [subclass inputs]
1494
+ # Suppose we enable fake tensor mode. This means that fake tensor
1495
+ # mode will run first. But what if we do an operation that
1496
+ # involves a tensor subclass that will desugar into normal tensor
1497
+ # operations? Without returning NotImplemented, fake tensor mode will run first,
1498
+ # decide that a conversion was made (since there was a non fake
1499
+ # tensor argument), and report an error that converting non
1500
+ # fake tensor is not supported. What we actually wanted to happen
1501
+ # was to give the subclass a chance to figure out what it wants to do
1502
+ # before erroring out. Returning NotImplemented here allows this.
1503
+ def check_for_subclass(self, flat_args):
1504
+ def check(x):
1505
+ return (
1506
+ isinstance(x, torch.Tensor)
1507
+ and not isinstance(x, FakeTensor)
1508
+ and type(x) is not torch.Tensor
1509
+ and type(x) is not torch.nn.Parameter
1510
+ )
1511
+
1512
+ return [type(x) for x in flat_args if check(x)]
1513
+
1514
+ def validate_and_convert_non_fake_tensors(
1515
+ self, func, converter, flat_args, args_spec
1516
+ ):
1517
+ """
1518
+ Checks whether the tensors in the list are fake tensors.
1519
+ If not, try to convert them to fake tensors.
1520
+ Returns the validated flattened args and the list of fake tensors among them.
1521
+ """
1522
+ flat_arg_fake_tensors = []
1523
+
1524
+ def validate(x):
1525
+ if not isinstance(x, torch.Tensor):
1526
+ return x
1527
+
1528
+ nonlocal flat_arg_fake_tensors
1529
+ if not self.is_our_fake(x):
1530
+ if torch.Tag.inplace_view in func.tags:
1531
+ args, kwargs = pytree.tree_unflatten(flat_args, args_spec)
1532
+ raise Exception(
1533
+ f"Can't call metadata mutating ops on non-Fake Tensor inputs. Found in {render_call(func, args, kwargs)}"
1534
+ )
1535
+ if not self.allow_non_fake_inputs:
1536
+ if isinstance(x, FakeTensor) and x.fake_mode is not self:
1537
+ raise AssertionError("Mixing fake modes NYI")
1538
+ args, kwargs = pytree.tree_unflatten(flat_args, args_spec)
1539
+ raise Exception(
1540
+ f"Please convert all Tensors to FakeTensors first or instantiate FakeTensorMode "
1541
+ f"with 'allow_non_fake_inputs'. Found in {render_call(func, args, kwargs)}"
1542
+ )
1543
+
1544
+ x = converter(self, x)
1545
+
1546
+ flat_arg_fake_tensors.append(x)
1547
+ return x
1548
+
1549
+ validated_args = [validate(a) for a in flat_args]
1550
+ return validated_args, flat_arg_fake_tensors
1551
+
1552
+ def wrap_meta_outputs_with_default_device_logic(self, r, func, flat_args, device):
1553
+ converter = self.fake_tensor_converter
1554
+
1555
+ # Lazily initialized, in case there are no tensor returns
1556
+ common_device = None
1557
+ has_scalar_only_inputs = False
1558
+
1559
+ def wrap(e):
1560
+ nonlocal common_device
1561
+ nonlocal has_scalar_only_inputs
1562
+
1563
+ if isinstance(e, torch.Tensor) and common_device is None:
1564
+ (
1565
+ common_device,
1566
+ has_scalar_only_inputs,
1567
+ ) = FakeTensor._find_common_device(func, flat_args)
1568
+
1569
+ if self.is_our_fake(e):
1570
+ torch._check(
1571
+ e.device == common_device,
1572
+ lambda: f"FakeTensor is wrapped to wrong device, found {e.device}, expected {common_device}",
1573
+ )
1574
+
1575
+ if (
1576
+ isinstance(e, torch.Tensor)
1577
+ and not self.is_our_fake(e)
1578
+ and converter is not None
1579
+ ):
1580
+ if has_scalar_only_inputs:
1581
+ # Under FakeTensorMode, an op that accepts scalar-only inputs, such as aten.add/sub/mul/div,
1582
+ # returns a real scalar tensor on CPU. See TensorMeta() in _prims/__init__.py for details.
1583
+ # We thus directly convert real tensor to fake tensor.
1584
+ return converter(self, e)
1585
+ else:
1586
+ return converter.from_meta_and_device(
1587
+ self, e, device or common_device
1588
+ )
1589
+ else:
1590
+ return e
1591
+
1592
+ return tree_map(wrap, r)
1593
+
1594
+ _cpp_meta_supports_symint = ordered_set(
1595
+ aten.empty.memory_format,
1596
+ aten.empty_strided.default,
1597
+ aten.as_strided_scatter.default,
1598
+ aten.as_strided.default,
1599
+ aten.as_strided_.default,
1600
+ aten.zeros.default,
1601
+ aten.detach.default,
1602
+ aten.view_as_real.default,
1603
+ aten.view_as_complex.default,
1604
+ aten.set_.source_Storage_storage_offset,
1605
+ aten._sparse_coo_tensor_with_dims_and_tensors.default,
1606
+ )
1607
+
1608
+ def cpp_meta_supports_symint(self, func):
1609
+ if torch.Tag.view_copy in func.tags:
1610
+ return True
1611
+ return func in self._cpp_meta_supports_symint
1612
+
1613
+ lift_fns = ordered_set(aten.lift_fresh.default, aten.lift_fresh_copy.default)
1614
+
1615
+ def may_turn_const(self, t):
1616
+ return (
1617
+ t.numel() <= CONSTANT_NUMEL_LIMIT
1618
+ and not t.is_sparse
1619
+ and not self.is_our_fake(t)
1620
+ and not t.device.type == "meta"
1621
+ )
1622
+
1623
+ def invalidate_written_to_constants(
1624
+ self, func, flat_arg_fake_tensors, args, kwargs
1625
+ ):
1626
+ any_constant = any(e.constant is not None for e in flat_arg_fake_tensors)
1627
+ schema_info = get_schema_info(func)
1628
+ if any_constant and schema_info.is_mutable():
1629
+ _, new_kwargs = normalize_function(
1630
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
1631
+ )
1632
+ for k, v in new_kwargs.items():
1633
+ k = k if (k != "input" or schema_info.has_argument(k)) else "self"
1634
+ if (
1635
+ self.is_our_fake(v)
1636
+ and schema_info.is_mutable(k)
1637
+ and v.constant is not None
1638
+ ):
1639
+ self.fake_tensor_converter.invalidate_constant_aliases(v.constant)
1640
+
1641
+ def from_tensor(
1642
+ self,
1643
+ tensor,
1644
+ *,
1645
+ static_shapes=None,
1646
+ source: Optional[Source] = None,
1647
+ symbolic_context=None,
1648
+ # Setting this flag will force FakeTensorMode to return `None` if attempting to convert a tensor we have not
1649
+ # seen before.
1650
+ memoized_only=False,
1651
+ ):
1652
+ shape_env = self.shape_env
1653
+ if static_shapes is None:
1654
+ static_shapes = self.static_shapes
1655
+ if static_shapes:
1656
+ assert (
1657
+ symbolic_context is None
1658
+ ), "cannot set both static_shapes and symbolic_context"
1659
+ shape_env = None
1660
+ # see note [Tensor Fakification and Symbol Caching]
1661
+ if not symbolic_context and not source and not static_shapes:
1662
+ if tracing_context := torch._guards.TracingContext.try_get():
1663
+ if tensor in tracing_context.tensor_to_context:
1664
+ symbolic_context = tracing_context.tensor_to_context[tensor]
1665
+ source = symbolic_context.tensor_source
1666
+ return self.fake_tensor_converter(
1667
+ self,
1668
+ tensor,
1669
+ shape_env=shape_env,
1670
+ source=source,
1671
+ symbolic_context=symbolic_context,
1672
+ memoized_only=memoized_only,
1673
+ )
1674
+
1675
+
1676
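+ # Illustrative sketch (editor's addition, not part of the original source):
+ # from_tensor above is the main entry point for fakifying real tensors. A
+ # minimal setup with a shape environment (so sizes may become symbolic,
+ # depending on policy) might look like:
+ #
+ #     from torch.fx.experimental.symbolic_shapes import ShapeEnv
+ #     mode = FakeTensorMode(shape_env=ShapeEnv())
+ #     fake = mode.from_tensor(torch.randn(3, 5))
+ #     fake_static = mode.from_tensor(torch.randn(3, 5), static_shapes=True)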
+ # NB: returns fake tensors
1677
+ def run_fallback_kernel(
1678
+ fake_mode, func, flat_args, args_spec, orig_not_implemented_exception
1679
+ ):
1680
+ # these should all be supported, just to be safe
1681
+ # avoid fallback for operators which inplace modify metadata
1682
+ # because the input fake tensors would be unmodified
1683
+ if torch.Tag.inplace_view in func.tags:
1684
+ raise orig_not_implemented_exception
1685
+
1686
+ inp_impls = {}
1687
+
1688
+ # Don't use in_kernel_invocation_manager(fake_mode) as we want to do
1689
+ # REAL compute (not with meta device)
1690
+ with no_dispatch():
1691
+
1692
+ def to_real_tensor(e):
1693
+ if fake_mode.is_our_fake(e):
1694
+ out = torch.zeros_like(e, device=e.fake_device)
1695
+ if e.is_sparse:
1696
+ out._coalesced_(e.is_coalesced())
1697
+ inp_impls[id(out)] = e
1698
+ return out
1699
+ return e
1700
+
1701
+ flat_args = [to_real_tensor(a) for a in flat_args]
1702
+ args, kwargs = pytree.tree_unflatten(flat_args, args_spec)
1703
+
1704
+ r = func(*args, **kwargs)
1705
+
1706
+ tensor_impls = set()
1707
+ storages = set()
1708
+
1709
+ for e in flat_args:
1710
+ if isinstance(e, torch.Tensor):
1711
+ if not e.is_sparse:
1712
+ storages.add(e._typed_storage()._cdata)
1713
+
1714
+ # TODO: also check metadata change on inputs
1715
+ # proper aliasing/metadata relationship between outputs and inputs will
1716
+ # not be set up, because of conversion to device, unless we can reuse an
1717
+ # input impl
1718
+
1719
+ def map_out(e):
1720
+ if id(e) not in inp_impls and (
1721
+ isinstance(e, torch.Tensor)
1722
+ and not e.is_sparse
1723
+ and e._typed_storage()._cdata in storages
1724
+ ):
1725
+ raise orig_not_implemented_exception
1726
+
1727
+ if isinstance(e, torch.Tensor):
1728
+ if id(e) in inp_impls:
1729
+ return inp_impls[id(e)]
1730
+ else:
1731
+ return fake_mode.fake_tensor_converter(fake_mode, e)
1732
+ else:
1733
+ return e
1734
+
1735
+ return pytree.tree_map(map_out, r)
1736
+
1737
+
1738
+ def can_generate_trivial_abstract_impl(op: torch._ops.OpOverload) -> bool:
1739
+ assert isinstance(op, torch._ops.OpOverload)
1740
+ if torch._library.utils.is_builtin(op):
1741
+ # We control the built-ins. These may (in rare cases)
1742
+ # do input metadata mutation (which we have banned on custom ops)
1743
+ return False
1744
+ schema = op._schema
1745
+ # It's suspicious if the op is not mutable but returns nothing, so we return False out of an abundance of caution
1746
+ if not schema.is_mutable:
1747
+ return False
1748
+ if len(schema.returns) > 0:
1749
+ return False
1750
+ # If the op returns nothing, then it has a trivial abstract impl.
1751
+ return True
1752
+
1753
+
1754
+ # Only used to allow copying a module to fake tensors;
1755
+ # does not apply elsewhere
1756
+ class FakeCopyMode(TorchFunctionMode):
1757
+ def __init__(self, fake_mode):
1758
+ self.fake_mode = fake_mode
1759
+
1760
+ def __torch_function__(self, func, types, args=(), kwargs=None):
1761
+ kwargs = kwargs if kwargs else {}
1762
+
1763
+ # clone will get called in Parameter deepcopy
1764
+ if func == torch._C.TensorBase.clone:
1765
+ return func(
1766
+ self.fake_mode.from_tensor(args[0], static_shapes=True), **kwargs
1767
+ )
1768
+ elif func == torch.Tensor.__deepcopy__:
1769
+ assert len(args) == 2 and len(kwargs) == 0
1770
+ tensor, memo = args
1771
+
1772
+ if id(tensor) in memo:
1773
+ return memo[id(tensor)]
1774
+
1775
+ out = self.fake_mode.from_tensor(tensor, static_shapes=True)
1776
+ memo[id(tensor)] = out
1777
+ return out
1778
+ else:
1779
+ with torch._C.DisableTorchFunctionSubclass():
1780
+ return func(*args, **kwargs)
1781
+
1782
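+ # Illustrative sketch (editor's addition, not part of the original source):
+ # FakeCopyMode is intended for deep-copying a module onto fake tensors.
+ # Here `real_module` stands in for any nn.Module:
+ #
+ #     import copy
+ #     mode = FakeTensorMode()
+ #     with FakeCopyMode(mode):
+ #         fake_module = copy.deepcopy(real_module)  # params become FakeTensors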
+
1783
+ def _device_handler(args):
1784
+ # NB: Don't use is_our_fake, just serve the fake information
1785
+ # as is. Notice we don't use 'self'; we use args[0].fake_mode
1786
+ # because they may not be the same. It would also be possible
1787
+ # to return NotImplemented here, in which case the FakeTensor
1788
+ # handler on args[0] would handle it, but we're being nice and
1789
+ # short-circuiting quickly.
1790
+ assert len(args) == 1 and isinstance(args[0], FakeTensor)
1791
+ if args[0].fake_mode.in_kernel_invocation:
1792
+ return torch.device("meta")
1793
+ else:
1794
+ return args[0].fake_device
1795
+
1796
+
1797
+ _DISPATCH_META_HANDLERS = {
1798
+ torch.ops.prim.device.default: _device_handler,
1799
+ torch.ops.aten.size.default: lambda args: tuple(int(s) for s in args[0].size()),
1800
+ torch.ops.aten.stride.default: lambda args: tuple(int(s) for s in args[0].stride()),
1801
+ torch.ops.aten.storage_offset.default: lambda args: int(args[0].storage_offset()),
1802
+ }
1803
+
1804
+ _DISPATCH_HANDLE_DIRECTLY = ordered_set(
1805
+ torch.ops.aten.is_coalesced.default,
1806
+ torch.ops.aten.dense_dim.default,
1807
+ torch.ops.aten.sparse_dim.default,
1808
+ )
1809
+
1810
+ from torch._subclasses.fake_impls import ( # noqa: F401
1811
+ _device_not_kwarg_ops, # noqa: F401
1812
+ _is_tensor_constructor, # noqa: F401
1813
+ _like_tensor_constructors, # noqa: F401
1814
+ contains_tensor_types, # noqa: F401
1815
+ get_fast_op_impls,
1816
+ has_meta,
1817
+ op_implementations_checks,
1818
+ stride_incorrect_op,
1819
+ )
venv/lib/python3.10/site-packages/torch/_subclasses/fake_utils.py ADDED
@@ -0,0 +1,190 @@
1
+ # mypy: ignore-errors
2
+
3
+ import functools
4
+ import warnings
5
+ from typing import Callable, Union
6
+
7
+ import torch
8
+ import torch.utils._pytree as pytree
9
+ from torch._ops import OpOverload
10
+ from torch._subclasses.fake_tensor import (
11
+ FakeTensorMode,
12
+ tree_flatten_only,
13
+ UnsupportedFakeTensorException,
14
+ )
15
+ from torch.utils._python_dispatch import TorchDispatchMode
16
+
17
+
18
+ aten = torch._ops.ops.aten
19
+
20
+
21
+ def outputs_alias_inputs(outputs, inputs):
22
+ input_storages = {
23
+ inp._typed_storage()._cdata
24
+ for inp in tree_flatten_only(torch.Tensor, inputs)
25
+ if torch._C._has_storage(inp)
26
+ }
27
+ return any(
28
+ torch._C._has_storage(out) and out._typed_storage()._cdata in input_storages
29
+ for out in tree_flatten_only(torch.Tensor, outputs)
30
+ )
31
+
32
+
33
+ def outputs_are_inputs(outputs, inputs):
34
+ input_ids = {id(inp) for inp in tree_flatten_only(torch.Tensor, inputs)}
35
+ return any(id(out) in input_ids for out in tree_flatten_only(torch.Tensor, outputs))
36
+
37
+
38
+ def output_alias_each_other(outputs):
39
+ storages = set()
40
+ for out in tree_flatten_only(torch.Tensor, outputs):
41
+ if not torch._C._has_storage(out):
42
+ continue
43
+ stor = out._typed_storage()._cdata
44
+ if stor in storages:
45
+ return True
46
+ storages.add(stor)
47
+ return False
48
+
49
+
50
+ def is_sdpa_error(func, idx, e):
51
+ if (
52
+ (
53
+ func is aten._scaled_dot_product_flash_attention.default
54
+ or func is aten._flash_attention_forward.default
55
+ )
56
+ and idx in (6, 7)
57
+ and "Devices" in repr(e)
58
+ ):
59
+ return True
60
+ if (
61
+ (
62
+ func is aten._scaled_dot_product_efficient_attention.default
63
+ or func is aten._efficient_attention_forward.default
64
+ )
65
+ and idx in (2, 3)
66
+ and "Devices" in repr(e)
67
+ ):
68
+ return True
69
+ return False
70
+
71
+
72
+ class CrossRefFakeMode(TorchDispatchMode):
73
+ def __init__(
74
+ self,
75
+ ignore_op_fn: Union[Callable[[OpOverload], bool], None] = None,
76
+ *,
77
+ check_strides=True,
78
+ check_aliasing=True,
79
+ ):
80
+ self.ignore_op_fn = (
81
+ ignore_op_fn if ignore_op_fn is not None else lambda fn: False
82
+ )
83
+ self.check_strides = check_strides
84
+ self.check_aliasing = check_aliasing
85
+
86
+ def __torch_dispatch__(self, func, types, args=(), kwargs=None):
87
+ kwargs = kwargs or {}
88
+
89
+ fake_r = None
90
+
91
+ # empty_like excluded for now due to sparse complex
92
+ # aten._to_dense.default this one is getting called with csc
93
+ if (
94
+ func
95
+ not in (
96
+ aten.lift_fresh.default,
97
+ aten.lift_fresh_copy.default,
98
+ aten.set_.source_Storage_storage_offset,
99
+ )
100
+ and not self.ignore_op_fn(func)
101
+ and torch.Tag.dynamic_output_shape not in func.tags
102
+ and torch.Tag.inplace_view not in func.tags
103
+ and torch.Tag.data_dependent_output not in func.tags
104
+ ):
105
+ # Do not import symbolic_shapes at the top of the module as it imports sympy and that's slow
106
+ from torch.fx.experimental.symbolic_shapes import ShapeEnv
107
+
108
+ try:
109
+ # TODO: enable_python_dispatcher() here
110
+ with FakeTensorMode(shape_env=ShapeEnv()) as fake_mode:
111
+ fake_args, fake_kwargs = pytree.tree_map_only(
112
+ torch.Tensor,
113
+ functools.partial(fake_mode.from_tensor, static_shapes=True),
114
+ (args, kwargs),
115
+ )
116
+ with warnings.catch_warnings():
117
+ fake_r = func(*fake_args, **fake_kwargs)
118
+ except UnsupportedFakeTensorException:
119
+ pass
120
+
121
+ context = (
122
+ f"When comparing the output of {func} on FakeTensor and concrete Tensors, "
123
+ f"found"
124
+ )
125
+ r = func(*args, **kwargs)
126
+ if fake_r is not None:
127
+ r_flat = pytree.tree_leaves(r)
128
+ f_flat = pytree.tree_leaves(fake_r)
129
+ assert len(f_flat) == len(
130
+ r_flat
131
+ ), f"{context} mismatch in number of returns {len(f_flat)} != {len(r_flat)}"
132
+
133
+ if self.check_aliasing:
134
+ r_aliasing = outputs_alias_inputs(r, (args, kwargs))
135
+ f_aliasing = outputs_alias_inputs(fake_r, (fake_args, fake_kwargs))
136
+ assert (
137
+ r_aliasing == f_aliasing
138
+ ), f"{context} mismatch in outputs_alias_inputs check {f_aliasing} != {r_aliasing}"
139
+
140
+ r_identity_eq = outputs_are_inputs(r, (args, kwargs))
141
+ f_identity_eq = outputs_are_inputs(fake_r, (fake_args, fake_kwargs))
142
+ assert (
143
+ r_identity_eq == f_identity_eq
144
+ ), f"{context} mismatch in outputs_are_inputs check {f_identity_eq} != {r_identity_eq}"
145
+
146
+ r_output_alias_each_other = output_alias_each_other(r)
147
+ f_output_alias_each_other = output_alias_each_other(fake_r)
148
+ assert r_output_alias_each_other == f_output_alias_each_other, (
149
+ f"{context} mismatch in outputs_alias_each_other check "
150
+ f"{f_output_alias_each_other} != {r_output_alias_each_other}"
151
+ )
152
+
153
+ for idx, (r_out, fake_out) in enumerate(
154
+ zip(pytree.tree_leaves(r), pytree.tree_leaves(fake_r))
155
+ ):
156
+ r_is_ten = isinstance(r_out, torch.Tensor)
157
+ assert r_is_ten == isinstance(
158
+ fake_out, torch.Tensor
159
+ ), f"{context} mismatched number of tensor outputs"
160
+ if r_is_ten:
161
+ assert r_out.requires_grad == fake_out.requires_grad, (
162
+ f"{context} mismatched requires_grad-ness of outputs. "
163
+ f"This usually means that you have added autograd support "
164
+ f"for your operator at a dispatch key other than Autograd, "
165
+ f"which will lead to problems"
166
+ )
167
+ if torch._C._has_storage(r_out):
168
+ r_offset = r_out.storage_offset()
169
+ f_offset = fake_out.storage_offset()
170
+ assert (
171
+ r_offset == f_offset
172
+ ), f"{context} mismatched storage offset"
173
+
174
+ try:
175
+ torch._prims.utils.compare_tensor_meta(
176
+ r_out,
177
+ fake_out,
178
+ check_strides=self.check_strides,
179
+ allow_rhs_unbacked=True,
180
+ )
181
+ except Exception as e:
182
+ if is_sdpa_error(func, idx, e):
183
+ continue
184
+ error_message = (
185
+ f"{context} mismatched tensor metadata: {e}"
186
+ if len(r_flat) == 1
187
+ else f"{context} mismatched tensor metadata for output[{idx}]: {e}"
188
+ )
189
+ raise RuntimeError(error_message) from e
190
+ return r
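+ # Illustrative sketch (editor's addition, not part of the original source):
+ # CrossRefFakeMode is a debugging mode that re-runs each op under a fresh
+ # FakeTensorMode and asserts that the fake output's metadata matches the
+ # real output, e.g.:
+ #
+ #     with CrossRefFakeMode(check_strides=True, check_aliasing=True):
+ #         y = torch.mm(torch.randn(2, 3), torch.randn(3, 4))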
venv/lib/python3.10/site-packages/torch/_subclasses/functional_tensor.py ADDED
@@ -0,0 +1,653 @@
1
+ import contextlib
2
+ from abc import ABC, abstractmethod
3
+ from typing import Any, Callable, ContextManager, Dict, Optional, Tuple
4
+
5
+ import torch
6
+ import torch.utils._pytree as pytree
7
+ from torch._C import _functionalization_reapply_views_tls as _reapply_views
8
+ from torch._ops import _get_dispatch_mode_pre_dispatch
9
+ from torch.utils._python_dispatch import (
10
+ _detect_functional_mode,
11
+ _disable_infra_mode,
12
+ return_and_correct_aliasing,
13
+ TorchDispatchMode,
14
+ )
15
+
16
+ not_implemented_log = torch._logging.getArtifactLogger(__name__, "not_implemented")
17
+
18
+
19
+ class FunctionalTensor(torch.Tensor):
20
+ """
21
+ Functional tensors represent tensors that will remove mutations
22
+ from a program. If you perform a mutable operation on a functional tensor,
23
+ it will re-dispatch to the functional variant of that operation.
24
+
25
+ Historically, functionalization is implemented in C++ in the dispatcher.
26
+ This class is a lightweight python shim around the C++ functionalization logic.
27
+
28
+ FunctionalTensor is required to be used with a corresponding
29
+ FunctionalTensormode active, because it relies
30
+ on using the mode for dispatch (which can properly handle factory functions).
31
+ """
32
+
33
+ elem: torch.Tensor
34
+ # Indicates to our torch_dispatch dispatching infra that
35
+ # this is an "infra" mode with lower dispatching precedence.
36
+ _mode_key = torch._C._TorchDispatchModeKey.FUNCTIONAL
37
+
38
+ # Note: The reason we add these extra keys to our FunctionalTensor subclass
39
+ # is to mirror the behavior of C++ functionalization (we can choose to change this
40
+ # later, as long as it doesn't break anything).
41
+ # FunctionalTensorWrapper copies **all** dispatch keys from the inner tensor
42
+ # to the wrapper, excluding functorch and python dispatch keys.
43
+ # Here I'm trying to re-use the keyset the functorch wrapper subclasses copy,
44
+ # except that they don't include ZeroTensor so I'm manually adding it in.
45
+ _extra_dispatch_keys = torch._C._additional_keys_to_prop_for_wrapper_tensors.add(
46
+ torch._C.DispatchKey.ZeroTensor
47
+ )
48
+
49
+ # These are all aten ops that correspond to metadata queries.
50
+ # We want FunctionalTensor to be able to handle them directly.
51
+ metadata_fns = [
52
+ torch.ops.aten.is_contiguous.default, # type: ignore[has-type]
53
+ torch.ops.aten.is_contiguous.memory_format, # type: ignore[has-type]
54
+ torch.ops.aten.is_strides_like_format.default, # type: ignore[has-type]
55
+ torch.ops.aten.is_non_overlapping_and_dense.default, # type: ignore[has-type]
56
+ torch.ops.aten.size.default, # type: ignore[has-type]
57
+ torch.ops.aten.sym_size.default, # type: ignore[has-type]
58
+ torch.ops.aten.stride.default, # type: ignore[has-type]
59
+ torch.ops.aten.sym_stride.default, # type: ignore[has-type]
60
+ torch.ops.aten.storage_offset.default, # type: ignore[has-type]
61
+ torch.ops.aten.sym_storage_offset.default, # type: ignore[has-type]
62
+ torch.ops.aten.numel.default, # type: ignore[has-type]
63
+ torch.ops.aten.sym_numel.default, # type: ignore[has-type]
64
+ torch.ops.aten.dim.default, # type: ignore[has-type]
65
+ torch.ops.prim.device.default, # type: ignore[has-type]
66
+ ]
67
+
68
+ # These are ops that claim to be functional, but actually are maybe-mutating/maybe-aliasing
69
+ # TODO (tmanlaibaatar) make it a tag
70
+ maybe_aliasing_or_mutating_ops = [
71
+ torch.ops.aten.dropout.default, # type: ignore[has-type]
72
+ torch.ops.aten.batch_norm.default, # type: ignore[has-type]
73
+ torch.ops.aten.native_batch_norm.default, # type: ignore[has-type]
74
+ torch.ops.aten._batch_norm_impl_index.default, # type: ignore[has-type]
75
+ torch.ops.aten.cudnn_batch_norm.default, # type: ignore[has-type]
76
+ torch.ops.aten.miopen_batch_norm.default, # type: ignore[has-type]
77
+ ]
78
+
79
+ def __new__(cls, elem):
80
+ assert torch._is_functional_tensor(elem)
81
+
82
+ # In general, we'd like our functional tensor subclass to only be in charge of functionalization,
83
+ # and defer to the inner subclass for all other functionality.
84
+ # Example: If our inner tensor is a ZeroTensor, we would want to defer running the ZeroTensor fallback
85
+ # until after we redispatch to our inner ZeroTensor.
86
+ # However, there are a few keys that we need to mirror between the inner and outer tensors.
87
+ # Conjugate
88
+ # Negative
89
+ # Why? These keys are used to test metadata queries, like `.is_conj()` and `.is_neg()`.
90
+ # We **need** calls to is_conj() to return the same thing on the outer and inner tensors,
91
+ # Because user code / framework code that branches like so needs to do the same thing
92
+ # when it sees the outer FunctionalTensor:
93
+ # if (x.is_conj()) {
94
+ # return at::view_as_real(x.resolve_conj());
95
+ # } else {
96
+ # return at::view_as_real(x);
97
+ # }
98
+ extra_dispatch_keys = (
99
+ FunctionalTensor._extra_dispatch_keys & torch._C._dispatch_keys(elem)
100
+ )
101
+
102
+ out = torch.Tensor._make_wrapper_subclass( # type: ignore[arg-type, attr-defined]
103
+ # TODO: right now, _make_wrapper_subclass's dynamic shape interaction is not great.
104
+ # Calling the overload that has kwargs causes us to go down the first overload path,
105
+ # which will **always** specialize sizes.
106
+ # We should probably eventually fix this so that the first overload can just handle dynamic shapes.
107
+ cls,
108
+ elem.shape, # sizes
109
+ elem.stride(), # strides
110
+ elem.storage_offset(), # storage_offset
111
+ None, # memory_format
112
+ elem.dtype, # dtype
113
+ elem.layout, # layout
114
+ elem.device, # device
115
+ False, # pin_memory
116
+ elem.requires_grad, # requires_grad
117
+ "sizes", # dispatch_sizes_strides_policy
118
+ False, # dispatch_device
119
+ False, # dispatch_layout
120
+ extra_dispatch_keys, # _extra_dispatch_keys
121
+ )
122
+ out.elem = elem
123
+ return out
124
+
125
+ def __torch_dispatch__(self, func, types, args=(), kwargs=None):
126
+ unrecognized_types = [
127
+ t
128
+ for t in types
129
+ if t not in [torch.Tensor, torch._subclasses.FakeTensor, FunctionalTensor]
130
+ ]
131
+ if unrecognized_types:
132
+ not_implemented_log.debug(
133
+ "FunctionalTensor unrecognized subclass(es): %s", unrecognized_types
134
+ )
135
+ return NotImplemented
136
+
137
+ if kwargs is None:
138
+ kwargs = {}
139
+
140
+ # FunctionalTensor needs to plumb all metadata requests to the inner tensor.
141
+ # In theory we don't have to do this - but if we want to service metadata requests here,
142
+ # we need to carefully make sure all metadata is accurate (including metadata mutations)
143
+ if func in FunctionalTensor.metadata_fns:
144
+ # All metadata accesses should be plumbed to the inner tensor, that way we don't have to worry
145
+ # about the problem of keeping metadata in sync between the wrapper and inner tensor.
146
+ # This also alleviates us from having to manually handle metadata mutations on the wrapper.
147
+ assert len(kwargs) == 0
148
+ if func in [
149
+ torch.ops.aten.is_strides_like_format.default,
150
+ torch.ops.aten.is_contiguous.memory_format,
151
+ ]:
152
+ assert len(args) == 2 and isinstance(args[0], FunctionalTensor)
153
+ return func(args[0].elem, args[1])
154
+ assert len(args) == 1 and isinstance(args[0], FunctionalTensor)
155
+
156
+ return func(args[0].elem)
157
+ # Originally I tried to implement my subclass without giving it a torch_dispatch, but I gave up:
158
+ # - _make_wrapper_subclass requires a __torch_dispatch__
159
+ # - If we want to use _make_subclass(), we have a problem: the subclass will share a TensorImpl with the inner tensor,
160
+ # which is of type FunctionalTensorWrapper! We explicitly do not want our wrapper to be a FunctionalTensorWrapper.
161
+ # - If we use the default tensor.__new__(), we have another problem: it returns inner_tensor.alias(),
162
+ # which causes every subclass created above autograd to have autograd view metadata
163
+ # (in addition to also being a FunctionalTensorWrapper).
164
+ raise RuntimeError(
165
+ "Attempting to use FunctionalTensor on its own. Instead, please use it with a corresponding FunctionalTensorMode()"
166
+ )
167
+
168
+ def __repr__(self):
169
+ return f"FunctionalTensor({repr(self.elem)})"
170
+
171
+ @staticmethod
172
+ def to_functional(x):
173
+ # We will do the wrapping for the user.
174
+ assert not torch._is_functional_tensor(x)
175
+ # The only autograd metadata we care about on the FunctionalTensor is:
176
+ # - requires_grad (so autograd runs)
177
+ # - is_leaf (so that mutations on graph inputs that are not leaves are allowed by the autograd engine)
178
+ # this is handled by FunctionalTensor.to_functional
179
+ x_functional = torch._to_functional_tensor(x)
180
+ # Technically the FunctionalTensormode here is unnecessary,
181
+ # but it avoids spurious NotImplemented logs during `ProxyTorchDispatchMode` tracing.
182
+ # _mirror_autograd_meta_to queries tensor sizes,
183
+ # and otherwise the sym_size() call will go to the proxy mode before hitting
184
+ # FunctionalTensor.__torch_dispatch__
185
+
186
+ functional_mode = _detect_functional_mode()
187
+ assert functional_mode is not None
188
+
189
+ with functional_mode:
190
+ torch._mirror_autograd_meta_to(x, x_functional) # type: ignore[attr-defined]
191
+ out = FunctionalTensor(x_functional)
192
+ torch._mirror_autograd_meta_to(x_functional, out) # type: ignore[attr-defined]
193
+ return out
194
+
195
+ def from_functional(self):
196
+ torch._sync(self)
197
+ return torch._from_functional_tensor(self.elem)
198
+
199
+ def replace_(self, output) -> None:
200
+ torch._functionalize_replace(self.elem, output)
201
+
202
+ def commit_update(self) -> None:
203
+ torch._functionalize_commit_update(self.elem)
204
+
205
+ def sync(self) -> None:
206
+ torch._functionalize_sync(self.elem)
207
+
208
+ def mark_mutation_hidden_from_autograd(self) -> None:
209
+ torch._functionalize_mark_mutation_hidden_from_autograd(self.elem)
210
+
211
+ def tolist(self) -> Any:
212
+ if self.elem.dim() == 0:
213
+ return self.elem.item()
214
+ elif self.elem.dim() == 1:
215
+ return [elem.item() for elem in self.elem]
216
+ else:
217
+ return [elem.tolist() for elem in self.elem]
218
+
219
+
220
+ class FunctionalTensorMode(TorchDispatchMode):
221
+ def __init__(self, pre_dispatch=False, export=False, _allow_token_discovery=False):
222
+ self.export = export
223
+ self.is_on_stack = False
224
+ self.enter_stack = []
225
+ # Indicates to our torch_dispatch dispatching infra that
226
+ # this is an "infra" mode with lower dispatching precedence.
227
+ self._mode_key = torch._C._TorchDispatchModeKey.FUNCTIONAL
228
+ self.pre_dispatch = pre_dispatch
229
+ # This will be turned off later for pre-dispatch functionalization
230
+ self._dispatch_key = torch._C.DispatchKey.PreDispatch if pre_dispatch else None # type: ignore[attr-defined]
231
+ # Map of effect type (ex. _EffectType.ORDERED) to a token. The tokens help keep
232
+ # track of the ordering between side effectful operations.
233
+ self._tokens: Dict[Any, torch.Tensor] = {}
234
+
235
+ # Functionalization runs twice in AOTAutograd, once in
236
+ # `run_functionalized_fw_and_collect_metadata` to collect metadata to
237
+ # see which tensors need to be functionalized and discover how many
238
+ # tokens we need, and another time in `make_fx` which does the actual
239
+ # tracing to replace ops with their functional variants and handling
240
+ # side-effectful ops. In the second stage there should be no token
241
+ # discovery. This flag distinguishes between the two stages.
242
+ self._allow_token_discovery = _allow_token_discovery
243
+
244
+ # No-op if FunctionalTensorMode is already in use
245
+ def __enter__(self):
246
+ def _get_prev_mode():
247
+ if self._dispatch_key == torch._C.DispatchKey.PreDispatch:
248
+ return _get_dispatch_mode_pre_dispatch(
249
+ torch._C._TorchDispatchModeKey.FUNCTIONAL
250
+ )
251
+ return torch._C._get_dispatch_mode(
252
+ torch._C._TorchDispatchModeKey.FUNCTIONAL
253
+ )
254
+
255
+ if _get_prev_mode() is None:
256
+ self.enter_stack.append(True)
257
+ return super().__enter__()
258
+ else:
259
+ self.enter_stack.append(False)
260
+ return self
261
+
262
+ def __exit__(self, a, b, c):
263
+ is_on_stack = self.enter_stack.pop()
264
+ if is_on_stack:
265
+ super().__exit__(a, b, c)
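+ # Illustrative nesting behavior implied by __enter__/__exit__ above (sketch):
+ #   with FunctionalTensorMode():       # pushed onto the dispatch-mode stack
+ #       with FunctionalTensorMode():   # no-op: a functional mode is already active
+ #           ...                        # only the outer mode is popped on exit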
266
+
267
+ def __torch_dispatch__(self, func, types, args=(), kwargs=None):
268
+ if kwargs is None:
269
+ kwargs = {}
270
+
271
+ unrecognized_types = [
272
+ t
273
+ for t in types
274
+ if not issubclass(t, torch._subclasses.FakeTensor)
275
+ and t not in [torch.Tensor, FunctionalTensor]
276
+ ]
277
+ if unrecognized_types:
278
+ not_implemented_log.debug(
279
+ "FunctionalTensor unrecognized subclass(es): %s", unrecognized_types
280
+ )
281
+ return NotImplemented
282
+
283
+ def _can_decompose(func):
284
+ # See https://github.com/pytorch/pytorch/pull/115258#issuecomment-1900755832
285
+ # We never decompose dropout in export
286
+ if self.export and func == torch.ops.aten.dropout.default:
287
+ return False
288
+ # TODO (tmanlaibaatar)
289
+ # Eventually, we don't want to decompose any aten op at all
290
+ # but there is a safety and coverage gap that we need to close
291
+ # before that.
292
+ #
293
+ # (1) the "safety" is what we are risking with this PR
294
+ # (we are blindly taking every op that advertises as
295
+ # functional and sending it to the functional fallback.
296
+ # We risk silent correctness issues if we have an op that lies about its schema,
297
+ # that we didn't manually hardcode above). Therefore we always decompose them
298
+ # (2) the "not every composite inplace op has a functional variant" is a coverage gap,
299
+ # but not really a safety risk, since we'll loudly error when we try to generate
300
+ # functionalization kernels for these new (composite) inplace/view ops. But until we
301
+ # pin down that gap more concretely, we still decompose them
302
+ if self._dispatch_key is not None:
303
+ # it is unsafe to not decompose ops that claim to be functional but actually aren't
304
+ if func in FunctionalTensor.maybe_aliasing_or_mutating_ops:
305
+ return True
306
+ # only decompose view or inplace mutating ops
307
+ alias_info = len(
308
+ [i for i in func._schema.arguments if i.alias_info is not None]
309
+ )
310
+ return alias_info != 0 or func._schema.is_mutable
311
+ return True
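+ # Rough consequence of the policy above (sketch): with pre_dispatch set, a purely
+ # functional op such as aten.add.Tensor is not decomposed here, while a mutating or
+ # aliasing op such as aten.add_.Tensor (mutable schema) is allowed to fall through
+ # to func.decompose(); without a pre-dispatch key, every op is allowed to decompose.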
312
+
313
+ if (
314
+ func not in FunctionalTensor.metadata_fns
315
+ and _can_decompose(func)
316
+ # Not all funcs from __torch_dispatch__ are actual dispatcher ops,
317
+ # e.g. prim.device
318
+ and torch._C._dispatch_has_kernel(func.name())
319
+ ):
320
+ with self:
321
+ r = func.decompose(*args, **kwargs)
322
+ if r is not NotImplemented:
323
+ return r
324
+
325
+ def assert_is_functional(x):
326
+ assert torch._is_functional_tensor(x)
327
+
328
+ def wrap(x):
329
+ # Only wrap our outputs in subclasses if the inner functionalization call
330
+ # also wrapped outputs into FunctionalTensorWrappers.
331
+ # When can this happen? e.g. `torch.div(2, 2)`
332
+ assert not isinstance(x, FunctionalTensor)
333
+ if isinstance(x, torch.Tensor) and torch._is_functional_tensor(x):
334
+ return FunctionalTensor(x)
335
+ return x
336
+
337
+ def unwrap(x):
338
+ return x.elem
339
+
340
+ from torch._higher_order_ops.auto_functionalize import (
341
+ can_auto_functionalize,
342
+ do_auto_functionalize,
343
+ )
344
+
345
+ if can_auto_functionalize(
346
+ func
347
+ ) and not torch._C._dispatch_has_kernel_for_dispatch_key(
348
+ func.name(), torch._C.DispatchKey.Functionalize
349
+ ):
350
+ if self.pre_dispatch:
351
+ raise NotImplementedError(
352
+ "Auto functionalization is not supported on pre-dispatch tracing"
353
+ )
354
+ return do_auto_functionalize(func, args, kwargs)
355
+
356
+ from torch._higher_order_ops.effects import handle_effects, has_effects
357
+
358
+ if has_effects(func, args, kwargs):
359
+ assert not torch._C._dispatch_has_kernel_for_dispatch_key(
360
+ func.name(), torch._C.DispatchKey.Functionalize
361
+ )
362
+ return handle_effects(
363
+ self._allow_token_discovery, self._tokens, func, args, kwargs
364
+ )
365
+
366
+ args_unwrapped, kwargs_unwrapped = pytree.tree_map_only(
367
+ FunctionalTensor, unwrap, (args, kwargs)
368
+ )
369
+
370
+ # Expectation: functionalization should not **already** be enabled above our mode.
371
+ # Why would that be bad? when we return a FunctionalTensor here, we don't want functionalization
372
+ # to run above this mode and further wrap that output in **another** C++ FunctionalTensorWrapper.
373
+ is_included = torch._C._dispatch_tls_is_dispatch_key_included(
374
+ torch._C.DispatchKey.Functionalize
375
+ )
376
+ is_excluded = torch._C._dispatch_tls_is_dispatch_key_excluded(
377
+ torch._C.DispatchKey.Functionalize
378
+ )
379
+ assert is_excluded or not is_included
380
+ include_to_set = (
381
+ torch._C._dispatch_tls_local_include_set()
382
+ | torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize)
383
+ )
384
+ exclude_to_set = (
385
+ torch._C._dispatch_tls_local_exclude_set().remove(
386
+ torch._C.DispatchKey.Functionalize
387
+ )
388
+ - FunctionalTensor._extra_dispatch_keys
389
+ )
390
+
391
+ # All we want to do here is re-use the existing C++ functionalization logic.
392
+ # This requires swizzling our TLS dispatch keys so that the Functionalize key is active.
393
+ with torch._C._ForceDispatchKeyGuard(include_to_set, exclude_to_set):
394
+ try:
395
+ # By default for python functionalization (for AOTAutograd), we reapply views.
396
+ old_apply_views = torch._functionalize_enable_reapply_views(True) # type: ignore[attr-defined]
397
+
398
+ # Sometimes these metadata functions cannot be dispatched directly to the Functionalize key,
399
+ # because their args are not always functional tensors (the reason is unclear).
400
+ if func in FunctionalTensor.metadata_fns:
401
+ outs_unwrapped = func(*args_unwrapped, **kwargs_unwrapped)
402
+ outs_wrapped = pytree.tree_map_only(
403
+ torch.Tensor, wrap, outs_unwrapped
404
+ )
405
+ else:
406
+ # When we dispatch to the C++ functionalization kernel, we might need to jump back to the
407
+ # PreDispatch mode stack afterwards, to handle any other PreDispatch modes underneath
408
+ # FunctionalTensorMode. If we call func() directly, we would need to exclude PreDispatch
409
+ # from the TLS in order to avoid infinite looping, but this would prevent us from coming
410
+ # back to PreDispatch later
411
+ outs_unwrapped = func._op_dk(
412
+ torch._C.DispatchKey.Functionalize,
413
+ *args_unwrapped,
414
+ **kwargs_unwrapped,
415
+ )
416
+ # We don't allow any mutation on result of dropout
417
+ if self.export and func == torch.ops.aten.dropout.default:
418
+ torch._freeze_functional_tensor(outs_unwrapped) # type: ignore[attr-defined]
419
+ outs_wrapped = pytree.tree_map_only(
420
+ torch.Tensor, wrap, outs_unwrapped
421
+ )
422
+ finally:
423
+ torch._disable_functionalization()
424
+ torch._functionalize_enable_reapply_views(old_apply_views) # type: ignore[attr-defined]
425
+
426
+ is_included = torch._C._dispatch_tls_is_dispatch_key_included(
427
+ torch._C.DispatchKey.Functionalize
428
+ )
429
+ is_excluded = torch._C._dispatch_tls_is_dispatch_key_excluded(
430
+ torch._C.DispatchKey.Functionalize
431
+ )
432
+ assert is_excluded or not is_included
433
+
434
+ if (
435
+ # If no outputs are our functional subclass, then don't try to fix up aliasing
436
+ not any(
437
+ isinstance(x, FunctionalTensor)
438
+ for x in pytree.tree_leaves(outs_wrapped)
439
+ )
440
+ # Since lift_fresh lifts its argument into a functional tensor, we can skip the
441
+ # aliasing correction step. Otherwise, we would be setting the storage of a
442
+ # lifted tensor to that of an unlifted tensor.
443
+ # Ref: https://github.com/pytorch/pytorch/issues/111506
444
+ or func == torch.ops.aten.lift_fresh.default
445
+ ):
446
+ return outs_wrapped
447
+ # Wrapper tensor subclasses do not have correct aliasing info! Use this util to manually correct the output aliasing.
448
+ # inplace ops like `aten.add_()` are expected to return inputs **directly**, instead of creating fresh tensor objects.
449
+ # Use this util to figure out the right thing to return.
450
+ # If none of our inputs were wrapped, then we have no FunctionalTensor outputs that we need to fix up storages for.
451
+ return return_and_correct_aliasing(func, args, kwargs, outs_wrapped)
452
+
453
+
454
+ @contextlib.contextmanager
455
+ def disable_functional_mode():
456
+ return _disable_infra_mode(torch._C._TorchDispatchModeKey.FUNCTIONAL)
457
+
458
+
459
+ # This is similar to torch.func.functionalize, but:
460
+ # - It uses FunctionalTensorMode, and FunctionalTensor (a python subclass).
461
+ # One important advantage to using this mode is that it will let us
462
+ # run functionalization underneath __torch_dispatch__,
463
+ # which we need in AOTAutograd.
464
+ # - Doing so means that it does not automatically compose with other
465
+ # functorch transforms, since these transforms always run above __torch_dispatch__.
466
+ # That's why this util lives here, and not in functorch.
467
+ def dispatch_functionalize(func, mode: FunctionalTensorMode = FunctionalTensorMode()):
468
+ # TODO: pull these from aot autograd
469
+ def to_fun(t):
470
+ if isinstance(t, torch.Tensor):
471
+ return FunctionalTensor.to_functional(t)
472
+ return t
473
+
474
+ def from_fun(t):
475
+ if not isinstance(t, FunctionalTensor):
476
+ # quick sanity assert
477
+ if isinstance(t, torch.Tensor):
478
+ assert not torch._is_functional_tensor(t)
479
+ return t
480
+ torch._sync(t)
481
+ return torch._from_functional_tensor(t.elem)
482
+
483
+ def inner(*args, **kwargs):
484
+ disable_above = torch._C._ExcludeDispatchKeyGuard(
485
+ torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize)
486
+ )
487
+ with disable_above, mode:
488
+ func_args = pytree.tree_map_only(torch.Tensor, to_fun, args)
489
+ func_kwargs = pytree.tree_map_only(torch.Tensor, to_fun, kwargs)
490
+ func_outputs = func(*func_args, **func_kwargs)
491
+ outputs = pytree.tree_map_only(FunctionalTensor, from_fun, func_outputs)
492
+
493
+ return outputs
494
+
495
+ return inner
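+ # Hypothetical usage sketch (the function `f` below is illustrative):
+ #   def f(x):
+ #       x.add_(1)          # in-place op
+ #       return x * 2
+ #   g = dispatch_functionalize(f)
+ #   out = g(torch.zeros(3))   # f runs under FunctionalTensorMode; outputs are
+ #                             # unwrapped back to plain tensors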
496
+
497
+
498
+ class BaseFunctionalizeAPI(ABC):
499
+ @abstractmethod
500
+ def wrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]:
501
+ pass
502
+
503
+ @abstractmethod
504
+ def unwrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]:
505
+ pass
506
+
507
+ @abstractmethod
508
+ def functionalize(self, inner_f: Callable) -> Callable:
509
+ pass
510
+
511
+ @abstractmethod
512
+ def redispatch_to_next(self) -> ContextManager:
513
+ pass
514
+
515
+ @abstractmethod
516
+ def replace(self, input_tensor, output_tensor) -> None:
517
+ pass
518
+
519
+ @abstractmethod
520
+ def commit_update(self, tensor) -> None:
521
+ pass
522
+
523
+ @abstractmethod
524
+ def sync(self, tensor) -> None:
525
+ pass
526
+
527
+ @abstractmethod
528
+ def mark_mutation_hidden_from_autograd(self, tensor) -> None:
529
+ pass
530
+
531
+
532
+ class PythonFunctionalizeAPI(BaseFunctionalizeAPI):
533
+ def __init__(
534
+ self, mode: Optional[FunctionalTensorMode] = None, pre_dispatch: bool = False
535
+ ) -> None:
536
+ super().__init__()
537
+ self.mode = mode if mode else FunctionalTensorMode()
538
+ self.pre_dispatch = pre_dispatch
539
+
540
+ def wrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]:
541
+ with self.mode:
542
+ return torch.utils._pytree.tree_map_only(
543
+ torch.Tensor, FunctionalTensor.to_functional, args
544
+ )
545
+
546
+ def unwrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]:
547
+ return torch.utils._pytree.tree_map_only(
548
+ FunctionalTensor, FunctionalTensor.from_functional, args
549
+ )
550
+
551
+ def functionalize(self, inner_f: Callable) -> Callable:
552
+ return dispatch_functionalize(inner_f, self.mode)
553
+
554
+ def redispatch_to_next(self) -> ContextManager:
555
+ # [NOTE] We don't do anything here because at the time
556
+ # we exercise this path, we would have already popped the
557
+ # FunctionalTensorMode from mode stack. Since FunctionalTensorMode
558
+ # is now stateful, it is better to explicitly pass in correct mode
559
+ # directly instead of globally setting it.
560
+ return contextlib.nullcontext()
561
+
562
+ def replace(self, input_tensor, output_tensor) -> None:
563
+ assert isinstance(input_tensor, FunctionalTensor)
564
+ assert not isinstance(output_tensor, FunctionalTensor)
565
+ input_tensor.replace_(output_tensor)
566
+
567
+ def commit_update(self, tensor) -> None:
568
+ assert isinstance(tensor, FunctionalTensor)
569
+ tensor.commit_update()
570
+
571
+ def sync(self, tensor) -> None:
572
+ assert isinstance(tensor, FunctionalTensor)
573
+ tensor.sync()
574
+
575
+ def mark_mutation_hidden_from_autograd(self, tensor) -> None:
576
+ assert isinstance(tensor, FunctionalTensor)
577
+ tensor.mark_mutation_hidden_from_autograd()
578
+
579
+
580
+ class CppFunctionalizeAPI(BaseFunctionalizeAPI):
581
+ def wrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]:
582
+ from torch._functorch.eager_transforms import _wrap_all_tensors_to_functional
583
+
584
+ return _wrap_all_tensors_to_functional(args, level=0)
585
+
586
+ def unwrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]:
587
+ from torch._functorch.eager_transforms import (
588
+ _unwrap_all_tensors_from_functional,
589
+ )
590
+
591
+ return _unwrap_all_tensors_from_functional(args, reapply_views=_reapply_views())
592
+
593
+ def functionalize(self, inner_f: Callable) -> Callable:
594
+ return torch.func.functionalize(inner_f)
595
+
596
+ def redispatch_to_next(self) -> ContextManager:
597
+ return torch._C._ExcludeDispatchKeyGuard(
598
+ torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize)
599
+ )
600
+
601
+ def replace(self, input_tensor, output_tensor) -> None:
602
+ torch._functionalize_replace(input_tensor, output_tensor)
603
+
604
+ def commit_update(self, tensor) -> None:
605
+ torch._functionalize_commit_update(tensor)
606
+
607
+ def sync(self, tensor) -> None:
608
+ torch._functionalize_sync(tensor)
609
+
610
+ def mark_mutation_hidden_from_autograd(self, tensor) -> None:
611
+ torch._functionalize_mark_mutation_hidden_from_autograd(tensor)
612
+
613
+
614
+ class FunctorchFunctionalizeAPI(BaseFunctionalizeAPI):
615
+ def __init__(self, interpreter):
616
+ self.interpreter = interpreter
617
+
618
+ def wrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]:
619
+ from torch._functorch.eager_transforms import _wrap_all_tensors_to_functional
620
+
621
+ return _wrap_all_tensors_to_functional(args, level=self.interpreter.level())
622
+
623
+ def unwrap_tensors(self, args: Tuple[Any]) -> Tuple[Any]:
624
+ from torch._functorch.eager_transforms import (
625
+ _unwrap_all_tensors_from_functional,
626
+ )
627
+
628
+ return _unwrap_all_tensors_from_functional(
629
+ args, reapply_views=self.interpreter.functionalize_add_back_views()
630
+ )
631
+
632
+ def functionalize(self, inner_f: Callable) -> Callable:
633
+ return torch.func.functionalize(
634
+ inner_f,
635
+ remove="mutations_and_views"
636
+ if self.interpreter.functionalize_add_back_views()
637
+ else "mutations",
638
+ )
639
+
640
+ def redispatch_to_next(self) -> ContextManager:
641
+ return self.interpreter.lower()
642
+
643
+ def replace(self, input_tensor, output_tensor) -> None:
644
+ torch._functionalize_replace(input_tensor, output_tensor)
645
+
646
+ def commit_update(self, tensor) -> None:
647
+ torch._functionalize_commit_update(tensor)
648
+
649
+ def sync(self, tensor) -> None:
650
+ torch._functionalize_sync(tensor)
651
+
652
+ def mark_mutation_hidden_from_autograd(self, tensor) -> None:
653
+ torch._functionalize_mark_mutation_hidden_from_autograd(tensor)
venv/lib/python3.10/site-packages/torch/_subclasses/meta_utils.py ADDED
@@ -0,0 +1,987 @@
1
+ import contextlib
2
+ import warnings
3
+ import weakref
4
+ from typing import ContextManager, Dict, List, Optional, Tuple, TYPE_CHECKING
5
+
6
+ import torch
7
+ from torch._C._functorch import (
8
+ _add_batch_dim,
9
+ _unwrap_functional_tensor,
10
+ _wrap_functional_tensor,
11
+ current_level,
12
+ get_unwrapped,
13
+ is_batchedtensor,
14
+ is_functorch_wrapped_tensor,
15
+ is_gradtrackingtensor,
16
+ maybe_get_bdim,
17
+ maybe_get_level,
18
+ peek_interpreter_stack,
19
+ TransformType,
20
+ )
21
+ from torch._guards import Source
22
+
23
+ from torch.multiprocessing.reductions import StorageWeakRef
24
+ from torch.utils._python_dispatch import (
25
+ is_traceable_wrapper_subclass,
26
+ transform_subclass,
27
+ )
28
+ from torch.utils.weak import WeakIdRef
29
+
30
+ if TYPE_CHECKING:
31
+ # Import the following modules during type checking to enable code intelligence features.
32
+ # Do not import unconditionally, as they import sympy and importing sympy is very slow
33
+ from torch.fx.experimental.symbolic_shapes import SymbolicContext
34
+
35
+ DimList = List
36
+
37
+
38
+ def safe_is_leaf(t):
39
+ try:
40
+ return t.is_leaf
41
+ except RuntimeError:
42
+ # inference mode can trigger this
43
+ return False
44
+
45
+
46
+ def safe_grad(t):
47
+ with warnings.catch_warnings():
48
+ warnings.filterwarnings("ignore", "The .grad attribute of a Tensor")
49
+ return t.grad
50
+
51
+
52
+ def assert_eq(a, b):
53
+ assert a == b, f"{a} != {b}"
54
+
55
+
56
+ def assert_metadata_eq(assert_eq, m1, m2, *, skip_symbolic=False):
57
+ def go(m1, m2):
58
+ assert_eq(m1.dtype, m2.dtype)
59
+ if not skip_symbolic:
60
+ assert_eq(m1.shape, m2.shape)
61
+ assert_eq(m1.requires_grad, m2.requires_grad)
62
+ assert_eq(m1.is_leaf, m2.is_leaf)
63
+ assert_eq(m1.grad_fn is None, m2.grad_fn is None)
64
+ assert_eq(m1.is_sparse, m2.is_sparse)
65
+ assert_eq(m1.is_inference(), m2.is_inference())
66
+ assert_eq(m1.is_conj(), m2.is_conj())
67
+ assert_eq(m1.is_neg(), m2.is_neg())
68
+ assert_eq(safe_grad(m1) is not None, safe_grad(m2) is not None)
69
+ if safe_grad(m1) is not None:
70
+ go(safe_grad(m1), safe_grad(m2))
71
+ if m1.is_sparse:
72
+ assert_eq(m1.dense_dim(), m2.dense_dim())
73
+ assert_eq(m1.sparse_dim(), m2.sparse_dim())
74
+ assert_eq(m1.is_coalesced(), m2.is_coalesced())
75
+ else:
76
+ if not skip_symbolic:
77
+ assert_eq(m1.stride(), m2.stride())
78
+ assert_eq(m1.storage_offset(), m2.storage_offset())
79
+ assert_eq(m1._is_view(), m2._is_view())
80
+ if m1._is_view():
81
+ go(m1._base, m2._base)
82
+ # TODO: test if is resizable (no direct query for this atm)
83
+ # TODO: audit AutogradMeta to see if it matches
84
+ # TODO: test forward AD
85
+
86
+ return go(m1, m2)
87
+
88
+
89
+ def is_sparse_coo(t):
90
+ return isinstance(t, torch.Tensor) and t.layout is torch.sparse_coo
91
+
92
+
93
+ def is_sparse_compressed(t):
94
+ return isinstance(t, torch.Tensor) and t.layout in {
95
+ torch.sparse_csr,
96
+ torch.sparse_csc,
97
+ torch.sparse_bsr,
98
+ torch.sparse_bsc,
99
+ }
100
+
101
+
102
+ def is_sparse_any(t):
103
+ return is_sparse_coo(t) or is_sparse_compressed(t)
104
+
105
+
106
+ # This is a class for converting multiple tensors into meta tensors which
107
+ # share the same view/storage structure. The operation model is you allocate
108
+ # one of these, and then call it repeatedly on all the tensors you want to
109
+ # convert. It's important to use the same object for tensors you want to
110
+ # share storage because this is how we correlate shared storages to the same
111
+ # meta storages. This class will hold weak references to cached tenosrs
112
+ # and tensor storages.
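+ # Illustrative usage of the operation model described above (sketch; `x` is any real tensor):
+ #   converter = MetaConverter()
+ #   mx = converter(x)             # meta tensor mirroring x's metadata
+ #   mv = converter(x.view(-1))    # reusing the same converter reproduces the
+ #                                 # view/storage relationships on the meta side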
113
+ class MetaConverter:
114
+ def __init__(self):
115
+ self.storage_memo = {}
116
+ self.tensor_memo: weakref.WeakValueDictionary = weakref.WeakValueDictionary()
117
+ self.maybe_storages_to_delete = []
118
+ self.check_expired_frequency = 128
119
+ self.check_expired_count = 0
120
+ self.hit = 0
121
+ self.miss = 0
122
+ self.del_hook = None
123
+ self.arg_cnt = 0
124
+
125
+ def successful(self):
126
+ return self.hit > 0 and self.miss == 0
127
+
128
+ def check_for_expired_weak_storages(self):
129
+ new_li = []
130
+ stor_to_delete = []
131
+ for obj in self.maybe_storages_to_delete:
132
+ if not obj.expired():
133
+ new_li.append(obj)
134
+ else:
135
+ stor_to_delete.append(obj)
136
+ for obj in stor_to_delete:
137
+ self.storage_memo.pop(obj, None)
138
+ self.maybe_storages_to_delete = new_li
139
+
140
+ # if for some reason we have acquired many storages which have not expired
141
+ # even though a tensor with their storage has expired (aliasing or otherwise)
142
+ # check for expired storages less often so as to bound the amount of work we
143
+ # do checking for expired storages
144
+ self.check_expired_frequency = max(
145
+ self.check_expired_frequency, len(self.maybe_storages_to_delete)
146
+ )
147
+
148
+ def get_tensor_memo(self, t):
149
+ return self.tensor_memo.get(WeakIdRef(t), None)
150
+
151
+ def set_tensor_memo(self, t, v):
152
+ # hold a weak ref to self, otherwise it will be kept alive
153
+ # by the del_ten closure
154
+ self_weak_ref = weakref.ref(self)
155
+ if is_sparse_any(t) or t.is_mkldnn or is_functorch_wrapped_tensor(t):
156
+ weak_st = None
157
+ else:
158
+ weak_st = StorageWeakRef(t._typed_storage())
159
+ tensor_ref_key = WeakIdRef(t)
160
+
161
+ def del_ten():
162
+ # tensor outlives the converter
163
+ self_ref = self_weak_ref()
164
+ if self_ref is None:
165
+ return
166
+ # on shutdown, tensor_ref_key may not be in memo
167
+ self_ref.tensor_memo.pop(tensor_ref_key, None)
168
+ if weak_st and weak_st.expired():
169
+ self_ref.storage_memo.pop(weak_st, None)
170
+ elif weak_st is not None:
171
+ # [expired-storages]
172
+ # NB: even though the tensor has died,
173
+ # the deallocation of its storage can take longer,
174
+ # even when the storage has no other uses/views.
175
+ # In this case, the StorageWeakRef object will be kept alive
176
+ # longer than it needs to be, however the storage itself
177
+ # will be deallocated. We retain the possibly dead storages
178
+ # and periodically check if any of them are expired and
179
+ # can be freed.
180
+ self_ref.maybe_storages_to_delete.append(weak_st)
181
+
182
+ weakref.finalize(t, del_ten)
183
+ self.tensor_memo[tensor_ref_key] = v
184
+
185
+ # NB: doesn't actually return a storage, because meta storage is
186
+ # not supported
187
+ def meta_storage(self, s, callback):
188
+ # NB: TypedStorage is freshly allocated and cannot be used as a hash
189
+ # key index.
190
+
191
+ # Use a Weak Ref to s in order to not leak memory
192
+ swr = StorageWeakRef(s)
193
+ if swr not in self.storage_memo:
194
+ self.storage_memo[swr] = callback(
195
+ lambda: torch.empty(s.size(), dtype=torch.uint8, device="meta")
196
+ ).untyped_storage()
197
+ return self.storage_memo[swr]
198
+
199
+ # This function assumes that it's possible to do the conversion
200
+ # NB: name here is used in a conventional way by Dynamo; it corresponds
201
+ # precisely to the Source.name() of the tensor we're fakeifying and
202
+ # corresponds to a valid Python expression. When we construct sub-names
203
+ # as part of this process, we will maintain this invariant! (Even though
204
+ # other users of this may not need this property to be upheld.)
205
+ def meta_tensor(
206
+ self,
207
+ t,
208
+ shape_env=None,
209
+ callback=lambda t: t(),
210
+ source: Optional[Source] = None,
211
+ symbolic_context: Optional["SymbolicContext"] = None,
212
+ ):
213
+ if source is None:
214
+ from torch._dynamo.source import ConstantSource
215
+
216
+ # TODO: make a dedicated UnknownSource for this?
217
+ source = ConstantSource(
218
+ f"__meta_utils_unknown_tensor{len(self.tensor_memo)}"
219
+ )
220
+
221
+ # This indicates you set no_dispatch() before calling into this
222
+ # function. This is an error: we may be creating fake tensors and
223
+ # will perform operations on them which need fake tensor mode to
224
+ # be active. You will segfault if you are in a no_dispatch() block.
225
+ assert not torch._C._dispatch_tls_local_exclude_set().has(
226
+ torch._C.DispatchKey.Python
227
+ )
228
+ arg_cnt = self.arg_cnt
229
+ self.arg_cnt += 1
230
+
231
+ # When we make as_strided calls, we end up generating a guard
232
+ # that the new as_strided tensor is in bounds for the old storage
233
+ # for the base (since as_strided calls can "bust" out of their
234
+ # bounding box.) This guard is unnecessary: if a user is able
235
+ # to provide us a tensor with the view base setup this way, we
236
+ # don't need to produce a guard, because the fact that they
237
+ # were able to produce the view base means its in bounds.
238
+ #
239
+ # Now, ordinarily, this guard would be harmless. However, the
240
+ # generated guard refers to variables bound on the base variable.
241
+ # At the moment, Dynamo doesn't actually guard on x._base, because
242
+ # according to Voz this results in a lot of spurious invalidations,
243
+ # and also if the user doesn't directly make use of _base, it's
244
+ # pointless anyway (because programs should be parametric over
245
+ # whether or not the input tensor is a view or not--unless you're
246
+ # mutating the input, but that's a whole 'nother ballgame). So
247
+ # for expediency, we suppress these guards so we don't have to
248
+ # deal with this (yet, anyway.)
249
+ #
250
+ # NB: An old version of this code suppressed guards for ALL operations
251
+ # happening during meta conversion, not just as_strided calls.
252
+ # This is too aggressive: we do duck sizing and 0/1 simplification
253
+ # as we allocate variables, and we do need to register guards for
254
+ # these cases.
255
+ maybe_suppress = contextlib.nullcontext
256
+ if shape_env is not None:
257
+ maybe_suppress = shape_env.suppress_guards
258
+
259
+ def sym_sizes_strides_storage_offset(
260
+ t, src, symbolic_context=symbolic_context
261
+ ) -> Tuple[Tuple[int, ...], Tuple[int, ...], int]:
262
+ if shape_env is not None:
263
+ fake_mode = torch._subclasses.fake_tensor.maybe_get_fake_mode(t)
264
+ if fake_mode is not None and fake_mode.shape_env is shape_env:
265
+ # Don't reallocate the sizes; the shape envs are the same,
266
+ # so reuse the old sizes/strides/etc
267
+ return (t.size(), t.stride(), t.storage_offset())
268
+ else:
269
+ return shape_env.create_symbolic_sizes_strides_storage_offset(
270
+ t,
271
+ src,
272
+ symbolic_context=symbolic_context,
273
+ )
274
+ else:
275
+ assert symbolic_context is None
276
+ return (t.size(), t.stride(), t.storage_offset())
277
+
278
+ def empty_create(inner_t, inner_src, symbolic_context=symbolic_context):
279
+ (
280
+ inner_sizes,
281
+ inner_strides,
282
+ inner_storage_offset,
283
+ ) = sym_sizes_strides_storage_offset(inner_t, inner_src, symbolic_context)
284
+ return torch.empty_strided(
285
+ inner_sizes,
286
+ inner_strides,
287
+ dtype=inner_t.dtype,
288
+ device="meta",
289
+ )
290
+
291
+ # Creates a subclass instance with empty inner tensors according to the specified
292
+ # symbolic context.
293
+ def empty_create_subclass(
294
+ t,
295
+ outer_size,
296
+ outer_stride,
297
+ symbolic_context=symbolic_context,
298
+ callback=callback,
299
+ source=source,
300
+ ):
301
+ from torch._dynamo.source import AttrSource
302
+ from torch.fx.experimental.symbolic_shapes import SubclassSymbolicContext
303
+
304
+ assert symbolic_context is None or isinstance(
305
+ symbolic_context, SubclassSymbolicContext
306
+ )
307
+
308
+ # Note: transform_subclass will use __tensor_unflatten__ to generate
309
+ # a fresh subclass wrapper with outer sizes / strides according to the
310
+ # outer symbolic context (passed in to this function). Inner size / stride
311
+ # / storage offset symbols are allocated according to the appropriate inner
312
+ # symbolic contexts, after which the checks in transform_subclass() will
313
+ # relate them to the outer metadata as possible.
314
+ return transform_subclass(
315
+ t,
316
+ lambda attr, inner_t: callback(
317
+ lambda: empty_create(
318
+ inner_t,
319
+ AttrSource(source, attr),
320
+ symbolic_context=(
321
+ None
322
+ if symbolic_context is None
323
+ else symbolic_context.inner_contexts[attr]
324
+ ),
325
+ )
326
+ ),
327
+ outer_size=outer_size,
328
+ outer_stride=outer_stride,
329
+ )
330
+
331
+ # Returns an all-dynamic symbolic context used for metafying the given tensor with
332
+ # fully dynamic dims. This is useful when fake-ifying intermediate tensors in
333
+ # closed-over ViewFunc state, as we don't have symbolic contexts for them, but we
334
+ # don't want to over-specialize during view replay.
335
+ def all_dynamic_symbolic_context(t, source, shape_env, callback):
336
+ from torch._dynamo.source import AttrSource
337
+ from torch.fx.experimental.symbolic_shapes import (
338
+ DimDynamic,
339
+ StatelessSymbolicContext,
340
+ SubclassSymbolicContext,
341
+ SymbolicContext,
342
+ )
343
+
344
+ view_base_context: Optional[SymbolicContext] = None
345
+ if t._is_view():
346
+ view_base_context = all_dynamic_symbolic_context(
347
+ t._base, AttrSource(source, "_base"), shape_env, callback
348
+ )
349
+
350
+ t_symbolic_context: SymbolicContext
351
+ t_dynamic_sizes = [DimDynamic.DYNAMIC] * t.dim()
352
+ if is_traceable_wrapper_subclass(t):
353
+ inner_contexts: Dict[str, SymbolicContext] = {}
354
+ attrs, _ = t.__tensor_flatten__()
355
+ for attr in attrs:
356
+ assert isinstance(attr, str)
357
+ inner = getattr(t, attr)
358
+ inner_contexts[attr] = all_dynamic_symbolic_context(
359
+ inner, AttrSource(source, attr), shape_env, callback
360
+ )
361
+ t_symbolic_context = SubclassSymbolicContext(
362
+ dynamic_sizes=t_dynamic_sizes,
363
+ constraint_sizes=[None] * t.dim(),
364
+ inner_contexts=inner_contexts,
365
+ tensor_source=source,
366
+ view_base_context=view_base_context,
367
+ )
368
+ else:
369
+ t_symbolic_context = StatelessSymbolicContext(
370
+ dynamic_sizes=t_dynamic_sizes,
371
+ constraint_sizes=[None] * t.dim(),
372
+ view_base_context=view_base_context,
373
+ )
374
+
375
+ return t_symbolic_context
376
+
377
+ # Returns a fake-ified version of an input view tensor t, given an already fake-ified
378
+ # base. At a high level, we want two things:
379
+ # 1. fake_t should have the same view relationship to the given fake base as the
380
+ # input t has to its _base.
381
+ # 2. fake_t should have symbolic sizes / strides / storage offset according to the
382
+ # appropriate symbolic context (i.e. from the automatic dynamic algorithm).
383
+ #
384
+ # We currently take different strategies across view types:
385
+ # * For dense -> dense views, accomplish both (1) and (2) simultaneously via an
386
+ # as_strided() call on the fake-ified base, passing symbolic metadata.
387
+ # * For views involving subclasses, perform view replay using view funcs to
388
+ # achieve (1). It's necessary for (2) to swap out any closed-over state in
389
+ # the view funcs with symbolicized SymInts and fake-ified tensors. Doing this
390
+ # avoids specialization (and thus over-eager simplification of symbols) that
391
+ # could occur during view replay on the fake-ified base.
392
+ #
393
+ # Examples:
394
+ # * t.unsqueeze(-1) with dense t is a dense -> dense view. It can be modeled
395
+ # with an as_strided() call on the fake base passing symbolic metadata.
396
+ # * sub.select(dim=0, index=3) is a subclass -> subclass view. The index arg
397
+ # is made symbolic to avoid invalid specialization and view replay is then
398
+ # done to reconstruct the view.
399
+ # * _nested_from_jagged(values, offsets) is a dense -> subclass view
400
+ # that returns a subclass instance from a dense values tensor. The offsets
401
+ # tensor is closed over in the view func, as it can be considered view metadata.
402
+ # First, the offsets tensor is fake-ified according to the inner symbolic
403
+ # context and with the correct relationship to the outer size / stride metadata.
404
+ # Then view replay is done, swapping in the fake offsets so the view replay output
405
+ # is fully fake with no invalid specialization.
406
+ def view_from_base(base, t, source=source, shape_env=shape_env):
407
+ # fake-ify t's metadata according to the outer symbolic context
408
+ (sizes, strides, storage_offset) = sym_sizes_strides_storage_offset(
409
+ t, source
410
+ )
411
+ if not is_traceable_wrapper_subclass(
412
+ t
413
+ ) and not is_traceable_wrapper_subclass(base):
414
+ # Dense -> Dense view case uses as_strided() to construct view relationship.
415
+ # TODO: Change this logic to use view replay for consistency?
416
+ # It's likely there is no view func available.
417
+ return base.as_strided(sizes, strides, storage_offset)
418
+
419
+ from torch._dynamo.source import EphemeralSource
420
+ from torch.fx.experimental.symbolic_shapes import sym_eq
421
+
422
+ def symint_visitor_fn(s):
423
+ if shape_env is None:
424
+ return s
425
+
426
+ # NB: The symbol here is expected to be simplified out because we a priori
427
+ # allocate inner and outer symbols according to the appropriate symbolic
428
+ # contexts and prefer those over this symbol during symbol simplification
429
+ # (via usage of EphemeralSource below). This -shouldn't- happen, but if
430
+ # this symbol somehow leaks out beyond the view tensor's shape metadata, our
431
+ # assumption of it being simplified out will fail and it may be guarded on,
432
+ # which will hard error.
433
+ sym_source = EphemeralSource("symint_visitor_fn")
434
+ symbol = shape_env.create_symbol(s, sym_source)
435
+ return shape_env.create_symintnode(symbol, hint=s, source=sym_source)
436
+
437
+ real_to_fake_mapping = {}
438
+ if is_traceable_wrapper_subclass(t):
439
+ # Fake-ify t naively here; this is only done so we can get fake-ified inner
440
+ # tensors with the correct relationships to the outer sizes / strides for use
441
+ # in view replay. It's done beforehand here because it's not easy to do when
442
+ # visiting tensors one-by-one during view replay.
443
+ #
444
+ # Example:
445
+ # Consider a Dense -> NJT view. NJT has (values, offsets) components and we
446
+ # want a view of values with the offsets closed over. As the offsets component
447
+ # is needed to describe the output view, it's important that it's fakeified
448
+ # correctly.
449
+ fake_t = empty_create_subclass(
450
+ t, outer_size=sizes, outer_stride=strides
451
+ )
452
+ attrs, _ = fake_t.__tensor_flatten__()
453
+ for attr in attrs:
454
+ real_to_fake_mapping[getattr(t, attr)] = getattr(fake_t, attr)
455
+
456
+ def tensor_visitor_fn(
457
+ visited_t, shape_env=shape_env, callback=callback, source=source
458
+ ):
459
+ # It's possible to close over an undefined tensor (e.g. NJT's lengths).
460
+ if visited_t is None:
461
+ return None
462
+
463
+ # Fake inner tensors of view subclasses will come from the mapping built above.
464
+ fake_visited_t = real_to_fake_mapping.get(visited_t, None)
465
+ if fake_visited_t is not None:
466
+ return fake_visited_t
467
+
468
+ # For other closed-over tensor state, fake-ify it as all dynamic with an
469
+ # ephemeral source. This avoids invalid specialization during view replay.
470
+ # If we find that in practice the usage of ephemeral sources isn't enough
471
+ # to guarantee that we don't have guards on these symbols, we may need to
472
+ # explicitly suppress guards (as is done for _base in the dense -> dense
473
+ # view case).
474
+ temp_source = EphemeralSource("tensor_visitor_fn")
475
+ return self.meta_tensor(
476
+ visited_t,
477
+ shape_env,
478
+ callback,
479
+ source=temp_source,
480
+ symbolic_context=all_dynamic_symbolic_context(
481
+ visited_t, temp_source, shape_env, callback
482
+ ),
483
+ )
484
+
485
+ # Replay the view, swapping out any non-symbolic SymInts or real tensors
486
+ # for symbolic SymInts or fake tensors.
487
+ fake_t = t._view_func_unsafe(base, symint_visitor_fn, tensor_visitor_fn)
488
+
489
+ # Ensure the output has symbolic shapes according to the outer symbolic context.
490
+ # These checks should simplify out any symbols created for closed-over view func
491
+ # SymInts.
492
+ torch._check(sym_eq(fake_t.size(), sizes))
493
+ torch._check(sym_eq(fake_t.stride(), strides))
494
+ torch._check(sym_eq(fake_t.storage_offset(), storage_offset))
495
+ return fake_t
496
+
497
+ # see expired-storages
498
+ self.check_expired_count += 1
499
+ if self.check_expired_count >= self.check_expired_frequency:
500
+ self.check_for_expired_weak_storages()
501
+ self.check_expired_count = 0
502
+
503
+ if self.get_tensor_memo(t) is None:
504
+ with torch.inference_mode(t.is_inference()):
505
+ if t.is_sparse:
506
+ is_leaf = safe_is_leaf(t)
507
+
508
+ # The lambda function below is similar to
509
+ # `t.to(device='meta')` except the latter
510
+ # preserves nnz value
511
+ r = callback(
512
+ lambda: torch.ops.aten._sparse_coo_tensor_with_dims(
513
+ t.sparse_dim(),
514
+ t.dense_dim(),
515
+ t.shape,
516
+ dtype=t.dtype,
517
+ layout=torch.sparse_coo,
518
+ device="meta",
519
+ )
520
+ )
521
+ assert safe_is_leaf(r), "the callback you passed in doesn't detach"
522
+ # Note [is_coalesced is dispatched]
523
+ # Strangely enough, is_coalesced() is a dispatched operator,
524
+ # which means that it will get caught by fake tensor mode.
525
+ # Ordinarily this would error, but there's some logic in
526
+ # fake tensor to ensure this doesn't happen.
527
+ r._coalesced_(t.is_coalesced())
528
+ if t.requires_grad:
529
+ r.requires_grad = True
530
+ if t.requires_grad and not is_leaf:
531
+ with torch.enable_grad():
532
+ r = r.clone()
533
+ r._coalesced_(t.is_coalesced())
534
+ elif is_sparse_compressed(t):
535
+ is_leaf = safe_is_leaf(t)
536
+
537
+ def mk_meta():
538
+ nnz = 0
539
+ batch_dim = t.ndim - t.sparse_dim() - t.dense_dim()
540
+ batch_size = t.shape[:batch_dim]
541
+ if t.layout in {torch.sparse_csr, torch.sparse_bsr}:
542
+ index_dtype = t.crow_indices().dtype
543
+ compressed_indices = torch.empty(
544
+ t.crow_indices().shape, device="meta", dtype=index_dtype
545
+ )
546
+ plain_indices = torch.empty(
547
+ (*t.col_indices().shape[:-1], nnz),
548
+ device="meta",
549
+ dtype=index_dtype,
550
+ )
551
+ else:
552
+ index_dtype = t.ccol_indices().dtype
553
+ compressed_indices = torch.empty(
554
+ t.ccol_indices().shape, device="meta", dtype=index_dtype
555
+ )
556
+ plain_indices = torch.empty(
557
+ (*t.row_indices().shape[:-1], nnz),
558
+ device="meta",
559
+ dtype=index_dtype,
560
+ )
561
+ values_shape = t.values().shape
562
+ values = torch.empty(
563
+ (
564
+ *values_shape[:batch_dim],
565
+ nnz,
566
+ *values_shape[batch_dim + 1 :],
567
+ ),
568
+ dtype=t.dtype,
569
+ device="meta",
570
+ )
571
+ return torch.ops.aten.sparse_compressed_tensor(
572
+ compressed_indices,
573
+ plain_indices,
574
+ values,
575
+ t.shape,
576
+ layout=t.layout,
577
+ dtype=t.dtype,
578
+ device="meta",
579
+ )
580
+
581
+ # `mk_meta()` is similar to `t.to(device='meta')`
582
+ # except `to('meta')` preserves nnz value while
583
+ # `mk_meta` result has nnz == 0.
584
+ r = callback(mk_meta)
585
+
586
+ assert safe_is_leaf(r), "the callback you passed in doesn't detach"
587
+ if t.requires_grad:
588
+ r.requires_grad = True
589
+ if t.requires_grad and not is_leaf:
590
+ with torch.enable_grad():
591
+ r = r.clone()
592
+ elif t.is_nested and not is_traceable_wrapper_subclass(t):
593
+ # TODO: Handle this better in Dynamo?
594
+ # There are checks there now, but this can still be triggered by a dense
595
+ # tensor graph input that is a view of a strided NT.
596
+ from torch._dynamo.exc import unimplemented
597
+
598
+ unimplemented(
599
+ "strided nested tensors are not supported by meta conversion"
600
+ )
601
+ elif t.is_mkldnn:
602
+ is_leaf = safe_is_leaf(t)
603
+ sizes, strides, _storage_offset = sym_sizes_strides_storage_offset(
604
+ t, source
605
+ )
606
+ r = callback(
607
+ lambda: torch.empty_strided(
608
+ sizes, strides, dtype=t.dtype, device="meta"
609
+ )
610
+ )
611
+ assert safe_is_leaf(r), "the callback you passed in doesn't detach"
612
+ if t.requires_grad:
613
+ r.requires_grad = True
614
+ if t.requires_grad and not is_leaf:
615
+ with torch.enable_grad():
616
+ r = r.clone()
617
+ elif is_functorch_wrapped_tensor(t):
618
+ if t._is_view():
619
+ from torch._dynamo.exc import unimplemented
620
+
621
+ unimplemented(
622
+ "view functorch tensors are not supported by meta conversion"
623
+ )
624
+
625
+ # Wraps a functorch tensor class (BatchedTensor, GradTrackingTensor)
626
+ # in a FakeTensor
627
+ def _to_fake_tensor(t):
628
+ if is_batchedtensor(t):
629
+ ft = _to_fake_tensor(get_unwrapped(t))
630
+ lvl = maybe_get_level(t)
631
+ bdim = maybe_get_bdim(t)
632
+ r = _add_batch_dim(ft, bdim, lvl)
633
+ elif is_gradtrackingtensor(t):
634
+ disable_functorch = torch._C._DisableFuncTorch
635
+ with disable_functorch():
636
+ ft = _to_fake_tensor(get_unwrapped(t))
637
+ lvl = torch._C._functorch.maybe_get_level(t)
638
+ r = torch._C._functorch._wrap_for_grad(ft, lvl)
639
+
640
+ is_leaf = safe_is_leaf(t)
641
+ if t.requires_grad and safe_is_leaf(r):
642
+ r.requires_grad = True
643
+ elif t.requires_grad and not is_leaf:
644
+ with torch.enable_grad():
645
+ r = r.clone()
646
+ else:
647
+ sizes = t.size()
648
+ strides = t.stride()
649
+ r = callback(
650
+ lambda: torch.empty_strided(
651
+ sizes,
652
+ strides,
653
+ dtype=t.dtype,
654
+ device="meta",
655
+ )
656
+ )
657
+ return r
658
+
659
+ r = _to_fake_tensor(t)
660
+
661
+ elif t._is_view():
662
+ # Construct views in two steps: recursively meta-fy their
663
+ # base, and then create view(s) off that. NB: doing it
664
+ # directly from storage is WRONG because this won't cause
665
+ # version counters to get shared.
666
+ assert t._is_view()
667
+
668
+ base_symbolic_context = None
669
+ if shape_env and symbolic_context is not None:
670
+ from torch.fx.experimental.symbolic_shapes import (
671
+ StatelessSymbolicContext,
672
+ )
673
+
674
+ assert isinstance(symbolic_context, StatelessSymbolicContext)
675
+ # NB: This should generally be set when the input is a view,
676
+ # but the exception right now is for fake-ifying grads, which is
677
+ # a work in progress.
678
+ if symbolic_context.view_base_context is not None:
679
+ base_symbolic_context = symbolic_context.view_base_context
680
+
681
+ base = self.meta_tensor(
682
+ t._base,
683
+ shape_env,
684
+ callback,
685
+ source=torch._dynamo.source.AttrSource(source, "_base"),
686
+ symbolic_context=base_symbolic_context,
687
+ )
688
+
689
+ def is_c_of_r(complex_dtype, real_dtype):
690
+ return (
691
+ utils.is_complex_dtype(complex_dtype)
692
+ and utils.corresponding_real_dtype(complex_dtype)
693
+ == real_dtype
694
+ )
695
+
696
+ # In some situations, MetaConverter may be called in a
697
+ # context where autograd is disabled. For the _is_view
698
+ # assert to pass, we have to setup the autograd view
699
+ # metadata anyway. Do this by reenabling the
700
+ # ADInplaceOrView key. This is kind of a hack.
701
+ old_exclude = torch._C._dispatch_tls_is_dispatch_key_excluded(
702
+ torch._C.DispatchKey.ADInplaceOrView
703
+ )
704
+ torch._C._dispatch_tls_set_dispatch_key_excluded(
705
+ torch._C.DispatchKey.ADInplaceOrView, False
706
+ )
707
+ try:
708
+ if base.dtype == t.dtype:
709
+ pass
710
+ elif is_c_of_r(base.dtype, t.dtype):
711
+ base = torch.view_as_real(base)
712
+ elif is_c_of_r(t.dtype, base.dtype):
713
+ base = torch.view_as_complex(base)
714
+ else:
715
+ # This is not guaranteed to succeed. If it fails, it
716
+ # means there is another dtype-converting view function
717
+ # that hasn't been handled here
718
+ base = base.view(t.dtype)
719
+
720
+ # This is very tricky. Naively, you might expect this
721
+ # to hold:
722
+ #
723
+ # if t.requires_grad and not safe_is_leaf(t)
724
+ # assert t._base.requires_grad
725
+ #
726
+ # But it's not true! As you can see in the following
727
+ # program:
728
+ #
729
+ # x = torch.zeros(4)
730
+ # y = x.view(1, 4)
731
+ # y.requires_grad = True
732
+ # z = y.view(1, 1, 4)
733
+ # assert z._base is x
734
+ #
735
+ # So we may have to do *two* views out of the base to
736
+ # recreate this situation.
737
+ if safe_is_leaf(t):
738
+ # Leaf views that track view metadata are created by
739
+ # creating a view inside a no_grad block
740
+ with torch.no_grad(), maybe_suppress():
741
+ r = view_from_base(base, t)
742
+ # As it's a leaf, we can directly assign requires_grad
743
+ r.requires_grad = t.requires_grad
744
+ else:
745
+ if t._base.requires_grad == t.requires_grad:
746
+ # Easy case, just run the view op
747
+ with torch.enable_grad(), maybe_suppress():
748
+ r = view_from_base(base, t)
749
+
750
+ # NB: We don't actually faithfully replicate
751
+ # autograd connectivity, but that doesn't matter
752
+ # today. See following for more info:
753
+ # https://gist.github.com/soulitzer/e03f015b314c3f5fcf80888c69390913
754
+ else:
755
+ # Obscure case. Create a leaf view and give it the
756
+ # correct requires_grad, then do the final view.
757
+ # NB: Can't have a non-leaf without requiring grad!
758
+ assert t.requires_grad
759
+ with torch.no_grad():
760
+ mid = base.view(base.shape)
761
+ mid.requires_grad = t.requires_grad
762
+ with torch.enable_grad(), maybe_suppress():
763
+ r = view_from_base(mid, t)
764
+ # The CreationMeta influences whether or not inplace
765
+ # mutation is an error or not. So we need to make
766
+ # sure we properly propagate this as well.
767
+ torch._C._autograd._set_creation_meta(
768
+ r, torch._C._autograd._get_creation_meta(t)
769
+ )
770
+ finally:
771
+ torch._C._dispatch_tls_set_dispatch_key_excluded(
772
+ torch._C.DispatchKey.ADInplaceOrView, old_exclude
773
+ )
774
+
775
+ else:
776
+ is_leaf = safe_is_leaf(t)
777
+
778
+ (
779
+ sizes,
780
+ strides,
781
+ storage_offset,
782
+ ) = sym_sizes_strides_storage_offset(t, source, symbolic_context)
783
+
784
+ # If we have a subclass that desugars into dense tensors,
785
+ # perform our callback on each inner tensor.
786
+ if is_traceable_wrapper_subclass(t):
787
+ r = empty_create_subclass(
788
+ t, outer_size=sizes, outer_stride=strides
789
+ )
790
+ else:
791
+ r = callback(
792
+ lambda: torch.empty_strided(
793
+ sizes,
794
+ strides,
795
+ dtype=t.dtype,
796
+ device="meta",
797
+ )
798
+ )
799
+
800
+ assert safe_is_leaf(r), "the callback you passed in doesn't detach"
801
+ if t.requires_grad:
802
+ r.requires_grad = t.requires_grad
803
+ if not is_leaf:
804
+ # Fake up some autograd history.
805
+ with torch.enable_grad():
806
+ # preserve_format is the default, but we want to
807
+ # emphasize how important it is to preserve
808
+ # format here
809
+ r = r.clone(memory_format=torch.preserve_format)
810
+
811
+ # Graph-Break for wrapped tensors
812
+ if not (
813
+ is_batchedtensor(t) or is_gradtrackingtensor(t)
814
+ ) and torch._C._functorch.is_functorch_wrapped_tensor(t):
815
+ return NotImplemented
816
+
817
+ s = t.untyped_storage()
818
+ swr = StorageWeakRef(s)
819
+ if swr not in self.storage_memo and (
820
+ r.is_nested
821
+ or (
822
+ r.stride() == strides
823
+ and r.storage_offset() == storage_offset
824
+ )
825
+ ):
826
+ # You're normal and happy, install the fresh storage into the memo
827
+ self.storage_memo[swr] = r.untyped_storage()
828
+ else:
829
+ # You're in crazy town; somehow you gave us a tensor
830
+ # that wasn't a view, but had nonzero storage offset,
831
+ # nontrivial strides (such that clone() couldn't
832
+ # preserve them), or already aliases with another
833
+ # tensor's storage. The most typical way to end
834
+ # up here is with set_. So use set_ to bludgeon this
835
+ # in.
836
+ r_s = self.meta_storage(s, callback=callback)
837
+ # NB: In principle, this should always work, but there
838
+ # is some subtle difference in the autograd metadata
839
+ # that means we will backprop the set_ call, even if
840
+ # r is declared as an input to grad.
841
+ # See https://github.com/pytorch/pytorch/issues/87956
842
+ # for the reproducer.
843
+ # NB: The in_kernel_invocation_manager here is necessary
844
+ # for fake tensor. If we run the set_ call with fake
845
+ # tensor on, r will improperly report that it is NOT a
846
+ # meta tensor but a cpu tensor, and then the set_ call
847
+ # will fail due to device mismatch. no_dispatch() is
848
+ # not enough, because the fake tensor will still claim
849
+ # to be a CPU tensor and you'll end up in the CPU
850
+ # kernel. Arguably this is a hack; a cleaner way to
851
+ # solve this is to have a FakeStorage concept which
852
+ # would report it's CPU device--no problem now! But
853
+ # this is difficult to do because we don't have storage
854
+ # subclasses. Relevant test is
855
+ # DynamicShapesFunctionTests::test_add_dynamic_shapes in
856
+ # test/dynamo/test_dynamic_shapes.py
857
+ maybe_fake_mgr: ContextManager[None] = contextlib.nullcontext()
858
+ from torch._subclasses.fake_tensor import (
859
+ in_kernel_invocation_manager,
860
+ maybe_get_fake_mode,
861
+ )
862
+
863
+ mb_fake_mode = maybe_get_fake_mode(r)
864
+ if mb_fake_mode is not None:
865
+ maybe_fake_mgr = in_kernel_invocation_manager(mb_fake_mode)
866
+ with maybe_fake_mgr, torch.no_grad():
867
+ r.set_(r_s, storage_offset, sizes, strides)
868
+
869
+ if safe_grad(t) is not None:
870
+ from torch._dynamo.source import AttrSource
871
+
872
+ # TODO: Use a valid grad-specific symbolic context instead of recycling
873
+ # the one from t. This isn't correct if e.g. t._is_view() != t.grad._is_view().
874
+ r.grad = self.meta_tensor(
875
+ safe_grad(t),
876
+ shape_env,
877
+ callback,
878
+ source=AttrSource(source, "grad"),
879
+ symbolic_context=symbolic_context,
880
+ )
881
+ torch._C._set_conj(r, t.is_conj())
882
+ torch._C._set_neg(r, t.is_neg())
883
+ # This can be skipped if necessary for performance reasons
884
+ assert_metadata_eq(assert_eq, t, r, skip_symbolic=True)
885
+ self.set_tensor_memo(t, r)
886
+
887
+ return self.get_tensor_memo(t)
888
+
889
+ def __call__(
890
+ self,
891
+ t,
892
+ shape_env=None,
893
+ *,
894
+ callback=lambda t: t(),
895
+ source=None,
896
+ symbolic_context=None,
897
+ ):
898
+ # TODO: zero tensors? We appear to have eliminated them by
899
+ # excluding complex for now
900
+
901
+ if isinstance(t, torch.Tensor) or is_traceable_wrapper_subclass(t):
902
+ if t.device.type != "xla" and any(
903
+ [
904
+ t.is_quantized,
905
+ t._is_view() and t._base is not None and t._base.is_sparse,
906
+ torch._is_functional_tensor(t),
907
+ t.device.type in ("lazy"),
908
+ # We need a way to test if a tensor is batched but there
909
+ # is no official API to do it
910
+ # torch._C._is_batched(t),
911
+ ]
912
+ ):
913
+ # TODO: sparse should support meta
914
+ # NB technically to('meta') does work but our logging
915
+ # instrumentation will see the meta conversions and the
916
+ # tests all break so we just exclude this. In any case
917
+ # the to conversion isn't really right anyhow.
918
+
919
+ if torch._is_functional_tensor(t) and t.device.type != "lazy":
920
+ if t._is_view():
921
+ raise RuntimeError(
922
+ "Cannot safely fakify a view because this process drops the view information right now."
923
+ )
924
+
925
+ st = peek_interpreter_stack()
926
+ assert (
927
+ st is None or st.key() == TransformType.Functionalize
928
+ ), "Expect st to be either None or have Functionalize transform key."
929
+ if st is None:
930
+ # the case of AOTAutograd
931
+ torch._sync(t)
932
+ unwrap_t = torch._from_functional_tensor(t)
933
+ with torch._dispatch.python.suspend_functionalization():
934
+ fake_t = self.meta_tensor(
935
+ unwrap_t,
936
+ shape_env=shape_env,
937
+ callback=callback,
938
+ source=source,
939
+ symbolic_context=symbolic_context,
940
+ )
941
+ out = torch._to_functional_tensor(fake_t)
942
+ torch._mirror_autograd_meta_to(fake_t, out)
943
+ return out
944
+ else:
945
+ # torch.func.functionalize
946
+ reapply_views = torch._C._functionalization_reapply_views_tls()
947
+ unwrap_t = _unwrap_functional_tensor(t, reapply_views)
948
+ pop_st_ctx = (
949
+ torch._functorch.pyfunctorch.temporarily_pop_interpreter_stack()
950
+ )
951
+ with pop_st_ctx:
952
+ fake_t = self.meta_tensor(
953
+ unwrap_t,
954
+ shape_env=shape_env,
955
+ callback=callback,
956
+ source=source,
957
+ symbolic_context=symbolic_context,
958
+ )
959
+ return _wrap_functional_tensor(fake_t, current_level())
960
+ self.miss += 1
961
+ return NotImplemented
962
+ else:
963
+ self.hit += 1
964
+
965
+ disable_functorch = torch._C._DisableFuncTorch
966
+ with disable_functorch():
967
+ r = self.meta_tensor(
968
+ t,
969
+ shape_env=shape_env,
970
+ callback=callback,
971
+ source=source,
972
+ symbolic_context=symbolic_context,
973
+ )
974
+ if type(t) is torch.nn.Parameter:
975
+ # NB: Cannot directly use Parameter constructor
976
+ # because that would force a detach, not desirable
977
+ r._is_param = True
978
+ return r
979
+ elif torch.overrides.is_tensor_like(t):
980
+ self.miss += 1
981
+ return NotImplemented
982
+ else:
983
+ # non-Tensor types don't count as hit or miss
984
+ return t
985
+
986
+
987
+ import torch._prims_common as utils
venv/lib/python3.10/site-packages/torch/_subclasses/schema_check_mode.py ADDED
@@ -0,0 +1,198 @@
1
+ # mypy: ignore-errors
2
+
3
+ from collections import namedtuple
4
+ from copy import deepcopy
5
+ from itertools import combinations
6
+
7
+ import torch
8
+ from torch.fx.operator_schemas import normalize_function
9
+ from torch.testing._internal.jit_utils import clone_inputs
10
+ from torch.utils import _pytree as pytree
11
+ from torch.utils._python_dispatch import TorchDispatchMode
12
+ from torch.utils._pytree import tree_map
13
+
14
+ # Named Tuples used within SchemaCheckMode
15
+ Mutation = namedtuple("Mutation", ["op_name", "arg_name"])
16
+ Aliasing = namedtuple("Aliasing", ["op_name", "arg_name", "output_number"])
17
+
18
+ # Simplified naming for C++ classes
19
+ SchemaArgument = torch._C._SchemaArgument
20
+ SchemaArgType = torch._C._SchemaArgType
21
+ SchemaInfo = torch._C._SchemaInfo
22
+
23
+ # This TorchDispatchMode Subclass is used to verify op schemas
24
+ # This TorchDispatchMode subclass currently:
25
+ # - Records the called ops
26
+ # - Checks for mutations on all inputs
27
+ # - Checks for aliasing on all inputs
28
+
29
+
30
+ class SchemaCheckMode(TorchDispatchMode):
31
+ def __init__(self):
32
+ # Information recorded for testing purposes. For example:
33
+ # - incorrect schemas
34
+ # - overly conservative schemas
35
+ self.ops = []
36
+ self.mutated = []
37
+ self.aliasing = []
38
+
39
+ def reset_cache(self):
40
+ self.ops.clear()
41
+ self.mutated.clear()
42
+ self.aliasing.clear()
43
+
44
+ def display_ops(self):
45
+ print(*self.ops, sep=",")
46
+
47
+ def __torch_dispatch__(self, func, types, args=(), kwargs=None):
48
+ def bitwise_equal(lhs, rhs):
49
+ if lhs.is_quantized:
50
+ # TODO: This is only OK if quantized tensors can't hold NaN; not sure if
51
+ # this is actually true
52
+ return torch.equal(lhs, rhs)
53
+ else:
54
+ return torch.allclose(lhs, rhs, equal_nan=True)
55
+
56
+ def has_mutated(before, after, md):
57
+ are_tensors = type(before) == torch.Tensor and type(after) == torch.Tensor
58
+ if (
59
+ are_tensors
60
+ and before.layout != torch.sparse_csr
61
+ and after.layout != torch.sparse_csr
62
+ ):
63
+ return not (
64
+ before.size() == after.size()
65
+ and bitwise_equal(before, after)
66
+ and md[0] == after.stride()
67
+ and md[1] == after._typed_storage()._cdata
68
+ )
69
+ return False
70
+
71
+ def has_aliased(lhs, rhs):
72
+ try:
73
+ return torch._C._overlaps(lhs, rhs)
74
+ except Exception as exception:
75
+ if str(exception).startswith("Cannot inspect value of type "):
76
+ return False
77
+ else:
78
+ raise exception
79
+
80
+ def standardize_name(name):
81
+ return name if name != "self" else "input"
82
+
83
+ def unwrap(e):
84
+ if isinstance(e, torch.Tensor) and not type(e) == torch.Tensor:
85
+ try:
86
+ return e.elem
87
+ except AttributeError as t:
88
+ return e
89
+ return e
90
+
91
+ def parse_metadata(e):
92
+ if isinstance(e, torch.Tensor):
93
+ if not type(e) == torch.Tensor:
94
+ try:
95
+ current = e.elem
96
+ return (
97
+ deepcopy(current.stride()),
98
+ current._typed_storage()._cdata,
99
+ )
100
+ except AttributeError as t:
101
+ return None
102
+ # Sparse CSR tensors do not have strides or storage
103
+ elif e.layout != torch.sparse_csr:
104
+ return (deepcopy(e.stride()), e._typed_storage()._cdata)
105
+ return None
106
+
107
+ self.ops.append(func._schema.name)
108
+
109
+ # Clone and process arguments and outputs
110
+ pre_arguments = normalize_function(
111
+ func, args, kwargs, normalize_to_only_use_kwargs=True
112
+ ).kwargs
113
+
114
+ c_p_args = dict(zip(pre_arguments.keys(), clone_inputs(pre_arguments.values())))
115
+ cloned_arguments = {
116
+ name: tree_map(unwrap, c_p_args.get(name)) for name in c_p_args
117
+ }
118
+ cloned_metadata = {
119
+ name: [
120
+ parse_metadata(a) for a in pytree.tree_leaves(pre_arguments.get(name))
121
+ ]
122
+ for name in pre_arguments
123
+ }
124
+
125
+ out = func(*args, **kwargs)
126
+ arguments = {
127
+ name: tree_map(unwrap, pre_arguments.get(name)) for name in pre_arguments
128
+ }
129
+ tuple_out = out if isinstance(out, tuple) else (out,)
130
+ tuple_out = tree_map(unwrap, tuple_out)
131
+
132
+ schema_info = SchemaInfo(func._schema)
133
+ schema_info.add_argument_values(pre_arguments)
134
+
135
+ # Process arguments with outputs
136
+ for i in range(len(func._schema.arguments)):
137
+ arg = func._schema.arguments[i]
138
+ name = standardize_name(arg.name)
139
+ if arguments.get(name) is not None:
140
+ before = cloned_arguments.get(name)
141
+ md = cloned_metadata.get(name)
142
+ after = arguments.get(name)
143
+ for j in range(len(tuple_out)):
144
+ # aten::_unsafe_view is intended to have incorrect aliasing notation (hence unsafe)
145
+ unsafe_ops = ("aten::_unsafe_view", "aten::unsafe_split")
146
+ if (
147
+ has_aliased(tuple_out[j], after)
148
+ and func._schema.name not in unsafe_ops
149
+ ):
150
+ if not schema_info.may_contain_alias(
151
+ SchemaArgument(SchemaArgType.output, j),
152
+ SchemaArgument(SchemaArgType.input, i),
153
+ ):
154
+ raise RuntimeError(
155
+ f"Argument {name} is not defined to alias output but was aliasing"
156
+ )
157
+ else:
158
+ self.aliasing.append(
159
+ Aliasing(func._schema.name, name, f"output_{j}")
160
+ )
161
+ if after is tuple_out[j] and isinstance(after, torch.Tensor):
162
+ # Only mutable ops e.g. (add_, add.out) are allowed to directly return inputs.
163
+ if not schema_info.is_mutable(
164
+ SchemaArgument(SchemaArgType.input, i)
165
+ ) and func not in [
166
+ torch.ops.aten.lift.default,
167
+ torch.ops.aten.lift_fresh.default,
168
+ ]:
169
+ raise RuntimeError(
170
+ f"""\
171
+ Dispatcher operators below autograd are not allowed to directly return inputs.
172
+ However, we found that `outputs[{str(j)}] is {name}"""
173
+ )
174
+ if any(
175
+ has_mutated(a, b, c)
176
+ for a, b, c in zip(
177
+ pytree.tree_leaves(before), pytree.tree_leaves(after), md
178
+ )
179
+ ):
180
+ if not schema_info.is_mutable(
181
+ SchemaArgument(SchemaArgType.input, i)
182
+ ):
183
+ raise RuntimeError(
184
+ f"Argument {name} is not defined as mutable but was mutated"
185
+ )
186
+ else:
187
+ self.mutated.append(Mutation(func._schema.name, name))
188
+
189
+ # Aliasing between outputs
190
+ for i, j in combinations(range(len(func._schema.returns)), 2):
191
+ if has_aliased(tuple_out[i], tuple_out[j]):
192
+ if not schema_info.may_contain_alias(
193
+ SchemaArgument(SchemaArgType.output, i),
194
+ SchemaArgument(SchemaArgType.output, j),
195
+ ):
196
+ raise RuntimeError(f"Outputs {i} and {j} alias unexpectedly")
197
+
198
+ return out
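Since SchemaCheckMode is a TorchDispatchMode, it can be entered as a context manager around eager code to record called ops and surface undeclared mutation or aliasing. A small usage sketch follows; the printed op names are illustrative, not guaranteed output.

    import torch
    from torch._subclasses.schema_check_mode import SchemaCheckMode

    x = torch.randn(3)
    with SchemaCheckMode() as smode:
        y = x.add(1)   # functional op: no mutation recorded
        x.mul_(2)      # schema declares the input mutable, so this is recorded, not an error
    smode.display_ops()   # e.g. aten::add,aten::mul_
    print(smode.mutated)  # list of Mutation(op_name, arg_name) entries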
venv/lib/python3.10/site-packages/torch/export/__init__.py ADDED
@@ -0,0 +1,344 @@
1
+ import builtins
2
+ import copy
3
+ import dataclasses
4
+ import inspect
5
+ import io
6
+ import os
7
+ import sys
8
+ import typing
9
+ import warnings
10
+ from enum import auto, Enum
11
+ from typing import (
12
+ Any,
13
+ Callable,
14
+ Dict,
15
+ Iterator,
16
+ List,
17
+ Optional,
18
+ Tuple,
19
+ Type,
20
+ TYPE_CHECKING,
21
+ Union,
22
+ )
23
+
24
+ import torch
25
+ import torch.utils._pytree as pytree
26
+ from torch.fx._compatibility import compatibility
27
+
28
+ from torch.fx.passes.infra.pass_base import PassResult
29
+ from torch.fx.passes.infra.pass_manager import PassManager
30
+
31
+ from torch.utils._pytree import (
32
+ FlattenFunc,
33
+ FromDumpableContextFn,
34
+ ToDumpableContextFn,
35
+ UnflattenFunc,
36
+ )
37
+
38
+ if TYPE_CHECKING:
39
+ # Import the following modules during type checking to enable code intelligence features,
40
+ # Do not import unconditionally, as they import sympy and importing sympy is very slow
41
+ from torch.fx.experimental.symbolic_shapes import StrictMinMaxConstraint
42
+
43
+
44
+ __all__ = [
45
+ "Constraint",
46
+ "Dim",
47
+ "ExportBackwardSignature",
48
+ "ExportGraphSignature",
49
+ "ExportedProgram",
50
+ "ModuleCallEntry",
51
+ "ModuleCallSignature",
52
+ "dims",
53
+ "dynamic_dim",
54
+ "export",
55
+ "load",
56
+ "register_dataclass",
57
+ "save",
58
+ "unflatten",
59
+ "FlatArgsAdapter",
60
+ "UnflattenedModule",
61
+ ]
62
+
63
+
64
+ from .dynamic_shapes import Constraint, Dim, dims, dynamic_dim
65
+ from .exported_program import ExportedProgram, ModuleCallEntry, ModuleCallSignature
66
+ from .graph_signature import ExportBackwardSignature, ExportGraphSignature
67
+ from .unflatten import FlatArgsAdapter, unflatten, UnflattenedModule
68
+
69
+
70
+ PassType = Callable[[torch.fx.GraphModule], Optional[PassResult]]
71
+
72
+
73
+ def export(
74
+ mod: torch.nn.Module,
75
+ args: Tuple[Any, ...],
76
+ kwargs: Optional[Dict[str, Any]] = None,
77
+ *,
78
+ dynamic_shapes: Optional[Union[Dict[str, Any], Tuple[Any], List[Any]]] = None,
79
+ strict: bool = True,
80
+ preserve_module_call_signature: Tuple[str, ...] = (),
81
+ ) -> ExportedProgram:
82
+ """
83
+ :func:`export` takes an arbitrary Python callable (an nn.Module, a function or
84
+ a method) along with example inputs, and produces a traced graph representing
85
+ only the Tensor computation of the function in an Ahead-of-Time (AOT) fashion,
86
+ which can subsequently be executed with different inputs or serialized. The
87
+ traced graph (1) produces normalized operators in the functional ATen operator set
88
+ (as well as any user-specified custom operators), (2) has eliminated all Python control
89
+ flow and data structures (with certain exceptions), and (3) records the set of
90
+ shape constraints needed to show that this normalization and control-flow elimination
91
+ is sound for future inputs.
92
+
93
+ **Soundness Guarantee**
94
+
95
+ While tracing, :func:`export()` takes note of shape-related assumptions
96
+ made by the user program and the underlying PyTorch operator kernels.
97
+ The output :class:`ExportedProgram` is considered valid only when these
98
+ assumptions hold true.
99
+
100
+ Tracing makes assumptions on the shapes (not values) of input tensors.
101
+ Such assumptions must be validated at graph capture time for :func:`export`
102
+ to succeed. Specifically:
103
+
104
+ - Assumptions on static shapes of input tensors are automatically validated without additional effort.
105
+ - Assumptions on dynamic shape of input tensors require explicit specification
106
+ by using the :func:`Dim` API to construct dynamic dimensions and by associating
107
+ them with example inputs through the ``dynamic_shapes`` argument.
108
+
109
+ If any assumption can not be validated, a fatal error will be raised. When that happens,
110
+ the error message will include suggested fixes to the specification that are needed
111
+ to validate the assumptions. For example :func:`export` might suggest the
112
+ following fix to the definition of a dynamic dimension ``dim0_x``, say appearing in the
113
+ shape associated with input ``x``, that was previously defined as ``Dim("dim0_x")``::
114
+
115
+ dim = Dim("dim0_x", max=5)
116
+
117
+ This example means the generated code requires dimension 0 of input ``x`` to be less
118
+ than or equal to 5 to be valid. You can inspect the suggested fixes to dynamic dimension
119
+ definitions and then copy them verbatim into your code without needing to change the
120
+ ``dynamic_shapes`` argument to your :func:`export` call.
121
+
122
+ Args:
123
+ mod: We will trace the forward method of this module.
124
+
125
+ args: Example positional inputs.
126
+
127
+ kwargs: Optional example keyword inputs.
128
+
129
+ dynamic_shapes:
130
+ An optional argument where the type should either be:
131
+ 1) a dict from argument names of ``f`` to their dynamic shape specifications,
132
+ 2) a tuple that specifies dynamic shape specifications for each input in original order.
133
+ If you are specifying dynamism on keyword args, you will need to pass them in the order that
134
+ is defined in the original function signature.
135
+
136
+ The dynamic shape of a tensor argument can be specified as either
137
+ (1) a dict from dynamic dimension indices to :func:`Dim` types, where it is
138
+ not required to include static dimension indices in this dict, but when they are,
139
+ they should be mapped to None; or (2) a tuple / list of :func:`Dim` types or None,
140
+ where the :func:`Dim` types correspond to dynamic dimensions, and static dimensions
141
+ are denoted by None. Arguments that are dicts or tuples / lists of tensors are
142
+ recursively specified by using mappings or sequences of contained specifications.
143
+
144
+ strict: When enabled (default), the export function will trace the program through
145
+ TorchDynamo which will ensure the soundness of the resulting graph. Otherwise, the
146
+ exported program will not validate the implicit assumptions baked into the graph and
147
+ may cause behavior divergence between the original model and the exported one. This is
148
+ useful when users need to work around bugs in the tracer, or simply want to incrementally
149
+ enable safety in their models. Note that this does not change the resulting IR spec,
150
+ and the model will be serialized in the same way regardless of what value
151
+ is passed here.
152
+ WARNING: This option is experimental and use this at your own risk.
153
+
154
+ Returns:
155
+ An :class:`ExportedProgram` containing the traced callable.
156
+
157
+ **Acceptable input/output types**
158
+
159
+ Acceptable types of inputs (for ``args`` and ``kwargs``) and outputs include:
160
+
161
+ - Primitive types, i.e. ``torch.Tensor``, ``int``, ``float``, ``bool`` and ``str``.
162
+ - Dataclasses, but they must be registered by calling :func:`register_dataclass` first.
163
+ - (Nested) Data structures comprising of ``dict``, ``list``, ``tuple``, ``namedtuple`` and
164
+ ``OrderedDict`` containing all above types.
165
+
166
+ """
167
+ from ._trace import _export
168
+
169
+ if not isinstance(mod, torch.nn.Module):
170
+ raise ValueError(
171
+ f"Expected `mod` to be an instance of `torch.nn.Module`, got {type(mod)}."
172
+ )
173
+
174
+ return _export(
175
+ mod,
176
+ args,
177
+ kwargs,
178
+ dynamic_shapes,
179
+ strict=strict,
180
+ preserve_module_call_signature=preserve_module_call_signature,
181
+ )
182
+
183
+
184
+ def save(
185
+ ep: ExportedProgram,
186
+ f: Union[str, os.PathLike, io.BytesIO],
187
+ *,
188
+ extra_files: Optional[Dict[str, Any]] = None,
189
+ opset_version: Optional[Dict[str, int]] = None,
190
+ ) -> None:
191
+ """
192
+
193
+ .. warning::
194
+ Under active development, saved files may not be usable in newer versions
195
+ of PyTorch.
196
+
197
+ Saves an :class:`ExportedProgram` to a file-like object. It can then be
198
+ loaded using the Python API :func:`torch.export.load <torch.export.load>`.
199
+
200
+ Args:
201
+ ep (ExportedProgram): The exported program to save.
202
+
203
+ f (Union[str, os.PathLike, io.BytesIO]): A file-like object (has to
204
+ implement write and flush) or a string containing a file name.
205
+
206
+ extra_files (Optional[Dict[str, Any]]): Map from filename to contents
207
+ which will be stored as part of f.
208
+
209
+ opset_version (Optional[Dict[str, int]]): A map of opset names
210
+ to the version of this opset
211
+
212
+
213
+ Example::
214
+
215
+ import torch
216
+ import io
217
+
218
+ class MyModule(torch.nn.Module):
219
+ def forward(self, x):
220
+ return x + 10
221
+
222
+ ep = torch.export.export(MyModule(), (torch.randn(5),))
223
+
224
+ # Save to file
225
+ torch.export.save(ep, 'exported_program.pt2')
226
+
227
+ # Save to io.BytesIO buffer
228
+ buffer = io.BytesIO()
229
+ torch.export.save(ep, buffer)
230
+
231
+ # Save with extra files
232
+ extra_files = {'foo.txt': b'bar'.decode('utf-8')}
233
+ torch.export.save(ep, 'exported_program.pt2', extra_files=extra_files)
234
+
235
+ """
236
+ from torch._export import save
237
+
238
+ if not isinstance(ep, ExportedProgram):
239
+ raise TypeError(
240
+ f"The 'ep' parameter must be an instance of 'ExportedProgram', got '{type(ep).__name__}' instead."
241
+ )
242
+
243
+ save(ep, f, extra_files=extra_files, opset_version=opset_version)
244
+
245
+
246
+ def load(
247
+ f: Union[str, os.PathLike, io.BytesIO],
248
+ *,
249
+ extra_files: Optional[Dict[str, Any]] = None,
250
+ expected_opset_version: Optional[Dict[str, int]] = None,
251
+ ) -> ExportedProgram:
252
+ """
253
+
254
+ .. warning::
255
+ Under active development, saved files may not be usable in newer versions
256
+ of PyTorch.
257
+
258
+ Loads an :class:`ExportedProgram` previously saved with
259
+ :func:`torch.export.save <torch.export.save>`.
260
+
261
+ Args:
264
+ f (Union[str, os.PathLike, io.BytesIO]): A file-like object (has to
265
+ implement write and flush) or a string containing a file name.
266
+
267
+ extra_files (Optional[Dict[str, Any]]): The extra filenames given in
268
+ this map would be loaded and their content would be stored in the
269
+ provided map.
270
+
271
+ expected_opset_version (Optional[Dict[str, int]]): A map of opset names
272
+ to expected opset versions
273
+
274
+ Returns:
275
+ An :class:`ExportedProgram` object
276
+
277
+ Example::
278
+
279
+ import torch
280
+ import io
281
+
282
+ # Load ExportedProgram from file
283
+ ep = torch.export.load('exported_program.pt2')
284
+
285
+ # Load ExportedProgram from io.BytesIO object
286
+ with open('exported_program.pt2', 'rb') as f:
287
+ buffer = io.BytesIO(f.read())
288
+ buffer.seek(0)
289
+ ep = torch.export.load(buffer)
290
+
291
+ # Load with extra files.
292
+ extra_files = {'foo.txt': ''} # values will be replaced with data
293
+ ep = torch.export.load('exported_program.pt2', extra_files=extra_files)
294
+ print(extra_files['foo.txt'])
295
+ print(ep(torch.randn(5)))
296
+ """
297
+ from torch._export import load
298
+
299
+ return load(
300
+ f, extra_files=extra_files, expected_opset_version=expected_opset_version
301
+ )
302
+
303
+
304
+ def register_dataclass(
305
+ cls: Type[Any],
306
+ *,
307
+ serialized_type_name: Optional[str] = None,
308
+ ) -> None:
309
+ """
310
+ Registers a dataclass as a valid input/output type for :func:`torch.export.export`.
311
+
312
+ Args:
313
+ cls: the dataclass type to register
314
+ serialized_type_name: The serialized name for the dataclass. This is
315
+ required if you want to serialize the pytree TreeSpec containing this
316
+ dataclass.
317
+
318
+ Example::
319
+
320
+ @dataclass
321
+ class InputDataClass:
322
+ feature: torch.Tensor
323
+ bias: int
324
+
325
+ @dataclass
+ class OutputDataClass:
326
+ res: torch.Tensor
327
+
328
+ torch.export.register_dataclass(InputDataClass)
329
+ torch.export.register_dataclass(OutputDataClass)
330
+
331
+ def fn(o: InputDataClass) -> torch.Tensor:
332
+ res = o.feature + o.bias
333
+ return OutputDataClass(res=res)
334
+
335
+ ep = torch.export.export(fn, (InputDataClass(torch.ones(2, 2), 1), ))
336
+ print(ep)
337
+
338
+ """
339
+
340
+ from torch._export.utils import register_dataclass_as_pytree_node
341
+
342
+ return register_dataclass_as_pytree_node(
343
+ cls, serialized_type_name=serialized_type_name
344
+ )
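The dynamic_shapes behavior documented in the export docstring above can be exercised with a short sketch; the module, dimension name, and bounds below are made up purely for illustration.

    import torch
    from torch.export import Dim, export

    class M(torch.nn.Module):
        def forward(self, x):
            return x * 2

    # Dimension 0 of `x` is dynamic within [1, 64]; dimension 1 stays static.
    batch = Dim("batch", min=1, max=64)
    ep = export(M(), (torch.randn(4, 8),), dynamic_shapes={"x": {0: batch}})
    print(ep.graph)   # the placeholder for x carries a symbolic size on dim 0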
venv/lib/python3.10/site-packages/torch/export/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (11.8 kB). View file
 
venv/lib/python3.10/site-packages/torch/export/__pycache__/_remove_auto_functionalized_pass.cpython-310.pyc ADDED
Binary file (2.07 kB). View file
 
venv/lib/python3.10/site-packages/torch/export/__pycache__/_remove_effect_tokens_pass.cpython-310.pyc ADDED
Binary file (2.83 kB). View file
 
venv/lib/python3.10/site-packages/torch/export/__pycache__/_safeguard.cpython-310.pyc ADDED
Binary file (1.63 kB). View file
 
venv/lib/python3.10/site-packages/torch/export/__pycache__/_trace.cpython-310.pyc ADDED
Binary file (26.7 kB). View file
 
venv/lib/python3.10/site-packages/torch/export/__pycache__/_tree_utils.cpython-310.pyc ADDED
Binary file (2.13 kB). View file
 
venv/lib/python3.10/site-packages/torch/export/__pycache__/_unlift.cpython-310.pyc ADDED
Binary file (8.6 kB). View file
 
venv/lib/python3.10/site-packages/torch/export/__pycache__/custom_obj.cpython-310.pyc ADDED
Binary file (571 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/export/__pycache__/dynamic_shapes.cpython-310.pyc ADDED
Binary file (24.8 kB). View file
 
venv/lib/python3.10/site-packages/torch/export/__pycache__/exported_program.cpython-310.pyc ADDED
Binary file (22 kB). View file
 
venv/lib/python3.10/site-packages/torch/export/__pycache__/graph_signature.cpython-310.pyc ADDED
Binary file (16.1 kB). View file
 
venv/lib/python3.10/site-packages/torch/export/__pycache__/unflatten.cpython-310.pyc ADDED
Binary file (22.5 kB). View file
 
venv/lib/python3.10/site-packages/torch/export/_remove_auto_functionalized_pass.py ADDED
@@ -0,0 +1,93 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import operator
8
+ from typing import List
9
+
10
+ import torch
11
+ from torch._higher_order_ops.auto_functionalize import (
12
+ auto_functionalized,
13
+ get_mutable_arg_names,
14
+ )
15
+ from torch.export import ExportedProgram
16
+
17
+
18
+ def unsafe_remove_auto_functionalized_pass(
19
+ ep: ExportedProgram,
20
+ ) -> ExportedProgram:
21
+ """
22
+ This pass removes instances of the higher order op 'auto_functionalized',
23
+ and modifies the calling EP in place to call the original mutator op.
24
+ This pass doesn't perform safety checks to make sure that this inplace mutation is safe.
25
+ """
26
+ auto_functionalize_nodes: List[torch.fx.Node] = []
27
+ for module in ep.graph_module.modules():
28
+ if not isinstance(module, torch.fx.GraphModule):
29
+ continue
30
+ for node in ep.graph.nodes:
31
+ if node.op == "call_function" and node.target is auto_functionalized:
32
+ auto_functionalize_nodes.append(node)
33
+
34
+ # Update every use of the HOP
35
+ for node in reversed(auto_functionalize_nodes):
36
+ func = node.args[0]
37
+ original_kwargs = node.kwargs
38
+ assert isinstance(func, torch._ops.OpOverload)
39
+
40
+ with ep.graph.inserting_before(node):
41
+ # This makes the call_function refer to every arg as a kwarg; this is unusual but probably fine.
42
+ new_node = ep.graph.call_function(func, kwargs=node.kwargs)
43
+ for k, v in node.meta.items():
44
+ new_node.meta[k] = v
45
+
46
+ # Replace auto_functionalize(func, args) with just func(args)
47
+ node.replace_all_uses_with(new_node)
48
+
49
+ mutable_args_names = get_mutable_arg_names(new_node.target)
50
+ output_specs = ep.graph_signature.output_specs
51
+
52
+ # update the users of the auto_func node (the getitem nodes)
53
+ for user in list(new_node.users.keys()):
54
+ assert user.target == operator.getitem
55
+ # getitem corresponding to a mutated input, just replace all uses with the original input
56
+ if user.args[1] >= len(func._schema.returns):
57
+ assert user.args[1] <= len(func._schema.returns) + len(
58
+ mutable_args_names
59
+ )
60
+
61
+ # If the result of getitem was used in an output node, update the output spec with the correct name
62
+ adjusted_index = user.args[1] - len(func._schema.returns)
63
+ original_arg = original_kwargs[mutable_args_names[adjusted_index]]
64
+ for spec in output_specs:
65
+ if spec.arg.name == user.name:
66
+ spec.arg.name = original_arg.name # pyre-ignore
67
+ break
68
+
69
+ # This is a little fragile/implementation dependent, but the order of the mutable args is the same as the order
70
+ # of the getitem calls following the HOP.
71
+ user.replace_all_uses_with(
72
+ original_kwargs[mutable_args_names[adjusted_index]]
73
+ )
74
+
75
+ if len(func._schema.returns) == 1:
76
+ # If the function has 1 return then it will just directly return the
77
+ # result -- we don't need a getitem. So we can replace all the
78
+ # getitem(auto_functionalized, 0) with just the node itself.
79
+ for user in list(new_node.users.keys()):
80
+ if user.args[1] == 0:
81
+ user.replace_all_uses_with(new_node)
82
+
83
+ # Same case as above, update the output spec if getitem result used in an output node
84
+ for spec in output_specs:
85
+ if spec.arg.name == user.name:
86
+ spec.arg.name = new_node.name
87
+ break
88
+
89
+ new_node.meta["val"] = node.meta["val"][: len(func._schema.returns)]
90
+ ep.graph.erase_node(node)
91
+
92
+ ep.graph.eliminate_dead_code()
93
+ return ep
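Both this pass and the token-removal pass that follows use the same torch.fx editing recipe: insert a replacement node before the old one, redirect its users, then erase it. Here is a generic sketch of that recipe on a plain GraphModule; the traced function and the abs-to-neg swap are invented purely for illustration.

    import torch
    import torch.fx

    def f(x):
        return torch.abs(x) + 1

    gm = torch.fx.symbolic_trace(f)
    for node in list(gm.graph.nodes):
        if node.op == "call_function" and node.target is torch.abs:
            with gm.graph.inserting_before(node):
                new_node = gm.graph.call_function(torch.neg, node.args)
            node.replace_all_uses_with(new_node)   # point users at the replacement
            gm.graph.erase_node(node)              # then drop the old node
    gm.recompile()
    print(gm.code)   # forward now calls torch.neg instead of torch.abs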
venv/lib/python3.10/site-packages/torch/export/_remove_effect_tokens_pass.py ADDED
@@ -0,0 +1,126 @@
1
+ import operator
2
+ from typing import List
3
+
4
+ import torch
5
+ from torch._higher_order_ops.effects import with_effects
6
+ from .exported_program import ExportedProgram
7
+ from .graph_signature import (
8
+ InputKind,
9
+ InputSpec,
10
+ OutputKind,
11
+ OutputSpec,
12
+ TensorArgument,
13
+ )
14
+
15
+
16
+ def _remove_effect_tokens(ep: ExportedProgram) -> ExportedProgram:
17
+ """
18
+ Removes the existance of tokens from the exported program, including:
19
+ - Removes the input and output tokens
20
+ - Replaces with_effects(token, func, args) with just func(args)
21
+
22
+ This function does an inplace modification on the given ExportedProgram.
23
+ """
24
+ num_tokens: int = 0
25
+ input_token_names: List[str] = []
26
+ new_input_specs: List[InputSpec] = []
27
+ for inp in ep.graph_signature.input_specs:
28
+ if inp.kind == InputKind.TOKEN:
29
+ num_tokens += 1
30
+ assert isinstance(inp.arg, TensorArgument)
31
+ input_token_names.append(inp.arg.name)
32
+ else:
33
+ new_input_specs.append(inp)
34
+
35
+ num_out_tokens: int = 0
36
+ new_output_specs: List[str] = []
37
+ output_token_names: List[OutputSpec] = []
38
+ for out in ep.graph_signature.output_specs:
39
+ if out.kind == OutputKind.TOKEN:
40
+ num_out_tokens += 1
41
+ output_token_names.append(out.arg.name)
42
+ else:
43
+ new_output_specs.append(out)
44
+
45
+ assert num_tokens == num_out_tokens
46
+
47
+ output_node = None
48
+ with_effect_nodes: List[torch.fx.Node] = []
49
+ for node in ep.graph.nodes:
50
+ if node.op == "output":
51
+ output_node = node
52
+ break
53
+
54
+ if not (node.op == "call_function" and node.target is with_effects):
55
+ continue
56
+
57
+ with_effect_nodes.append(node)
58
+
59
+ # Remove tokens from outputs
60
+ assert output_node is not None
61
+ output_args = output_node.args[0]
62
+ assert len(output_args) >= num_tokens
63
+ out_token_nodes = output_args[:num_tokens]
64
+ output_node.args = (tuple(output_args[num_tokens:]),)
65
+ for out_token in out_token_nodes:
66
+ assert out_token.name in output_token_names
67
+ ep.graph.erase_node(out_token)
68
+
69
+ # Replace with_effects(token, func, args) with just func(args)
70
+ for node in reversed(with_effect_nodes):
71
+ func = node.args[1]
72
+ assert isinstance(func, torch._ops.OpOverload)
73
+
74
+ with ep.graph.inserting_before(node):
75
+ new_node = ep.graph.call_function(func, node.args[2:])
76
+ for k, v in node.meta.items():
77
+ new_node.meta[k] = v
78
+
79
+ node.replace_all_uses_with(new_node)
80
+
81
+ # Update user getitem nodes
82
+ for user in list(new_node.users.keys()):
83
+ assert user.target == operator.getitem
84
+ # getitem(with_effects, 0) == token
85
+ if user.args[1] == 0:
86
+ ep.graph.erase_node(user)
87
+
88
+ if len(func._schema.returns) == 1:
89
+ # If the function has 1 return then it will just directly return the
90
+ # result -- we don't need a getitem. So we can replace all the
91
+ # getitem(with_effects, 1) with just the node itself.
92
+ for user in list(new_node.users.keys()):
93
+ assert user.args[1] == 1
94
+ user.replace_all_uses_with(new_node)
95
+
96
+ new_node.meta["val"] = node.meta["val"][1]
97
+ elif len(func._schema.returns) > 1:
98
+ # If the function has more than 1 return then since we got rid of
99
+ # the 1st return value (the token), we need to bump all the other
100
+ # getitem calls by 1 down
101
+ for user in list(new_node.users.keys()):
102
+ assert user.args[1] >= 1
103
+ user.args = (user.args[0], user.args[1] - 1)
104
+
105
+ new_node.meta["val"] = node.meta["val"][1:]
106
+ else:
107
+ assert len(func._schema.returns) == 0
108
+ assert len(new_node.users) == 0
109
+ new_node.meta["val"] = None
110
+
111
+ ep.graph.erase_node(node)
112
+
113
+ # Remove tokens from inputs
114
+ placeholders = [node for node in ep.graph.nodes if node.op == "placeholder"]
115
+ assert len(placeholders) >= num_tokens
116
+ inp_token_nodes = placeholders[:num_tokens]
117
+ for inp_token in inp_token_nodes:
118
+ assert inp_token.name in input_token_names
119
+ ep.graph.erase_node(inp_token)
120
+
121
+ # Update graph signature
122
+ ep.graph_signature.input_specs = new_input_specs
123
+ ep.graph_signature.output_specs = new_output_specs
124
+
125
+ ep.graph.eliminate_dead_code()
126
+ return ep
venv/lib/python3.10/site-packages/torch/export/_safeguard.py ADDED
@@ -0,0 +1,42 @@
1
+ import torch
2
+ from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode
3
+ from torch.overrides import TorchFunctionMode
4
+
5
+
6
+ class AutogradStateOpsFailSafeguard(TorchFunctionMode):
7
+ """
8
+ Detect grad state ops during exporting the graph and fail the process by
9
+ raising an error, to avoid unexpected behavior. Those grad mode ops could be:
10
+ `torch.no_grad`
11
+ `torch.enable_grad`
12
+ `torch.set_grad_enabled`
13
+
14
+ Export with predispatch mode is exempted.
15
+ """
16
+
17
+ def __torch_function__(self, func, types, args=(), kwargs=None):
18
+ kwargs = kwargs or {}
19
+ unsupported_grad_mode_ops = [
20
+ torch._C._set_grad_enabled,
21
+ ]
22
+ # This check is only enabled while tracing, by confirming that an active PROXY
23
+ # torch dispatch mode is present. This allows autograd ops outside of tracing.
24
+ current_state = torch._C.is_grad_enabled()
25
+ if func in unsupported_grad_mode_ops:
26
+ assert len(args) == 1
27
+ changed_state = args[0]
28
+ mode = torch._C._get_dispatch_mode(torch._C._TorchDispatchModeKey.PROXY)
29
+ # Intend to check if it's not the pre_dispatch mode. It's allowed to use
30
+ # autograd ops in pre_dispatch mode, e.g. `torch.no_grad`
31
+ if (
32
+ mode
33
+ and isinstance(mode, ProxyTorchDispatchMode)
34
+ and not mode.pre_dispatch
35
+ and changed_state != current_state
36
+ ):
37
+ raise RuntimeError(
38
+ f"Encountered autograd state manager op {func} trying to change global autograd state "
39
+ "while exporting. This is unsafe because we don't capture this op in torch.export "
40
+ "today, hence we can't reflect the user intention soundly."
41
+ )
42
+ return func(*args, **kwargs)
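The safeguard above is built on TorchFunctionMode interception. Below is a minimal, hypothetical sketch of the same mechanism that merely logs intercepted torch-level calls instead of raising.

    import torch
    from torch.overrides import TorchFunctionMode

    class LogTorchCalls(TorchFunctionMode):
        def __torch_function__(self, func, types, args=(), kwargs=None):
            kwargs = kwargs or {}
            print("intercepted:", func)   # each torch-level call passes through here
            return func(*args, **kwargs)

    with LogTorchCalls():
        torch.add(torch.ones(2), torch.ones(2))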
venv/lib/python3.10/site-packages/torch/export/_trace.py ADDED
@@ -0,0 +1,1060 @@
1
+ import dataclasses
2
+ import functools
3
+ import inspect
4
+ import logging
5
+ import re
6
+ import time
7
+ import warnings
8
+ from contextlib import contextmanager, nullcontext
9
+ from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
10
+
11
+ import torch
12
+ import torch._dynamo
13
+ import torch.fx
14
+
15
+ import torch.utils._pytree as pytree
16
+ from torch._dynamo.exc import UserError, UserErrorType
17
+ from torch._export.non_strict_utils import (
18
+ make_constraints,
19
+ make_fake_inputs,
20
+ make_fake_params_buffers,
21
+ )
22
+ from torch._export.passes.add_runtime_assertions_for_constraints_pass import (
23
+ _AddRuntimeAssertionsForInlineConstraintsPass,
24
+ )
25
+ from torch._export.passes.collect_tracepoints_pass import CollectTracepointsPass
26
+ from torch._export.passes.lift_constants_pass import (
27
+ ConstantAttrMap,
28
+ lift_constants_pass,
29
+ rewrite_script_object_meta,
30
+ )
31
+ from torch._export.wrappers import _wrap_submodules
32
+ from torch._functorch.aot_autograd import aot_export_module
33
+ from torch._guards import detect_fake_mode
34
+ from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
35
+ from torch._utils_internal import log_export_usage
36
+ from torch.export.exported_program import OutputKind
37
+ from torch.fx.experimental.symbolic_shapes import (
38
+ ConstraintViolationError,
39
+ free_unbacked_symbols,
40
+ GuardOnDataDependentSymNode,
41
+ ShapeEnv,
42
+ )
43
+ from torch.fx.graph import _PyTreeCodeGen, _PyTreeInfo
44
+ from torch.utils._sympy.value_ranges import ValueRangeError
45
+
46
+ from ._safeguard import AutogradStateOpsFailSafeguard
47
+
48
+ from .dynamic_shapes import _process_constraints, Constraint
49
+ from .exported_program import (
50
+ _disable_prexisiting_fake_mode,
51
+ ExportedProgram,
52
+ InputKind,
53
+ ModuleCallEntry,
54
+ ModuleCallSignature,
55
+ )
56
+ from .graph_signature import (
57
+ _sig_to_specs,
58
+ ArgumentSpec,
59
+ ConstantArgument,
60
+ CustomObjArgument,
61
+ ExportGraphSignature,
62
+ SymIntArgument,
63
+ TensorArgument,
64
+ )
65
+
66
+
67
+ log = logging.getLogger(__name__)
68
+
69
+
70
+ @dataclasses.dataclass
71
+ class ExportDynamoConfig:
72
+ """
73
+ Manage Export-specific configurations of Dynamo.
74
+ """
75
+
76
+ allow_rnn: bool = True
77
+ reorderable_logging_functions: Set[Callable] = dataclasses.field(
78
+ default_factory=set
79
+ )
80
+
81
+
82
+ DEFAULT_EXPORT_DYNAMO_CONFIG = ExportDynamoConfig()
83
+ DEFAULT_EXPORT_DYNAMO_CONFIG.reorderable_logging_functions = {
84
+ logging.critical,
85
+ logging.debug,
86
+ logging.error,
87
+ logging.exception,
88
+ logging.info,
89
+ logging.log,
90
+ logging.warning,
91
+ print,
92
+ warnings.warn,
93
+ }
94
+
95
+
96
+ @contextmanager
97
+ def _ignore_backend_decomps():
98
+ orig_mkldnn_flag = torch.backends.mkldnn.set_flags(False)
99
+ orig_nnpack_flag = torch.backends.nnpack.set_flags(False)
100
+ try:
101
+ yield
102
+ finally:
103
+ torch.backends.mkldnn.set_flags(*orig_mkldnn_flag)
104
+ torch.backends.nnpack.set_flags(*orig_nnpack_flag)
105
+
106
+
107
+ def _convert_input_to_fake(gm, args, kwargs):
108
+ params_buffers = _get_params_buffers(gm)
109
+ fake_inps: List[torch.Tensor] = []
110
+ for node in gm.graph.nodes:
111
+ if node.op == "placeholder" and "val" in node.meta:
112
+ fake_val = node.meta["val"]
113
+ if fake_val is not None and isinstance(fake_val, torch.Tensor):
114
+ fake_inps.append(fake_val)
115
+
116
+ if detected_fake_mode := detect_fake_mode(fake_inps):
117
+ fake_mode = detected_fake_mode
118
+ else:
119
+ fake_mode = FakeTensorMode(shape_env=ShapeEnv())
120
+
121
+ if len(args) == 0 and len(kwargs) == 0:
122
+ return (), {}, params_buffers, fake_mode
123
+
124
+ count = 0
125
+
126
+ def convert_to_fake(x):
127
+ nonlocal count
128
+ val = fake_inps[count]
129
+ count += 1
130
+ return val
131
+
132
+ fake_args = pytree.tree_map_only(torch.Tensor, convert_to_fake, args)
133
+ # TODO properly use the cached fake tensor
134
+ fake_kwargs = pytree.tree_map_only(torch.Tensor, fake_mode.from_tensor, kwargs)
135
+ fake_params_buffers = pytree.tree_map_only(
136
+ torch.Tensor,
137
+ functools.partial(fake_mode.from_tensor, static_shapes=True),
138
+ params_buffers,
139
+ )
140
+ return fake_args, fake_kwargs, fake_params_buffers, fake_mode
141
+
142
+
143
+ def _replace_param_buffer_names(param_buffer_table, sig):
144
+ for spec in sig.input_specs:
145
+ if spec.kind in (
146
+ InputKind.PARAMETER,
147
+ InputKind.BUFFER,
148
+ ):
149
+ spec.target = param_buffer_table[spec.target]
150
+ for spec in sig.output_specs:
151
+ if spec.kind in (
152
+ OutputKind.BUFFER_MUTATION,
153
+ OutputKind.GRADIENT_TO_PARAMETER,
154
+ ):
155
+ spec.target = param_buffer_table[spec.target]
156
+
157
+
158
+ def _convert_to_positional_args(orig_arg_names, args, kwargs):
159
+ assert len(orig_arg_names) == len(args) + len(kwargs), (
160
+ f"Total number of arg names is expected to be {len(orig_arg_names)} "
161
+ f"but got {len(args)} positional args, {len(kwargs)} kwargs."
162
+ )
163
+ reordered_kwargs = [kwargs[kw_name] for kw_name in orig_arg_names[len(args) :]]
164
+ return (
165
+ *args,
166
+ *reordered_kwargs,
167
+ )
168
+
169
+
170
+ def _normalize_nn_module_stack(gm_torch_level, root_cls):
171
+ # Append a root module to every nn_module_stack.
172
+ root = "L['self']"
173
+ root_key = re.sub(r"[^a-zA-Z0-9]", "_", root)
174
+ for gm in gm_torch_level.modules():
175
+ if not isinstance(gm, torch.fx.GraphModule):
176
+ continue
177
+ for node in gm.graph.nodes:
178
+ if node.op in ["placeholder", "output"]:
179
+ continue
180
+ add_root = True
181
+ if nn_module_stack := node.meta.get("nn_module_stack", {}):
182
+ path, ty = next(iter(nn_module_stack.values()))
183
+ # After deserializing the class `ty` might not exist anymore so
184
+ # it could be a string
185
+ if inspect.isclass(ty) and issubclass(ty, torch.nn.Module):
186
+ # TODO Figure out why sometimes we have root sometimes we don't.
187
+ if path == root and ty is root_cls:
188
+ add_root = False
189
+ else:
190
+ assert isinstance(ty, str)
191
+ if add_root:
192
+
193
+ def normalize_path(path):
194
+ try:
195
+ parts = []
196
+
197
+ class Path:
198
+ def __getattr__(self, name):
199
+ parts.append(name)
200
+ return self
201
+
202
+ def __getitem__(self, idx):
203
+ parts.append(str(idx))
204
+ return self
205
+
206
+ eval(path, {"L": {"self": Path()}})
207
+ return ".".join(parts)
208
+ except Exception: # TODO(zhxchen17) Remove this.
209
+ return path
210
+
211
+ nn_module_stack = {root_key: (root, root_cls), **nn_module_stack}
212
+ node.meta["nn_module_stack"] = {
213
+ key: (normalize_path(path), ty)
214
+ for key, (path, ty) in nn_module_stack.items()
215
+ }
216
+
217
+
218
+ def _get_param_buffer_mapping(
219
+ original_module: torch.nn.Module,
220
+ traced_module: torch.nn.Module,
221
+ ) -> Dict[str, str]:
222
+ """
223
+ Returns a mapping of parameter/buffer names from the new module to the
224
+ original model. This is to help with restoring the FQN for parameter/buffers
225
+ of a traced module to what the original module contains.
226
+ """
227
+
228
+ param_lookup: Dict[int, List[str]] = {}
229
+ buffer_lookup: Dict[int, List[str]] = {}
230
+ for name, param in original_module.named_parameters(remove_duplicate=False):
231
+ param_lookup.setdefault(id(param), []).append(name)
232
+ for name, buffer in original_module.named_buffers(remove_duplicate=False):
233
+ buffer_lookup.setdefault(id(buffer), []).append(name)
234
+
235
+ param_buffer_table: Dict[str, str] = {}
236
+ for dynamo_name, dynamo_param in traced_module.named_parameters(
237
+ remove_duplicate=False
238
+ ):
239
+ assert dynamo_name not in param_buffer_table
240
+ if id(dynamo_param) in param_lookup:
241
+ param_buffer_table[dynamo_name] = param_lookup[id(dynamo_param)].pop()
242
+
243
+ for dynamo_name, dynamo_buffer in traced_module.named_buffers(
244
+ remove_duplicate=False
245
+ ):
246
+ assert dynamo_name not in param_buffer_table
247
+ if id(dynamo_buffer) in buffer_lookup:
248
+ param_buffer_table[dynamo_name] = buffer_lookup[id(dynamo_buffer)].pop()
249
+
250
+ return param_buffer_table
251
+
252
+
253
+ def _remap_constants(
254
+ orig_constant_attrs: ConstantAttrMap,
255
+ graph_signature: ExportGraphSignature,
256
+ constants: Dict[str, Union[torch.Tensor, torch.ScriptObject]],
257
+ ) -> None:
258
+ """Rewrite the graph signature and constants table to use the FQN from the original module."""
259
+ remap_table: Dict[str, str] = {}
260
+ for name, value in constants.items():
261
+ if value in orig_constant_attrs:
262
+ remap_table[name] = orig_constant_attrs[value]
263
+
264
+ for spec in graph_signature.input_specs:
265
+ if spec.kind in (
266
+ InputKind.CONSTANT_TENSOR,
267
+ InputKind.CUSTOM_OBJ,
268
+ ):
269
+ orig_target = spec.target
270
+ assert orig_target is not None
271
+ spec.target = remap_table.get(orig_target, orig_target)
272
+
273
+ constant = constants[orig_target]
274
+ del constants[orig_target]
275
+ constants[spec.target] = constant
276
+
277
+
278
+ def _restore_state_dict(
279
+ original_module: torch.nn.Module, traced_module: torch.fx.GraphModule
280
+ ) -> None:
281
+ """
282
+ Restores the state dict of the traced module to that of the original module.
283
+ """
284
+ param_buffer_table = _get_param_buffer_mapping(original_module, traced_module)
285
+ # Since the graph module is flattened (no module heirarchy), we
286
+ # need to noramlize the module by replacing "." with "_". If we
287
+ # don't, it will try to save the weight to a submodule which no
288
+ # longer exists.
289
+ for name, fqn in param_buffer_table.items():
290
+ param_buffer_table[name] = fqn.replace(".", "_")
291
+
292
+ # Replace state dict attr names with the fqn
293
+ for name, fqn in param_buffer_table.items():
294
+ if not hasattr(traced_module, name):
295
+ continue
296
+
297
+ attr = getattr(traced_module, name)
298
+ if isinstance(attr, torch.Tensor) and not isinstance(attr, torch.nn.Parameter):
299
+ traced_module.register_buffer(fqn, attr)
300
+ else:
301
+ setattr(traced_module, fqn, attr)
302
+ delattr(traced_module, name)
303
+
304
+ # Replace graph getattr nodes with the correct name
305
+ for node in traced_module.graph.nodes:
306
+ if node.op == "get_attr":
307
+ attr_name = node.target
308
+ if attr_name in param_buffer_table:
309
+ node.target = param_buffer_table[attr_name]
310
+
311
+ traced_module.recompile()
312
+
313
+
314
+ def _export_to_torch_ir(
315
+ f: Callable,
316
+ args: Tuple[Any, ...],
317
+ kwargs: Optional[Dict[str, Any]] = None,
318
+ constraints: Optional[List[Constraint]] = None,
319
+ *,
320
+ preserve_module_call_signature: Tuple[str, ...] = (),
321
+ disable_constraint_solver: bool = False,
322
+ restore_fqn: bool = True,
323
+ _log_export_usage: bool = True,
324
+ ) -> torch.fx.GraphModule:
325
+ """
326
+ Traces either an nn.Module's forward function or just a callable with PyTorch
327
+ operations inside and produce a torch.fx.GraphModule in torch IR.
328
+ """
329
+
330
+ if _log_export_usage:
331
+ log_export_usage(event="export.private_api", flags={"_export_to_torch_ir"})
332
+
333
+ kwargs = kwargs or {}
334
+
335
+ if not isinstance(args, tuple):
336
+ raise UserError(
337
+ UserErrorType.INVALID_INPUT,
338
+ f"Expecting `args` to be a tuple of example positional inputs, got {type(args)}",
339
+ )
340
+
341
+ with torch._dynamo.config.patch(dataclasses.asdict(DEFAULT_EXPORT_DYNAMO_CONFIG)):
342
+ try:
343
+ module_call_specs: Dict[str, Dict[str, pytree.TreeSpec]] = {}
344
+ with _wrap_submodules(
345
+ f, preserve_module_call_signature, module_call_specs
346
+ ), _ignore_backend_decomps():
347
+ gm_torch_level, _ = torch._dynamo.export(
348
+ f,
349
+ constraints=constraints, # type: ignore[arg-type]
350
+ assume_static_by_default=True,
351
+ tracing_mode="symbolic",
352
+ disable_constraint_solver=disable_constraint_solver,
353
+ _log_export_usage=_log_export_usage,
354
+ )(
355
+ *args,
356
+ **kwargs,
357
+ )
358
+ except (ConstraintViolationError, ValueRangeError) as e:
359
+ raise UserError(UserErrorType.CONSTRAINT_VIOLATION, str(e)) # noqa: TRY200
360
+ except GuardOnDataDependentSymNode as e:
361
+ raise UserError( # noqa: TRY200
362
+ UserErrorType.ANTI_PATTERN,
363
+ f"Consider annotating your code using torch._constrain_as_*(). {str(e)}",
364
+ case_name="constrain_as_size_example",
365
+ )
366
+
367
+ gm_torch_level.meta["module_call_specs"] = module_call_specs
368
+
369
+ if isinstance(f, torch.nn.Module) and restore_fqn:
370
+ _restore_state_dict(f, gm_torch_level)
371
+
372
+ return gm_torch_level
373
+
374
+
375
+ def _gather_constant_attrs(m: torch.nn.Module) -> ConstantAttrMap:
376
+ """Search the module hierarchy, gathering up all tensor and ScriptObject constants.
377
+
378
+ Returns a dictionary mapping hash(value) to the name of the constant. We
379
+ have to abuse `hash` here unfortunately, see: [ScriptObject hash].
380
+ """
381
+ constants = ConstantAttrMap()
382
+ buffers_parameters = set(m.buffers())
383
+ buffers_parameters.update(m.parameters())
384
+
385
+ def inner(m: torch.nn.Module, prefix_atoms: List[str], constants):
386
+ for k, v in m.__dict__.items():
387
+ if isinstance(v, (torch.Tensor, torch.ScriptObject)):
388
+ if v in buffers_parameters:
389
+ # filter out buffers and parameters, leaving only constants
390
+ continue
391
+
392
+ fqn = ".".join(prefix_atoms + [k])
393
+ if v in constants:
394
+ raise ValueError(
395
+ f"Duplicate reference to constant attribute found: '{constants[v]}' and '{fqn}'."
396
+ )
397
+
398
+ constants[v] = fqn
399
+ for k, v in m.named_children():
400
+ inner(v, prefix_atoms + [k], constants)
401
+
402
+ inner(m, [], constants)
403
+ return constants
404
+
405
+
406
+ def _export_non_strict(
407
+ mod: torch.nn.Module,
408
+ fake_args,
409
+ fake_kwargs,
410
+ fake_params_buffers,
411
+ constant_attrs: ConstantAttrMap,
412
+ *,
413
+ transform=lambda x: x, # TODO(zhxchen17) Revisit if this is needed later.
414
+ pre_dispatch=False,
415
+ ):
416
+ # [NOTE] If the user is exporting under training mode, we want to detect if there is any
417
+ # state change in the autograd global state and error. If the user is exporting under inference
418
+ # mode, we don't care.
419
+ is_grad_enabled = torch._C.is_grad_enabled()
420
+ grad_safe_guard = (
421
+ AutogradStateOpsFailSafeguard() if is_grad_enabled else nullcontext()
422
+ )
423
+
424
+ @contextmanager
425
+ def _compiling_state_context():
426
+ old_value = torch.compiler._is_compiling_flag
427
+ try:
428
+ torch.compiler._is_compiling_flag = True
429
+ yield
430
+ finally:
431
+ torch.compiler._is_compiling_flag = old_value
432
+
433
+ # This _reparametrize_module makes sure inputs and module.params/buffers have the same fake_mode,
434
+ # otherwise aot_export_module will error out because it sees a mix of fake_modes.
435
+ # And we want aot_export_module to use the fake_tensor mode in dynamo to keep the pipeline easy to reason about.
436
+ with torch.nn.utils.stateless._reparametrize_module(
437
+ mod, fake_params_buffers
438
+ ), grad_safe_guard, _ignore_backend_decomps(), _compiling_state_context(): # type: ignore[attr-defined]
439
+ gm, graph_signature = transform(aot_export_module)(
440
+ mod,
441
+ fake_args,
442
+ trace_joint=False,
443
+ pre_dispatch=pre_dispatch,
444
+ kwargs=fake_kwargs,
445
+ )
446
+ # TODO unfortunately preserving graph-level metadata is not
447
+ # working well with aot_export. So we manually copy it.
448
+ # (The node-level meta is addressed above.)
449
+ if isinstance(mod, torch.fx.GraphModule) and hasattr(mod, "meta"):
450
+ gm.meta.update(mod.meta)
451
+
452
+ if pre_dispatch:
453
+ from torch._export.passes.replace_set_grad_with_hop_pass import (
454
+ replace_set_grad_with_hop_pass,
455
+ )
456
+
457
+ gm = replace_set_grad_with_hop_pass(gm)
458
+
459
+ # NOTE: aot_export adds symint metadata for placeholders with int values;
460
+ # since these become specialized, we replace such metadata with the original values
461
+ flat_args = pytree.tree_leaves((fake_args, fake_kwargs))
462
+ index = 0
463
+ total_non_user_inputs = (
464
+ len(graph_signature.parameters)
465
+ + len(graph_signature.buffers)
466
+ + len(graph_signature.input_tokens)
467
+ )
468
+ for node in gm.graph.nodes:
469
+ if node.op == "placeholder":
470
+ if index >= total_non_user_inputs:
471
+ user_arg = flat_args[index - total_non_user_inputs]
472
+ if not isinstance(user_arg, torch.Tensor):
473
+ node.meta["val"] = user_arg
474
+ index += 1
475
+
476
+ is_joint = graph_signature.backward_signature is not None
477
+
478
+ def make_argument_spec(node) -> ArgumentSpec:
479
+ if isinstance(node, (int, bool, float, type(None))):
480
+ # For const outputs we just directly return this
481
+ return ConstantArgument(value=node)
482
+
483
+ assert (
484
+ "val" in node.meta
485
+ ), f"{node} is not a constant or a node with a 'val' metadata field"
486
+ val = node.meta["val"]
487
+ if isinstance(val, FakeTensor):
488
+ return TensorArgument(name=node.name)
489
+ elif isinstance(val, torch.SymInt):
490
+ return SymIntArgument(name=node.name)
491
+ elif isinstance(val, torch.ScriptObject):
492
+ return CustomObjArgument(
493
+ name=node.name, class_fqn=val._type().qualified_name() # type: ignore[attr-defined]
494
+ )
495
+ else:
496
+ # TODO: this branch is likely wrong, all permissible ConstantArgument type
497
+ # should have been handled already
498
+ return ConstantArgument(value=val)
499
+
500
+ input_specs, output_specs = _sig_to_specs(
501
+ user_inputs=set(graph_signature.user_inputs),
502
+ inputs_to_parameters=graph_signature.inputs_to_parameters, # type: ignore[arg-type]
503
+ inputs_to_buffers=graph_signature.inputs_to_buffers, # type: ignore[arg-type]
504
+ user_outputs=set(graph_signature.user_outputs), # type: ignore[arg-type]
505
+ buffer_mutations=graph_signature.buffers_to_mutate, # type: ignore[arg-type]
506
+ user_input_mutations=graph_signature.user_inputs_to_mutate, # type: ignore[arg-type]
507
+ grad_params=graph_signature.backward_signature.gradients_to_parameters if is_joint else {}, # type: ignore[arg-type, union-attr]
508
+ grad_user_inputs=graph_signature.backward_signature.gradients_to_user_inputs if is_joint else {}, # type: ignore[arg-type, union-attr]
509
+ loss_output=graph_signature.backward_signature.loss_output if is_joint else None, # type: ignore[arg-type, union-attr]
510
+ inputs=[
511
+ make_argument_spec(node)
512
+ for node in gm.graph.nodes
513
+ if node.op == "placeholder"
514
+ ],
515
+ outputs=[
516
+ make_argument_spec(node)
517
+ for node in pytree.tree_leaves(next(iter(reversed(gm.graph.nodes))).args)
518
+ ],
519
+ input_tokens=graph_signature.input_tokens,
520
+ output_tokens=graph_signature.output_tokens,
521
+ )
522
+ export_graph_signature = ExportGraphSignature(
523
+ input_specs=input_specs, output_specs=output_specs
524
+ )
525
+
526
+ constants = rewrite_script_object_meta(gm)
527
+ constants.update(lift_constants_pass(gm, export_graph_signature, constant_attrs))
528
+
529
+ @dataclasses.dataclass
530
+ class _ExportedProgramNonStrict:
531
+ gm: torch.fx.GraphModule
532
+ sig: ExportGraphSignature
533
+ constants: Dict[str, Union[torch.Tensor, torch._C.ScriptObject]]
534
+
535
+ return _ExportedProgramNonStrict(
536
+ gm,
537
+ export_graph_signature,
538
+ constants,
539
+ )
540
+
541
+
542
+ def _get_params_buffers(mod: torch.nn.Module) -> Dict[str, torch.Tensor]:
543
+ params_buffers: Dict[str, torch.Tensor] = {}
544
+ for name, param in mod.named_parameters(remove_duplicate=False):
545
+ params_buffers[name] = param
546
+
547
+ for name, buffer in mod.named_buffers(remove_duplicate=False):
548
+ params_buffers[name] = buffer
549
+ return params_buffers
550
+
551
+
552
+ def _rewrite_dynamo_tensor_constants(
553
+ orig_mod_buffers: Set[torch.Tensor],
554
+ traced_mod_buffers: Dict[str, torch.Tensor],
555
+ graph_signature: ExportGraphSignature,
556
+ constants: Dict[str, Union[torch.Tensor, torch.ScriptObject]],
557
+ ):
558
+ """Dynamo erroneously marks tensor attributes on modules as a buffers.
559
+
560
+ Rewrite them to be tensor constants.
561
+ """
562
+ for spec in graph_signature.input_specs:
563
+ if spec.kind == InputKind.BUFFER:
564
+ assert spec.target is not None
565
+ value = traced_mod_buffers[spec.target]
566
+ if value not in orig_mod_buffers:
567
+ # This was a tensor constant erroneously marked as a buffer.
568
+ # Convert it int oa constant in the graph signature, and add its
569
+ # value to the constants table.
570
+ spec.kind = InputKind.CONSTANT_TENSOR
571
+ constants[spec.target] = value
572
+
573
+
574
+ def _rewrite_non_persistent_buffers(
575
+ orig_mod: torch.nn.Module,
576
+ graph_signature: ExportGraphSignature,
577
+ constants: Dict[str, Union[torch.Tensor, torch.ScriptObject]],
578
+ ):
579
+ """Dynamo erroneously drops the persistent flag on buffers.
580
+
581
+ Rewrite non-persistent buffers to reflect the original module.
582
+ """
583
+ state_dict = orig_mod.state_dict()
584
+ for spec in graph_signature.input_specs:
585
+ if spec.kind == InputKind.BUFFER:
586
+ assert spec.target is not None
587
+ if spec.target not in state_dict:
588
+ assert spec.target not in constants
589
+ spec.persistent = False
590
+ constants[spec.target] = orig_mod.get_buffer(spec.target)
591
+
592
+
593
+ def get_ep_stats(ep: ExportedProgram) -> Dict[str, Any]:
594
+ op_count = 0
595
+ op_set = set()
596
+ for m in ep.graph_module.modules():
597
+ if not isinstance(m, torch.fx.GraphModule):
598
+ continue
599
+ for node in m.graph.nodes:
600
+ if node.op != "call_function":
601
+ continue
602
+ op_count += 1
603
+ assert hasattr(node.target, "__module__")
604
+ assert hasattr(node.target, "__name__")
605
+ op_set.add(f"{node.target.__module__}.{node.target.__name__}")
606
+ return {"op_count": op_count, "op_set": op_set}
607
+
608
+
609
+ _EXPORT_FLAGS: Optional[Set[str]] = None
610
+
611
+
612
+ def _log_export_wrapper(fn):
613
+ @functools.wraps(fn)
614
+ def wrapper(*args, **kwargs):
615
+ global _EXPORT_FLAGS
616
+ try:
617
+ start = time.time()
618
+ ep = fn(*args, **kwargs)
619
+ end = time.time()
620
+ log_export_usage(
621
+ event="export.time",
622
+ metrics=end - start,
623
+ flags=_EXPORT_FLAGS,
624
+ **get_ep_stats(ep),
625
+ )
626
+ except Exception as e:
627
+ t = type(e)
628
+ error_type = t.__module__ + "." + t.__qualname__
629
+ log_export_usage(
630
+ event="export.error",
631
+ type=error_type,
632
+ message=str(e),
633
+ flags=_EXPORT_FLAGS,
634
+ )
635
+ raise e
636
+ finally:
637
+ _EXPORT_FLAGS = None
638
+
639
+ return ep
640
+
641
+ return wrapper
642
+
643
+
644
+ @_log_export_wrapper
645
+ @_disable_prexisiting_fake_mode
646
+ def _export(
647
+ mod: torch.nn.Module,
648
+ args: Tuple[Any, ...],
649
+ kwargs: Optional[Dict[str, Any]] = None,
650
+ dynamic_shapes: Optional[Union[Dict[str, Any], Tuple[Any], List[Any]]] = None,
651
+ *,
652
+ strict: bool = True,
653
+ preserve_module_call_signature: Tuple[str, ...] = (),
654
+ pre_dispatch: bool = False,
655
+ ) -> ExportedProgram:
656
+ """
657
+ Traces either an nn.Module's forward function or just a callable with PyTorch
658
+ operations inside and produce a ExportedProgram.
659
+
660
+ Args:
661
+ f: the `nn.Module` to trace.
662
+
663
+ args: example positional inputs.
664
+
665
+ kwargs: optional example keyword inputs.
666
+
667
+ dynamic_shapes:
668
+ An optional argument where the type should either be:
669
+ 1) a dict from argument names of ``f`` to their dynamic shape specifications,
670
+ 2) a tuple that specifies dynamic shape specifications for each input in original order.
671
+ If you are specifying dynamism on keyword args, you will need to pass them in the order that
672
+ is defined in the original function signature.
673
+
674
+ The dynamic shape of a tensor argument can be specified as either
675
+ (1) a dict from dynamic dimension indices to :func:`Dim` types, where it is
676
+ not required to include static dimension indices in this dict, but when they are,
677
+ they should be mapped to None; or (2) a tuple / list of :func:`Dim` types or None,
678
+ where the :func:`Dim` types correspond to dynamic dimensions, and static dimensions
679
+ are denoted by None. Arguments that are dicts or tuples / lists of tensors are
680
+ recursively specified by using mappings or sequences of contained specifications.
681
+
682
+ preserve_module_call_signature: A list of submodule paths for which the original
683
+ calling conventions are preserved as metadata.
684
+
685
+ Returns:
686
+ An ExportedProgram containing the traced method.
687
+ """
688
+    from .dynamic_shapes import _process_dynamic_shapes
+
+    global _EXPORT_FLAGS
+    flags = set()
+    flags.add("strict" if strict else "non_strict")
+    flags.add("pre_dispatch" if pre_dispatch else "aot_dispatch")
+    log_export_usage(event="export.enter", flags=flags)
+    _EXPORT_FLAGS = flags
+
+    constraints = _process_dynamic_shapes(mod, args, kwargs, dynamic_shapes) or []
+
+    kwargs = kwargs or {}
+
+    constant_attrs = _gather_constant_attrs(mod)
+
+    flat_args, orig_in_spec = pytree.tree_flatten((args, kwargs))
+
+    if not strict:
+        out_spec = None
+
+        module_call_specs: Dict[str, Dict[str, pytree.TreeSpec]] = {}
+
+        def strip_root(x):
+            if isinstance(x, str) and x.startswith("_export_root"):
+                stripped = x[len("_export_root") :]
+                return stripped[1:] if stripped.startswith(".") else stripped
+            return x
+
+        def fixup_key(x):
+            return "L__self__" + strip_root(x)
+
+        def _tuplify_outputs(aot_export):
+            def _aot_export_non_strict(mod, args, kwargs=None, **flags):
+                kwargs = kwargs or {}
+
+                class Wrapper(torch.nn.Module):
+                    def __init__(self, mod):
+                        super().__init__()
+                        self._export_root = mod
+
+                    def forward(self, *args, **kwargs):
+                        nonlocal out_spec
+                        if isinstance(self._export_root, torch.fx.GraphModule):
+                            with torch.fx.traceback.preserve_node_meta():
+                                tree_out = torch.fx.Interpreter(self._export_root).run(
+                                    *args, **kwargs
+                                )
+                        else:
+                            tree_out = self._export_root(*args, **kwargs)
+                        flat_outs, out_spec = pytree.tree_flatten(tree_out)
+                        return tuple(flat_outs)
+
+                wrapped_mod = Wrapper(mod)
+                # Patch _export_root into the preserved call signatures so that the
+                # wrapper module correctly populates the in/out spec
+                new_preserved_call_signatures = [
+                    "_export_root." + i for i in preserve_module_call_signature
+                ]
+                with _wrap_submodules(
+                    wrapped_mod, new_preserved_call_signatures, module_call_specs
+                ):
+                    gm, sig = aot_export(wrapped_mod, args, kwargs=kwargs, **flags)
+
+                sig.parameters = pytree.tree_map(strip_root, sig.parameters)
+                sig.buffers = pytree.tree_map(strip_root, sig.buffers)
+                sig.inputs_to_buffers = pytree.tree_map(
+                    strip_root, sig.inputs_to_buffers
+                )
+                sig.inputs_to_parameters = pytree.tree_map(
+                    strip_root, sig.inputs_to_parameters
+                )
+                sig.buffers_to_mutate = pytree.tree_map(
+                    strip_root, sig.buffers_to_mutate
+                )
+                for node in gm.graph.nodes:
+                    if "nn_module_stack" in node.meta:
+                        nn_module_stack = node.meta["nn_module_stack"]
+                        node.meta["nn_module_stack"] = {
+                            fixup_key(key): val
+                            for key, val in pytree.tree_map(
+                                strip_root, nn_module_stack
+                            ).items()
+                        }
+
+                return gm, sig
+
+            return _aot_export_non_strict
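The Wrapper above exists because aot_export expects a flat tuple of outputs, so arbitrary (possibly nested) module outputs are flattened with pytree and the resulting spec is stashed in out_spec so the original structure can be rebuilt later. The round-trip looks roughly like this standalone sketch (not code from the patch):

import torch.utils._pytree as pytree

tree_out = {"logits": 1.0, "aux": (2.0, 3.0)}        # a nested module output
flat_outs, out_spec = pytree.tree_flatten(tree_out)  # flat leaves plus a spec
assert pytree.tree_unflatten(flat_outs, out_spec) == tree_out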
775
+
+        (
+            fake_mode,
+            fake_args,
+            fake_kwargs,
+            equalities_inputs,
+            original_signature,
+        ) = make_fake_inputs(mod, args, kwargs, constraints)
+
+        fake_params_buffers = make_fake_params_buffers(
+            fake_mode, _get_params_buffers(mod)
+        )
+        with fake_mode:
+            ep_non_strict = _export_non_strict(
+                mod,
+                fake_args,
+                fake_kwargs,
+                fake_params_buffers,
+                constant_attrs,
+                pre_dispatch=pre_dispatch,
+                transform=_tuplify_outputs,
+            )
+        try:
+            range_constraints = make_constraints(
+                fake_mode,
+                equalities_inputs,
+                original_signature,
+                ep_non_strict.gm,
+            )
+        except (ConstraintViolationError, ValueRangeError) as e:
+            raise UserError(UserErrorType.CONSTRAINT_VIOLATION, str(e))  # noqa: TRY200
+
+        assert out_spec is not None
+
+        gm = ep_non_strict.gm
+
+        module_call_signatures = {
+            strip_root(fqn): ModuleCallSignature(inputs=[], outputs=[], **specs)
+            for fqn, specs in module_call_specs.items()
+        }
+
+        if len(preserve_module_call_signature) > 0:
+            for node in gm.graph.nodes:
+                if node.target == torch.ops.higher_order._export_tracepoint:
+                    if "path" in node.kwargs:
+                        path = strip_root(node.kwargs["path"])
+                        with gm.graph.inserting_before(node):
+                            new_node = gm.graph.create_node(
+                                "call_function",
+                                torch.ops.higher_order._export_tracepoint,
+                                args=node.args,
+                                kwargs={
+                                    "path": path,
+                                    "kind": node.kwargs["kind"],
+                                },
+                            )
+                            node.replace_all_uses_with(new_node)
+                            gm.graph.erase_node(node)
+
+            res = CollectTracepointsPass(module_call_signatures, ep_non_strict.sig)(gm)
+            assert res is not None
+            gm = res.graph_module
+
+        _rewrite_non_persistent_buffers(mod, ep_non_strict.sig, ep_non_strict.constants)
+        return ExportedProgram(
+            root=gm,
+            graph=gm.graph,
+            graph_signature=ep_non_strict.sig,
+            state_dict=mod.state_dict(keep_vars=True),
+            range_constraints=range_constraints,
+            module_call_graph=[
+                ModuleCallEntry(
+                    "",
+                    ModuleCallSignature(
+                        inputs=[], outputs=[], in_spec=orig_in_spec, out_spec=out_spec
+                    ),
+                )
+            ]
+            + [
+                ModuleCallEntry(fqn, sig) for fqn, sig in module_call_signatures.items()
+            ],
+            example_inputs=(args, kwargs),
+            constants=ep_non_strict.constants,
+        )
859
+
+    gm_torch_level = _export_to_torch_ir(
+        mod,
+        args,
+        kwargs,
+        constraints,
+        preserve_module_call_signature=preserve_module_call_signature,
+        restore_fqn=False,  # don't need to restore because we will do it later
+        _log_export_usage=False,
+    )
+
+    # We detect the fake_mode by looking at gm_torch_level's placeholders; this is the fake_mode created in dynamo.
+    (
+        fake_args,
+        fake_kwargs,
+        fake_params_buffers,
+        dynamo_fake_mode,
+    ) = _convert_input_to_fake(gm_torch_level, args, kwargs)
+
+    # First, we want to pass through the graph to try populating
+    # val field for getattr if there is anything missing.
+    # This can happen when quantization adds extra params and forgets
+    # to update "val"
+    for node in gm_torch_level.graph.nodes:
+        if node.op == "get_attr" and "val" not in node.meta:
+            attr = getattr(gm_torch_level, node.target)
+            # Checks if it is not a HigherOrderOp branch or a module
+            if not isinstance(attr, torch.nn.Module):
+                assert (
+                    dynamo_fake_mode is not None
+                ), "Cannot find dynamo_fake_mode. This could be due to the exported graph module having no placeholders."
+                node.meta["val"] = dynamo_fake_mode.from_tensor(
+                    attr, static_shapes=True
+                )
+
+    # When aot_export lifts the params, we lose the nn_module_stack
+    # and source_fn from the param nodes as they are treated as fresh inputs
+    # Therefore, we manually extract them before calling into aot_export
+    params_buffers_to_node_meta = {}
+    for node in gm_torch_level.graph.nodes:
+        target = node.target
+        meta = node.meta
+        if node.op == "call_module":
+            submodule = getattr(gm_torch_level, target)
+            if isinstance(submodule, torch.nn.Module):
+                for name, _ in submodule.named_parameters(
+                    recurse=True, remove_duplicate=False
+                ):
+                    params_buffers_to_node_meta[target + "." + name] = meta
+
+                for name, _ in submodule.named_buffers(
+                    recurse=True, remove_duplicate=False
+                ):
+                    params_buffers_to_node_meta[target + "." + name] = meta
+
+        if node.op == "get_attr":
+            submodule = getattr(gm_torch_level, target)
+            if not isinstance(submodule, torch.fx.GraphModule):
+                params_buffers_to_node_meta[target] = meta
+
+        # If the call_function uses param as input, we also need to update params' meta
+        # with this call_function node's meta.
+        # This is basically the same flow as torch.fx.traceback.preserve_node_meta()
+        if node.op == "call_function" and not isinstance(
+            node.target, torch._ops.HigherOrderOperator
+        ):
+            for arg in node._input_nodes:
+                if arg.op == "get_attr":
+                    for entry in torch.fx.proxy._COPY_META_FIELDS:
+                        if entry in meta:
+                            params_buffers_to_node_meta[arg.target][entry] = meta[entry]
+
931
+    # Fix the graph output signature to be tuple if scalar
+    out_spec = orig_out_spec = gm_torch_level._out_spec
+    assert out_spec is not None
+    # aot_export expects the return type to always be a tuple.
+    if out_spec.type not in (list, tuple):
+        out_spec = pytree.TreeSpec(tuple, None, [out_spec])
+
+    orig_arg_names = gm_torch_level.graph._codegen.pytree_info.orig_args  # type: ignore[attr-defined]
+
+    gm_torch_level.graph._codegen = _PyTreeCodeGen(
+        _PyTreeInfo(
+            orig_arg_names,
+            gm_torch_level._in_spec,
+            out_spec,
+        )
+    )
+    gm_torch_level.recompile()
+
+    _normalize_nn_module_stack(gm_torch_level, type(mod))
+
+    # NOTE: graph module expects only positional args
+    ep_non_strict = _export_non_strict(
+        gm_torch_level,
+        _convert_to_positional_args(orig_arg_names, fake_args, fake_kwargs),
+        {},
+        fake_params_buffers,
+        constant_attrs,
+        pre_dispatch=pre_dispatch,
+    )
+
+    gm = ep_non_strict.gm
+    export_graph_signature = ep_non_strict.sig
+    constants = ep_non_strict.constants
+
+    # After aot_export, set the param/buffer metadata back into placeholders
+    # Technically, users can still construct this data from param names
+    # without relying on this metadata
+    for node in gm.graph.nodes:
+        if node.op == "placeholder":
+            if node.target in export_graph_signature.inputs_to_parameters:
+                param_name = export_graph_signature.inputs_to_parameters[node.target]
+                if param_name in params_buffers_to_node_meta:
+                    for k, v in params_buffers_to_node_meta[param_name].items():
+                        node.meta[k] = v
+            if node.target in export_graph_signature.inputs_to_buffers:
+                buffer_name = export_graph_signature.inputs_to_buffers[node.target]
+                if buffer_name in params_buffers_to_node_meta:
+                    for k, v in params_buffers_to_node_meta[buffer_name].items():
+                        node.meta[k] = v
+
+    # The unbacked symint symbols are updated in aot_export
+    # so we serialize them here instead of inside dynamo
+
+    gm.meta["inline_constraints"] = {
+        k: v
+        for k, v in dynamo_fake_mode.shape_env.var_to_range.items()
+        if free_unbacked_symbols(k)
+    }
+
+    num_lifted = next(
+        (
+            i
+            for i, s in enumerate(export_graph_signature.input_specs)
+            if s.kind == InputKind.USER_INPUT
+        ),
+        len(export_graph_signature.input_specs),
+    )
+    range_constraints = _process_constraints(
+        dynamo_fake_mode,
+        gm,
+        num_lifted,
+        flat_args,
+    )
+
+    # Do some cleanups on the graph module to restore the state dict to the
+    # expected form. Each of these steps should probably get fixed upstream.
+    # 1. Remove tensor constants that were added as buffers.
+    _rewrite_dynamo_tensor_constants(
+        orig_mod_buffers=set(mod.buffers()),
+        traced_mod_buffers=dict(gm_torch_level.named_buffers()),
+        graph_signature=ep_non_strict.sig,
+        constants=ep_non_strict.constants,
+    )
+    # 2. Restore FQN of param/buffers
+    param_buffer_table: Dict[str, str] = _get_param_buffer_mapping(mod, gm_torch_level)
+    _replace_param_buffer_names(param_buffer_table, export_graph_signature)
+
+    # 3. Remove non-persistent buffers from the graph signature
+    _rewrite_non_persistent_buffers(mod, ep_non_strict.sig, ep_non_strict.constants)
+
+    # 4. Rewrite constants to have the same FQN as the original module.
+    _remap_constants(constant_attrs, export_graph_signature, constants)
+
+    module_call_signatures = {
+        fqn: ModuleCallSignature(inputs=[], outputs=[], **specs)
+        for fqn, specs in gm_torch_level.meta["module_call_specs"].items()
+    }
+
+    if len(preserve_module_call_signature) > 0:
+        res = CollectTracepointsPass(module_call_signatures, export_graph_signature)(gm)
+        assert res is not None
+        gm = res.graph_module
+
+    assert orig_out_spec is not None
+    exported_program = ExportedProgram(
+        root=gm,
+        graph=gm.graph,
+        graph_signature=export_graph_signature,
+        state_dict=mod.state_dict(keep_vars=True),
+        range_constraints=range_constraints,
+        module_call_graph=[
+            ModuleCallEntry(
+                "",
+                ModuleCallSignature(
+                    inputs=[], outputs=[], in_spec=orig_in_spec, out_spec=orig_out_spec
+                ),
+            )
+        ]
+        + [ModuleCallEntry(fqn, sig) for fqn, sig in module_call_signatures.items()],
+        example_inputs=(args, kwargs),
+        constants=constants,
+    )
+    log.debug("Exported program from AOTAutograd:\n%s", exported_program)
+
+    if len(range_constraints) > 0:
+        exported_program = exported_program._transform_do_not_use(
+            _AddRuntimeAssertionsForInlineConstraintsPass(range_constraints)
+        )
+
+    return exported_program
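As a usage note for the dynamic_shapes argument documented above, the public entry point torch.export.export accepts the same specification. Marking the batch dimension of a single tensor input as dynamic looks roughly like this (the model definition is illustrative, not part of the patch):

import torch
from torch.export import Dim, export

class MLP(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Linear(8, 4)

    def forward(self, x):
        return self.fc(x)

batch = Dim("batch")  # symbolic size for dimension 0 of `x`
ep = export(MLP(), (torch.randn(4, 8),), dynamic_shapes={"x": {0: batch}})
print(ep.graph_signature)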
venv/lib/python3.10/site-packages/torch/export/_tree_utils.py ADDED
@@ -0,0 +1,64 @@
+from typing import Any, Callable, Dict, Optional
+
+from torch.utils._pytree import Context, TreeSpec
+
+
+def reorder_kwargs(user_kwargs: Dict[str, Any], spec: TreeSpec) -> Dict[str, Any]:
+    """Reorder user-provided kwargs to match the order in `spec`. `spec` is
+    expected to be the in_spec of an exported program, i.e. the spec that
+    results from flattening `(args, kwargs)`.
+
+    We need this to provide consistent input ordering, so that users can
+    pass in foo(a=a, b=b) OR foo(b=b, a=a) and receive the same result.
+    """
+    # Make sure that the spec is actually shaped like (args, kwargs)
+    assert spec.type is tuple
+    assert spec.num_children == 2
+    kwargs_spec = spec.children_specs[1]
+    assert kwargs_spec.type is dict
+
+    if set(user_kwargs) != set(kwargs_spec.context):
+        raise ValueError(
+            f"kwarg key mismatch: "
+            f"Got {list(user_kwargs)} but expected {kwargs_spec.context}"
+        )
+
+    reordered_kwargs = {}
+    for kw in kwargs_spec.context:
+        reordered_kwargs[kw] = user_kwargs[kw]
+
+    return reordered_kwargs
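A quick illustration of the behaviour documented above, using a spec produced by flattening (args, kwargs) directly (the values are arbitrary):

import torch.utils._pytree as pytree

# Spec recorded at export time for a call like f(1, a=10, b=20)
_, in_spec = pytree.tree_flatten(((1,), {"a": 10, "b": 20}))

# At run time the user may pass kwargs in a different order
print(reorder_kwargs({"b": 2, "a": 1}, in_spec))  # {'a': 1, 'b': 2}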
+
+
+def is_equivalent(
+    spec1: TreeSpec,
+    spec2: TreeSpec,
+    equivalence_fn: Callable[[Optional[type], Context, Optional[type], Context], bool],
+) -> bool:
+    """Customizable equivalence check for two TreeSpecs.
+
+    Arguments:
+        spec1: The first TreeSpec to compare
+        spec2: The second TreeSpec to compare
+        equivalence_fn: A function to determine the equivalence of two
+            TreeSpecs by examining their types and contexts. It will be called like:
+
+                equivalence_fn(spec1.type, spec1.context, spec2.type, spec2.context)
+
+            This function will be applied recursively to all children.
+
+    Returns:
+        True if the two TreeSpecs are equivalent, False otherwise.
+    """
+    if not equivalence_fn(spec1.type, spec1.context, spec2.type, spec2.context):
+        return False
+
+    # Recurse on children
+    if len(spec1.children_specs) != len(spec2.children_specs):
+        return False
+
+    for child_spec1, child_spec2 in zip(spec1.children_specs, spec2.children_specs):
+        if not is_equivalent(child_spec1, child_spec2, equivalence_fn):
+            return False
+
+    return True
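For example, to check that two specs share the same container structure while ignoring dict keys and other context, an equivalence_fn that only compares types can be passed in (a standalone sketch; the inputs are arbitrary):

import torch.utils._pytree as pytree

_, spec1 = pytree.tree_flatten({"a": 1, "b": (2, 3)})
_, spec2 = pytree.tree_flatten({"x": 4, "y": (5, 6)})

same_structure = is_equivalent(
    spec1, spec2, lambda t1, c1, t2, c2: t1 == t2  # ignore contexts (dict keys)
)
print(same_structure)  # True: same nesting of dict/tuple/leaf nodes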