applied-ai-018 committed on
Commit 6f419cc · verified · 1 Parent(s): e2ca9dd

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step120/zero/12.attention.dense.weight/fp32.pt +3 -0
  2. ckpts/universal/global_step120/zero/14.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step120/zero/8.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  4. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/ApplyGridUtils.cuh +47 -0
  5. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/Atomic.cuh +508 -0
  6. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAApplyUtils.cuh +537 -0
  7. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDABlas.h +375 -0
  8. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContext.h +9 -0
  9. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContextLight.h +95 -0
  10. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADataType.h +115 -0
  11. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADevice.h +23 -0
  12. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAEvent.h +208 -0
  13. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGeneratorImpl.h +138 -0
  14. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraph.h +92 -0
  15. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraphsUtils.cuh +57 -0
  16. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseDescriptors.h +290 -0
  17. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CachingHostAllocator.h +37 -0
  18. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/Exceptions.h +174 -0
  19. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/NumericLimits.cuh +121 -0
  20. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/PeerToPeerAccess.h +11 -0
  21. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/PinnedMemoryAllocator.h +11 -0
  22. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/Sleep.h +10 -0
  23. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/ThrustAllocator.h +23 -0
  24. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.cuh +413 -0
  25. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/cub_definitions.cuh +53 -0
  26. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator_impl.h +249 -0
  27. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/llvm_jit_strings.h +14 -0
  28. venv/lib/python3.10/site-packages/torch/include/ATen/mps/EmptyTensor.h +29 -0
  29. venv/lib/python3.10/site-packages/torch/include/ATen/mps/IndexKernels.h +630 -0
  30. venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSAllocator.h +401 -0
  31. venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSAllocatorInterface.h +61 -0
  32. venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSDevice.h +85 -0
  33. venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSEvent.h +100 -0
  34. venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSGeneratorImpl.h +52 -0
  35. venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSGuardImpl.h +174 -0
  36. venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSHooks.h +57 -0
  37. venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSProfiler.h +393 -0
  38. venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSStream.h +133 -0
  39. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_physical.h +39 -0
  40. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_r2c_cuda_dispatch.h +25 -0
  41. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cos_cpu_dispatch.h +24 -0
  42. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_erfc_ops.h +50 -0
  43. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_sym_constrain_range_ops.h +28 -0
  44. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_reshape.h +39 -0
  45. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_bsc_ops.h +39 -0
  46. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward_compositeexplicitautogradnonfunctional_dispatch.h +24 -0
  47. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_csc_tensor_args.h +30 -0
  48. venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool2d_ops.h +39 -0
  49. venv/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm.h +39 -0
  50. venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_depthwise3d_cuda_dispatch.h +24 -0
ckpts/universal/global_step120/zero/12.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9d1e1de7da32d031f5d674826636ab1772d88fa39dc3da0721f360a1d0f17ae8
3
+ size 16778317
ckpts/universal/global_step120/zero/14.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2ea2c853697ca65cd3f0efcbcf1140f0a964462dcd606ee35cb19f3f4de6f754
3
+ size 33555627
ckpts/universal/global_step120/zero/8.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a7fb268dfd9df89bfa25046edb21c47bc6a4852c3c2901a3fb650801b1fe63a5
3
+ size 33555612
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/ApplyGridUtils.cuh ADDED
@@ -0,0 +1,47 @@
1
+ #include <ATen/cuda/CUDAContext.h>
2
+
3
+ #include <cuda_runtime.h>
4
+
5
+ namespace at::cuda {
6
+
7
+ /**
8
+ Computes ceil(a / b)
9
+ */
10
+ template <typename T>
11
+ __host__ __device__ __forceinline__ T ATenCeilDiv(T a, T b) {
12
+ return (a + b - 1) / b;
13
+ }
14
+
15
+ namespace {
16
+
17
+ // Threads per block for our apply kernel
18
+ // FIXME: use occupancy calculator instead
19
+ constexpr uint32_t AT_APPLY_THREADS_PER_BLOCK = 512;
20
+ constexpr uint32_t AT_APPLY_BLOCKS_PER_SM = 4;
21
+
22
+ template <int step = 1>
23
+ inline bool getApplyGrid(uint64_t totalElements, dim3& grid, c10::DeviceIndex curDevice, int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) {
24
+ if (curDevice == -1) return false;
25
+ uint64_t numel_per_thread = static_cast<uint64_t>(max_threads_per_block) * static_cast<uint64_t>(step);
26
+ uint64_t numBlocks = ATenCeilDiv(totalElements, numel_per_thread);
27
+ uint64_t maxGridX = at::cuda::getDeviceProperties(curDevice)->maxGridSize[0];
28
+ if (numBlocks > maxGridX)
29
+ numBlocks = maxGridX;
30
+ grid = dim3(numBlocks);
31
+ return true;
32
+ }
33
+
34
+ constexpr int getApplyBlocksPerSM() {
35
+ return AT_APPLY_BLOCKS_PER_SM;
36
+ }
37
+
38
+ constexpr int getApplyBlockSize() {
39
+ return AT_APPLY_THREADS_PER_BLOCK;
40
+ }
41
+
42
+ inline dim3 getApplyBlock(int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) {
43
+ return dim3(max_threads_per_block);
44
+ }
45
+
46
+ } // anonymous namespace
47
+ } // namespace at::cuda
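
The header above only sizes a launch; it does not launch anything itself. Below is a minimal sketch, not part of this commit, of how these helpers are typically combined inside code that includes the header (they live in an anonymous namespace, so they are only visible to such includers). The kernel `squareKernel` and the wrapper `launchSquare` are hypothetical names used purely for illustration, and the code assumes an ATen build environment compiled with nvcc.

// Illustrative only -- not part of the commit. `squareKernel` / `launchSquare`
// are made-up names; assumes nvcc and the ATen/c10 headers below.
#include <ATen/cuda/ApplyGridUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAException.h>
#include <c10/cuda/CUDAFunctions.h>

__global__ void squareKernel(float* data, uint64_t n) {
  // Grid-stride loop: getApplyGrid() clamps the grid to maxGridSize[0],
  // so one thread may have to cover several elements.
  for (uint64_t i = static_cast<uint64_t>(blockIdx.x) * blockDim.x + threadIdx.x;
       i < n;
       i += static_cast<uint64_t>(gridDim.x) * blockDim.x) {
    data[i] *= data[i];
  }
}

void launchSquare(float* data, uint64_t n) {
  const dim3 block = at::cuda::getApplyBlock();   // 512 threads by default
  dim3 grid;
  const auto device = c10::cuda::current_device();
  if (!at::cuda::getApplyGrid(n, grid, device)) {
    return;  // invalid device, or the grid could not be computed
  }
  squareKernel<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(data, n);
  C10_CUDA_KERNEL_LAUNCH_CHECK();
}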
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/Atomic.cuh ADDED
@@ -0,0 +1,508 @@
1
+ #pragma once
2
+
3
+ #include <cuda.h>
4
+ #include <c10/util/Half.h>
5
+ #include <c10/util/BFloat16.h>
6
+
7
+ #include <ATen/NumericUtils.h>
8
+
9
+ #if !(defined(USE_ROCM) || ((defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800))))
10
+ #include <cuda_bf16.h>
11
+ #endif
12
+
13
+ template <typename T>
14
+ struct AtomicFPOp;
15
+
16
+ template <>
17
+ struct AtomicFPOp<at::Half> {
18
+ template <typename func_t>
19
+ inline __device__ at::Half operator() (at::Half *address, at::Half val, const func_t& func) {
20
+ unsigned int * address_as_ui =
21
+ (unsigned int *) ((char *)address - ((size_t)address & 2));
22
+ unsigned int old = *address_as_ui;
23
+ unsigned int assumed;
24
+
25
+ at::Half hsum;
26
+ do {
27
+ assumed = old;
28
+ hsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff);
29
+ hsum = func(hsum, val);
30
+ old = (size_t)address & 2 ? (old & 0xffff) | (hsum.x << 16) : (old & 0xffff0000) | hsum.x;
31
+ old = atomicCAS(address_as_ui, assumed, old);
32
+ } while (assumed != old);
33
+ hsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff);
34
+ return hsum;
35
+ }
36
+ };
37
+
38
+ template <>
39
+ struct AtomicFPOp<at::BFloat16> {
40
+ template <typename func_t>
41
+ inline __device__ at::BFloat16 operator() (at::BFloat16 *address, at::BFloat16 val, const func_t& func) {
42
+ unsigned int * address_as_ui =
43
+ (unsigned int *) ((char *)address - ((size_t)address & 2));
44
+ unsigned int old = *address_as_ui;
45
+ unsigned int assumed;
46
+
47
+ at::BFloat16 bsum;
48
+ do {
49
+ assumed = old;
50
+ bsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff);
51
+ bsum = func(bsum, val);
52
+ old = (size_t)address & 2 ? (old & 0xffff) | (bsum.x << 16) : (old & 0xffff0000) | bsum.x;
53
+ old = atomicCAS(address_as_ui, assumed, old);
54
+ } while (assumed != old);
55
+ bsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff);
56
+ return bsum.x;
57
+ }
58
+ };
59
+
60
+ template <>
61
+ struct AtomicFPOp<double> {
62
+ template <typename func_t>
63
+ inline __device__ double operator() (double * address, double val, const func_t& func) {
64
+ unsigned long long int* address_as_ull = (unsigned long long int*)address;
65
+ unsigned long long int old = *address_as_ull;
66
+ unsigned long long int assumed;
67
+
68
+ do {
69
+ assumed = old;
70
+ old = atomicCAS(address_as_ull, assumed, func(val, assumed));
71
+ // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
72
+ } while (assumed != old);
73
+
74
+ return __longlong_as_double(old);
75
+ }
76
+ };
77
+
78
+ #define ATOMIC_INTEGER_IMPL(NAME) \
79
+ template <typename T, size_t n> \
80
+ struct Atomic##NAME##IntegerImpl; \
81
+ \
82
+ template<typename T> \
83
+ struct Atomic##NAME##IntegerImpl<T, 1> { \
84
+ template <typename func_t> \
85
+ inline __device__ void operator()(T *address, T val, const func_t& func) { \
86
+ size_t offset = (size_t)address & 3; \
87
+ uint32_t * address_as_ui = (uint32_t *)((char *)address - offset); \
88
+ uint32_t old = *address_as_ui; \
89
+ uint32_t shift = offset * 8; \
90
+ uint32_t old_byte; \
91
+ uint32_t newval; \
92
+ uint32_t assumed; \
93
+ \
94
+ do { \
95
+ assumed = old; \
96
+ old_byte = (old >> shift) & 0xff; \
97
+ newval = static_cast<uint8_t>(func(val, static_cast<T>(old_byte))); \
98
+ newval = (old & ~(0x000000ff << shift)) | (newval << shift); \
99
+ old = atomicCAS(address_as_ui, assumed, newval); \
100
+ } while (assumed != old); \
101
+ } \
102
+ }; \
103
+ \
104
+ template<typename T> \
105
+ struct Atomic##NAME##IntegerImpl<T, 2> { \
106
+ template <typename func_t> \
107
+ inline __device__ void operator()(T *address, T val, const func_t& func) { \
108
+ size_t offset = (size_t)address & 2; \
109
+ uint32_t * address_as_ui = (uint32_t *)((char *)address - offset); \
110
+ bool is_32_align = offset; \
111
+ uint32_t old = *address_as_ui; \
112
+ uint32_t old_bytes; \
113
+ uint32_t newval; \
114
+ uint32_t assumed; \
115
+ \
116
+ do { \
117
+ assumed = old; \
118
+ old_bytes = is_32_align ? old >> 16 : old & 0xffff; \
119
+ newval = static_cast<uint16_t>(func(val, static_cast<T>(old_bytes))); \
120
+ newval = is_32_align ? (old & 0xffff) | (newval << 16) : (old & 0xffff0000) | newval; \
121
+ old = atomicCAS(address_as_ui, assumed, newval); \
122
+ } while (assumed != old); \
123
+ } \
124
+ }; \
125
+ \
126
+ template<typename T> \
127
+ struct Atomic##NAME##IntegerImpl<T, 4> { \
128
+ template <typename func_t> \
129
+ inline __device__ void operator()(T *address, T val, const func_t& func) { \
130
+ uint32_t * address_as_ui = (uint32_t *) (address); \
131
+ uint32_t old = *address_as_ui; \
132
+ uint32_t newval; \
133
+ uint32_t assumed; \
134
+ \
135
+ do { \
136
+ assumed = old; \
137
+ newval = static_cast<uint32_t>(func(val, static_cast<T>(old))); \
138
+ old = atomicCAS(address_as_ui, assumed, newval); \
139
+ } while (assumed != old); \
140
+ } \
141
+ }; \
142
+ \
143
+ template<typename T> \
144
+ struct Atomic##NAME##IntegerImpl<T, 8> { \
145
+ template <typename func_t> \
146
+ inline __device__ void operator()(T *address, T val, const func_t& func) { \
147
+ unsigned long long * address_as_ui = (unsigned long long *) (address); \
148
+ unsigned long long old = *address_as_ui; \
149
+ unsigned long long newval; \
150
+ unsigned long long assumed; \
151
+ \
152
+ do { \
153
+ assumed = old; \
154
+ newval = static_cast<uint64_t>(func(val, static_cast<T>(old))); \
155
+ old = atomicCAS(address_as_ui, assumed, newval); \
156
+ } while (assumed != old); \
157
+ } \
158
+ };
159
+
160
+
161
+ # define GPU_ATOMIC_INTEGER(NAME, OP, DTYPE) \
162
+ static inline __device__ void gpuAtomic##NAME(DTYPE *address, DTYPE val) { \
163
+ Atomic##NAME##IntegerImpl<DTYPE, sizeof(DTYPE)>()(address, \
164
+ val, \
165
+ [](DTYPE a, DTYPE b) { \
166
+ return OP; \
167
+ }); \
168
+ } \
169
+
170
+ ATOMIC_INTEGER_IMPL(Add)
171
+ GPU_ATOMIC_INTEGER(Add, a || b, bool)
172
+
173
+ // Don't instantiate gpuAtomicAdd with the macro as it seems non-standard (see int32, int64)
174
+ static inline __device__ void gpuAtomicAdd(uint8_t *address, uint8_t val) {
175
+ AtomicAddIntegerImpl<uint8_t, sizeof(uint8_t)>()(address,
176
+ val,
177
+ [](uint8_t a, uint8_t b) {
178
+ return a + b;
179
+ });
180
+ }
181
+
182
+ static inline __device__ void gpuAtomicAdd(int8_t *address, int8_t val) {
183
+ AtomicAddIntegerImpl<int8_t, sizeof(int8_t)>()(address,
184
+ val,
185
+ [](int8_t a, int8_t b) {
186
+ return a + b;
187
+ });
188
+ }
189
+
190
+ static inline __device__ void gpuAtomicAdd(int16_t *address, int16_t val) {
191
+ AtomicAddIntegerImpl<int16_t, sizeof(int16_t)>()(address,
192
+ val,
193
+ [](int16_t a, int16_t b) {
194
+ return a + b;
195
+ });
196
+ }
197
+
198
+ static inline __device__ int32_t gpuAtomicAdd(int32_t *address, int32_t val) {
199
+ return atomicAdd(address, val);
200
+ }
201
+
202
+ static inline __device__ void gpuAtomicAdd(int64_t *address, int64_t val) {
203
+ #if defined(USE_ROCM)
204
+ __atomic_fetch_add(address, val, __ATOMIC_RELAXED);
205
+ #else
206
+ static_assert(sizeof(unsigned long long int) == sizeof(int64_t), "bitwidth change is not allowed");
207
+ atomicAdd(reinterpret_cast<unsigned long long int *>(address), static_cast<unsigned long long int>(val));
208
+ #endif
209
+ }
210
+
211
+ static inline __device__ at::Half gpuAtomicAdd(at::Half *address, at::Half val) {
212
+ #if defined(USE_ROCM) || ((defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 700)))
213
+ return AtomicFPOp<at::Half>()(address, val,
214
+ [](at::Half hsum, at::Half val) {
215
+ return hsum + val;
216
+ });
217
+ #else
218
+ return atomicAdd(reinterpret_cast<__half*>(address), val);
219
+ #endif
220
+ }
221
+
222
+ static inline __device__ at::BFloat16 gpuAtomicAdd(at::BFloat16 *address, at::BFloat16 val) {
223
+ #if defined(USE_ROCM) || ((defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800)))
224
+ return AtomicFPOp<at::BFloat16>()(address, val,
225
+ [](at::BFloat16 bsum, at::BFloat16 val) {
226
+ return bsum + val;
227
+ });
228
+ #else
229
+ __nv_bfloat16 r = atomicAdd(reinterpret_cast<__nv_bfloat16*>(address), *reinterpret_cast<__nv_bfloat16*>(&val));
230
+ return *reinterpret_cast<c10::BFloat16*>(&r);
231
+ #endif
232
+ }
233
+
234
+ #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 600)
235
+ // from CUDA C Programming Guide
236
+ static inline __device__ double atomicAdd(double* address, double val)
237
+ #if defined(__clang__) && defined(__CUDA__)
238
+ #pragma GCC diagnostic push
239
+ #pragma GCC diagnostic ignored "-Wgcc-compat"
240
+ __attribute__((enable_if(true, "")))
241
+ #pragma GCC diagnostic pop
242
+ #endif
243
+ {
244
+
245
+ return AtomicFPOp<double>()(address, val,
246
+ [](double val, unsigned long long int assumed) {
247
+ return __double_as_longlong(val + __longlong_as_double(assumed));
248
+ });
249
+ }
250
+ #elif defined(USE_ROCM) || !(defined(__CUDA_ARCH__))
251
+
252
+ /* Note [hip-clang differences to hcc]
253
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
254
+ * The upcoming hip-clang compiler for ROCm differs from hcc in a few details.
255
+ * It exports the __HIP__ macro, we can hence differentiate between hcc and
256
+ * hip-clang. In the below, hcc only received support for atomicAdd with double
257
+ * typing after work week 18312. hip-clang had support from the first version.
258
+ * In general, the code-visible differences between hip-clang and hcc will be
259
+ * minimal.
260
+ */
261
+
262
+ #if defined(USE_ROCM) && __hcc_workweek__ < 18312 && !__HIP__
263
+ // This needs to be defined for the host side pass
264
+ static inline __device__ double atomicAdd(double *address, double val) { }
265
+ #endif
266
+ #endif
267
+
268
+ static inline __device__ double gpuAtomicAdd(double *address, double val) {
269
+ return atomicAdd(address, val);
270
+ }
271
+
272
+ static inline __device__ float gpuAtomicAdd(float *address, float val) {
273
+ return atomicAdd(address, val);
274
+ }
275
+
276
+ template<typename T>
277
+ static inline __device__ void gpuAtomicAdd(c10::complex<T> *address, c10::complex<T> val) {
278
+ gpuAtomicAdd(&address->real_, val.real_);
279
+ gpuAtomicAdd(&address->imag_, val.imag_);
280
+ }
281
+
282
+ /* Note [gpuAtomicAdd vs atomicAdd]
283
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
284
+ * Some extensions such as torchvision call atomicAdd()
285
+ * directly and require non-library provided data type support. Only for these, we
286
+ * continue to provide atomicAdd overloads.
287
+ */
288
+ static inline __device__ at::Half atomicAdd(at::Half *address, at::Half val) {
289
+ return gpuAtomicAdd(address, val);
290
+ }
291
+
292
+ static inline __device__ at::BFloat16 atomicAdd(at::BFloat16 *address, at::BFloat16 val) {
293
+ return gpuAtomicAdd(address, val);
294
+ }
295
+
296
+ static inline __device__ void atomicAdd(uint8_t *address, uint8_t val) {
297
+ gpuAtomicAdd(address, val);
298
+ }
299
+
300
+ static inline __device__ void atomicAdd(int8_t *address, int8_t val) {
301
+ gpuAtomicAdd(address, val);
302
+ }
303
+
304
+ static inline __device__ void atomicAdd(int16_t *address, int16_t val) {
305
+ gpuAtomicAdd(address, val);
306
+ }
307
+
308
+ static inline __device__ void atomicAdd(int64_t *address, int64_t val) {
309
+ gpuAtomicAdd(address, val);
310
+ }
311
+
312
+ static inline __device__ void atomicAdd(bool *address, bool val) {
313
+ gpuAtomicAdd(address, val);
314
+ }
315
+
316
+ /* Note [explicitly non-returning atomics]
317
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
318
+ * AMD's MI100 (gfx908) provides an optimized fp32 atomicAdd, exposed via atomicAddNoRet().
319
+ * Due to compiler limitations, callers must opt-in to guarantee the optimized instruction.
320
+ * This non-returning atomicAddNoRet cannot be used to implement the returning atomicAdd,
321
+ * therefore we need a new API 'gpuAtomicAddNoReturn'.
322
+ */
323
+ template<typename T>
324
+ static inline __device__ void gpuAtomicAddNoReturn(c10::complex<T> *address, c10::complex<T> val) { gpuAtomicAdd(address, val); }
325
+ static inline __device__ void gpuAtomicAddNoReturn(uint8_t *address, uint8_t val) { gpuAtomicAdd(address, val); }
326
+ static inline __device__ void gpuAtomicAddNoReturn(int8_t *address, int8_t val) { gpuAtomicAdd(address, val); }
327
+ static inline __device__ void gpuAtomicAddNoReturn(int16_t *address, int16_t val) { gpuAtomicAdd(address, val); }
328
+ static inline __device__ void gpuAtomicAddNoReturn(int32_t *address, int32_t val) { gpuAtomicAdd(address, val); }
329
+ static inline __device__ void gpuAtomicAddNoReturn(int64_t *address, int64_t val) { gpuAtomicAdd(address, val); }
330
+ static inline __device__ void gpuAtomicAddNoReturn(bool *address, bool val) { gpuAtomicAdd(address, val); }
331
+ static inline __device__ void gpuAtomicAddNoReturn(at::Half *address, at::Half val) { gpuAtomicAdd(address, val); }
332
+ static inline __device__ void gpuAtomicAddNoReturn(at::BFloat16 *address, at::BFloat16 val) { gpuAtomicAdd(address, val); }
333
+ static inline __device__ void gpuAtomicAddNoReturn(double *address, double val) { gpuAtomicAdd(address, val); }
334
+
335
+ /* Special case fp32 atomic. */
336
+ #if defined(USE_ROCM)
337
+ static inline __device__ void gpuAtomicAddNoReturn(float *address, float val) { atomicAddNoRet(address, val); }
338
+ #else
339
+ static inline __device__ void gpuAtomicAddNoReturn(float *address, float val) { gpuAtomicAdd(address, val); }
340
+ #endif
341
+
342
+ // Atomic multiplication implementation.
343
+
344
+ ATOMIC_INTEGER_IMPL(Mul)
345
+ GPU_ATOMIC_INTEGER(Mul, a * b, uint8_t)
346
+ GPU_ATOMIC_INTEGER(Mul, a * b, int8_t)
347
+ GPU_ATOMIC_INTEGER(Mul, a * b, int16_t)
348
+ GPU_ATOMIC_INTEGER(Mul, a * b, int32_t)
349
+ GPU_ATOMIC_INTEGER(Mul, a * b, int64_t)
350
+
351
+ inline __device__ at::Half gpuAtomicMul(at::Half * address, at::Half val) {
352
+ return AtomicFPOp<at::Half>()(address, val,
353
+ [](at::Half bsum, at::Half val) {
354
+ return bsum * val;
355
+ });
356
+ }
357
+
358
+ inline __device__ at::BFloat16 gpuAtomicMul(at::BFloat16 * address, at::BFloat16 val) {
359
+ return AtomicFPOp<at::BFloat16>()(address, val,
360
+ [](at::BFloat16 bsum, at::BFloat16 val) {
361
+ return bsum * val;
362
+ });
363
+ }
364
+
365
+ inline __device__ double gpuAtomicMul(double * address, double val) {
366
+ return AtomicFPOp<double>()(address, val,
367
+ [](double val, unsigned long long int assumed) {
368
+ return __double_as_longlong(val * __longlong_as_double(assumed));
369
+ });
370
+ }
371
+
372
+ // Don't use a templated function for this since the addition function defaults to the CUDA built-in.
373
+ inline __device__ float gpuAtomicMul (float * address, float val) {
374
+ unsigned int* address_as_ull = (unsigned int*)address;
375
+ unsigned int old = *address_as_ull;
376
+ unsigned int assumed;
377
+
378
+ do {
379
+ assumed = old;
380
+ old = atomicCAS(address_as_ull, assumed,
381
+ __float_as_int(val *
382
+ __int_as_float(assumed)));
383
+
384
+ // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
385
+ } while (assumed != old);
386
+
387
+ return __int_as_float(old);
388
+ }
389
+
390
+ // Atomic maximum implementation.
391
+
392
+ template <typename T>
393
+ __host__ __device__ T safe_max(T a, T b) {
394
+ #if defined(__HIPCC__)
395
+ // TODO: remove this special case for HIP when issue is fixed:
396
+ // https://github.com/ROCm-Developer-Tools/HIP/issues/2209
397
+ T max = at::_isnan(a) ? a : (at::_isnan(b) ? b : std::max<T>(a, b));
398
+ #else
399
+ T max = at::_isnan(b) ? b : std::max<T>(a, b);
400
+ #endif
401
+
402
+ return max;
403
+ }
404
+
405
+ ATOMIC_INTEGER_IMPL(Max)
406
+ GPU_ATOMIC_INTEGER(Max, safe_max(a, b), uint8_t)
407
+ GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int8_t)
408
+ GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int16_t)
409
+ GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int32_t)
410
+ GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int64_t)
411
+
412
+ inline __device__ at::Half gpuAtomicMax(at::Half * address, at::Half val) {
413
+ return AtomicFPOp<at::Half>()(address, val,
414
+ [](at::Half bsum, at::Half val) {
415
+ return safe_max(bsum, val);
416
+ });
417
+ }
418
+
419
+ inline __device__ at::BFloat16 gpuAtomicMax(at::BFloat16 * address, at::BFloat16 val) {
420
+ return AtomicFPOp<at::BFloat16>()(address, val,
421
+ [](at::BFloat16 bsum, at::BFloat16 val) {
422
+ return safe_max(bsum, val);
423
+ });
424
+ }
425
+
426
+ inline __device__ double gpuAtomicMax(double * address, double val) {
427
+ return AtomicFPOp<double>()(address, val,
428
+ [](double val, unsigned long long int assumed) {
429
+ return __double_as_longlong(safe_max(val, __longlong_as_double(assumed)));
430
+ });
431
+ }
432
+
433
+ // Don't use a templated function for this since the addition function defaults to the CUDA built-in.
434
+ inline __device__ float gpuAtomicMax(float * address, float val) {
435
+ unsigned int* address_as_ull = (unsigned int*)address;
436
+ unsigned int old = *address_as_ull;
437
+ unsigned int assumed;
438
+
439
+ do {
440
+ assumed = old;
441
+ old = atomicCAS(address_as_ull, assumed,
442
+ __float_as_int(safe_max(val, __int_as_float(assumed))));
443
+
444
+ // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
445
+ } while (assumed != old);
446
+
447
+ return __int_as_float(old);
448
+ }
449
+
450
+ // Atomic minimum implementation.
451
+
452
+ template <typename T>
453
+ __host__ __device__ T safe_min(T a, T b) {
454
+ #if defined(__HIPCC__)
455
+ // TODO: remove this special case for HIP when issue is fixed:
456
+ // https://github.com/ROCm-Developer-Tools/HIP/issues/2209
457
+ T min = at::_isnan(a) ? a : (at::_isnan(b) ? b : std::min<T>(a, b));
458
+ #else
459
+ T min = at::_isnan(b) ? b : std::min<T>(a, b);
460
+ #endif
461
+
462
+ return min;
463
+ }
464
+
465
+ ATOMIC_INTEGER_IMPL(Min)
466
+ GPU_ATOMIC_INTEGER(Min, safe_min(a, b), uint8_t)
467
+ GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int8_t)
468
+ GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int16_t)
469
+ GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int32_t)
470
+ GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int64_t)
471
+
472
+ inline __device__ at::Half gpuAtomicMin(at::Half * address, at::Half val) {
473
+ return AtomicFPOp<at::Half>()(address, val,
474
+ [](at::Half bsum, at::Half val) {
475
+ return safe_min(bsum, val);
476
+ });
477
+ }
478
+
479
+ inline __device__ at::BFloat16 gpuAtomicMin(at::BFloat16 * address, at::BFloat16 val) {
480
+ return AtomicFPOp<at::BFloat16>()(address, val,
481
+ [](at::BFloat16 bsum, at::BFloat16 val) {
482
+ return safe_min(bsum, val);
483
+ });
484
+ }
485
+
486
+ inline __device__ double gpuAtomicMin(double * address, double val) {
487
+ return AtomicFPOp<double>()(address, val,
488
+ [](double val, unsigned long long int assumed) {
489
+ return __double_as_longlong(safe_min(val, __longlong_as_double(assumed)));
490
+ });
491
+ }
492
+
493
+ // Don't use a templated function for this since the addition function defaults to the CUDA built-in.
494
+ inline __device__ float gpuAtomicMin(float * address, float val) {
495
+ unsigned int* address_as_ull = (unsigned int*)address;
496
+ unsigned int old = *address_as_ull;
497
+ unsigned int assumed;
498
+
499
+ do {
500
+ assumed = old;
501
+ old = atomicCAS(address_as_ull, assumed,
502
+ __float_as_int(safe_min(val, __int_as_float(assumed))));
503
+
504
+ // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
505
+ } while (assumed != old);
506
+
507
+ return __int_as_float(old);
508
+ }
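
This header supplies `gpuAtomicAdd` / `gpuAtomicAddNoReturn` overloads for types and architectures that plain `atomicAdd` does not cover. A minimal usage sketch follows; it is not part of the commit, and `histogramKernel` is a hypothetical kernel name used only to illustrate the call.

// Illustrative only -- not part of the commit. `histogramKernel` is a made-up
// example kernel; assumes nvcc and an ATen build environment.
#include <ATen/cuda/Atomic.cuh>

__global__ void histogramKernel(const float* samples, int n,
                                float* bins, int num_bins) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  // Map a sample in [0, 1) to a bin index, clamped to the valid range.
  int b = static_cast<int>(samples[i] * num_bins);
  b = ::min(::max(b, 0), num_bins - 1);
  // The old value is never read, so the explicitly non-returning variant is
  // used; on ROCm the fp32 overload lowers to atomicAddNoRet (see
  // Note [explicitly non-returning atomics] above).
  gpuAtomicAddNoReturn(&bins[b], 1.0f);
}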
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAApplyUtils.cuh ADDED
@@ -0,0 +1,537 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cuda/ApplyGridUtils.cuh>
4
+ #include <ATen/cuda/detail/IndexUtils.cuh>
5
+ #include <ATen/core/TensorBase.h>
6
+ #include <ATen/ceil_div.h>
7
+ #include <ATen/cuda/Atomic.cuh>
8
+ #include <ATen/cuda/CUDAContext.h>
9
+ #include <c10/macros/Macros.h>
10
+ #include <ATen/native/Copy.h>
11
+
12
+ #include <math.h>
13
+
14
+ //
15
+ // This file contains pointwise operation functions and kernels that
16
+ // work on both contiguous and non-contiguous tensor arguments of
17
+ // arbitrary (up to MAX_CUTORCH_DIMS) dimensioned arguments without
18
+ // copying or temporary storage.
19
+ //
20
+
21
+ /*
22
+ NOTE [ CUDA_tensor_applyN helpers ]
23
+
24
+ The following CUDA_tensor_applyN (where N currently can be 1, 2, 3, or 4)
25
+ functions apply a pointwise operator to N tensor(s).
26
+
27
+ The calling convention is
28
+
29
+ 1. The template arguments should be, sequentially,
30
+ - First N typename args specify the scalar types of each of the N tensors.
31
+ - (Optional) `int step` arg specifies the number of elements processed
32
+ together at the same time.
33
+ Default is 1.
34
+ - A usually omitted (i.e., inferred) typename arg specifies the type of the
35
+ function/functor applied on `N * step` values in each iteration of each
36
+ CUDA thread.
37
+ 2. The arguments should be, sequentially,
38
+ - N tensors
39
+ - op: a function/functor that processes `N * step` values at the same time.
40
+ - If `step == 1`, it must have signature
41
+ `void(*)(scalar1_t&, scalar2_t&, ..., scalarN_t&)`, where
42
+ `scalar*_t`s are the first N typename template args, and the inputs
43
+ are the `N` values from the `N` tensors retrieved at a common index.
44
+ - Otherwise, it must have signature
45
+ void(*)(int n, scalar1_t&, scalar1_t&, ..., scalar1_t&, // repeat `step` times
46
+ scalar2_t&, scalar2_t&, ..., scalar2_t&, // repeat `step` times
47
+ ...,
48
+ scalarN_t&, scalarN_t&, ..., scalarN_t&) // repeat `step` times
49
+ Different from `step == 1` case, it processes `N * step` values taken
50
+ from `step` common indices. Moreover, the first input `n` represents the
51
+ number of valid indices (it will always have `0 < n <= step`). It will
52
+ almost always be `step`, but at the boundary we may not have full `step`
53
+ elements and `n` can be a lesser value.
54
+
55
+ E.g., if `step == 4` and `N == 2`, `op` could be
56
+
57
+ [](int n, scalar1_t &u1, scalar1_t &u2, scalar1_t &u3, scalar1_t &u4,
58
+ scalar2_t &v1, scalar2_t &v2, scalar2_t &v3, scalar2_t &v4) {
59
+ // Only process u1, ..., un and v1, ..., vn.
60
+ // So if `n == 3`, `u4` and `v4` need not be considered.
61
+ }
62
+
63
+ In both cases, the references can actually be const, but at least one of
64
+ them should be non-const in order to write the output.
65
+ - (Optional, but recommended) N TensorArgType args that specify for each
66
+ tensor whether `op` reads AND writes (i.e., TensorArgType::ReadWrite),
67
+ or only reads (i.e., TensorArgType::ReadOnly).
68
+ Default is TensorArgType::ReadWrite for first Tensor, and
69
+ TensorArgType::ReadOnly for the rest.
70
+
71
+ E.g.,
72
+
73
+ to compute a = b^2 for a and b of same dtype, we can call
74
+
75
+ CUDA_tensor_apply2<scalar, scalar>(
76
+ a, b,
77
+ [] __device__ (scalar &a_val, const scalar &b_val) { a_val = b_val * b_val; }
78
+ );
79
+
80
+ to work on 2 values at the same time, we can call
81
+
82
+ CUDA_tensor_apply2<scalar1, scalar2, 2>(
83
+ a, b,
84
+ [] __device__ (int n, scalar1 &a_val1, scalar1 &a_val2,
85
+ const scalar2 &b_val1, const scalar2 &b_val2) {
86
+ // call special vectorized op here, or just do elementwise and enjoy unrolling...
87
+ // if n == 1, only process a_val1 and b_val1
88
+ }
89
+ );
90
+ */
91
+
92
+ namespace at::cuda {
93
+
94
+ // TODO: combine with TensorArg? So far that's been for debugging, and this is functional...
95
+ enum class TensorArgType { ReadWrite, ReadOnly };
96
+
97
+ namespace {
98
+
99
+ // Rearrange dimensions for pointwise operations so that strides are in
100
+ // decreasing order as much as possible, so that kernels have better memory
101
+ // access patterns.
102
+ //
103
+ // For example, consider a binary operation on two "transposed" 2-dim tensors:
104
+ // sizes: 256 512
105
+ // aInfo->strides: 1 256
106
+ // bInfo->strides: 1 256
107
+ //
108
+ // Given this, each concurrent memory access inside kernelPointwiseApply2() is
109
+ // exactly 256 elements apart, resulting in poor performance.
110
+ //
111
+ // This function exchanges dimensions so that memory access is contiguous:
112
+ // sizes: 512 256
113
+ // aInfo->strides: 256 1
114
+ // bInfo->strides: 256 1
115
+ //
116
+ // (Actually, it becomes even better because now collapseDims() can turn each
117
+ // input into one contiguous array.)
118
+ //
119
+ // In general, given M (<=4) TensorInfo's with N dimensions, we can view each
120
+ // strides[i] (0 <= i < N) as an M-tuple. Given each pair i < j, we exchange
121
+ // strides[i] and [j] if
122
+ // (1) strides[i][k] < strides[j][k] for some k (0 <= k < M)
123
+ // (exchanging them will benefit input #k), and
124
+ // (2) strides[i][k] <= strides[j][k] for all k
125
+ // (exchanging them will not make any input worse).
126
+ template <typename T1, typename IndexType,
127
+ typename T2 = void, typename T3 = void, typename T4 = void>
128
+ inline void rearrangeDims(detail::TensorInfo<T1, IndexType>* aInfo,
129
+ detail::TensorInfo<T2, IndexType>* bInfo = nullptr,
130
+ detail::TensorInfo<T3, IndexType>* cInfo = nullptr,
131
+ detail::TensorInfo<T4, IndexType>* dInfo = nullptr) {
132
+ int numInfos = 1;
133
+ int dims = aInfo->dims;
134
+ IndexType *sizes[4] = { aInfo->sizes, };
135
+ IndexType *strides[4] = { aInfo->strides, };
136
+
137
+ if (bInfo != nullptr) {
138
+ ++numInfos;
139
+ if (bInfo->dims != dims) return;
140
+ sizes[1] = bInfo->sizes;
141
+ strides[1] = bInfo->strides;
142
+ }
143
+
144
+ if (cInfo != nullptr) {
145
+ ++numInfos;
146
+ if (cInfo->dims != dims) return;
147
+ sizes[2] = cInfo->sizes;
148
+ strides[2] = cInfo->strides;
149
+ }
150
+
151
+ if (dInfo != nullptr) {
152
+ ++numInfos;
153
+ if (dInfo->dims != dims) return;
154
+ sizes[3] = dInfo->sizes;
155
+ strides[3] = dInfo->strides;
156
+ }
157
+
158
+ // Bail out if sizes do not match: we are using "deprecated pointwise
159
+ // behavior" among tensors of different shapes but same number of elements.
160
+ for (int i = 1; i < numInfos; ++i) {
161
+ for (int j = 0; j < dims; ++j) {
162
+ if (sizes[i][j] != sizes[0][j]) return;
163
+ }
164
+ }
165
+
166
+ for (int i = 0; i < dims - 1; ++i) {
167
+ // No need to consider dimensions of size 1.
168
+ if (sizes[0][i] == 1) continue;
169
+
170
+ for (int j = i + 1; j < dims; ++j) {
171
+ if (sizes[0][j] == 1) continue;
172
+
173
+ // Compare the relative sizes of strides between dim #i and dim #j.
174
+ bool hasIncreasingStrides = false;
175
+ bool hasDecreasingStrides = false;
176
+
177
+ for (int k = 0; k < numInfos; k++) {
178
+ IndexType stride_i = strides[k][i];
179
+ IndexType stride_j = strides[k][j];
180
+ if (stride_i < stride_j) {
181
+ hasIncreasingStrides = true;
182
+ } else if (stride_i > stride_j) {
183
+ hasDecreasingStrides = true;
184
+ }
185
+ }
186
+
187
+ if (hasIncreasingStrides && !hasDecreasingStrides) {
188
+ for (int k = 0; k < numInfos; k++) {
189
+ IndexType size = sizes[k][i];
190
+ sizes[k][i] = sizes[k][j];
191
+ sizes[k][j] = size;
192
+
193
+ IndexType stride = strides[k][i];
194
+ strides[k][i] = strides[k][j];
195
+ strides[k][j] = stride;
196
+ }
197
+ }
198
+ }
199
+ }
200
+ }
201
+
202
+ // The `remaining_steps` argument is used to support Op that operates on
203
+ // multiple elements at the same time. Generally, the strategy of ApplyOpN is to
204
+ // 1. Initialize `remaining_steps = step`, where `step` is the template arg of
205
+ // CUDA_tensor_applyN helpers. The input arg `n` to `apply()` represents the
206
+ // number of elements in bound for this call. It will almost always equal to
207
+ // `step` except at boundaries.
208
+ // 2. If `remaining_steps > 0` convert the current linearIndex to offset (if in
209
+ // bound), and recursively call `ApplyOpN` with `remaining_steps - 1`.
210
+ // 3. At `remaining_steps = 0`,
211
+ // if `step = 1`, call `op(tensor1_val, tensor2_val, ...)`;
212
+ if `step > 1`, call `op(n, tensor1_val1, tensor1_val2, ..., tensor1_valstep,
213
+ tensor2_val1, tensor2_val2, ..., tensor2_valstep,
214
+ // ...
215
+ tensorN_val1, tensorN_val2, ..., tensorN_valstep);`
216
+ //
217
+ // See NOTE [ CUDA_tensor_applyN helpers ] above for how Op may look like.
218
+
219
+ template <typename Op,
220
+ typename scalar,
221
+ typename IndexType,
222
+ int ADims,
223
+ int remaining_steps,
224
+ typename... Offsets>
225
+ struct ApplyOp1 {
226
+ __device__ __forceinline__
227
+ static void apply(detail::TensorInfo<scalar, IndexType> &a, const Op &op, int n,
228
+ IndexType linearIndex, Offsets... aOffsets) {
229
+ // Convert `linearIndex` into an offset of `a`
230
+ const IndexType aOffset = sizeof...(Offsets) < n ?
231
+ detail::IndexToOffset<scalar, IndexType, ADims>::get(linearIndex, a) : 0;
232
+
233
+ ApplyOp1<Op, scalar, IndexType, ADims, remaining_steps - 1, const IndexType, Offsets...>::apply(
234
+ a, op, n, linearIndex + 1, aOffsets..., aOffset
235
+ );
236
+ }
237
+ };
238
+
239
+ // Specialize `step=1` case (i.e., `remaining_steps=0` and `len(Offsets)=1`).
240
+ // We don't need to pass in how many elements need to processed in this case.
241
+ template <typename Op,
242
+ typename scalar,
243
+ typename IndexType,
244
+ int ADims,
245
+ typename Offset>
246
+ struct ApplyOp1<Op, scalar, IndexType, ADims, 0, Offset> {
247
+ __device__ __forceinline__
248
+ static void apply(detail::TensorInfo<scalar, IndexType> &a, const Op &op,
249
+ int n, IndexType linearIndex, Offset offset) {
250
+ op(a.data[offset]);
251
+ }
252
+ };
253
+
254
+ template <typename Op,
255
+ typename scalar,
256
+ typename IndexType,
257
+ int ADims,
258
+ typename... Offsets>
259
+ struct ApplyOp1<Op, scalar, IndexType, ADims, 0, Offsets...> {
260
+ __device__ __forceinline__
261
+ static void apply(detail::TensorInfo<scalar, IndexType> &a, const Op &op, int n,
262
+ IndexType linearIndex, Offsets... offsets) {
263
+ op(n, a.data[offsets]...);
264
+ }
265
+ };
266
+
267
+ template <typename Op,
268
+ typename scalar,
269
+ typename IndexType,
270
+ int ADims,
271
+ int step>
272
+ #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
273
+ C10_LAUNCH_BOUNDS_2(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
274
+ #endif
275
+ __global__ void kernelPointwiseApply1(detail::TensorInfo<scalar, IndexType> a,
276
+ IndexType totalElements, const Op op) {
277
+ for (IndexType linearIndex = (blockIdx.x * blockDim.x + threadIdx.x) * step;
278
+ linearIndex < totalElements;
279
+ linearIndex += gridDim.x * blockDim.x * step) {
280
+ ApplyOp1<Op, scalar, IndexType, ADims, step>::apply(
281
+ a, op, ::min(step, static_cast<int>(totalElements - linearIndex)), linearIndex);
282
+ }
283
+ }
284
+
285
+
286
+ template <typename Op,
287
+ typename scalar1,
288
+ typename scalar2,
289
+ typename IndexType,
290
+ int ADims,
291
+ int BDims,
292
+ int remaining_steps,
293
+ typename... Offsets>
294
+ struct ApplyOp2 {
295
+ __device__ __forceinline__
296
+ static void apply(detail::TensorInfo<scalar1, IndexType> &a,
297
+ detail::TensorInfo<scalar2, IndexType> &b,
298
+ const Op &op, int64_t n, IndexType linearIndex,
299
+ Offsets... aOffsets, Offsets... bOffsets) {
300
+ // Convert `linearIndex` into an offset of `a`
301
+ const IndexType aOffset = static_cast<int64_t>(sizeof...(Offsets)) < n ?
302
+ detail::IndexToOffset<scalar1, IndexType, ADims>::get(linearIndex, a) : 0;
303
+
304
+ // Convert `linearIndex` into an offset of `b`
305
+ const IndexType bOffset = static_cast<int64_t>(sizeof...(Offsets)) < n ?
306
+ detail::IndexToOffset<scalar2, IndexType, BDims>::get(linearIndex, b) : 0;
307
+
308
+ ApplyOp2<Op, scalar1, scalar2, IndexType, ADims, BDims, remaining_steps - 1, const IndexType, Offsets...>::apply(
309
+ a, b, op, n, linearIndex + 1, aOffsets..., aOffset, bOffsets..., bOffset
310
+ );
311
+ }
312
+ };
313
+
314
+ // Specialize `step=1` case (i.e., `remaining_steps=0` and `len(Offsets)=1`).
315
+ // We don't need to pass in how many elements need to processed in this case.
316
+ template <typename Op,
317
+ typename scalar1,
318
+ typename scalar2,
319
+ typename IndexType,
320
+ int ADims,
321
+ int BDims,
322
+ typename Offset>
323
+ struct ApplyOp2<Op, scalar1, scalar2, IndexType, ADims, BDims, 0, Offset> {
324
+ __device__ __forceinline__
325
+ static void apply(detail::TensorInfo<scalar1, IndexType> &a,
326
+ detail::TensorInfo<scalar2, IndexType> &b,
327
+ const Op &op, int /*n*/, IndexType /*linearIndex*/,
328
+ Offset aOffset, Offset bOffset) {
329
+ op(a.data[aOffset], b.data[bOffset]);
330
+ }
331
+ };
332
+
333
+ template <typename Op,
334
+ typename scalar1,
335
+ typename scalar2,
336
+ typename IndexType,
337
+ int ADims,
338
+ int BDims,
339
+ typename... Offsets>
340
+ struct ApplyOp2<Op, scalar1, scalar2, IndexType, ADims, BDims, 0, Offsets...> {
341
+ __device__ __forceinline__
342
+ static void apply(detail::TensorInfo<scalar1, IndexType> &a,
343
+ detail::TensorInfo<scalar2, IndexType> &b,
344
+ const Op &op, int n, IndexType linearIndex,
345
+ Offsets... aOffsets, Offsets... bOffsets) {
346
+ op(n, a.data[aOffsets]..., b.data[bOffsets]...);
347
+ }
348
+ };
349
+
350
+ template <typename Op,
351
+ typename scalar1,
352
+ typename scalar2,
353
+ typename IndexType,
354
+ int ADims, int BDims,
355
+ int step,
356
+ int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK,
357
+ int min_blocks_per_sm=AT_APPLY_BLOCKS_PER_SM>
358
+ #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
359
+ C10_LAUNCH_BOUNDS_2(max_threads_per_block, min_blocks_per_sm)
360
+ #endif
361
+ __global__ void
362
+ kernelPointwiseApply2(detail::TensorInfo<scalar1, IndexType> a,
363
+ detail::TensorInfo<scalar2, IndexType> b,
364
+ IndexType totalElements,
365
+ const Op op) {
366
+ for (IndexType linearIndex = (blockIdx.x * blockDim.x + threadIdx.x) * step;
367
+ linearIndex < totalElements;
368
+ linearIndex += gridDim.x * blockDim.x * step) {
369
+ ApplyOp2<Op, scalar1, scalar2, IndexType, ADims, BDims, step>::apply(
370
+ a, b, op, ::min(step, static_cast<int>(totalElements - linearIndex)),
371
+ linearIndex);
372
+ }
373
+ }
374
+
375
+ } // anonymous namespace
376
+
377
+ template <typename scalar1, typename scalar2, int step, typename Op,
378
+ int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK,
379
+ int min_blocks_per_sm=AT_APPLY_BLOCKS_PER_SM>
380
+ inline bool CUDA_tensor_apply2(at::TensorBase a,
381
+ at::TensorBase b,
382
+ const Op op,
383
+ TensorArgType aType = TensorArgType::ReadWrite,
384
+ TensorArgType bType = TensorArgType::ReadOnly) {
385
+ TORCH_CHECK(a.device().is_cuda() && b.device().is_cuda(),
386
+ "CUDA_tensor_apply2: Expected tensors to have CUDA DeviceType, but got "
387
+ "tensors with type ", a.device().type(), " and ", b.device().type());
388
+ int64_t totalElements = a.numel();
389
+
390
+ if (totalElements != b.numel()) {
391
+ return false;
392
+ }
393
+
394
+ if (a.dim() > MAX_TENSORINFO_DIMS ||
395
+ b.dim() > MAX_TENSORINFO_DIMS) {
396
+ return false;
397
+ }
398
+
399
+ if (a.numel() == 0) {
400
+ // Empty tensor; do nothing
401
+ return true;
402
+ }
403
+ const dim3 block = getApplyBlock(max_threads_per_block);
404
+
405
+ dim3 grid;
406
+ auto curDevice = current_device();
407
+ if (curDevice == -1) return false;
408
+ if (!getApplyGrid<step>(totalElements, grid, curDevice, max_threads_per_block)) {
409
+ return false;
410
+ }
411
+
412
+ /*
413
+ Expands readable/writable tensors whose indices may be "overlapped."
414
+ This ensures that each element of the tensor is operated on once and only
415
+ once.
416
+ */
417
+ TensorBase oldA;
418
+ TensorBase oldB;
419
+
420
+ if (aType == TensorArgType::ReadWrite && detail::maybeOverlappingIndices(a)) {
421
+ // Must perform in contiguous space
422
+ oldA = std::exchange(a, a.contiguous());
423
+ }
424
+ if (bType == TensorArgType::ReadWrite && detail::maybeOverlappingIndices(b)) {
425
+ // Must perform in contiguous space
426
+ oldB = std::exchange(b, b.contiguous());
427
+ }
428
+
429
+ // It is possible that the tensor dimensions are able to be collapsed,
430
+ // and thus we can reduce the actual code complexity of the copy by
431
+ // exploiting this knowledge statically, since the div/mod is the
432
+ // most expensive part of the operation, more so than memory accesses.
433
+ // For instance, when copying a non-contiguous to a contiguous tensor
434
+ // (or vice versa), the contiguous tensor can be collapsed to one
435
+ // dimension, and the loop to translate the linear index to the array
436
+ // index can be similarly collapsed. That is what this unrolling is for.
437
+
438
+ #define HANDLE_CASE(TYPE, A, B) \
439
+ kernelPointwiseApply2<Op, \
440
+ scalar1, \
441
+ scalar2, \
442
+ TYPE, A, B, step, \
443
+ max_threads_per_block, \
444
+ min_blocks_per_sm> \
445
+ <<<grid, block, 0, at::cuda::getCurrentCUDAStream(curDevice)>>>( \
446
+ aInfo, bInfo, static_cast<TYPE>(totalElements), op); \
447
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
448
+
449
+ #define HANDLE_B_CASE(TYPE, A, B) { \
450
+ switch (B) { \
451
+ case 1: \
452
+ HANDLE_CASE(TYPE, A, 1); \
453
+ break; \
454
+ case 2: \
455
+ HANDLE_CASE(TYPE, A, 2); \
456
+ break; \
457
+ default: \
458
+ HANDLE_CASE(TYPE, A, -1); \
459
+ break; \
460
+ } \
461
+ }
462
+
463
+ #define HANDLE_A_CASE(TYPE, A, B) { \
464
+ switch (A) { \
465
+ case 1: \
466
+ HANDLE_B_CASE(TYPE, 1, B); \
467
+ break; \
468
+ case 2: \
469
+ HANDLE_B_CASE(TYPE, 2, B); \
470
+ break; \
471
+ default: \
472
+ HANDLE_B_CASE(TYPE, -1, B); \
473
+ break; \
474
+ } \
475
+ }
476
+
477
+ if (detail::canUse32BitIndexMath(a) &&
478
+ detail::canUse32BitIndexMath(b)) {
479
+ detail::TensorInfo<scalar1, unsigned int> aInfo =
480
+ detail::getTensorInfo<scalar1, unsigned int>(a);
481
+
482
+ detail::TensorInfo<scalar2, unsigned int> bInfo =
483
+ detail::getTensorInfo<scalar2, unsigned int>(b);
484
+ rearrangeDims(&aInfo, &bInfo);
485
+ aInfo.collapseDims();
486
+ bInfo.collapseDims();
487
+
488
+ HANDLE_A_CASE(unsigned int, aInfo.dims, bInfo.dims);
489
+ } else {
490
+ detail::TensorInfo<scalar1, uint64_t> aInfo =
491
+ detail::getTensorInfo<scalar1, uint64_t>(a);
492
+
493
+ detail::TensorInfo<scalar2, uint64_t> bInfo =
494
+ detail::getTensorInfo<scalar2, uint64_t>(b);
495
+ rearrangeDims(&aInfo, &bInfo);
496
+ aInfo.collapseDims();
497
+ bInfo.collapseDims();
498
+
499
+ /*
500
+ Only instantiates the all 1D special case and the fallback all nD case for
501
+ large (64-bit indexed) tensors to reduce compilation time.
502
+ */
503
+ if (aInfo.dims == 1 && bInfo.dims == 1) {
504
+ HANDLE_CASE(uint64_t, 1, 1);
505
+ } else {
506
+ HANDLE_CASE(uint64_t, -1, -1);
507
+ }
508
+ }
509
+ #undef HANDLE_CASE
510
+ #undef HANDLE_B_CASE
511
+ #undef HANDLE_A_CASE
512
+
513
+ if (oldA.defined()) {
514
+ at::native::copy_ignoring_overlaps(oldA, a);
515
+ }
516
+
517
+ if (oldB.defined()) {
518
+ at::native::copy_ignoring_overlaps(oldB, b);
519
+ }
520
+
521
+ return true;
522
+ }
523
+
524
+ /* Provides default step = 1 to CUDA_tensor_apply2. */
525
+ template <typename scalar1, typename scalar2, typename Op,
526
+ int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK,
527
+ int min_blocks_per_sm=AT_APPLY_BLOCKS_PER_SM>
528
+ inline bool CUDA_tensor_apply2(const at::TensorBase &a,
529
+ const at::TensorBase &b,
530
+ const Op op,
531
+ TensorArgType aType = TensorArgType::ReadWrite,
532
+ TensorArgType bType = TensorArgType::ReadOnly) {
533
+ return CUDA_tensor_apply2<scalar1, scalar2, 1, Op,
534
+ max_threads_per_block, min_blocks_per_sm>(a, b, op, aType, bType);
535
+ }
536
+
537
+ } // namespace at::cuda
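
As a concrete counterpart to the `a = b^2` example in NOTE [ CUDA_tensor_applyN helpers ] above, the sketch below shows how `CUDA_tensor_apply2` might be wrapped in a dispatch macro. It is not part of the commit: `square_into_` is a hypothetical helper, and the code assumes a `.cu` translation unit compiled by nvcc with extended-lambda support, as ATen kernels are.

// Illustrative only -- not part of the commit. `square_into_` is a made-up
// helper; requires nvcc with --extended-lambda (ATen's default).
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>

// Writes b*b into a, for two same-shaped CUDA tensors.
void square_into_(at::TensorBase a, const at::TensorBase& b) {
  AT_DISPATCH_FLOATING_TYPES_AND2(
      at::kHalf, at::kBFloat16, a.scalar_type(), "square_into_", [&] {
        // Default step == 1: the lambda sees one element of each tensor.
        // `a` defaults to TensorArgType::ReadWrite, `b` to ReadOnly.
        at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
            a, b,
            [] __device__(scalar_t& a_val, const scalar_t& b_val) {
              a_val = b_val * b_val;
            });
      });
}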
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDABlas.h ADDED
@@ -0,0 +1,375 @@
1
+ #pragma once
2
+ /*
3
+ Provides a subset of CUDA BLAS functions as templates:
4
+
5
+ gemm<Dtype>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c,
6
+ ldc)
7
+
8
+ gemv<Dtype>(transa, m, n, alpha, a, lda, x, incx, beta, y, incy)
9
+
10
+ dot<Dtype>(n, x, incx, y, incy, result)
11
+
12
+ where Dtype is double, float, at::Half or at::BFloat16 (ROCm, NOT for dot).
13
+ The functions are available in at::cuda::blas namespace.
14
+ */
15
+
16
+ #include <ATen/cuda/CUDAContext.h>
17
+ #include <ATen/OpMathType.h>
18
+
19
+ namespace at::cuda::blas {
20
+
21
+ // RAII guard that sets the CuBLAS pointer mode and restores it to
22
+ // its previous value when the guard is destroyed
23
+ class PointerModeGuard {
24
+ public:
25
+ PointerModeGuard(cublasHandle_t handle, cublasPointerMode_t mode) :
26
+ handle(handle) {
27
+ TORCH_CUDABLAS_CHECK(cublasGetPointerMode(handle, &previous_mode));
28
+ TORCH_CUDABLAS_CHECK(cublasSetPointerMode(handle, mode));
29
+ }
30
+
31
+ ~PointerModeGuard() {
32
+ cublasSetPointerMode(handle, previous_mode);
33
+ }
34
+
35
+ private:
36
+ cublasHandle_t handle;
37
+ cublasPointerMode_t previous_mode;
38
+ };
39
+
40
+ /* LEVEL 3 BLAS FUNCTIONS */
41
+
42
+ #define CUDABLAS_GEMM_ARGTYPES(Dtype) \
43
+ char transa, char transb, int64_t m, int64_t n, int64_t k, at::opmath_type<Dtype> alpha, \
44
+ const Dtype *a, int64_t lda, const Dtype *b, int64_t ldb, at::opmath_type<Dtype> beta,\
45
+ Dtype *c, int64_t ldc
46
+
47
+ #define CUDABLAS_GEMM_ARGS(Dtype) transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc
48
+
49
+ template <typename Dtype>
50
+ inline void gemm(CUDABLAS_GEMM_ARGTYPES(Dtype)) {
51
+ AT_ERROR("at::cuda::blas::gemm: not implemented for ", typeid(Dtype).name());
52
+ }
53
+
54
+ template <>
55
+ void gemm<double>(CUDABLAS_GEMM_ARGTYPES(double));
56
+ template <>
57
+ void gemm<float>(CUDABLAS_GEMM_ARGTYPES(float));
58
+ template <>
59
+ void gemm<c10::complex<double>>(CUDABLAS_GEMM_ARGTYPES(c10::complex<double>));
60
+ template <>
61
+ void gemm<c10::complex<float>>(CUDABLAS_GEMM_ARGTYPES(c10::complex<float>));
62
+ template <>
63
+ void gemm<at::Half>(CUDABLAS_GEMM_ARGTYPES(at::Half));
64
+ template <>
65
+ void gemm<at::BFloat16>(CUDABLAS_GEMM_ARGTYPES(at::BFloat16));
66
+
67
+ template <typename Dtype>
68
+ inline void gemm_internal(CUDABLAS_GEMM_ARGTYPES(Dtype)) {
69
+ AT_ERROR("at::cuda::blas::gemm_internal: not implemented for ", typeid(Dtype).name());
70
+ }
71
+
72
+ template <>
73
+ void gemm_internal<double>(CUDABLAS_GEMM_ARGTYPES(double));
74
+ template <>
75
+ void gemm_internal<float>(CUDABLAS_GEMM_ARGTYPES(float));
76
+ template <>
77
+ void gemm_internal<c10::complex<double>>(CUDABLAS_GEMM_ARGTYPES(c10::complex<double>));
78
+ template <>
79
+ void gemm_internal<c10::complex<float>>(CUDABLAS_GEMM_ARGTYPES(c10::complex<float>));
80
+ template <>
81
+ void gemm_internal<at::Half>(CUDABLAS_GEMM_ARGTYPES(at::Half));
82
+ template <>
83
+ void gemm_internal<at::BFloat16>(CUDABLAS_GEMM_ARGTYPES(at::BFloat16));
84
+
85
+ #if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
86
+ enum GEMMAndBiasActivationEpilogue {
87
+ None,
88
+ RELU,
89
+ GELU,
90
+ };
91
+
92
+ // NOTE: GELU activation is not supported prior to CUDA 11.4 and will
93
+ // do nothing if passed in that case.
94
+ template <typename Dtype>
95
+ void gemm_and_bias(
96
+ bool transpose_mat1,
97
+ bool transpose_mat2,
98
+ int64_t m,
99
+ int64_t n,
100
+ int64_t k,
101
+ at::opmath_type<Dtype> alpha_val,
102
+ const Dtype* mat1_ptr,
103
+ int64_t mat1_ld,
104
+ const Dtype* mat2_ptr,
105
+ int64_t mat2_ld,
106
+ const Dtype* bias,
107
+ Dtype* result_ptr,
108
+ int64_t result_ld,
109
+ GEMMAndBiasActivationEpilogue activation = GEMMAndBiasActivationEpilogue::None);
110
+
111
+ void int8_gemm(
112
+ bool transpose_mat1,
113
+ bool transpose_mat2,
114
+ int64_t m,
115
+ int64_t n,
116
+ int64_t k,
117
+ const int8_t* mat1_ptr,
118
+ int64_t mat1_ld,
119
+ const int8_t* mat2_ptr,
120
+ int64_t mat2_ld,
121
+ int32_t* result_ptr,
122
+ int64_t result_ld);
123
+
124
+ void scaled_gemm(
125
+ char transa,
126
+ char transb,
127
+ int64_t m,
128
+ int64_t n,
129
+ int64_t k,
130
+ const void* mat1_ptr,
131
+ const void* mat1_scale_ptr,
132
+ int64_t mat1_ld,
133
+ ScalarType mat1_dtype,
134
+ const void* mat2_ptr,
135
+ const void* mat2_scale_ptr,
136
+ int64_t mat2_ld,
137
+ ScalarType mat2_dtype,
138
+ const void* bias_ptr,
139
+ ScalarType bias_dtype,
140
+ void* result_ptr,
141
+ const void* result_scale_ptr,
142
+ int64_t result_ld,
143
+ ScalarType result_dtype,
144
+ void* amax_ptr,
145
+ bool use_fast_accum);
146
+ #endif
147
+
148
+ #define CUDABLAS_BGEMM_ARGTYPES(Dtype) \
149
+ char transa, char transb, int64_t m, int64_t n, int64_t k, at::opmath_type<Dtype> alpha, \
150
+ const Dtype *a, int64_t lda, int64_t stridea, \
151
+ const Dtype *b, int64_t ldb, int64_t strideb, \
152
+ at::opmath_type<Dtype> beta, Dtype *c, int64_t ldc, int64_t stridec, int64_t num_batches
153
+
154
+ #define CUDABLAS_BGEMM_ARGS(Dtype) \
155
+ transa, transb, m, n, k, alpha, a, lda, stridea, b, ldb, strideb, beta, c, ldc, stridec, num_batches
156
+
157
+ template <typename Dtype>
158
+ inline void bgemm(CUDABLAS_BGEMM_ARGTYPES(Dtype)) {
159
+ AT_ERROR("at::cuda::blas::bgemm: not implemented for ", typeid(Dtype).name());
160
+ }
161
+
162
+ template <>
163
+ void bgemm<double>(CUDABLAS_BGEMM_ARGTYPES(double));
164
+ template <>
165
+ void bgemm<float>(CUDABLAS_BGEMM_ARGTYPES(float));
166
+ template <>
167
+ void bgemm<c10::complex<double>>(CUDABLAS_BGEMM_ARGTYPES(c10::complex<double>));
168
+ template <>
169
+ void bgemm<c10::complex<float>>(CUDABLAS_BGEMM_ARGTYPES(c10::complex<float>));
170
+ template <>
171
+ void bgemm<at::Half>(CUDABLAS_BGEMM_ARGTYPES(at::Half));
172
+ template <>
173
+ void bgemm<at::BFloat16>(CUDABLAS_BGEMM_ARGTYPES(at::BFloat16));
174
+
175
+ template <typename Dtype>
176
+ inline void bgemm_internal(CUDABLAS_BGEMM_ARGTYPES(Dtype)) {
177
+ AT_ERROR("at::cuda::blas::bgemm_internal: not implemented for ", typeid(Dtype).name());
178
+ }
179
+
180
+ template <>
181
+ void bgemm_internal<double>(CUDABLAS_BGEMM_ARGTYPES(double));
182
+ template <>
183
+ void bgemm_internal<float>(CUDABLAS_BGEMM_ARGTYPES(float));
184
+ template <>
185
+ void bgemm_internal<c10::complex<double>>(CUDABLAS_BGEMM_ARGTYPES(c10::complex<double>));
186
+ template <>
187
+ void bgemm_internal<c10::complex<float>>(CUDABLAS_BGEMM_ARGTYPES(c10::complex<float>));
188
+ template <>
189
+ void bgemm_internal<at::Half>(CUDABLAS_BGEMM_ARGTYPES(at::Half));
190
+ template <>
191
+ void bgemm_internal<at::BFloat16>(CUDABLAS_BGEMM_ARGTYPES(at::BFloat16));
192
+
193
+ #if defined(USE_ROCM) && ROCM_VERSION <= 50500
194
+ // ROCm 5.6 hipblas matches the const Dtype *A API, but prior hipblas does not.
195
+ #define CUDABLAS_TRSM_ARGTYPES(Dtype) \
196
+ hipblasHandle_t handle, hipblasSideMode_t side, hipblasFillMode_t uplo, \
197
+ hipblasOperation_t trans, hipblasDiagType_t diag, int m, int n, \
198
+ const Dtype *alpha, Dtype *A, int lda, Dtype *B, int ldb
199
+ #else
200
+ #define CUDABLAS_TRSM_ARGTYPES(Dtype) \
201
+ cublasHandle_t handle, cublasSideMode_t side, cublasFillMode_t uplo, \
202
+ cublasOperation_t trans, cublasDiagType_t diag, int m, int n, \
203
+ const Dtype *alpha, const Dtype *A, int lda, Dtype *B, int ldb
204
+ #endif
205
+
206
+ template <typename Dtype>
207
+ inline void trsm(CUDABLAS_TRSM_ARGTYPES(Dtype)) {
208
+ TORCH_INTERNAL_ASSERT(false, "at::cuda::blas::trsm: not implemented for ", typeid(Dtype).name());
209
+ }
210
+
211
+ template <>
212
+ TORCH_CUDA_CU_API void trsm<float>(CUDABLAS_TRSM_ARGTYPES(float));
213
+ template <>
214
+ TORCH_CUDA_CU_API void trsm<double>(CUDABLAS_TRSM_ARGTYPES(double));
215
+ template <>
216
+ TORCH_CUDA_CU_API void trsm<c10::complex<float>>(CUDABLAS_TRSM_ARGTYPES(c10::complex<float>));
217
+ template <>
218
+ TORCH_CUDA_CU_API void trsm<c10::complex<double>>(CUDABLAS_TRSM_ARGTYPES(c10::complex<double>));
219
+
220
+ #define CUDABLAS_TRSM_BATCHED_ARGTYPES(Dtype) \
221
+ cublasHandle_t handle, cublasSideMode_t side, cublasFillMode_t uplo, \
222
+ cublasOperation_t trans, cublasDiagType_t diag, int m, int n, \
223
+ const Dtype *alpha, Dtype *A[], int lda, Dtype *B[], int ldb, \
224
+ int batchCount
225
+
226
+ template <typename Dtype>
227
+ inline void trsmBatched(CUDABLAS_TRSM_BATCHED_ARGTYPES(Dtype)) {
228
+ TORCH_INTERNAL_ASSERT(
229
+ false,
230
+ "at::cuda::blas::trsmBatched: not implemented for ",
231
+ typeid(Dtype).name());
232
+ }
233
+
234
+ template <>
235
+ TORCH_CUDA_CU_API void trsmBatched<float>(CUDABLAS_TRSM_BATCHED_ARGTYPES(float));
236
+ template <>
237
+ TORCH_CUDA_CU_API void trsmBatched<double>(CUDABLAS_TRSM_BATCHED_ARGTYPES(double));
238
+ template <>
239
+ TORCH_CUDA_CU_API void trsmBatched<c10::complex<float>>(CUDABLAS_TRSM_BATCHED_ARGTYPES(c10::complex<float>));
240
+ template <>
241
+ TORCH_CUDA_CU_API void trsmBatched<c10::complex<double>>(CUDABLAS_TRSM_BATCHED_ARGTYPES(c10::complex<double>));
242
+
243
+ /* LEVEL 2 BLAS FUNCTIONS */
244
+
245
+ #define CUDABLAS_GEMV_ARGTYPES(Dtype) \
246
+ char trans, int64_t m, int64_t n, Dtype alpha, const Dtype *a, int64_t lda, \
247
+ const Dtype *x, int64_t incx, Dtype beta, Dtype *y, int64_t incy
248
+
249
+ template <typename Dtype>
250
+ inline void gemv(CUDABLAS_GEMV_ARGTYPES(Dtype)) {
251
+ AT_ERROR("at::cuda::blas::gemv: not implemented for ", typeid(Dtype).name());
252
+ }
253
+
254
+ template <>
255
+ void gemv<double>(CUDABLAS_GEMV_ARGTYPES(double));
256
+ template <>
257
+ void gemv<float>(CUDABLAS_GEMV_ARGTYPES(float));
258
+ template <>
259
+ void gemv<c10::complex<double>>(CUDABLAS_GEMV_ARGTYPES(c10::complex<double>));
260
+ template <>
261
+ void gemv<c10::complex<float>>(CUDABLAS_GEMV_ARGTYPES(c10::complex<float>));
262
+ template <>
263
+ void gemv<at::Half>(CUDABLAS_GEMV_ARGTYPES(at::Half));
264
+ template <>
265
+ void gemv<at::BFloat16>(CUDABLAS_GEMV_ARGTYPES(at::BFloat16));
266
+
267
+ /* LEVEL 1 BLAS FUNCTIONS */
268
+
269
+ #define CUDABLAS_DOT_ARGTYPES(Dtype) \
270
+ cublasHandle_t handle, int n, const Dtype *x, int incx, const Dtype *y, \
271
+ int incy, Dtype *result
272
+
273
+ template <typename Dtype>
274
+ inline void dot(CUDABLAS_DOT_ARGTYPES(Dtype)) {
275
+ AT_ERROR("at::cuda::blas::dot: not implemented for ", typeid(Dtype).name());
276
+ }
277
+
278
+ template <>
279
+ void dot<double>(CUDABLAS_DOT_ARGTYPES(double));
280
+ template <>
281
+ void dot<float>(CUDABLAS_DOT_ARGTYPES(float));
282
+ template <>
283
+ void dot<at::Half>(CUDABLAS_DOT_ARGTYPES(at::Half));
284
+ template <>
285
+ void dot<at::BFloat16>(CUDABLAS_DOT_ARGTYPES(at::BFloat16));
286
+ template <>
287
+ void dot<c10::complex<double>>(CUDABLAS_DOT_ARGTYPES(c10::complex<double>));
288
+ template <>
289
+ void dot<c10::complex<float>>(CUDABLAS_DOT_ARGTYPES(c10::complex<float>));
290
+
291
+ template <typename Dtype>
292
+ inline void vdot(CUDABLAS_DOT_ARGTYPES(Dtype)) {
293
+ AT_ERROR("at::cuda::blas::vdot: not implemented for ", typeid(Dtype).name());
294
+ }
295
+
296
+ template <>
297
+ void vdot<c10::complex<float>>(CUDABLAS_DOT_ARGTYPES(c10::complex<float>));
298
+ template <>
299
+ void vdot<c10::complex<double>>(CUDABLAS_DOT_ARGTYPES(c10::complex<double>));
300
+
301
+ #define CUDABLAS_GETRS_ARGTYPES(Dtype) \
302
+ cublasHandle_t handle, cublasOperation_t trans, \
303
+ int n, int nrhs, Dtype** dA_array, int lda, int* ipiv_array, \
304
+ Dtype** dB_array, int ldb, int* info_array, int batchsize
305
+
306
+ template<class Dtype>
307
+ void getrsBatched(CUDABLAS_GETRS_ARGTYPES(Dtype)) {
308
+ TORCH_INTERNAL_ASSERT(false, "at::cuda::blas::getrsBatched: not implemented for ",
309
+ typeid(Dtype).name());
310
+ }
311
+ template<>
312
+ TORCH_CUDA_CU_API void getrsBatched<float>(CUDABLAS_GETRS_ARGTYPES(float));
313
+ template<>
314
+ TORCH_CUDA_CU_API void getrsBatched<double>(CUDABLAS_GETRS_ARGTYPES(double));
315
+ template<>
316
+ TORCH_CUDA_CU_API void getrsBatched<c10::complex<float>>(CUDABLAS_GETRS_ARGTYPES(c10::complex<float>));
317
+ template<>
318
+ TORCH_CUDA_CU_API void getrsBatched<c10::complex<double>>(CUDABLAS_GETRS_ARGTYPES(c10::complex<double>));
319
+
320
+ #define CUDABLAS_GEQRF_BATCHED_ARGTYPES(Dtype) \
321
+ cublasHandle_t handle, int m, int n, Dtype **A_array, int lda, \
322
+ Dtype **tau_array, int *info, int batchsize
323
+
324
+ template <class Dtype>
325
+ void geqrfBatched(CUDABLAS_GEQRF_BATCHED_ARGTYPES(Dtype)) {
326
+ TORCH_INTERNAL_ASSERT(
327
+ false,
328
+ "at::cuda::blas::geqrfBatched: not implemented for ",
329
+ typeid(Dtype).name());
330
+ }
331
+ template <>
332
+ TORCH_CUDA_CU_API void geqrfBatched<float>(CUDABLAS_GEQRF_BATCHED_ARGTYPES(float));
333
+ template <>
334
+ TORCH_CUDA_CU_API void geqrfBatched<double>(CUDABLAS_GEQRF_BATCHED_ARGTYPES(double));
335
+ template <>
336
+ TORCH_CUDA_CU_API void geqrfBatched<c10::complex<double>>(
337
+ CUDABLAS_GEQRF_BATCHED_ARGTYPES(c10::complex<double>));
338
+ template <>
339
+ TORCH_CUDA_CU_API void geqrfBatched<c10::complex<float>>(
340
+ CUDABLAS_GEQRF_BATCHED_ARGTYPES(c10::complex<float>));
341
+
342
+ #define CUDABLAS_GETRF_ARGTYPES(Dtype) \
343
+ int n, Dtype** dA_array, int ldda, int* ipiv_array, int* info_array, int batchsize
344
+
345
+ template<class Dtype>
346
+ void getrfBatched(CUDABLAS_GETRF_ARGTYPES(Dtype)) {
347
+ TORCH_CHECK(false, "at::cuda::blas::getrfBatched: not implemented for ", typeid(Dtype).name());
348
+ }
349
+ template<>
350
+ TORCH_CUDA_CU_API void getrfBatched<float>(CUDABLAS_GETRF_ARGTYPES(float));
351
+ template<>
352
+ TORCH_CUDA_CU_API void getrfBatched<double>(CUDABLAS_GETRF_ARGTYPES(double));
353
+ template<>
354
+ TORCH_CUDA_CU_API void getrfBatched<c10::complex<double>>(CUDABLAS_GETRF_ARGTYPES(c10::complex<double>));
355
+ template<>
356
+ TORCH_CUDA_CU_API void getrfBatched<c10::complex<float>>(CUDABLAS_GETRF_ARGTYPES(c10::complex<float>));
357
+
358
+ #define CUDABLAS_GELS_BATCHED_ARGTYPES(Dtype) \
359
+ cublasHandle_t handle, cublasOperation_t trans, int m, int n, int nrhs, Dtype** dA_array, int ldda, Dtype** dC_array, int lddc, int* info, int *devInfoArray, int batchSize
360
+
361
+ template <class Dtype>
362
+ void gelsBatched(CUDABLAS_GELS_BATCHED_ARGTYPES(Dtype)) {
363
+ TORCH_INTERNAL_ASSERT(false, "at::cuda::blas::gelsBatched: not implemented for ", typeid(Dtype).name());
364
+ }
365
+
366
+ template<>
367
+ TORCH_CUDA_CU_API void gelsBatched<double>(CUDABLAS_GELS_BATCHED_ARGTYPES(double));
368
+ template<>
369
+ TORCH_CUDA_CU_API void gelsBatched<float>(CUDABLAS_GELS_BATCHED_ARGTYPES(float));
370
+ template<>
371
+ TORCH_CUDA_CU_API void gelsBatched<c10::complex<double>>(CUDABLAS_GELS_BATCHED_ARGTYPES(c10::complex<double>));
372
+ template<>
373
+ TORCH_CUDA_CU_API void gelsBatched<c10::complex<float>>(CUDABLAS_GELS_BATCHED_ARGTYPES(c10::complex<float>));
374
+
375
+ } // namespace at::cuda::blas
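The bgemm/gemv/dot entry points above are plain function templates with per-dtype specializations, so a caller just passes the cuBLAS-style argument list that the corresponding ARGTYPES macro spells out. A minimal sketch of driving the gemv&lt;float&gt; wrapper on raw device pointers (d_A, d_x, d_y and their sizes are hypothetical; handle and stream management happen inside the wrapper):

#include <ATen/cuda/CUDABlas.h>

// Computes y = alpha * A * x + beta * y for a column-major m x n matrix A.
void gemv_sketch(const float* d_A, const float* d_x, float* d_y,
                 int64_t m, int64_t n) {
  at::cuda::blas::gemv<float>(
      /*trans=*/'n',       // 'n' = no transpose; 't'/'c' are also accepted
      m, n,
      /*alpha=*/1.0f,
      d_A, /*lda=*/m,
      d_x, /*incx=*/1,
      /*beta=*/0.0f,
      d_y, /*incy=*/1);
}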
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContext.h ADDED
@@ -0,0 +1,9 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cuda/CUDAContextLight.h>
4
+
5
+ // Preserved for BC, as many files depend on these includes
6
+ #include <ATen/Context.h>
7
+ #include <c10/cuda/CUDAStream.h>
8
+ #include <c10/util/Logging.h>
9
+ #include <ATen/cuda/Exceptions.h>
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContextLight.h ADDED
@@ -0,0 +1,95 @@
1
+ #pragma once
2
+ // Light-weight version of CUDAContext.h with fewer transitive includes
3
+
4
+ #include <cstdint>
5
+
6
+ #include <cuda_runtime_api.h>
7
+ #include <cusparse.h>
8
+ #include <cublas_v2.h>
9
+
10
+ // cublasLt was introduced in CUDA 10.1, but we enable it only for CUDA 11.1+, which also
11
+ // added bf16 support
12
+ #if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
13
+ #include <cublasLt.h>
14
+ #endif
15
+
16
+ #ifdef CUDART_VERSION
17
+ #include <cusolverDn.h>
18
+ #endif
19
+
20
+ #if defined(USE_ROCM) && ROCM_VERSION >= 50300
21
+ #include <hipsolver/hipsolver.h>
22
+ #endif
23
+
24
+ #include <c10/core/Allocator.h>
25
+ #include <c10/cuda/CUDAFunctions.h>
26
+
27
+ namespace c10 {
28
+ struct Allocator;
29
+ }
30
+
31
+ namespace at::cuda {
32
+
33
+ /*
34
+ A common CUDA interface for ATen.
35
+
36
+ This interface is distinct from CUDAHooks, which defines an interface that links
37
+ to both CPU-only and CUDA builds. That interface is intended for runtime
38
+ dispatch and should be used from files that are included in both CPU-only and
39
+ CUDA builds.
40
+
41
+ CUDAContext, on the other hand, should be preferred by files only included in
42
+ CUDA builds. It is intended to expose CUDA functionality in a consistent
43
+ manner.
44
+
45
+ This means there is some overlap between the CUDAContext and CUDAHooks, but
46
+ the choice of which to use is simple: use CUDAContext when in a CUDA-only file,
47
+ use CUDAHooks otherwise.
48
+
49
+ Note that CUDAContext simply defines an interface with no associated class.
50
+ It is expected that the modules whose functions compose this interface will
51
+ manage their own state. There is only a single CUDA context/state.
52
+ */
53
+
54
+ /**
55
+ * DEPRECATED: use device_count() instead
56
+ */
57
+ inline int64_t getNumGPUs() {
58
+ return c10::cuda::device_count();
59
+ }
60
+
61
+ /**
62
+ * CUDA is available if we compiled with CUDA, and there are one or more
63
+ * devices. If we compiled with CUDA but there is a driver problem, etc.,
64
+ * this function will report CUDA is not available (rather than raise an error.)
65
+ */
66
+ inline bool is_available() {
67
+ return c10::cuda::device_count() > 0;
68
+ }
69
+
70
+ TORCH_CUDA_CPP_API cudaDeviceProp* getCurrentDeviceProperties();
71
+
72
+ TORCH_CUDA_CPP_API int warp_size();
73
+
74
+ TORCH_CUDA_CPP_API cudaDeviceProp* getDeviceProperties(c10::DeviceIndex device);
75
+
76
+ TORCH_CUDA_CPP_API bool canDeviceAccessPeer(
77
+ c10::DeviceIndex device,
78
+ c10::DeviceIndex peer_device);
79
+
80
+ TORCH_CUDA_CPP_API c10::Allocator* getCUDADeviceAllocator();
81
+
82
+ /* Handles */
83
+ TORCH_CUDA_CPP_API cusparseHandle_t getCurrentCUDASparseHandle();
84
+ TORCH_CUDA_CPP_API cublasHandle_t getCurrentCUDABlasHandle();
85
+ #if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
86
+ TORCH_CUDA_CPP_API cublasLtHandle_t getCurrentCUDABlasLtHandle();
87
+ #endif
88
+
89
+ TORCH_CUDA_CPP_API void clearCublasWorkspaces();
90
+
91
+ #if defined(CUDART_VERSION) || defined(USE_ROCM) && ROCM_VERSION >= 50300
92
+ TORCH_CUDA_CPP_API cusolverDnHandle_t getCurrentCUDASolverDnHandle();
93
+ #endif
94
+
95
+ } // namespace at::cuda
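The free functions above are how CUDA-only ATen code queries device state without pulling in the heavier CUDAContext.h includes. A minimal sketch (the printed fields are illustrative only):

#include <ATen/cuda/CUDAContextLight.h>
#include <iostream>

void print_current_device_info() {
  if (!at::cuda::is_available()) {
    std::cout << "no CUDA devices visible\n";
    return;
  }
  // The returned pointer is owned and cached by ATen; do not free it.
  const cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
  std::cout << prop->name << ": " << prop->multiProcessorCount
            << " SMs, warp size " << at::cuda::warp_size() << "\n";
}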
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADataType.h ADDED
@@ -0,0 +1,115 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/ScalarType.h>
4
+
5
+ #include <cuda.h>
6
+ #include <library_types.h>
7
+
8
+ namespace at::cuda {
9
+
10
+ template <typename scalar_t>
11
+ cudaDataType getCudaDataType() {
12
+ TORCH_INTERNAL_ASSERT(false, "Cannot convert type ", typeid(scalar_t).name(), " to cudaDataType.")
13
+ }
14
+
15
+ template<> inline cudaDataType getCudaDataType<at::Half>() {
16
+ return CUDA_R_16F;
17
+ }
18
+ template<> inline cudaDataType getCudaDataType<float>() {
19
+ return CUDA_R_32F;
20
+ }
21
+ template<> inline cudaDataType getCudaDataType<double>() {
22
+ return CUDA_R_64F;
23
+ }
24
+ template<> inline cudaDataType getCudaDataType<c10::complex<c10::Half>>() {
25
+ return CUDA_C_16F;
26
+ }
27
+ template<> inline cudaDataType getCudaDataType<c10::complex<float>>() {
28
+ return CUDA_C_32F;
29
+ }
30
+ template<> inline cudaDataType getCudaDataType<c10::complex<double>>() {
31
+ return CUDA_C_64F;
32
+ }
33
+
34
+ // HIP doesn't define integral types
35
+ #ifndef USE_ROCM
36
+ template<> inline cudaDataType getCudaDataType<uint8_t>() {
37
+ return CUDA_R_8U;
38
+ }
39
+ template<> inline cudaDataType getCudaDataType<int8_t>() {
40
+ return CUDA_R_8I;
41
+ }
42
+ template<> inline cudaDataType getCudaDataType<int>() {
43
+ return CUDA_R_32I;
44
+ }
45
+ #endif
46
+
47
+ #if !defined(USE_ROCM)
48
+ template<> inline cudaDataType getCudaDataType<int16_t>() {
49
+ return CUDA_R_16I;
50
+ }
51
+ template<> inline cudaDataType getCudaDataType<int64_t>() {
52
+ return CUDA_R_64I;
53
+ }
54
+ template<> inline cudaDataType getCudaDataType<at::BFloat16>() {
55
+ return CUDA_R_16BF;
56
+ }
57
+ #endif
58
+
59
+ inline cudaDataType ScalarTypeToCudaDataType(const c10::ScalarType& scalar_type) {
60
+ switch (scalar_type) {
61
+ // HIP doesn't define integral types
62
+ #ifndef USE_ROCM
63
+ case c10::ScalarType::Byte:
64
+ return CUDA_R_8U;
65
+ case c10::ScalarType::Char:
66
+ return CUDA_R_8I;
67
+ case c10::ScalarType::Int:
68
+ return CUDA_R_32I;
69
+ #endif
70
+ case c10::ScalarType::Half:
71
+ return CUDA_R_16F;
72
+ case c10::ScalarType::Float:
73
+ return CUDA_R_32F;
74
+ case c10::ScalarType::Double:
75
+ return CUDA_R_64F;
76
+ case c10::ScalarType::ComplexHalf:
77
+ return CUDA_C_16F;
78
+ case c10::ScalarType::ComplexFloat:
79
+ return CUDA_C_32F;
80
+ case c10::ScalarType::ComplexDouble:
81
+ return CUDA_C_64F;
82
+ #if !defined(USE_ROCM)
83
+ case c10::ScalarType::Short:
84
+ return CUDA_R_16I;
85
+ case c10::ScalarType::Long:
86
+ return CUDA_R_64I;
87
+ case c10::ScalarType::BFloat16:
88
+ return CUDA_R_16BF;
89
+ #if defined(CUDA_VERSION) && CUDA_VERSION >= 11080
90
+ case c10::ScalarType::Float8_e4m3fn:
91
+ return CUDA_R_8F_E4M3;
92
+ case c10::ScalarType::Float8_e5m2:
93
+ return CUDA_R_8F_E5M2;
94
+ #endif
95
+ #else // USE_ROCM
96
+ case c10::ScalarType::BFloat16:
97
+ return CUDA_R_16BF;
98
+ #if defined(HIP_NEW_TYPE_ENUMS)
99
+ case c10::ScalarType::Float8_e4m3fnuz:
100
+ return HIP_R_8F_E4M3_FNUZ;
101
+ case c10::ScalarType::Float8_e5m2fnuz:
102
+ return HIP_R_8F_E5M2_FNUZ;
103
+ #else
104
+ case c10::ScalarType::Float8_e4m3fnuz:
105
+ return static_cast<hipDataType>(1000);
106
+ case c10::ScalarType::Float8_e5m2fnuz:
107
+ return static_cast<hipDataType>(1001);
108
+ #endif
109
+ #endif
110
+ default:
111
+ TORCH_INTERNAL_ASSERT(false, "Cannot convert ScalarType ", scalar_type, " to cudaDataType.")
112
+ }
113
+ }
114
+
115
+ } // namespace at::cuda
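Both overloads map an ATen element type onto the cudaDataType enum that cuBLAS/cuSPARSE descriptor APIs expect: one at compile time from a C++ scalar type, one at runtime from a c10::ScalarType. A minimal sketch:

#include <ATen/cuda/CUDADataType.h>

void data_type_sketch() {
  // Compile-time mapping from a C++ scalar type.
  cudaDataType a = at::cuda::getCudaDataType<float>();                             // CUDA_R_32F
  // Runtime mapping from a dtype carried by a Tensor.
  cudaDataType b = at::cuda::ScalarTypeToCudaDataType(c10::ScalarType::BFloat16);  // CUDA_R_16BF
  (void)a;
  (void)b;
}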
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADevice.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cuda/Exceptions.h>
4
+
5
+ #include <cuda.h>
6
+ #include <cuda_runtime.h>
7
+
8
+ namespace at::cuda {
9
+
10
+ inline Device getDeviceFromPtr(void* ptr) {
11
+ cudaPointerAttributes attr{};
12
+
13
+ AT_CUDA_CHECK(cudaPointerGetAttributes(&attr, ptr));
14
+
15
+ #if !defined(USE_ROCM)
16
+ TORCH_CHECK(attr.type != cudaMemoryTypeUnregistered,
17
+ "The specified pointer resides on host memory and is not registered with any CUDA device.");
18
+ #endif
19
+
20
+ return {c10::DeviceType::CUDA, static_cast<DeviceIndex>(attr.device)};
21
+ }
22
+
23
+ } // namespace at::cuda
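getDeviceFromPtr recovers an at::Device from a bare device pointer, e.g. when wrapping externally allocated memory. A minimal sketch, using the c10 caching allocator's raw_alloc purely to obtain a device pointer (any cudaMalloc'd pointer would do):

#include <ATen/cuda/CUDADevice.h>
#include <c10/cuda/CUDACachingAllocator.h>

void device_from_ptr_sketch() {
  void* p = c10::cuda::CUDACachingAllocator::raw_alloc(256);
  at::Device dev = at::cuda::getDeviceFromPtr(p);   // CUDA device that owns p
  TORCH_INTERNAL_ASSERT(dev.is_cuda());
  c10::cuda::CUDACachingAllocator::raw_delete(p);
}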
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAEvent.h ADDED
@@ -0,0 +1,208 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cuda/ATenCUDAGeneral.h>
4
+ #include <ATen/cuda/CUDAContext.h>
5
+ #include <c10/core/impl/GPUTrace.h>
6
+ #include <c10/cuda/CUDAStream.h>
7
+ #include <c10/cuda/CUDAGuard.h>
8
+ #include <ATen/cuda/Exceptions.h>
9
+ #include <c10/util/Exception.h>
10
+
11
+ #include <cuda_runtime_api.h>
12
+
13
+ #include <cstdint>
14
+ #include <utility>
15
+
16
+ namespace at::cuda {
17
+
18
+ /*
19
+ * CUDAEvents are movable not copyable wrappers around CUDA's events.
20
+ *
21
+ * CUDAEvents are constructed lazily when first recorded, unless they are
22
+ * reconstructed from a cudaIpcEventHandle_t. The event has a device, and this
23
+ * device is acquired from the first recording stream. However, if reconstructed
24
+ * from a handle, the device should be explicitly specified; or if ipc_handle() is
25
+ * called before the event is ever recorded, it will use the current device.
26
+ * Later streams that record the event must match this device.
27
+ */
28
+ struct TORCH_CUDA_CPP_API CUDAEvent {
29
+ // Constructors
30
+ // Default value for `flags` is specified below - it's cudaEventDisableTiming
31
+ CUDAEvent() noexcept = default;
32
+ CUDAEvent(unsigned int flags) noexcept : flags_{flags} {}
33
+
34
+ CUDAEvent(
35
+ DeviceIndex device_index, const cudaIpcEventHandle_t* handle) {
36
+ device_index_ = device_index;
37
+ CUDAGuard guard(device_index_);
38
+
39
+ AT_CUDA_CHECK(cudaIpcOpenEventHandle(&event_, *handle));
40
+ is_created_ = true;
41
+ }
42
+
43
+ // Note: event destruction done on creating device to avoid creating a
44
+ // CUDA context on other devices.
45
+ ~CUDAEvent() {
46
+ try {
47
+ if (is_created_) {
48
+ CUDAGuard guard(device_index_);
49
+ const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
50
+ if (C10_UNLIKELY(interp)) {
51
+ (*interp)->trace_gpu_event_deletion(reinterpret_cast<uintptr_t>(event_));
52
+ }
53
+ AT_CUDA_CHECK(cudaEventDestroy(event_));
54
+ }
55
+ } catch (...) { /* No throw */ }
56
+ }
57
+
58
+ CUDAEvent(const CUDAEvent&) = delete;
59
+ CUDAEvent& operator=(const CUDAEvent&) = delete;
60
+
61
+ CUDAEvent(CUDAEvent&& other) noexcept { moveHelper(std::move(other)); }
62
+ CUDAEvent& operator=(CUDAEvent&& other) noexcept {
63
+ if (this != &other) {
64
+ moveHelper(std::move(other));
65
+ }
66
+ return *this;
67
+ }
68
+
69
+ operator cudaEvent_t() const { return event(); }
70
+
71
+ // Less than operator (to allow use in sets)
72
+ friend bool operator<(const CUDAEvent& left, const CUDAEvent& right) {
73
+ return left.event_ < right.event_;
74
+ }
75
+
76
+ optional<at::Device> device() const {
77
+ if (is_created_) {
78
+ return at::Device(at::kCUDA, device_index_);
79
+ } else {
80
+ return {};
81
+ }
82
+ }
83
+
84
+ bool isCreated() const { return is_created_; }
85
+ DeviceIndex device_index() const {return device_index_;}
86
+ cudaEvent_t event() const { return event_; }
87
+
88
+ // Note: cudaEventQuery can be safely called from any device
89
+ bool query() const {
90
+ if (!is_created_) {
91
+ return true;
92
+ }
93
+
94
+ cudaError_t err = cudaEventQuery(event_);
95
+ if (err == cudaSuccess) {
96
+ return true;
97
+ } else if (err != cudaErrorNotReady) {
98
+ C10_CUDA_CHECK(err);
99
+ } else {
100
+ // ignore and clear the error if not ready
101
+ (void)cudaGetLastError();
102
+ }
103
+
104
+ return false;
105
+ }
106
+
107
+ void record() { record(getCurrentCUDAStream()); }
108
+
109
+ void recordOnce(const CUDAStream& stream) {
110
+ if (!was_recorded_) record(stream);
111
+ }
112
+
113
+ // Note: cudaEventRecord must be called on the same device as the event.
114
+ void record(const CUDAStream& stream) {
115
+ if (!is_created_) {
116
+ createEvent(stream.device_index());
117
+ }
118
+
119
+ TORCH_CHECK(device_index_ == stream.device_index(), "Event device ", device_index_,
120
+ " does not match recording stream's device ", stream.device_index(), ".");
121
+ CUDAGuard guard(device_index_);
122
+ AT_CUDA_CHECK(cudaEventRecord(event_, stream));
123
+ const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
124
+ if (C10_UNLIKELY(interp)) {
125
+ (*interp)->trace_gpu_event_record(
126
+ reinterpret_cast<uintptr_t>(event_),
127
+ reinterpret_cast<uintptr_t>(stream.stream())
128
+ );
129
+ }
130
+ was_recorded_ = true;
131
+ }
132
+
133
+ // Note: cudaStreamWaitEvent must be called on the same device as the stream.
134
+ // The event has no actual GPU resources associated with it.
135
+ void block(const CUDAStream& stream) {
136
+ if (is_created_) {
137
+ CUDAGuard guard(stream.device_index());
138
+ AT_CUDA_CHECK(cudaStreamWaitEvent(stream, event_, 0));
139
+ const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
140
+ if (C10_UNLIKELY(interp)) {
141
+ (*interp)->trace_gpu_event_wait(
142
+ reinterpret_cast<uintptr_t>(event_),
143
+ reinterpret_cast<uintptr_t>(stream.stream())
144
+ );
145
+ }
146
+ }
147
+ }
148
+
149
+ // Note: cudaEventElapsedTime can be safely called from any device
150
+ float elapsed_time(const CUDAEvent& other) const {
151
+ TORCH_CHECK(is_created_ && other.isCreated(),
152
+ "Both events must be recorded before calculating elapsed time.");
153
+ float time_ms = 0;
154
+ // raise cudaErrorNotReady if either event is recorded but not yet completed
155
+ AT_CUDA_CHECK(cudaEventElapsedTime(&time_ms, event_, other.event_));
156
+ return time_ms;
157
+ }
158
+
159
+ // Note: cudaEventSynchronize can be safely called from any device
160
+ void synchronize() const {
161
+ if (is_created_) {
162
+ const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
163
+ if (C10_UNLIKELY(interp)) {
164
+ (*interp)->trace_gpu_event_synchronization(reinterpret_cast<uintptr_t>(event_));
165
+ }
166
+ AT_CUDA_CHECK(cudaEventSynchronize(event_));
167
+ }
168
+ }
169
+
170
+ // Note: cudaIpcGetEventHandle must be called on the same device as the event
171
+ void ipc_handle(cudaIpcEventHandle_t * handle) {
172
+ if (!is_created_) {
173
+ // this CUDAEvent object was initially constructed from flags but event_
174
+ // is not created yet.
175
+ createEvent(getCurrentCUDAStream().device_index());
176
+ }
177
+ CUDAGuard guard(device_index_);
178
+ AT_CUDA_CHECK(cudaIpcGetEventHandle(handle, event_));
179
+ }
180
+
181
+ private:
182
+ unsigned int flags_ = cudaEventDisableTiming;
183
+ bool is_created_ = false;
184
+ bool was_recorded_ = false;
185
+ DeviceIndex device_index_ = -1;
186
+ cudaEvent_t event_{};
187
+
188
+ void createEvent(DeviceIndex device_index) {
189
+ device_index_ = device_index;
190
+ CUDAGuard guard(device_index_);
191
+ AT_CUDA_CHECK(cudaEventCreateWithFlags(&event_, flags_));
192
+ const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
193
+ if (C10_UNLIKELY(interp)) {
194
+ (*interp)->trace_gpu_event_creation(reinterpret_cast<uintptr_t>(event_));
195
+ }
196
+ is_created_ = true;
197
+ }
198
+
199
+ void moveHelper(CUDAEvent&& other) {
200
+ std::swap(flags_, other.flags_);
201
+ std::swap(is_created_, other.is_created_);
202
+ std::swap(was_recorded_, other.was_recorded_);
203
+ std::swap(device_index_, other.device_index_);
204
+ std::swap(event_, other.event_);
205
+ }
206
+ };
207
+
208
+ } // namespace at::cuda
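The struct is used in two common patterns: ordering work across streams (record on one, block another) and coarse GPU timing, which requires events constructed without cudaEventDisableTiming. A minimal sketch of both, with the enqueued kernels elided:

#include <ATen/cuda/CUDAEvent.h>
#include <c10/cuda/CUDAStream.h>

void event_sketch() {
  auto producer = c10::cuda::getStreamFromPool();
  auto consumer = c10::cuda::getStreamFromPool();

  at::cuda::CUDAEvent ready;                                             // default flags: timing disabled
  at::cuda::CUDAEvent start(cudaEventDefault), stop(cudaEventDefault);   // timing enabled

  start.record(producer);
  // ... enqueue kernels on `producer` ...
  ready.record(producer);
  ready.block(consumer);       // `consumer` now waits for the work recorded on `producer`
  stop.record(producer);

  stop.synchronize();          // safe to call from any device
  float ms = start.elapsed_time(stop);
  (void)ms;
}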
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGeneratorImpl.h ADDED
@@ -0,0 +1,138 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Generator.h>
4
+ #include <ATen/cuda/PhiloxCudaState.h>
5
+ #include <ATen/Context.h>
6
+ #include <limits>
7
+ #include <atomic>
8
+
9
+ namespace at {
10
+ /**
11
+ * Note [CUDA Graph-safe RNG states]
12
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
13
+ *
14
+ * Strategy:
15
+ * ~~~~~~~~~
16
+ * (It helps to look at
17
+ * cuda/detail/PhiloxCudaStateRaw.cuh and
18
+ * cuda/detail/UnpackRaw.cuh
19
+ * while you read this.)
20
+ *
21
+ * A CUDA graph containing multiple RNG ops behaves like a
22
+ * single giant kernel from the perspective of ops external
23
+ * to the graph. During graph capture, logic in CUDAGeneratorImpl
24
+ * records the total of all offset increments that occur in the
25
+ * graphed region, and records the final total as the offset for
26
+ * the entire graph.
27
+ *
28
+ * When the graph reruns, the logic that reruns it
29
+ * increments this device's CUDA generator's offset
30
+ * by that total.
31
+ *
32
+ * Meanwhile, within the graph, at capture time, instead of
33
+ * populating PhiloxCudaStates with the uint64_t offset pulled
34
+ * directly from the global state, PhiloxCudaState uses a pointer
35
+ * to a one-element stream-local int64_t device tensor
36
+ * holding an initial offset value, and a uint64_t holding an
37
+ * intra-graph offset. (The intra-graph offset starts from zero
38
+ * when capture begins.) In each consumer kernel,
39
+ * at::cuda::philox::unpack computes the offset to use for this kernel
40
+ * as intra-graph offset + *initial offset.
41
+ *
42
+ * When the graph reruns, the logic that reruns it first
43
+ * fill_s the initial offset tensor with this device's
44
+ * CUDA generator's current offset.
45
+ *
46
+ * The control flow above ensures graphed execution is bitwise
47
+ * identical to eager execution as long as RNG ops are enqueued
48
+ * from a single thread, even if RNG ops and graphs containing
49
+ * RNG ops are enqueued and run simultaneously on multiple streams.
50
+ *
51
+ * Usage:
52
+ * ~~~~~~
53
+ * PhiloxCudaState in this file, and unpack() in
54
+ * cuda/CUDAGraphsUtils.cuh allow non-divergent use of
55
+ * CUDAGeneratorImpl whether graph capture is underway or not.
56
+ *
57
+ * Each PhiloxCudaState instance should be used for one and only one
58
+ * consumer kernel.
59
+ *
60
+ * Example (see e.g. native/cuda/Dropout.cu):
61
+ *
62
+ * #include <ATen/cuda/CUDAGeneratorImpl.h>
63
+ * #include <ATen/cuda/CUDAGraphsUtils.cuh>
64
+ *
65
+ * __global__ void kernel(..., PhiloxCudaState philox_args) {
66
+ * auto seeds = at::cuda::philox::unpack(philox_args);
67
+ * IndexType idx = blockIdx.x * blockDim.x + threadIdx.x;
68
+ * curandStatePhilox4_32_10_t state;
69
+ * curand_init(std::get<0>(seeds), // seed
70
+ * idx, // per-thread subsequence
71
+ * std::get<1>(seeds), // offset in subsequence
72
+ * &state);
73
+ * ...
74
+ * }
75
+ *
76
+ * host_caller(...) {
77
+ * PhiloxCudaState rng_engine_inputs;
78
+ * {
79
+ * // See Note [Acquire lock when using random generators]
80
+ * std::lock_guard<std::mutex> lock(gen->mutex_);
81
+ *
82
+ * // gen could be HostState or DevState here! No divergent code needed!
83
+ * rng_engine_inputs = gen->philox_cuda_state(offset_increment);
84
+ * }
85
+ * kernel<<<...>>>(..., rng_engine_inputs);
86
+ * }
87
+ *
88
+ */
89
+
90
+ struct TORCH_CUDA_CPP_API CUDAGeneratorImpl : public c10::GeneratorImpl {
91
+ // Constructors
92
+ CUDAGeneratorImpl(DeviceIndex device_index = -1);
93
+ ~CUDAGeneratorImpl() override = default;
94
+
95
+ // CUDAGeneratorImpl methods
96
+ std::shared_ptr<CUDAGeneratorImpl> clone() const;
97
+ void set_current_seed(uint64_t seed) override;
98
+ void set_offset(uint64_t offset) override;
99
+ uint64_t get_offset() const override;
100
+ uint64_t current_seed() const override;
101
+ uint64_t seed() override;
102
+ void set_state(const c10::TensorImpl& new_state) override;
103
+ c10::intrusive_ptr<c10::TensorImpl> get_state() const override;
104
+ void set_philox_offset_per_thread(uint64_t offset);
105
+ uint64_t philox_offset_per_thread() const;
106
+ void capture_prologue(int64_t* seed_extragraph, int64_t* offset_extragraph);
107
+ uint64_t capture_epilogue();
108
+ PhiloxCudaState philox_cuda_state(uint64_t increment);
109
+
110
+ bool reset_rnn_state() {
111
+ return !no_reset_rnn_state_.test_and_set();
112
+ }
113
+
114
+ // Temporarily accommodates call sites that use philox_engine_inputs.
115
+ // Allows incremental refactor of call sites to use philox_cuda_state.
116
+ std::pair<uint64_t, uint64_t> philox_engine_inputs(uint64_t increment);
117
+
118
+ static c10::DeviceType device_type();
119
+
120
+ private:
121
+ CUDAGeneratorImpl* clone_impl() const override;
122
+ uint64_t seed_ = default_rng_seed_val;
123
+ uint64_t philox_offset_per_thread_ = 0;
124
+ int64_t* seed_extragraph_{};
125
+ int64_t* offset_extragraph_{};
126
+ uint32_t offset_intragraph_ = 0;
127
+ bool graph_expects_this_gen_ = false;
128
+ std::atomic_flag no_reset_rnn_state_;
129
+ };
130
+
131
+ namespace cuda::detail {
132
+
133
+ TORCH_CUDA_CPP_API const Generator& getDefaultCUDAGenerator(
134
+ DeviceIndex device_index = -1);
135
+ TORCH_CUDA_CPP_API Generator createCUDAGenerator(DeviceIndex device_index = -1);
136
+
137
+ } // namespace cuda::detail
138
+ } // namespace at
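A host-side companion to the kernel-side example in the note above: a CUDA op typically obtains PhiloxCudaState by grabbing the default generator, locking it, and asking for a counter increment. This is a minimal sketch; `increment` should cover however much Philox state the consumer kernel advances per thread.

#include <ATen/cuda/CUDAGeneratorImpl.h>
#include <mutex>

at::PhiloxCudaState make_philox_args(uint64_t increment) {
  auto* gen = at::get_generator_or_default<at::CUDAGeneratorImpl>(
      c10::nullopt, at::cuda::detail::getDefaultCUDAGenerator());
  // See Note [Acquire lock when using random generators]
  std::lock_guard<std::mutex> lock(gen->mutex_);
  // Behaves identically whether or not a CUDA graph capture is underway.
  return gen->philox_cuda_state(increment);
}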
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraph.h ADDED
@@ -0,0 +1,92 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Tensor.h>
4
+ #include <c10/core/Device.h>
5
+ #include <c10/cuda/CUDAGraphsC10Utils.h>
6
+ #include <c10/cuda/CUDAStream.h>
7
+
8
+ #include <mutex>
9
+
10
+ namespace at {
11
+
12
+ struct CUDAGeneratorImpl;
13
+
14
+ namespace cuda {
15
+
16
+ // Standalone way to get a unique mempool id usable as a pool=... argument
17
+ // to CUDAGraph::capture_begin
18
+ TORCH_CUDA_CPP_API MempoolId_t graph_pool_handle();
19
+
20
+ struct TORCH_CUDA_CPP_API CUDAGraph {
21
+ CUDAGraph();
22
+ ~CUDAGraph();
23
+
24
+ static void inc_pending_event_queries();
25
+ static void dec_pending_event_queries();
26
+ static int num_pending_event_queries();
27
+ void capture_begin(MempoolId_t pool={0, 0}, cudaStreamCaptureMode capture_mode = cudaStreamCaptureModeGlobal);
28
+ void capture_end();
29
+ void replay();
30
+ void reset();
31
+ MempoolId_t pool();
32
+ void enable_debug_mode();
33
+ void debug_dump(const std::string& debug_path);
34
+
35
+ protected:
36
+ #if !defined(USE_ROCM) || ROCM_VERSION >= 50300
37
+ cudaGraph_t graph_ = NULL;
38
+ cudaGraphExec_t graph_exec_ = NULL;
39
+ #endif
40
+
41
+ static std::atomic<int> pending_event_queries;
42
+
43
+ // internal states so reset() can do its best cleaning up
44
+ // Set to true in capture_end if cudaStreamEndCapture succeeded
45
+ // Set back to false soon after, when graph_ is consumed by cudaGraphInstantiate
46
+ // to create graph_exec_, then graph_ is deleted
47
+ bool has_graph_ = false;
48
+ // Set to true in capture_end if cudaGraphInstantiate succeeded
49
+ bool has_graph_exec_ = false;
50
+
51
+ // uuid of this instance's current capture, used to
52
+ // specify the pool.
53
+ CaptureId_t id_;
54
+
55
+ // the ID assigned by cuda during graph capture,
56
+ // used to identify when a stream is participating in capture
57
+ CaptureId_t capture_id_ = -1;
58
+
59
+ // uuid used to request a particular private mempool from CUDACachingAllocator.
60
+ // By default, this will be set to {id_, 0}.
61
+ //
62
+ // If capture_begin is called with "pool=other_graph.pool()", this graph's mempool_id_
63
+ // will be set to the other graph's mempool_id_, and therefore share a mempool with the
64
+ // other graph.
65
+ //
66
+ // If capture_begin is called with "pool=handle" where "handle" came from graph_pool_handle(),
67
+ // it will share a mempool with any other captures that used "pool=handle".
68
+ //
69
+ // Sharing a mempool across graphs saves memory, and it's safe if you
70
+ // know you'll replay those graphs in the same order you captured them.
71
+ MempoolId_t mempool_id_;
72
+
73
+ // Stream on which capture began
74
+ at::cuda::CUDAStream capture_stream_;
75
+
76
+ // Default generator on device where capture began
77
+ at::CUDAGeneratorImpl* capture_gen_;
78
+
79
+ // Device where capture occurred. Right now, for simplicity, we require all ops
80
+ // in a capture to run on the same device, but this is a limitation of CUDAGraph,
81
+ // not CUDA itself. We can straightforwardly modify CUDAGraph to support multi-device
82
+ // captures if needed.
83
+ int capture_dev_;
84
+
85
+ // RNG state trackers
86
+ at::Tensor seed_extragraph_;
87
+ at::Tensor offset_extragraph_;
88
+ uint64_t wholegraph_increment_;
89
+ };
90
+
91
+ } // namespace cuda
92
+ } // namespace at
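A minimal capture/replay sketch. The usual CUDA Graphs caveats apply (warm-up iterations, keeping captured tensors alive, capturing on a side stream); this is illustrative only, with x and y as hypothetical pre-allocated CUDA tensors:

#include <ATen/cuda/CUDAGraph.h>
#include <c10/cuda/CUDAGuard.h>
#include <c10/cuda/CUDAStream.h>

void graph_sketch(const at::Tensor& x, at::Tensor& y) {
  at::cuda::CUDAGraph graph;
  auto side_stream = c10::cuda::getStreamFromPool();
  {
    // Capture must happen on a non-default stream.
    c10::cuda::CUDAStreamGuard guard(side_stream);
    graph.capture_begin();
    y.copy_(x);          // enqueued work is recorded, not executed
    graph.capture_end();
  }
  graph.replay();        // runs the recorded work
  graph.replay();        // ...as many times as needed
}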
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraphsUtils.cuh ADDED
@@ -0,0 +1,57 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cuda/CUDAGeneratorImpl.h>
4
+ #include <ATen/cuda/CUDAEvent.h>
5
+ #include <ATen/cuda/PhiloxUtils.cuh>
6
+ #include <ATen/cuda/detail/CUDAHooks.h>
7
+ #include <ATen/detail/CUDAHooksInterface.h>
8
+ #include <c10/core/StreamGuard.h>
9
+ #include <c10/cuda/CUDAGraphsC10Utils.h>
10
+ #include <c10/cuda/CUDAGuard.h>
11
+
12
+ // c10/cuda/CUDAGraphsC10Utils.h has utils used by both c10 and aten.
13
+ // This file adds utils used by aten only.
14
+
15
+ namespace at::cuda {
16
+
17
+ using CaptureId_t = c10::cuda::CaptureId_t;
18
+ using CaptureStatus = c10::cuda::CaptureStatus;
19
+
20
+ // Use this version where you don't want to create a CUDA context if none exists.
21
+ inline CaptureStatus currentStreamCaptureStatus() {
22
+ #if !defined(USE_ROCM) || ROCM_VERSION >= 50300
23
+ // don't create a context if we don't have to
24
+ if (c10::cuda::hasPrimaryContext(c10::cuda::current_device())) {
25
+ return c10::cuda::currentStreamCaptureStatusMayInitCtx();
26
+ } else {
27
+ return CaptureStatus::None;
28
+ }
29
+ #else
30
+ return CaptureStatus::None;
31
+ #endif
32
+ }
33
+
34
+ inline void assertNotCapturing(std::string attempt) {
35
+ auto status = currentStreamCaptureStatus();
36
+ TORCH_CHECK(status == CaptureStatus::None,
37
+ attempt,
38
+ " during CUDA graph capture. If you need this call to be captured, "
39
+ "please file an issue. "
40
+ "Current cudaStreamCaptureStatus: ",
41
+ status);
42
+ }
43
+
44
+ inline void errorIfCapturingCudnnBenchmark(std::string version_specific) {
45
+ auto status = currentStreamCaptureStatus();
46
+ TORCH_CHECK(status == CaptureStatus::None,
47
+ "Current cudaStreamCaptureStatus: ",
48
+ status,
49
+ "\nCapturing ",
50
+ version_specific,
51
+ "is prohibited. Possible causes of this error:\n"
52
+ "1. No warmup iterations occurred before capture.\n"
53
+ "2. The convolutions you're trying to capture use dynamic shapes, "
54
+ "in which case capturing them is generally prohibited.");
55
+ }
56
+
57
+ } // namespace at::cuda
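A minimal sketch of guarding a call that cannot legally run while the current stream is being captured (this is a .cuh header, so the snippet belongs in a CUDA translation unit):

#include <ATen/cuda/CUDAGraphsUtils.cuh>

void synchronize_if_not_capturing() {
  // Produces a descriptive TORCH_CHECK error while a capture is underway.
  at::cuda::assertNotCapturing("cudaDeviceSynchronize was called");
  AT_CUDA_CHECK(cudaDeviceSynchronize());
}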
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseDescriptors.h ADDED
@@ -0,0 +1,290 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Tensor.h>
4
+ #include <ATen/cuda/CUDAContext.h>
5
+ #include <ATen/cuda/CUDASparse.h>
6
+
7
+ #include <c10/core/ScalarType.h>
8
+
9
+ #if defined(USE_ROCM)
10
+ #include <type_traits>
11
+ #endif
12
+
13
+ namespace at::cuda::sparse {
14
+
15
+ template <typename T, cusparseStatus_t (*destructor)(T*)>
16
+ struct CuSparseDescriptorDeleter {
17
+ void operator()(T* x) {
18
+ if (x != nullptr) {
19
+ TORCH_CUDASPARSE_CHECK(destructor(x));
20
+ }
21
+ }
22
+ };
23
+
24
+ template <typename T, cusparseStatus_t (*destructor)(T*)>
25
+ class CuSparseDescriptor {
26
+ public:
27
+ T* descriptor() const {
28
+ return descriptor_.get();
29
+ }
30
+ T* descriptor() {
31
+ return descriptor_.get();
32
+ }
33
+
34
+ protected:
35
+ std::unique_ptr<T, CuSparseDescriptorDeleter<T, destructor>> descriptor_;
36
+ };
37
+
38
+ #if AT_USE_CUSPARSE_CONST_DESCRIPTORS() || AT_USE_HIPSPARSE_CONST_DESCRIPTORS()
39
+ template <typename T, cusparseStatus_t (*destructor)(const T*)>
40
+ struct ConstCuSparseDescriptorDeleter {
41
+ void operator()(T* x) {
42
+ if (x != nullptr) {
43
+ TORCH_CUDASPARSE_CHECK(destructor(x));
44
+ }
45
+ }
46
+ };
47
+
48
+ template <typename T, cusparseStatus_t (*destructor)(const T*)>
49
+ class ConstCuSparseDescriptor {
50
+ public:
51
+ T* descriptor() const {
52
+ return descriptor_.get();
53
+ }
54
+ T* descriptor() {
55
+ return descriptor_.get();
56
+ }
57
+
58
+ protected:
59
+ std::unique_ptr<T, ConstCuSparseDescriptorDeleter<T, destructor>> descriptor_;
60
+ };
61
+ #endif // AT_USE_CUSPARSE_CONST_DESCRIPTORS || AT_USE_HIPSPARSE_CONST_DESCRIPTORS
62
+
63
+ #if defined(USE_ROCM)
64
+ using cusparseMatDescr = std::remove_pointer<hipsparseMatDescr_t>::type;
65
+ using cusparseDnMatDescr = std::remove_pointer<hipsparseDnMatDescr_t>::type;
66
+ using cusparseDnVecDescr = std::remove_pointer<hipsparseDnVecDescr_t>::type;
67
+ using cusparseSpMatDescr = std::remove_pointer<hipsparseSpMatDescr_t>::type;
69
+ using cusparseSpGEMMDescr = std::remove_pointer<hipsparseSpGEMMDescr_t>::type;
70
+ #if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE()
71
+ using bsrsv2Info = std::remove_pointer<bsrsv2Info_t>::type;
72
+ using bsrsm2Info = std::remove_pointer<bsrsm2Info_t>::type;
73
+ #endif
74
+ #endif
75
+
76
+ // NOTE: This is only needed for CUDA 11 and earlier, since CUDA 12 introduced
77
+ // an API for const descriptors.
78
+ cusparseStatus_t destroyConstDnMat(const cusparseDnMatDescr* dnMatDescr);
79
+
80
+ class TORCH_CUDA_CPP_API CuSparseMatDescriptor
81
+ : public CuSparseDescriptor<cusparseMatDescr, &cusparseDestroyMatDescr> {
82
+ public:
83
+ CuSparseMatDescriptor() {
84
+ cusparseMatDescr_t raw_descriptor;
85
+ TORCH_CUDASPARSE_CHECK(cusparseCreateMatDescr(&raw_descriptor));
86
+ descriptor_.reset(raw_descriptor);
87
+ }
88
+
89
+ CuSparseMatDescriptor(bool upper, bool unit) {
90
+ cusparseFillMode_t fill_mode =
91
+ upper ? CUSPARSE_FILL_MODE_UPPER : CUSPARSE_FILL_MODE_LOWER;
92
+ cusparseDiagType_t diag_type =
93
+ unit ? CUSPARSE_DIAG_TYPE_UNIT : CUSPARSE_DIAG_TYPE_NON_UNIT;
94
+ cusparseMatDescr_t raw_descriptor;
95
+ TORCH_CUDASPARSE_CHECK(cusparseCreateMatDescr(&raw_descriptor));
96
+ TORCH_CUDASPARSE_CHECK(cusparseSetMatFillMode(raw_descriptor, fill_mode));
97
+ TORCH_CUDASPARSE_CHECK(cusparseSetMatDiagType(raw_descriptor, diag_type));
98
+ descriptor_.reset(raw_descriptor);
99
+ }
100
+ };
101
+
102
+ #if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE()
103
+
104
+ class TORCH_CUDA_CPP_API CuSparseBsrsv2Info
105
+ : public CuSparseDescriptor<bsrsv2Info, &cusparseDestroyBsrsv2Info> {
106
+ public:
107
+ CuSparseBsrsv2Info() {
108
+ bsrsv2Info_t raw_descriptor;
109
+ TORCH_CUDASPARSE_CHECK(cusparseCreateBsrsv2Info(&raw_descriptor));
110
+ descriptor_.reset(raw_descriptor);
111
+ }
112
+ };
113
+
114
+ class TORCH_CUDA_CPP_API CuSparseBsrsm2Info
115
+ : public CuSparseDescriptor<bsrsm2Info, &cusparseDestroyBsrsm2Info> {
116
+ public:
117
+ CuSparseBsrsm2Info() {
118
+ bsrsm2Info_t raw_descriptor;
119
+ TORCH_CUDASPARSE_CHECK(cusparseCreateBsrsm2Info(&raw_descriptor));
120
+ descriptor_.reset(raw_descriptor);
121
+ }
122
+ };
123
+
124
+ #endif // AT_USE_HIPSPARSE_TRIANGULAR_SOLVE
125
+
126
+ #if AT_USE_CUSPARSE_GENERIC_API() || AT_USE_HIPSPARSE_GENERIC_API()
127
+
128
+ cusparseIndexType_t getCuSparseIndexType(const c10::ScalarType& scalar_type);
129
+
130
+ #if AT_USE_CUSPARSE_NON_CONST_DESCRIPTORS() || AT_USE_HIPSPARSE_NON_CONST_DESCRIPTORS()
131
+ class TORCH_CUDA_CPP_API CuSparseDnMatDescriptor
132
+ : public CuSparseDescriptor<cusparseDnMatDescr, &cusparseDestroyDnMat> {
133
+ public:
134
+ explicit CuSparseDnMatDescriptor(const Tensor& input, int64_t batch_offset = -1);
135
+ };
136
+
137
+ class TORCH_CUDA_CPP_API CuSparseConstDnMatDescriptor
138
+ : public CuSparseDescriptor<const cusparseDnMatDescr, &destroyConstDnMat> {
139
+ public:
140
+ explicit CuSparseConstDnMatDescriptor(const Tensor& input, int64_t batch_offset = -1);
141
+ cusparseDnMatDescr* unsafe_mutable_descriptor() const {
142
+ return const_cast<cusparseDnMatDescr*>(descriptor());
143
+ }
144
+ cusparseDnMatDescr* unsafe_mutable_descriptor() {
145
+ return const_cast<cusparseDnMatDescr*>(descriptor());
146
+ }
147
+ };
148
+
149
+ class TORCH_CUDA_CPP_API CuSparseDnVecDescriptor
150
+ : public CuSparseDescriptor<cusparseDnVecDescr, &cusparseDestroyDnVec> {
151
+ public:
152
+ explicit CuSparseDnVecDescriptor(const Tensor& input);
153
+ };
154
+
155
+ class TORCH_CUDA_CPP_API CuSparseSpMatDescriptor
156
+ : public CuSparseDescriptor<cusparseSpMatDescr, &cusparseDestroySpMat> {};
157
+
158
+ #elif AT_USE_CUSPARSE_CONST_DESCRIPTORS() || AT_USE_HIPSPARSE_CONST_DESCRIPTORS()
159
+ class TORCH_CUDA_CPP_API CuSparseDnMatDescriptor
160
+ : public ConstCuSparseDescriptor<
161
+ cusparseDnMatDescr,
162
+ &cusparseDestroyDnMat> {
163
+ public:
164
+ explicit CuSparseDnMatDescriptor(
165
+ const Tensor& input,
166
+ int64_t batch_offset = -1);
167
+ };
168
+
169
+ class TORCH_CUDA_CPP_API CuSparseConstDnMatDescriptor
170
+ : public ConstCuSparseDescriptor<
171
+ const cusparseDnMatDescr,
172
+ &destroyConstDnMat> {
173
+ public:
174
+ explicit CuSparseConstDnMatDescriptor(
175
+ const Tensor& input,
176
+ int64_t batch_offset = -1);
177
+ cusparseDnMatDescr* unsafe_mutable_descriptor() const {
178
+ return const_cast<cusparseDnMatDescr*>(descriptor());
179
+ }
180
+ cusparseDnMatDescr* unsafe_mutable_descriptor() {
181
+ return const_cast<cusparseDnMatDescr*>(descriptor());
182
+ }
183
+ };
184
+
185
+ class TORCH_CUDA_CPP_API CuSparseDnVecDescriptor
186
+ : public ConstCuSparseDescriptor<
187
+ cusparseDnVecDescr,
188
+ &cusparseDestroyDnVec> {
189
+ public:
190
+ explicit CuSparseDnVecDescriptor(const Tensor& input);
191
+ };
192
+
193
+ class TORCH_CUDA_CPP_API CuSparseSpMatDescriptor
194
+ : public ConstCuSparseDescriptor<
195
+ cusparseSpMatDescr,
196
+ &cusparseDestroySpMat> {};
197
+ #endif // AT_USE_CUSPARSE_CONST_DESCRIPTORS() || AT_USE_HIPSPARSE_CONST_DESCRIPTORS()
198
+
199
+ class TORCH_CUDA_CPP_API CuSparseSpMatCsrDescriptor
200
+ : public CuSparseSpMatDescriptor {
201
+ public:
202
+ explicit CuSparseSpMatCsrDescriptor(const Tensor& input, int64_t batch_offset = -1);
203
+
204
+ std::tuple<int64_t, int64_t, int64_t> get_size() {
205
+ int64_t rows, cols, nnz;
206
+ TORCH_CUDASPARSE_CHECK(cusparseSpMatGetSize(
207
+ this->descriptor(),
208
+ &rows,
209
+ &cols,
210
+ &nnz));
211
+ return std::make_tuple(rows, cols, nnz);
212
+ }
213
+
214
+ void set_tensor(const Tensor& input) {
215
+ auto crow_indices = input.crow_indices();
216
+ auto col_indices = input.col_indices();
217
+ auto values = input.values();
218
+
219
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(crow_indices.is_contiguous());
220
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(col_indices.is_contiguous());
221
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.is_contiguous());
222
+ TORCH_CUDASPARSE_CHECK(cusparseCsrSetPointers(
223
+ this->descriptor(),
224
+ crow_indices.data_ptr(),
225
+ col_indices.data_ptr(),
226
+ values.data_ptr()));
227
+ }
228
+
229
+ #if AT_USE_CUSPARSE_GENERIC_SPSV()
230
+ void set_mat_fill_mode(bool upper) {
231
+ cusparseFillMode_t fill_mode =
232
+ upper ? CUSPARSE_FILL_MODE_UPPER : CUSPARSE_FILL_MODE_LOWER;
233
+ TORCH_CUDASPARSE_CHECK(cusparseSpMatSetAttribute(
234
+ this->descriptor(),
235
+ CUSPARSE_SPMAT_FILL_MODE,
236
+ &fill_mode,
237
+ sizeof(fill_mode)));
238
+ }
239
+
240
+ void set_mat_diag_type(bool unit) {
241
+ cusparseDiagType_t diag_type =
242
+ unit ? CUSPARSE_DIAG_TYPE_UNIT : CUSPARSE_DIAG_TYPE_NON_UNIT;
243
+ TORCH_CUDASPARSE_CHECK(cusparseSpMatSetAttribute(
244
+ this->descriptor(),
245
+ CUSPARSE_SPMAT_DIAG_TYPE,
246
+ &diag_type,
247
+ sizeof(diag_type)));
248
+ }
249
+ #endif
250
+ };
251
+
252
+ #if AT_USE_CUSPARSE_GENERIC_SPSV()
253
+ class TORCH_CUDA_CPP_API CuSparseSpSVDescriptor
254
+ : public CuSparseDescriptor<cusparseSpSVDescr, &cusparseSpSV_destroyDescr> {
255
+ public:
256
+ CuSparseSpSVDescriptor() {
257
+ cusparseSpSVDescr_t raw_descriptor;
258
+ TORCH_CUDASPARSE_CHECK(cusparseSpSV_createDescr(&raw_descriptor));
259
+ descriptor_.reset(raw_descriptor);
260
+ }
261
+ };
262
+ #endif
263
+
264
+ #if AT_USE_CUSPARSE_GENERIC_SPSM()
265
+ class TORCH_CUDA_CPP_API CuSparseSpSMDescriptor
266
+ : public CuSparseDescriptor<cusparseSpSMDescr, &cusparseSpSM_destroyDescr> {
267
+ public:
268
+ CuSparseSpSMDescriptor() {
269
+ cusparseSpSMDescr_t raw_descriptor;
270
+ TORCH_CUDASPARSE_CHECK(cusparseSpSM_createDescr(&raw_descriptor));
271
+ descriptor_.reset(raw_descriptor);
272
+ }
273
+ };
274
+ #endif
275
+
276
+ #if (defined(USE_ROCM) && ROCM_VERSION >= 50200) || !defined(USE_ROCM)
277
+ class TORCH_CUDA_CPP_API CuSparseSpGEMMDescriptor
278
+ : public CuSparseDescriptor<cusparseSpGEMMDescr, &cusparseSpGEMM_destroyDescr> {
279
+ public:
280
+ CuSparseSpGEMMDescriptor() {
281
+ cusparseSpGEMMDescr_t raw_descriptor;
282
+ TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_createDescr(&raw_descriptor));
283
+ descriptor_.reset(raw_descriptor);
284
+ }
285
+ };
286
+ #endif
287
+
288
+ #endif // AT_USE_CUSPARSE_GENERIC_API() || AT_USE_HIPSPARSE_GENERIC_API()
289
+
290
+ } // namespace at::cuda::sparse
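All of these wrappers follow the same RAII pattern: the constructor calls the matching cusparseCreate*, and the unique_ptr deleter calls the cusparseDestroy* counterpart when the object goes out of scope, so call sites never free descriptors by hand. A minimal sketch with the legacy matrix descriptor:

#include <ATen/cuda/CUDASparseDescriptors.h>

void mat_descr_sketch() {
  // Lower-triangular, non-unit-diagonal legacy descriptor.
  at::cuda::sparse::CuSparseMatDescriptor descr(/*upper=*/false, /*unit=*/false);
  cusparseMatDescr_t raw = descr.descriptor();   // borrowed for a cuSPARSE call
  (void)raw;
}   // descriptor destroyed automatically here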
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CachingHostAllocator.h ADDED
@@ -0,0 +1,37 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Allocator.h>
4
+ #include <c10/cuda/CUDAStream.h>
5
+
6
+ namespace at::cuda {
7
+
8
+ //
9
+ // A caching allocator for CUDA host allocations (pinned memory).
10
+ //
11
+ // This provides a drop-in replacement for THCudaHostAllocator, which re-uses
12
+ // freed pinned (page-locked) memory allocations. This avoids device
13
+ // synchronizations due to cudaFreeHost calls.
14
+ //
15
+ // To ensure correct behavior, THCCachingHostAllocator_recordEvent must be
16
+ // called anytime a pointer from this allocator is used in a cudaMemcpyAsync
17
+ // call between host and device, and passed the corresponding context from the
18
+ // allocation. This is currently invoked by at::native::copy_kernel_cuda.
19
+ //
20
+ // Note that this allocator does not split larger allocations into smaller
21
+ // blocks, unlike the caching device allocator.
22
+ //
23
+ TORCH_CUDA_CPP_API c10::Allocator* getCachingHostAllocator();
24
+
25
+ // Records an event in the specified stream. The allocation corresponding to the
26
+ // input `ptr`/`ctx` will not be re-used until the event has occurred.
27
+ TORCH_CUDA_CPP_API bool
28
+ CachingHostAllocator_recordEvent(void* ptr, void* ctx, c10::cuda::CUDAStream stream);
29
+
30
+ // Releases cached pinned memory allocations via cudaFreeHost
31
+ TORCH_CUDA_CPP_API void CachingHostAllocator_emptyCache();
32
+
33
+ inline TORCH_CUDA_CPP_API at::DataPtr HostAlloc(size_t size) {
34
+ return getCachingHostAllocator()->allocate(size);
35
+ }
36
+
37
+ } // namespace at::cuda
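A minimal sketch of the pattern the comment describes: stage a host-to-device copy through a pinned buffer from this allocator, then record the stream so the block is not handed out again before the copy finishes (device_dst is a hypothetical pre-allocated device buffer):

#include <ATen/cuda/CachingHostAllocator.h>
#include <ATen/cuda/Exceptions.h>

void h2d_copy_sketch(void* device_dst, size_t nbytes) {
  auto stream = c10::cuda::getCurrentCUDAStream();
  at::DataPtr staging = at::cuda::HostAlloc(nbytes);   // pinned host memory
  // ... fill staging.get() on the CPU ...
  AT_CUDA_CHECK(cudaMemcpyAsync(device_dst, staging.get(), nbytes,
                                cudaMemcpyHostToDevice, stream));
  // Ties reuse of the block to completion of work on `stream`, so the DataPtr
  // can be released immediately without racing the asynchronous copy.
  at::cuda::CachingHostAllocator_recordEvent(staging.get(), staging.get_context(), stream);
}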
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/Exceptions.h ADDED
@@ -0,0 +1,174 @@
1
+ #pragma once
2
+
3
+ #include <cublas_v2.h>
4
+ #include <cusparse.h>
5
+ #include <c10/macros/Export.h>
6
+
7
+ #ifdef CUDART_VERSION
8
+ #include <cusolver_common.h>
9
+ #endif
10
+
11
+ #include <ATen/Context.h>
12
+ #include <c10/util/Exception.h>
13
+ #include <c10/cuda/CUDAException.h>
14
+
15
+
16
+ namespace c10 {
17
+
18
+ class CuDNNError : public c10::Error {
19
+ using Error::Error;
20
+ };
21
+
22
+ } // namespace c10
23
+
24
+ #define AT_CUDNN_FRONTEND_CHECK(EXPR, ...) \
25
+ do { \
26
+ auto error_object = EXPR; \
27
+ if (!error_object.is_good()) { \
28
+ TORCH_CHECK_WITH(CuDNNError, false, \
29
+ "cuDNN Frontend error: ", error_object.get_message()); \
30
+ } \
31
+ } while (0) \
32
+
33
+ #define AT_CUDNN_CHECK_WITH_SHAPES(EXPR, ...) AT_CUDNN_CHECK(EXPR, "\n", ##__VA_ARGS__)
34
+
35
+ // See Note [CHECK macro]
36
+ #define AT_CUDNN_CHECK(EXPR, ...) \
37
+ do { \
38
+ cudnnStatus_t status = EXPR; \
39
+ if (status != CUDNN_STATUS_SUCCESS) { \
40
+ if (status == CUDNN_STATUS_NOT_SUPPORTED) { \
41
+ TORCH_CHECK_WITH(CuDNNError, false, \
42
+ "cuDNN error: ", \
43
+ cudnnGetErrorString(status), \
44
+ ". This error may appear if you passed in a non-contiguous input.", ##__VA_ARGS__); \
45
+ } else { \
46
+ TORCH_CHECK_WITH(CuDNNError, false, \
47
+ "cuDNN error: ", cudnnGetErrorString(status), ##__VA_ARGS__); \
48
+ } \
49
+ } \
50
+ } while (0)
51
+
52
+ namespace at::cuda::blas {
53
+ C10_EXPORT const char* _cublasGetErrorEnum(cublasStatus_t error);
54
+ } // namespace at::cuda::blas
55
+
56
+ #define TORCH_CUDABLAS_CHECK(EXPR) \
57
+ do { \
58
+ cublasStatus_t __err = EXPR; \
59
+ TORCH_CHECK(__err == CUBLAS_STATUS_SUCCESS, \
60
+ "CUDA error: ", \
61
+ at::cuda::blas::_cublasGetErrorEnum(__err), \
62
+ " when calling `" #EXPR "`"); \
63
+ } while (0)
64
+
65
+ const char *cusparseGetErrorString(cusparseStatus_t status);
66
+
67
+ #define TORCH_CUDASPARSE_CHECK(EXPR) \
68
+ do { \
69
+ cusparseStatus_t __err = EXPR; \
70
+ TORCH_CHECK(__err == CUSPARSE_STATUS_SUCCESS, \
71
+ "CUDA error: ", \
72
+ cusparseGetErrorString(__err), \
73
+ " when calling `" #EXPR "`"); \
74
+ } while (0)
75
+
76
+ // cusolver related headers are only supported on cuda now
77
+ #ifdef CUDART_VERSION
78
+
79
+ namespace at::cuda::solver {
80
+ C10_EXPORT const char* cusolverGetErrorMessage(cusolverStatus_t status);
81
+
82
+ constexpr const char* _cusolver_backend_suggestion = \
83
+ "If you keep seeing this error, you may use " \
84
+ "`torch.backends.cuda.preferred_linalg_library()` to try " \
85
+ "linear algebra operators with other supported backends. " \
86
+ "See https://pytorch.org/docs/stable/backends.html#torch.backends.cuda.preferred_linalg_library";
87
+
88
+ } // namespace at::cuda::solver
89
+
90
+ // When cuda < 11.5, cusolver raises CUSOLVER_STATUS_EXECUTION_FAILED when input contains nan.
91
+ // When cuda >= 11.5, cusolver normally finishes execution and sets info array indicating convergence issue.
92
+ #define TORCH_CUSOLVER_CHECK(EXPR) \
93
+ do { \
94
+ cusolverStatus_t __err = EXPR; \
95
+ if ((CUDA_VERSION < 11500 && \
96
+ __err == CUSOLVER_STATUS_EXECUTION_FAILED) || \
97
+ (CUDA_VERSION >= 11500 && \
98
+ __err == CUSOLVER_STATUS_INVALID_VALUE)) { \
99
+ TORCH_CHECK_LINALG( \
100
+ false, \
101
+ "cusolver error: ", \
102
+ at::cuda::solver::cusolverGetErrorMessage(__err), \
103
+ ", when calling `" #EXPR "`", \
104
+ ". This error may appear if the input matrix contains NaN. ", \
105
+ at::cuda::solver::_cusolver_backend_suggestion); \
106
+ } else { \
107
+ TORCH_CHECK( \
108
+ __err == CUSOLVER_STATUS_SUCCESS, \
109
+ "cusolver error: ", \
110
+ at::cuda::solver::cusolverGetErrorMessage(__err), \
111
+ ", when calling `" #EXPR "`. ", \
112
+ at::cuda::solver::_cusolver_backend_suggestion); \
113
+ } \
114
+ } while (0)
115
+
116
+ #else
117
+ #define TORCH_CUSOLVER_CHECK(EXPR) EXPR
118
+ #endif
119
+
120
+ #define AT_CUDA_CHECK(EXPR) C10_CUDA_CHECK(EXPR)
121
+
122
+ // For CUDA Driver API
123
+ //
124
+ // This is here instead of in c10 because NVRTC is loaded dynamically via a stub
125
+ // in ATen, and we need to use its nvrtcGetErrorString.
126
+ // See NOTE [ USE OF NVRTC AND DRIVER API ].
127
+ #if !defined(USE_ROCM)
128
+
129
+ #define AT_CUDA_DRIVER_CHECK(EXPR) \
130
+ do { \
131
+ CUresult __err = EXPR; \
132
+ if (__err != CUDA_SUCCESS) { \
133
+ const char* err_str; \
134
+ CUresult get_error_str_err C10_UNUSED = at::globalContext().getNVRTC().cuGetErrorString(__err, &err_str); \
135
+ if (get_error_str_err != CUDA_SUCCESS) { \
136
+ AT_ERROR("CUDA driver error: unknown error"); \
137
+ } else { \
138
+ AT_ERROR("CUDA driver error: ", err_str); \
139
+ } \
140
+ } \
141
+ } while (0)
142
+
143
+ #else
144
+
145
+ #define AT_CUDA_DRIVER_CHECK(EXPR) \
146
+ do { \
147
+ CUresult __err = EXPR; \
148
+ if (__err != CUDA_SUCCESS) { \
149
+ AT_ERROR("CUDA driver error: ", static_cast<int>(__err)); \
150
+ } \
151
+ } while (0)
152
+
153
+ #endif
154
+
155
+ // For CUDA NVRTC
156
+ //
157
+ // Note: As of CUDA 10, nvrtc error code 7, NVRTC_ERROR_BUILTIN_OPERATION_FAILURE,
158
+ // incorrectly produces the error string "NVRTC unknown error."
159
+ // The following maps it correctly.
160
+ //
161
+ // This is here instead of in c10 because NVRTC is loaded dynamically via a stub
162
+ // in ATen, and we need to use its nvrtcGetErrorString.
163
+ // See NOTE [ USE OF NVRTC AND DRIVER API ].
164
+ #define AT_CUDA_NVRTC_CHECK(EXPR) \
165
+ do { \
166
+ nvrtcResult __err = EXPR; \
167
+ if (__err != NVRTC_SUCCESS) { \
168
+ if (static_cast<int>(__err) != 7) { \
169
+ AT_ERROR("CUDA NVRTC error: ", at::globalContext().getNVRTC().nvrtcGetErrorString(__err)); \
170
+ } else { \
171
+ AT_ERROR("CUDA NVRTC error: NVRTC_ERROR_BUILTIN_OPERATION_FAILURE"); \
172
+ } \
173
+ } \
174
+ } while (0)
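Each of these macros wraps a status-returning call and turns a failure into a c10::Error with a readable message. A minimal sketch of the two most common ones at a call site (d_x and d_y are hypothetical device arrays; host_result is a host pointer, since cuBLAS defaults to host pointer mode):

#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>

void checked_calls_sketch(const float* d_x, float* d_y, int n, float* host_result) {
  // Runtime API call, checked via AT_CUDA_CHECK (an alias of C10_CUDA_CHECK).
  AT_CUDA_CHECK(cudaMemsetAsync(d_y, 0, n * sizeof(float),
                                c10::cuda::getCurrentCUDAStream()));
  // cuBLAS call, checked via the dedicated cuBLAS macro.
  TORCH_CUDABLAS_CHECK(cublasSdot(at::cuda::getCurrentCUDABlasHandle(),
                                  n, d_x, 1, d_y, 1, host_result));
}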
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/NumericLimits.cuh ADDED
@@ -0,0 +1,121 @@
1
+ #pragma once
2
+
3
+ #include <cuda.h>
4
+ #include <limits.h>
5
+ #include <math.h>
6
+ #include <float.h>
7
+
8
+ // NumericLimits.cuh is a holder for numeric limits definitions of commonly used
9
+ // types. This header is very specific to ROCm HIP and may be removed in the future.
10
+ // This header is derived from the legacy THCNumerics.cuh.
11
+
12
+ // The lower_bound and upper_bound constants are same as lowest and max for
13
+ // integral types, but are -inf and +inf for floating point types. They are
14
+ // useful in implementing min, max, etc.
15
+
16
+ namespace at {
17
+
18
+ template <typename T>
19
+ struct numeric_limits {
20
+ };
21
+
22
+ // WARNING: the following at::numeric_limits definitions are there only to support
23
+ // HIP compilation for the moment. Use std::numeric_limits if you are not
24
+ // compiling for ROCm.
25
+ // from @colesbury: "The functions on numeric_limits aren't marked with
26
+ // __device__ which is why they don't work with ROCm. CUDA allows them
27
+ // because they're constexpr."
28
+
29
+ namespace {
30
+ // ROCm doesn't like INFINITY either.
31
+ constexpr double inf = INFINITY;
32
+ }
33
+
34
+ template <>
35
+ struct numeric_limits<bool> {
36
+ static inline __host__ __device__ bool lowest() { return false; }
37
+ static inline __host__ __device__ bool max() { return true; }
38
+ static inline __host__ __device__ bool lower_bound() { return false; }
39
+ static inline __host__ __device__ bool upper_bound() { return true; }
40
+ };
41
+
42
+ template <>
43
+ struct numeric_limits<uint8_t> {
44
+ static inline __host__ __device__ uint8_t lowest() { return 0; }
45
+ static inline __host__ __device__ uint8_t max() { return UINT8_MAX; }
46
+ static inline __host__ __device__ uint8_t lower_bound() { return 0; }
47
+ static inline __host__ __device__ uint8_t upper_bound() { return UINT8_MAX; }
48
+ };
49
+
50
+ template <>
51
+ struct numeric_limits<int8_t> {
52
+ static inline __host__ __device__ int8_t lowest() { return INT8_MIN; }
53
+ static inline __host__ __device__ int8_t max() { return INT8_MAX; }
54
+ static inline __host__ __device__ int8_t lower_bound() { return INT8_MIN; }
55
+ static inline __host__ __device__ int8_t upper_bound() { return INT8_MAX; }
56
+ };
57
+
58
+ template <>
59
+ struct numeric_limits<int16_t> {
60
+ static inline __host__ __device__ int16_t lowest() { return INT16_MIN; }
61
+ static inline __host__ __device__ int16_t max() { return INT16_MAX; }
62
+ static inline __host__ __device__ int16_t lower_bound() { return INT16_MIN; }
63
+ static inline __host__ __device__ int16_t upper_bound() { return INT16_MAX; }
64
+ };
65
+
66
+ template <>
67
+ struct numeric_limits<int32_t> {
68
+ static inline __host__ __device__ int32_t lowest() { return INT32_MIN; }
69
+ static inline __host__ __device__ int32_t max() { return INT32_MAX; }
70
+ static inline __host__ __device__ int32_t lower_bound() { return INT32_MIN; }
71
+ static inline __host__ __device__ int32_t upper_bound() { return INT32_MAX; }
72
+ };
73
+
74
+ template <>
75
+ struct numeric_limits<int64_t> {
76
+ #ifdef _MSC_VER
77
+ static inline __host__ __device__ int64_t lowest() { return _I64_MIN; }
78
+ static inline __host__ __device__ int64_t max() { return _I64_MAX; }
79
+ static inline __host__ __device__ int64_t lower_bound() { return _I64_MIN; }
80
+ static inline __host__ __device__ int64_t upper_bound() { return _I64_MAX; }
81
+ #else
82
+ static inline __host__ __device__ int64_t lowest() { return INT64_MIN; }
83
+ static inline __host__ __device__ int64_t max() { return INT64_MAX; }
84
+ static inline __host__ __device__ int64_t lower_bound() { return INT64_MIN; }
85
+ static inline __host__ __device__ int64_t upper_bound() { return INT64_MAX; }
86
+ #endif
87
+ };
88
+
89
+ template <>
90
+ struct numeric_limits<at::Half> {
91
+ static inline __host__ __device__ at::Half lowest() { return at::Half(0xFBFF, at::Half::from_bits()); }
92
+ static inline __host__ __device__ at::Half max() { return at::Half(0x7BFF, at::Half::from_bits()); }
93
+ static inline __host__ __device__ at::Half lower_bound() { return at::Half(0xFC00, at::Half::from_bits()); }
94
+ static inline __host__ __device__ at::Half upper_bound() { return at::Half(0x7C00, at::Half::from_bits()); }
95
+ };
96
+
97
+ template <>
98
+ struct numeric_limits<at::BFloat16> {
99
+ static inline __host__ __device__ at::BFloat16 lowest() { return at::BFloat16(0xFF7F, at::BFloat16::from_bits()); }
100
+ static inline __host__ __device__ at::BFloat16 max() { return at::BFloat16(0x7F7F, at::BFloat16::from_bits()); }
101
+ static inline __host__ __device__ at::BFloat16 lower_bound() { return at::BFloat16(0xFF80, at::BFloat16::from_bits()); }
102
+ static inline __host__ __device__ at::BFloat16 upper_bound() { return at::BFloat16(0x7F80, at::BFloat16::from_bits()); }
103
+ };
104
+
105
+ template <>
106
+ struct numeric_limits<float> {
107
+ static inline __host__ __device__ float lowest() { return -FLT_MAX; }
108
+ static inline __host__ __device__ float max() { return FLT_MAX; }
109
+ static inline __host__ __device__ float lower_bound() { return -static_cast<float>(inf); }
110
+ static inline __host__ __device__ float upper_bound() { return static_cast<float>(inf); }
111
+ };
112
+
113
+ template <>
114
+ struct numeric_limits<double> {
115
+ static inline __host__ __device__ double lowest() { return -DBL_MAX; }
116
+ static inline __host__ __device__ double max() { return DBL_MAX; }
117
+ static inline __host__ __device__ double lower_bound() { return -inf; }
118
+ static inline __host__ __device__ double upper_bound() { return inf; }
119
+ };
120
+
121
+ } // namespace at
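The lower_bound()/upper_bound() accessors exist mainly to supply reduction identities that also work on ROCm. A minimal sketch, assuming a single-thread <<<1, 1>>> launch (the kernel name and launch shape are illustrative; a real reduction would use shared memory or CUB):

// Hedged sketch: use at::numeric_limits as the identity of a max reduction.
#include <ATen/cuda/NumericLimits.cuh>

template <typename T>
__global__ void serial_max(const T* in, T* out, int64_t n) {
  T acc = at::numeric_limits<T>::lower_bound();  // -inf for floating point types
  for (int64_t i = 0; i < n; ++i) {
    acc = in[i] > acc ? in[i] : acc;
  }
  *out = acc;
}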
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/PeerToPeerAccess.h ADDED
@@ -0,0 +1,11 @@
+ #include <c10/macros/Macros.h>
+ #include <cstdint>
+
+ namespace at::cuda {
+ namespace detail {
+ void init_p2p_access_cache(int64_t num_devices);
+ }
+
+ TORCH_CUDA_CPP_API bool get_p2p_access(int source_dev, int dest_dev);
+
+ } // namespace at::cuda
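get_p2p_access answers whether one device can map another device's memory directly; callers gate direct device-to-device copies on it and otherwise stage through the host. A hedged sketch (only get_p2p_access comes from this header):

// Hedged sketch: decide whether a cross-device copy can go peer-to-peer.
#include <ATen/cuda/PeerToPeerAccess.h>

bool can_copy_directly(int src_dev, int dst_dev) {
  return src_dev == dst_dev || at::cuda::get_p2p_access(src_dev, dst_dev);
}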
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/PinnedMemoryAllocator.h ADDED
@@ -0,0 +1,11 @@
+ #pragma once
+
+ #include <c10/core/Allocator.h>
+ #include <ATen/cuda/CachingHostAllocator.h>
+
+ namespace at::cuda {
+
+ inline TORCH_CUDA_CPP_API at::Allocator* getPinnedMemoryAllocator() {
+ return getCachingHostAllocator();
+ }
+ } // namespace at::cuda
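A hedged sketch of how the pinned-memory allocator is consumed: the returned at::Allocator hands back an at::DataPtr that owns a page-locked host buffer suitable for async copies (the 1 MiB size is illustrative):

// Hedged sketch: allocate a pinned staging buffer; freed when the DataPtr dies.
#include <ATen/cuda/PinnedMemoryAllocator.h>

void make_staging_buffer() {
  at::Allocator* alloc = at::cuda::getPinnedMemoryAllocator();
  at::DataPtr staging = alloc->allocate(1 << 20);  // 1 MiB of page-locked host memory
  // staging.get() can now be passed to cudaMemcpyAsync as the host pointer.
}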
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/Sleep.h ADDED
@@ -0,0 +1,10 @@
+ #pragma once
+ #include <c10/macros/Export.h>
+ #include <cstdint>
+
+ namespace at::cuda {
+
+ // enqueues a kernel that spins for the specified number of cycles
+ TORCH_CUDA_CU_API void sleep(int64_t cycles);
+
+ } // namespace at::cuda
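Because at::cuda::sleep only enqueues a spin kernel, it is handy in tests that need a stream to stay busy while synchronization behavior is observed. A hedged sketch (the cycle count is arbitrary):

// Hedged sketch: keep the current stream busy, then wait for it.
#include <ATen/cuda/Sleep.h>
#include <c10/cuda/CUDAStream.h>

void keep_stream_busy() {
  at::cuda::sleep(1000000);  // spin kernel enqueued on the current stream
  c10::cuda::getCurrentCUDAStream().synchronize();  // blocks until it finishes
}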
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/ThrustAllocator.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+
+ #include <cstddef>
+ #include <c10/cuda/CUDACachingAllocator.h>
+
+ namespace at::cuda {
+
+ /// Allocator for Thrust to re-route its internal device allocations
+ /// to the THC allocator
+ class ThrustAllocator {
+ public:
+ typedef char value_type;
+
+ char* allocate(std::ptrdiff_t size) {
+ return static_cast<char*>(c10::cuda::CUDACachingAllocator::raw_alloc(size));
+ }
+
+ void deallocate(char* p, size_t size) {
+ c10::cuda::CUDACachingAllocator::raw_delete(p);
+ }
+ };
+
+ } // namespace at::cuda
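The allocator is meant to be handed to Thrust's CUDA execution policy so that Thrust's scratch buffers come out of the caching allocator instead of raw cudaMalloc. A hedged usage sketch using the standard thrust::cuda::par(alloc).on(stream) pattern:

// Hedged sketch: sort on the current stream with caching-allocator-backed temporaries.
#include <ATen/cuda/ThrustAllocator.h>
#include <c10/cuda/CUDAStream.h>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>

void sort_on_current_stream(float* data, int64_t n) {
  at::cuda::ThrustAllocator allocator;
  auto policy = thrust::cuda::par(allocator).on(c10::cuda::getCurrentCUDAStream());
  thrust::sort(policy, data, data + n);
}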
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.cuh ADDED
@@ -0,0 +1,413 @@
1
+ #pragma once
2
+ #include <ATen/cuda/cub.h>
3
+
4
+ #include <cstddef>
5
+ #include <type_traits>
6
+ #include <iterator>
7
+ #include <limits>
8
+
9
+ #include <ATen/cuda/cub_definitions.cuh>
10
+
11
+ #if USE_GLOBAL_CUB_WRAPPED_NAMESPACE()
12
+
13
+ #include <cub/cub.cuh>
14
+
15
+ #else
16
+
17
+ // include cub in a safe manner, see:
18
+ // https://github.com/pytorch/pytorch/pull/55292
19
+ #undef CUB_NS_POSTFIX //undef to avoid redefinition warnings
20
+ #undef CUB_NS_PREFIX
21
+ #undef CUB_NS_QUALIFIER
22
+ #define CUB_NS_PREFIX namespace at_cuda_detail {
23
+ #define CUB_NS_POSTFIX }
24
+ #define CUB_NS_QUALIFIER ::at_cuda_detail::cub
25
+ #include <cub/cub.cuh>
26
+ #undef CUB_NS_POSTFIX
27
+ #undef CUB_NS_PREFIX
28
+ #undef CUB_NS_QUALIFIER
29
+
30
+ #endif
31
+
32
+ #include <ATen/cuda/Exceptions.h>
33
+ #include <c10/cuda/CUDACachingAllocator.h>
34
+ #include <c10/cuda/CUDAStream.h>
35
+
36
+ // handle the temporary storage and 'twice' calls for cub API
37
+ #define CUB_WRAPPER(func, ...) do { \
38
+ size_t temp_storage_bytes = 0; \
39
+ func(nullptr, temp_storage_bytes, __VA_ARGS__); \
40
+ auto& caching_allocator = *::c10::cuda::CUDACachingAllocator::get(); \
41
+ auto temp_storage = caching_allocator.allocate(temp_storage_bytes); \
42
+ func(temp_storage.get(), temp_storage_bytes, __VA_ARGS__); \
43
+ AT_CUDA_CHECK(cudaGetLastError()); \
44
+ } while (false)
45
+
46
+ #ifdef USE_ROCM
47
+ #define NO_ROCM(x)
48
+ #define ROCM_HIPCUB(x) ::hipcub
49
+ #else
50
+ #define NO_ROCM(x) x
51
+ #define ROCM_HIPCUB(x) x
52
+ #endif
53
+
54
+ #if (!defined(USE_ROCM) && !CUB_SUPPORTS_NV_BFLOAT16()) || \
55
+ (defined(USE_ROCM) && ROCM_VERSION >= 40500)
56
+
57
+ #if !defined(USE_ROCM)
58
+ namespace at_cuda_detail {
59
+ #endif
60
+
61
+ // backport https://github.com/NVIDIA/cub/pull/306 for c10::BFloat16
62
+
63
+ template <>
64
+ struct ROCM_HIPCUB(cub)::FpLimits<c10::BFloat16>
65
+ {
66
+ static __host__ __device__ __forceinline__ c10::BFloat16 Max() {
67
+ unsigned short max_word = 0x7F7F;
68
+ return reinterpret_cast<c10::BFloat16&>(max_word);
69
+ }
70
+
71
+ static __host__ __device__ __forceinline__ c10::BFloat16 Lowest() {
72
+ unsigned short lowest_word = 0xFF7F;
73
+ return reinterpret_cast<c10::BFloat16&>(lowest_word);
74
+ }
75
+ };
76
+
77
+ template <>
78
+ struct ROCM_HIPCUB(cub)::NumericTraits<c10::BFloat16>:
79
+ ROCM_HIPCUB(cub)::BaseTraits<ROCM_HIPCUB(cub)::FLOATING_POINT, true, false, unsigned short, c10::BFloat16> {};
80
+
81
+ #if !defined(USE_ROCM)
82
+ } // namespace at_cuda_detail
83
+ #endif
84
+
85
+ #endif
86
+
87
+ #if !defined(USE_ROCM)
88
+ namespace at::native {
89
+ namespace cub = ::at_cuda_detail::cub;
90
+ } // namespace at::native
91
+ #endif
92
+
93
+ namespace at::cuda::cub {
94
+
95
+ namespace detail {
96
+
97
+ template<typename T>
98
+ struct cuda_type {
99
+ using type = T;
100
+ };
101
+ template<>
102
+ struct cuda_type<c10::Half> {
103
+ using type = __half;
104
+ };
105
+
106
+ #if !defined(USE_ROCM) && CUB_SUPPORTS_NV_BFLOAT16()
107
+
108
+ template<>
109
+ struct cuda_type<c10::BFloat16> {
110
+ using type = __nv_bfloat16;
111
+ };
112
+
113
+ #elif (defined(USE_ROCM) && ROCM_VERSION >= 40500)
114
+
115
+ template<>
116
+ struct cuda_type<c10::BFloat16> {
117
+ using type = hip_bfloat16;
118
+ };
119
+
120
+ #endif
121
+
122
+ } // namespace detail
123
+
124
+ template<typename key_t, typename value_t, typename OffsetIteratorT>
125
+ inline void segmented_sort_pairs(
126
+ const key_t *keys_in, key_t *keys_out,
127
+ const value_t *values_in, value_t *values_out,
128
+ int64_t num_elements, int64_t num_segments,
129
+ OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
130
+ bool descending=false, int64_t begin_bit=0, int64_t end_bit=sizeof(key_t)*8
131
+ ) {
132
+ TORCH_CHECK(num_elements <= std::numeric_limits<int>::max(),
133
+ "cub sort does not support sorting more than INT_MAX elements");
134
+ TORCH_CHECK(num_segments <= std::numeric_limits<int>::max(),
135
+ "cub sort does not support sorting more than INT_MAX elements");
136
+ using key_t_ = typename detail::cuda_type<key_t>::type;
137
+
138
+ auto allocator = c10::cuda::CUDACachingAllocator::get();
139
+ c10::DataPtr keys_out_owner;
140
+
141
+ if (keys_out == nullptr) {
142
+ keys_out_owner = allocator->allocate(num_elements * sizeof(key_t));
143
+ keys_out = reinterpret_cast<key_t *>(keys_out_owner.get());
144
+ }
145
+
146
+ const key_t_ *keys_in_ = reinterpret_cast<const key_t_*>(keys_in);
147
+ key_t_ *keys_out_ = reinterpret_cast<key_t_*>(keys_out);
148
+
149
+ if (descending) {
150
+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSegmentedRadixSort::SortPairsDescending,
151
+ keys_in_, keys_out_, values_in, values_out,
152
+ num_elements, num_segments, begin_offsets, end_offsets,
153
+ begin_bit, end_bit, c10::cuda::getCurrentCUDAStream());
154
+ } else {
155
+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSegmentedRadixSort::SortPairs,
156
+ keys_in_, keys_out_, values_in, values_out,
157
+ num_elements, num_segments, begin_offsets, end_offsets,
158
+ begin_bit, end_bit, c10::cuda::getCurrentCUDAStream());
159
+ }
160
+ }
161
+
162
+ #if CUB_SUPPORTS_UNIQUE_BY_KEY()
163
+ template <typename KeysInputIteratorT, typename ValuesInputIteratorT, typename KeysOutputIteratorT, typename ValuesOutputIteratorT, typename NumSelectedIteratorT>
164
+ inline void unique_by_key(
165
+ KeysInputIteratorT keys_in, ValuesInputIteratorT values_in,
166
+ KeysOutputIteratorT keys_out, ValuesOutputIteratorT values_out,
167
+ NumSelectedIteratorT num_selected, int64_t num_input_items)
168
+ {
169
+ // TODO: use thrust::discard_iterator to handle null keys_out when https://github.com/NVIDIA/cub/issues/406 is fixed.
170
+ constexpr bool null_keys_out = std::is_same<KeysOutputIteratorT, std::nullptr_t>::value;
171
+ using KeyT = typename std::iterator_traits<KeysInputIteratorT>::value_type;
172
+ using RealKeysOutputIteratorT = typename std::conditional<null_keys_out, KeyT *, KeysOutputIteratorT>::type;
173
+ RealKeysOutputIteratorT keys_out_;
174
+ auto allocator = c10::cuda::CUDACachingAllocator::get();
175
+ c10::DataPtr keys_out_owner;
176
+ if constexpr (null_keys_out) {
177
+ keys_out_owner = allocator->allocate(num_input_items * sizeof(KeyT));
178
+ keys_out_ = static_cast<KeyT *>(keys_out_owner.get());
179
+ } else {
180
+ keys_out_ = keys_out;
181
+ }
182
+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSelect::UniqueByKey,
183
+ keys_in, values_in, keys_out_, values_out, num_selected, num_input_items, c10::cuda::getCurrentCUDAStream());
184
+ }
185
+ #endif
186
+
187
+ namespace impl {
188
+
189
+ template<typename InputIteratorT1, typename InputIteratorT2, typename OutputIteratorT, class ScanOpT>
190
+ C10_LAUNCH_BOUNDS_1(1)
191
+ __global__ void transform_vals(InputIteratorT1 a, InputIteratorT2 b, OutputIteratorT out, ScanOpT scan_op){
192
+ // NOTE: out here not the final scan output, but an intermediate of the accumulation type.
193
+ using acc_t = typename std::iterator_traits<OutputIteratorT>::value_type;
194
+ *out = scan_op(static_cast<acc_t>(*a), static_cast<acc_t>(*b));
195
+ }
196
+
197
+ #if !CUB_SUPPORTS_FUTURE_VALUE()
198
+ template<typename ValueT, typename InputIteratorT>
199
+ struct chained_iterator {
200
+ using iterator_category = std::random_access_iterator_tag;
201
+ using difference_type = std::ptrdiff_t;
202
+ using value_type = ValueT;
203
+ using pointer = ValueT*;
204
+ using reference = ValueT&;
205
+
206
+ InputIteratorT iter;
207
+ ValueT *first;
208
+ difference_type offset = 0;
209
+
210
+ __device__ ValueT operator[](difference_type i) {
211
+ i += offset;
212
+ if (i == 0) {
213
+ return *first;
214
+ } else {
215
+ return ValueT(iter[i - 1]);
216
+ }
217
+ }
218
+ __device__ chained_iterator operator+(difference_type i) {
219
+ return chained_iterator{iter, first, i};
220
+ }
221
+ __device__ ValueT operator*() {
222
+ return (*this)[0];
223
+ }
224
+ };
225
+ #endif
226
+
227
+ // even though cub is supposed to support tensors with int_max elements, in reality it doesn't,
228
+ // so split at int_max/2
229
+ constexpr int max_cub_size = std::numeric_limits<int>::max() / 2 + 1; // 2**30
230
+ }
231
+
232
+ // non synchronizing cub call
233
+ // even though cub is supposed to support tensors with int_max elements, in reality it doesn't,
234
+ // so split at int_max/2
235
+ template<typename InputIteratorT, typename OutputIteratorT, typename ScanOpT, int max_cub_size=impl::max_cub_size>
236
+ inline void inclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT scan_op, int64_t num_items) {
237
+ #if defined(USE_ROCM) && (ROCM_VERSION >= 50000)
238
+ //For ROCm, use hipCUB chained iterators
239
+ CUB_WRAPPER(NO_ROCM(detail)::hipcub::DeviceScan::InclusiveScan,
240
+ input,
241
+ output,
242
+ scan_op,
243
+ num_items,
244
+ at::cuda::getCurrentCUDAStream());
245
+ C10_HIP_KERNEL_LAUNCH_CHECK();
246
+ #else
247
+ // non synchronizing cub call
248
+ // even though cub is supposed to support tensors with int_max elements, in reality it doesn't,
249
+ // so split at int_max/2
250
+ int size_cub = std::min<int64_t>(num_items, max_cub_size);
251
+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan,
252
+ input,
253
+ output,
254
+ scan_op,
255
+ size_cub,
256
+ at::cuda::getCurrentCUDAStream());
257
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
258
+ using input_t = typename std::iterator_traits<InputIteratorT>::value_type;
259
+ for (int64_t i = max_cub_size; i < num_items; i += max_cub_size) {
260
+ auto allocator = c10::cuda::CUDACachingAllocator::get();
261
+ c10::DataPtr first_elem = allocator->allocate(sizeof(input_t));
262
+ auto first_elem_ptr = reinterpret_cast<input_t *>(first_elem.get());
263
+
264
+ size_cub = std::min<int64_t>(num_items - i, max_cub_size);
265
+ impl::transform_vals<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>(
266
+ output + i - 1,
267
+ input + i,
268
+ first_elem_ptr,
269
+ scan_op);
270
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
271
+ #if !CUB_SUPPORTS_FUTURE_VALUE()
272
+ using ArgIndexInputIterator = NO_ROCM(at_cuda_detail)::cub::ArgIndexInputIterator<InputIteratorT>;
273
+ using tuple = typename ArgIndexInputIterator::value_type;
274
+ auto input_iter_transform = [=] __device__ (const tuple &x)->input_t {
275
+ if (x.key == 0) {
276
+ return *first_elem_ptr;
277
+ } else {
278
+ return x.value;
279
+ }
280
+ };
281
+ auto input_ = NO_ROCM(at_cuda_detail)::cub::TransformInputIterator<input_t, decltype(input_iter_transform), ArgIndexInputIterator>(
282
+ ArgIndexInputIterator(input + i), input_iter_transform);
283
+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan,
284
+ input_,
285
+ output + i,
286
+ scan_op,
287
+ size_cub,
288
+ at::cuda::getCurrentCUDAStream());
289
+ #else
290
+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan,
291
+ input + i + 1,
292
+ output + i,
293
+ scan_op,
294
+ ::at_cuda_detail::cub::FutureValue<input_t>(first_elem_ptr),
295
+ size_cub,
296
+ at::cuda::getCurrentCUDAStream());
297
+ #endif
298
+ }
299
+ #endif
300
+ }
301
+
302
+ template<typename InputIteratorT, typename OutputIteratorT, typename ScanOpT, typename InitValueT, int max_cub_size=impl::max_cub_size>
303
+ inline void exclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT scan_op, InitValueT init_value, int64_t num_items) {
304
+ #if defined(USE_ROCM) && (ROCM_VERSION >= 50000)
305
+ //For ROCm, use hipCUB chained iterators
306
+ CUB_WRAPPER(NO_ROCM(detail)::hipcub::DeviceScan::ExclusiveScan,
307
+ input,
308
+ output,
309
+ scan_op,
310
+ init_value,
311
+ num_items,
312
+ at::cuda::getCurrentCUDAStream());
313
+ C10_HIP_KERNEL_LAUNCH_CHECK();
314
+ #else
315
+ // non synchronizing cub call
316
+ // even though cub is supposed to support tensors with int_max elements, in reality it doesn't,
317
+ // so split at int_max/2
318
+ int size_cub = std::min<int64_t>(num_items, max_cub_size);
319
+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan,
320
+ input,
321
+ output,
322
+ scan_op,
323
+ init_value,
324
+ size_cub,
325
+ at::cuda::getCurrentCUDAStream());
326
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
327
+ for (int64_t i = max_cub_size; i < num_items; i += max_cub_size) {
328
+ auto allocator = c10::cuda::CUDACachingAllocator::get();
329
+ c10::DataPtr first_elem = allocator->allocate(sizeof(InitValueT));
330
+ auto first_elem_ptr = reinterpret_cast<InitValueT *>(first_elem.get());
331
+
332
+ size_cub = std::min<int64_t>(num_items - i, max_cub_size);
333
+ impl::transform_vals<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>(
334
+ output + i - 1,
335
+ input + i - 1,
336
+ first_elem_ptr,
337
+ scan_op);
338
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
339
+ #if !CUB_SUPPORTS_FUTURE_VALUE()
340
+ auto input_ = impl::chained_iterator<InitValueT, InputIteratorT>{
341
+ input + i, first_elem_ptr};
342
+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan,
343
+ input_,
344
+ output + i,
345
+ scan_op,
346
+ size_cub,
347
+ at::cuda::getCurrentCUDAStream());
348
+ #else
349
+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan,
350
+ input + i,
351
+ output + i,
352
+ scan_op,
353
+ ::at_cuda_detail::cub::FutureValue<InitValueT>(first_elem_ptr),
354
+ size_cub,
355
+ at::cuda::getCurrentCUDAStream());
356
+ #endif
357
+ }
358
+ #endif
359
+ }
360
+
361
+ #if CUB_SUPPORTS_SCAN_BY_KEY()
362
+
363
+ template <typename KeysInputIteratorT, typename ValuesInputIteratorT, typename ValuesOutputIteratorT>
364
+ inline void inclusive_sum_by_key(KeysInputIteratorT keys, ValuesInputIteratorT input, ValuesOutputIteratorT output, int64_t num_items) {
365
+ TORCH_CHECK(num_items <= std::numeric_limits<int>::max(),
366
+ "cub InclusiveSumByKey does not support more than INT_MAX elements");
367
+ CUB_WRAPPER(at_cuda_detail::cub::DeviceScan::InclusiveSumByKey,
368
+ keys, input, output, num_items, at_cuda_detail::cub::Equality(), at::cuda::getCurrentCUDAStream());
369
+ }
370
+
371
+ template <typename KeysInputIteratorT, typename ValuesInputIteratorT, typename ValuesOutputIteratorT, typename ScanOpT>
372
+ inline void inclusive_scan_by_key(KeysInputIteratorT keys, ValuesInputIteratorT input, ValuesOutputIteratorT output, ScanOpT scan_op, int64_t num_items) {
373
+ TORCH_CHECK(num_items <= std::numeric_limits<int>::max(),
374
+ "cub InclusiveSumByKey does not support more than INT_MAX elements");
375
+ CUB_WRAPPER(at_cuda_detail::cub::DeviceScan::InclusiveScanByKey,
376
+ keys, input, output, scan_op, num_items, at_cuda_detail::cub::Equality(), at::cuda::getCurrentCUDAStream());
377
+ }
378
+
379
+ #endif
380
+
381
+ template <typename InputIteratorT, typename OutputIteratorT, typename NumSelectedIteratorT>
382
+ void unique(InputIteratorT input, OutputIteratorT output,
383
+ NumSelectedIteratorT num_selected_out, int64_t num_items) {
384
+ TORCH_CHECK(num_items <= std::numeric_limits<int>::max(),
385
+ "cub unique does not support more than INT_MAX elements");
386
+ CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSelect::Unique,
387
+ input, output, num_selected_out, num_items, at::cuda::getCurrentCUDAStream());
388
+ }
389
+
390
+ template <typename InputIteratorT, typename OutputIteratorT, typename CountsOutputIteratorT,
391
+ typename LengthOutputIteratorT>
392
+ void run_length_encode(InputIteratorT input, OutputIteratorT output, CountsOutputIteratorT counts_out,
393
+ LengthOutputIteratorT length_out, int64_t num_items) {
394
+ TORCH_CHECK(num_items <= std::numeric_limits<int>::max(),
395
+ "cub run_length_encode does not support more than INT_MAX elements");
396
+ CUB_WRAPPER(
397
+ NO_ROCM(at_cuda_detail)::cub::DeviceRunLengthEncode::Encode,
398
+ input, output, counts_out, length_out, num_items,
399
+ at::cuda::getCurrentCUDAStream());
400
+ }
401
+
402
+ template <typename InputIteratorT, typename OutputIteratorT, typename ReductionOpT, typename T>
403
+ void reduce(InputIteratorT input, OutputIteratorT output, int64_t num_items, ReductionOpT op, T init) {
404
+ TORCH_CHECK(num_items <= std::numeric_limits<int>::max(),
405
+ "cub reduce does not support more than INT_MAX elements");
406
+ CUB_WRAPPER(
407
+ NO_ROCM(at_cuda_detail)::cub::DeviceReduce::Reduce,
408
+ input, output, num_items, op, init,
409
+ at::cuda::getCurrentCUDAStream());
410
+
411
+ }
412
+
413
+ } // namespace at::cuda::cub
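The CUB_WRAPPER macro above automates CUB's two-pass calling convention: every device-wide algorithm is called once with a null temp-storage pointer to query the required byte count, and once more to do the work. A hedged standalone sketch of that convention (plain cudaMalloc is used here for clarity; the wrapper uses the caching allocator instead):

// Hedged sketch of the two-pass CUB pattern with cub::DeviceReduce::Sum.
#include <cub/cub.cuh>

void sum_with_cub(const float* d_in, float* d_out, int n, cudaStream_t stream) {
  size_t temp_bytes = 0;
  cub::DeviceReduce::Sum(nullptr, temp_bytes, d_in, d_out, n, stream);   // size query only
  void* d_temp = nullptr;
  cudaMalloc(&d_temp, temp_bytes);
  cub::DeviceReduce::Sum(d_temp, temp_bytes, d_in, d_out, n, stream);    // actual reduction
  cudaFree(d_temp);
}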
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/cub_definitions.cuh ADDED
@@ -0,0 +1,53 @@
+ #pragma once
+
+ #if !defined(USE_ROCM)
+ #include <cuda.h>  // for CUDA_VERSION
+ #endif
+
+ #if !defined(USE_ROCM)
+ #include <cub/version.cuh>
+ #else
+ #define CUB_VERSION 0
+ #endif
+
+ // cub sort support for __nv_bfloat16 is added to cub 1.13 in:
+ // https://github.com/NVIDIA/cub/pull/306
+ #if CUB_VERSION >= 101300
+ #define CUB_SUPPORTS_NV_BFLOAT16() true
+ #else
+ #define CUB_SUPPORTS_NV_BFLOAT16() false
+ #endif
+
+ // cub support for CUB_WRAPPED_NAMESPACE is added to cub 1.13.1 in:
+ // https://github.com/NVIDIA/cub/pull/326
+ // CUB_WRAPPED_NAMESPACE is defined globally in cmake/Dependencies.cmake
+ // starting from CUDA 11.5
+ #if defined(CUB_WRAPPED_NAMESPACE) || defined(THRUST_CUB_WRAPPED_NAMESPACE)
+ #define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() true
+ #else
+ #define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false
+ #endif
+
+ // cub support for UniqueByKey is added to cub 1.16 in:
+ // https://github.com/NVIDIA/cub/pull/405
+ #if CUB_VERSION >= 101600
+ #define CUB_SUPPORTS_UNIQUE_BY_KEY() true
+ #else
+ #define CUB_SUPPORTS_UNIQUE_BY_KEY() false
+ #endif
+
+ // cub support for scan by key is added to cub 1.15
+ // in https://github.com/NVIDIA/cub/pull/376
+ #if CUB_VERSION >= 101500
+ #define CUB_SUPPORTS_SCAN_BY_KEY() 1
+ #else
+ #define CUB_SUPPORTS_SCAN_BY_KEY() 0
+ #endif
+
+ // cub support for cub::FutureValue is added to cub 1.15 in:
+ // https://github.com/NVIDIA/cub/pull/305
+ #if CUB_VERSION >= 101500
+ #define CUB_SUPPORTS_FUTURE_VALUE() true
+ #else
+ #define CUB_SUPPORTS_FUTURE_VALUE() false
+ #endif
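These feature macros are function-like and always defined, so callers branch on their value with #if rather than testing existence with #ifdef. A hedged sketch of the intended consumption pattern (the fallback comment is illustrative):

// Hedged sketch: guard a cub-backed code path on a feature macro.
#include <ATen/cuda/cub_definitions.cuh>

#if CUB_SUPPORTS_UNIQUE_BY_KEY()
  // at::cuda::cub::unique_by_key(...) is available on this toolchain
#else
  // fall back to a sort- or thrust-based implementation
#endif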
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator_impl.h ADDED
@@ -0,0 +1,249 @@
1
+ #pragma once
2
+ #include <ATen/jit_macros.h>
3
+
4
+ #if AT_USE_JITERATOR()
5
+
6
+ #include <ATen/native/TensorIterator.h>
7
+ #include <ATen/cuda/detail/OffsetCalculator.cuh>
8
+ #include <ATen/native/cuda/jit_utils.h>
9
+ #include <ATen/native/cuda/MemoryAccess.cuh>
10
+ #include <ATen/native/cuda/JitLoops.cuh>
11
+
12
+ #include <string>
13
+ #include <variant>
14
+ #include <vector>
15
+
16
+ namespace at::native {
17
+
18
+
19
+ #define AT_FOR_8_CASES(_) \
20
+ _(1) \
21
+ _(2) \
22
+ _(3) \
23
+ _(4) \
24
+ _(5) \
25
+ _(6) \
26
+ _(7) \
27
+ _(8)
28
+
29
+ #define AT_FOR_8_CASES_WITH_COMMA(_) \
30
+ _(1) , \
31
+ _(2) , \
32
+ _(3) , \
33
+ _(4) , \
34
+ _(5) , \
35
+ _(6) , \
36
+ _(7) , \
37
+ _(8)
38
+
39
+ c10::SmallVector<std::string> get_extra_args_typenames(const c10::SmallVector<at::Scalar>& extra_args) {
40
+ c10::SmallVector<std::string> args_typenames(extra_args.size());
41
+ for (const auto i : c10::irange(extra_args.size())) {
42
+ args_typenames[i] = at::cuda::jit::typeName(extra_args[i].type());
43
+ }
44
+ return args_typenames;
45
+ }
46
+
47
+ int can_vectorize_up_to(at::ScalarType type, char* pointer) {
48
+ switch(type) {
49
+ #define DEFINE_CASE(ctype, scalartype) \
50
+ case ScalarType::scalartype : return memory::can_vectorize_up_to<ctype>(pointer);
51
+
52
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_CASE)
53
+ #undef DEFINE_CASE
54
+
55
+ default: TORCH_INTERNAL_ASSERT(false, "Unrecognized ScalarType: ", type);
56
+ }
57
+ }
58
+
59
+ // jitted version of the above
60
+ // See Note [Jiterator], this relies on the assumptions enumerated there
61
+ int jitted_can_vectorize_up_to(const TensorIteratorBase& iter) {
62
+ const at::ScalarType common_dtype = iter.common_dtype();
63
+ const at::ScalarType result_dtype = common_dtype;
64
+
65
+ // Deals with output
66
+ int result = can_vectorize_up_to(result_dtype, static_cast<char*>(iter.data_ptr(0)));
67
+
68
+ // Incorporates input(s)
69
+ for (auto i = 1; i < iter.ntensors(); ++i) {
70
+ result = std::min<int>(result, can_vectorize_up_to(common_dtype, static_cast<char*>(iter.data_ptr(i))));
71
+ }
72
+
73
+ return result;
74
+ }
75
+
76
+ template<bool IS_INPUT, int N>
77
+ static std::unique_ptr<OffsetCalculator<N>> make_unique_offset_calculator(
78
+ const TensorIteratorBase& iter) {
79
+ // array size can not be 0, this happens when N == 0
80
+ constexpr int array_size = std::max<int>(N, 1);
81
+ TORCH_INTERNAL_ASSERT(N == (IS_INPUT ? iter.ninputs() : iter.noutputs()));
82
+
83
+ std::array<const int64_t*, array_size> strides;
84
+ int64_t element_sizes[array_size];
85
+ for (int i = 0; i < N; i++) {
86
+ int index = IS_INPUT ? i + iter.noutputs() : i;
87
+ strides[i] = iter.strides(index).data();
88
+ element_sizes[i] = iter.element_size(index);
89
+ }
90
+ return std::make_unique<OffsetCalculator<N>>(iter.ndim(), iter.shape().data(), strides.data(), element_sizes);
91
+ }
92
+
93
+ template <bool IS_INPUT>
94
+ struct OffsetCalculatorVariant {
95
+ #define DEFINE_CASE(index) std::unique_ptr<OffsetCalculator<index>>
96
+ using OffsetCalculatorTypes = std::variant<
97
+ AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE)
98
+ >;
99
+ #undef DEFINE_CASE
100
+
101
+ OffsetCalculatorVariant(const TensorIteratorBase& iter) {
102
+ int num = IS_INPUT ? iter.ninputs() : iter.noutputs();
103
+
104
+ switch(num) {
105
+ #define DEFINE_CASE(index) \
106
+ case index : v = make_unique_offset_calculator<IS_INPUT, index>(iter); break;
107
+
108
+ AT_FOR_8_CASES(DEFINE_CASE)
109
+ #undef DEFINE_CASE
110
+ default:
111
+ TORCH_CHECK(false, "OffsetCalculatorVariant is not implemented for num_tensor = ", num);
112
+ }
113
+ }
114
+
115
+ void* data_ptr() {
116
+ return std::visit([](auto & v){ return static_cast<void*>(v.get()); }, v);
117
+ }
118
+
119
+ private:
120
+ OffsetCalculatorTypes v;
121
+ };
122
+
123
+ struct ArrayVariant {
124
+ // works for up to 8 input + 8 outputs
125
+ #define DEFINE_CASE(index) at::detail::Array<char*, index>, at::detail::Array<char*, index+8>
126
+ using ArrayTypes = std::variant<
127
+ AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE)
128
+ >;
129
+ #undef DEFINE_CASE
130
+
131
+ ArrayVariant(const TensorIteratorBase& iter) {
132
+ int ntensors = iter.ntensors();
133
+ switch(ntensors) {
134
+ #define DEFINE_CASE(index) \
135
+ case index: array = at::detail::Array<char*, index>{}; break; \
136
+ case index+8: array = at::detail::Array<char*, index+8>{}; break;
137
+
138
+ AT_FOR_8_CASES(DEFINE_CASE)
139
+ #undef DEFINE_CASE
140
+
141
+ default:
142
+ TORCH_CHECK(false, "ArrayVariant is not implemented for ntensors = ", ntensors);
143
+ }
144
+
145
+ std::visit([&](auto& a) {
146
+ for (auto i = 0; i < ntensors; ++i) {
147
+ a[i] = (char*)iter.data_ptr(i);
148
+ }
149
+ }, array);
150
+ }
151
+
152
+ void* data_ptr() {
153
+ return std::visit([](auto & a){ return static_cast<void*>(&a); }, array);
154
+ }
155
+
156
+ private:
157
+ ArrayTypes array;
158
+ };
159
+
160
+ struct TrivialOffsetCalculatorVariant {
161
+ #define DEFINE_CASE(index) TrivialOffsetCalculator<index>
162
+ using TrivialOffsetCalculatorTypes = std::variant<
163
+ AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE)
164
+ >;
165
+ #undef DEFINE_CASE
166
+
167
+ TrivialOffsetCalculatorVariant(int num) {
168
+ switch(num) {
169
+ #define DEFINE_CASE(index) \
170
+ case index: v = TrivialOffsetCalculator<index>(); break;
171
+
172
+ AT_FOR_8_CASES(DEFINE_CASE)
173
+ #undef DEFINE_CASE
174
+
175
+ default:
176
+ TORCH_CHECK(false, "TrivialOffsetCalculatorVariant is not implemented for num_tensors = ", num);
177
+ }
178
+ }
179
+
180
+ void* data_ptr() {
181
+ return std::visit([](auto & v){ return static_cast<void*>(&v); }, v);
182
+ }
183
+
184
+ private:
185
+ TrivialOffsetCalculatorTypes v;
186
+ };
187
+
188
+ struct LoadWithCastVariant {
189
+ #define DEFINE_CASE(index) std::unique_ptr<memory::LoadWithCast<index>>
190
+ using LoadWithCastPtr = std::variant<
191
+ AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE)
192
+ >;
193
+ #undef DEFINE_CASE
194
+
195
+ LoadWithCastVariant(const TensorIteratorBase& iter) {
196
+ int arity = iter.ninputs();
197
+ switch(arity) {
198
+ #define DEFINE_CASE(index) \
199
+ case index: v = std::make_unique<memory::LoadWithCast<index>>(iter); break;
200
+
201
+ AT_FOR_8_CASES(DEFINE_CASE)
202
+ #undef DEFINE_CASE
203
+
204
+ default:
205
+ TORCH_CHECK(false, "LoadWithCastVariant is not implemented for ninputs = ", arity);
206
+ }
207
+ }
208
+
209
+ void* data_ptr() {
210
+ return std::visit([](auto & v){ return static_cast<void*>(v.get()); }, v);
211
+ }
212
+
213
+ private:
214
+ LoadWithCastPtr v;
215
+ };
216
+
217
+ struct StoreWithCastVariant {
218
+ #define DEFINE_CASE(index) std::unique_ptr<memory::StoreWithCast<index>>
219
+ using StoreWithCastPtr = std::variant<
220
+ AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE)
221
+ >;
222
+ #undef DEFINE_CASE
223
+
224
+ StoreWithCastVariant(const TensorIteratorBase& iter) {
225
+ int num = iter.noutputs();
226
+ switch(num) {
227
+ #define DEFINE_CASE(index) \
228
+ case index: v = std::make_unique<memory::StoreWithCast<index>>(iter); break;
229
+
230
+ AT_FOR_8_CASES(DEFINE_CASE)
231
+ #undef DEFINE_CASE
232
+
233
+ default:
234
+ TORCH_CHECK(false, "StoreWithCastVariant is not implemented for noutputs = ", num);
235
+ }
236
+ }
237
+
238
+ void* data_ptr() {
239
+ return std::visit([](auto & v){ return static_cast<void*>(v.get()); }, v);
240
+ }
241
+
242
+ private:
243
+ StoreWithCastPtr v;
244
+ };
245
+
246
+ } // namespace at::native
247
+
248
+
249
+ #endif // AT_USE_JITERATOR()
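The *Variant helpers above all follow the same pattern: a runtime tensor count selects a compile-time-sized alternative inside a std::variant, and std::visit later recovers a type-erased void* without knowing N statically. A hedged, self-contained sketch of that dispatch pattern, with std::array standing in for OffsetCalculator<N> / at::detail::Array:

// Hedged sketch of the runtime-to-compile-time arity dispatch used by the variants.
#include <array>
#include <stdexcept>
#include <variant>

struct SizedBuffer {
  std::variant<std::array<char*, 1>, std::array<char*, 2>, std::array<char*, 3>> v;

  explicit SizedBuffer(int n) {
    switch (n) {
      case 1: v = std::array<char*, 1>{}; break;
      case 2: v = std::array<char*, 2>{}; break;
      case 3: v = std::array<char*, 3>{}; break;
      default: throw std::runtime_error("unsupported arity");
    }
  }

  // Type-erased pointer to the selected fixed-size storage.
  void* data_ptr() {
    return std::visit([](auto& a) { return static_cast<void*>(a.data()); }, v);
  }
};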
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/llvm_jit_strings.h ADDED
@@ -0,0 +1,14 @@
+ #pragma once
+
+ #include <string>
+ #include <c10/macros/Export.h>
+
+ namespace at::cuda {
+
+ TORCH_CUDA_CPP_API const std::string &get_traits_string();
+ TORCH_CUDA_CPP_API const std::string &get_cmath_string();
+ TORCH_CUDA_CPP_API const std::string &get_complex_body_string();
+ TORCH_CUDA_CPP_API const std::string &get_complex_half_body_string();
+ TORCH_CUDA_CPP_API const std::string &get_complex_math_string();
+
+ } // namespace at::cuda
venv/lib/python3.10/site-packages/torch/include/ATen/mps/EmptyTensor.h ADDED
@@ -0,0 +1,29 @@
+ // Copyright © 2022 Apple Inc.
+
+ #pragma once
+ #include <ATen/core/TensorBase.h>
+
+ namespace at::detail {
+
+ C10_EXPORT TensorBase empty_mps(
+ IntArrayRef size,
+ c10::optional<ScalarType> dtype_opt,
+ c10::optional<Layout> layout_opt,
+ c10::optional<Device> device_opt,
+ c10::optional<bool> pin_memory_opt,
+ c10::optional<c10::MemoryFormat> memory_format_opt);
+ C10_EXPORT TensorBase empty_mps(
+ IntArrayRef size, const TensorOptions &options);
+
+ C10_EXPORT TensorBase empty_strided_mps(
+ IntArrayRef size,
+ IntArrayRef stride,
+ ScalarType dtype,
+ c10::optional<Device> device_opt);
+
+ C10_EXPORT TensorBase empty_strided_mps(
+ IntArrayRef size,
+ IntArrayRef stride,
+ const TensorOptions &options);
+
+ } // namespace at::detail
venv/lib/python3.10/site-packages/torch/include/ATen/mps/IndexKernels.h ADDED
@@ -0,0 +1,630 @@
1
+ #pragma once
2
+
3
+ namespace at::mps {
4
+
5
+ static const char * indexing_metal_shaders = R"INDEX_METAL(
6
+ #include <metal_stdlib>
7
+ #include <metal_atomic>
8
+
9
+ using namespace metal;
10
+
11
+ #if __METAL_VERSION__ < 300
12
+ struct IndexAB {
13
+ // Allow up to 16 indices
14
+ metal::array<constant void *, 16> indexArray [[ id(0) ]];
15
+ };
16
+ #else
17
+ struct IndexAB {
18
+ constant int64_t* indexArray;
19
+ };
20
+
21
+ #endif
22
+
23
+ template<typename T, typename OffsetsT>
24
+ kernel void index_select(
25
+ #if __METAL_VERSION__ >= 300
26
+ constant IndexAB * indexAB [[buffer(0)]],
27
+ #else
28
+ constant IndexAB & indexAB [[buffer(0)]],
29
+ #endif
30
+ constant void * indexSizes [[buffer(1)]],
31
+ constant void * indexStrides [[buffer(2)]],
32
+ constant OffsetsT * offsets [[buffer(3)]],
33
+ constant void * inputData [[buffer(4)]],
34
+ device void * outputData [[buffer(5)]],
35
+ constant uint32_t & num_indices [[buffer(6)]],
36
+ uint thread_index [[thread_position_in_grid]]) {
37
+ constant int64_t * index_sizes = (constant int64_t *)indexSizes;
38
+ constant int64_t * index_strides = (constant int64_t *)indexStrides;
39
+ int64_t offset = 0;
40
+ for (uint32_t i = 0; i < num_indices; i++) {
41
+ #if __METAL_VERSION__ >= 300
42
+ constant int64_t* indexArray = indexAB[i].indexArray;
43
+ #else
44
+ constant int64_t* indexArray = (constant int64_t*)indexAB.indexArray[i];
45
+ #endif
46
+ int64_t index = indexArray[offsets[thread_index].z / sizeof(int64_t)];
47
+ if (index < 0) {
48
+ index += index_sizes[i];
49
+ }
50
+ offset += index * index_strides[i];
51
+ }
52
+ device T * out = (device T*)((device char*)outputData + offsets[thread_index].x);
53
+ constant T * in = (constant T*)((constant char*)inputData + offsets[thread_index].y + offset);
54
+ *out = *in;
55
+ }
56
+
57
+ template<typename T, typename OffsetsT>
58
+ void index_put_impl(
59
+ #if __METAL_VERSION__ >= 300
60
+ constant IndexAB * indexAB,
61
+ #else
62
+ constant IndexAB & indexAB,
63
+ #endif
64
+ constant int64_t * index_sizes,
65
+ constant int64_t * index_strides,
66
+ constant OffsetsT * offsets,
67
+ constant void * inputData,
68
+ device void * outputData,
69
+ constant uint32_t & num_indices,
70
+ uint thread_index) {
71
+ int64_t offset = 0;
72
+ for (uint32_t i = 0; i < num_indices; i++) {
73
+ #if __METAL_VERSION__ >= 300
74
+ constant int64_t* indexArray = indexAB[i].indexArray;
75
+ #else
76
+ constant int64_t* indexArray = (constant int64_t*)indexAB.indexArray[i];
77
+ #endif
78
+ int64_t index = indexArray[offsets[thread_index].z / sizeof(int64_t)];
79
+
80
+ if (index < 0) {
81
+ index += index_sizes[i];
82
+ }
83
+ offset += index * index_strides[i];
84
+ }
85
+ device T * out = (device T*)((device char*)outputData + offsets[thread_index].x + offset);
86
+ constant T * in = (constant T*)((constant char*)inputData + offsets[thread_index].y);
87
+ *out = *in;
88
+ }
89
+
90
+ template<typename T, typename OffsetsT>
91
+ kernel void index_put_serial(
92
+ #if __METAL_VERSION__ >= 300
93
+ constant IndexAB * indexAB [[buffer(0)]],
94
+ #else
95
+ constant IndexAB & indexAB [[buffer(0)]],
96
+ #endif
97
+ constant void * indexSizes [[buffer(1)]],
98
+ constant void * indexStrides [[buffer(2)]],
99
+ constant OffsetsT * offsets [[buffer(3)]],
100
+ constant void * inputData [[buffer(4)]],
101
+ device void * outputData [[buffer(5)]],
102
+ constant uint32_t & num_indices [[buffer(6)]],
103
+ constant uint * numIters [[buffer(7)]],
104
+ uint thread_index [[thread_position_in_grid]]) {
105
+
106
+ constant int64_t * index_sizes = (constant int64_t *)indexSizes;
107
+ constant int64_t * index_strides = (constant int64_t *)indexStrides;
108
+
109
+ for (uint iter_i = 0; iter_i < *numIters; iter_i++) {
110
+ index_put_impl<T>(indexAB, index_sizes, index_strides, offsets, inputData, outputData, num_indices, iter_i);
111
+ }
112
+ }
113
+
114
+ template<typename T, typename OffsetsT>
115
+ kernel void index_put(
116
+ #if __METAL_VERSION__ >= 300
117
+ constant IndexAB * indexAB [[buffer(0)]],
118
+ #else
119
+ constant IndexAB & indexAB [[buffer(0)]],
120
+ #endif
121
+ constant void * indexSizes [[buffer(1)]],
122
+ constant void * indexStrides [[buffer(2)]],
123
+ constant OffsetsT * offsets [[buffer(3)]],
124
+ constant void * inputData [[buffer(4)]],
125
+ device void * outputData [[buffer(5)]],
126
+ constant uint32_t & num_indices [[buffer(6)]],
127
+ uint thread_index [[thread_position_in_grid]]) {
128
+
129
+ constant int64_t * index_sizes = (constant int64_t *)indexSizes;
130
+ constant int64_t * index_strides = (constant int64_t *)indexStrides;
131
+ index_put_impl<T>(indexAB, index_sizes, index_strides, offsets, inputData, outputData, num_indices, thread_index);
132
+ }
133
+
134
+ #if __METAL_VERSION__ < 300
135
+ #define REGISTER_INDEX_OP(DTYPE_SIZE, IDX_SIZE, DTYPE, INDEX_OP_TYPE, IDX_DTYPE) \
136
+ template \
137
+ [[host_name("index_" #INDEX_OP_TYPE "_" #DTYPE_SIZE "_" #IDX_SIZE)]] \
138
+ kernel void index_ ## INDEX_OP_TYPE<DTYPE, IDX_DTYPE>( \
139
+ constant IndexAB & indexAB [[buffer(0)]], \
140
+ constant void * indexSizes [[buffer(1)]], \
141
+ constant void * indexStrides [[buffer(2)]], \
142
+ constant IDX_DTYPE * offsets [[buffer(3)]], \
143
+ constant void * inputData [[buffer(4)]], \
144
+ device void * outputData [[buffer(5)]], \
145
+ constant uint32_t & num_indices [[buffer(6)]], \
146
+ uint thread_index [[thread_position_in_grid]]);
147
+ #else
148
+ #define REGISTER_INDEX_OP(DTYPE_SIZE, IDX_SIZE, DTYPE, INDEX_OP_TYPE, IDX_DTYPE) \
149
+ template \
150
+ [[host_name("index_" #INDEX_OP_TYPE "_" #DTYPE_SIZE "_" #IDX_SIZE)]] \
151
+ kernel void index_ ## INDEX_OP_TYPE<DTYPE, IDX_DTYPE>( \
152
+ constant IndexAB * indexAB [[buffer(0)]], \
153
+ constant void * indexSizes [[buffer(1)]], \
154
+ constant void * indexStrides [[buffer(2)]], \
155
+ constant IDX_DTYPE * offsets [[buffer(3)]], \
156
+ constant void * inputData [[buffer(4)]], \
157
+ device void * outputData [[buffer(5)]], \
158
+ constant uint32_t & num_indices [[buffer(6)]], \
159
+ uint thread_index [[thread_position_in_grid]]);
160
+ #endif
161
+
162
+ #define REGISTER_INDEX_OP_ALL_DTYPES(INDEX_OP_TYPE) \
163
+ REGISTER_INDEX_OP(8bit, idx32, char, INDEX_OP_TYPE, uint3); \
164
+ REGISTER_INDEX_OP(8bit, idx64, char, INDEX_OP_TYPE, ulong3); \
165
+ REGISTER_INDEX_OP(16bit, idx32, short, INDEX_OP_TYPE, uint3); \
166
+ REGISTER_INDEX_OP(16bit, idx64, short, INDEX_OP_TYPE, ulong3); \
167
+ REGISTER_INDEX_OP(32bit, idx32, int, INDEX_OP_TYPE, uint3); \
168
+ REGISTER_INDEX_OP(32bit, idx64, int, INDEX_OP_TYPE, ulong3); \
169
+ REGISTER_INDEX_OP(64bit, idx32, long, INDEX_OP_TYPE, uint3); \
170
+ REGISTER_INDEX_OP(64bit, idx64, long, INDEX_OP_TYPE, ulong3);
171
+
172
+ REGISTER_INDEX_OP_ALL_DTYPES(select);
173
+ REGISTER_INDEX_OP_ALL_DTYPES(put);
174
+
175
+ #if __METAL_VERSION__ < 300
176
+ #define REGISTER_SINGLE_THREADED_INDEX_OP(DTYPE_SIZE, IDX_SIZE, DTYPE, INDEX_OP_TYPE, IDX_DTYPE) \
177
+ template \
178
+ [[host_name("index_" #INDEX_OP_TYPE "_" #DTYPE_SIZE "_" #IDX_SIZE)]] \
179
+ kernel void index_ ## INDEX_OP_TYPE<DTYPE, IDX_DTYPE>( \
180
+ constant IndexAB & indexAB [[buffer(0)]], \
181
+ constant void * indexSizes [[buffer(1)]], \
182
+ constant void * indexStrides [[buffer(2)]], \
183
+ constant IDX_DTYPE * offsets [[buffer(3)]], \
184
+ constant void * inputData [[buffer(4)]], \
185
+ device void * outputData [[buffer(5)]], \
186
+ constant uint32_t & num_indices [[buffer(6)]], \
187
+ constant uint * numIters [[buffer(7)]], \
188
+ uint thread_index [[thread_position_in_grid]]);
189
+ #else
190
+ #define REGISTER_SINGLE_THREADED_INDEX_OP(DTYPE_SIZE, IDX_SIZE, DTYPE, INDEX_OP_TYPE, IDX_DTYPE) \
191
+ template \
192
+ [[host_name("index_" #INDEX_OP_TYPE "_" #DTYPE_SIZE "_" #IDX_SIZE)]] \
193
+ kernel void index_ ## INDEX_OP_TYPE<DTYPE, IDX_DTYPE>( \
194
+ constant IndexAB * indexAB [[buffer(0)]], \
195
+ constant void * indexSizes [[buffer(1)]], \
196
+ constant void * indexStrides [[buffer(2)]], \
197
+ constant IDX_DTYPE * offsets [[buffer(3)]], \
198
+ constant void * inputData [[buffer(4)]], \
199
+ device void * outputData [[buffer(5)]], \
200
+ constant uint32_t & num_indices [[buffer(6)]], \
201
+ constant uint * numIters [[buffer(7)]], \
202
+ uint thread_index [[thread_position_in_grid]]);
203
+ #endif
204
+
205
+ #define REGISTER_SINGLE_THREADED_INDEX_OP_ALL_DTYPES(INDEX_OP_TYPE) \
206
+ REGISTER_SINGLE_THREADED_INDEX_OP(8bit, idx32, char, INDEX_OP_TYPE, uint3); \
207
+ REGISTER_SINGLE_THREADED_INDEX_OP(8bit, idx64, char, INDEX_OP_TYPE, ulong3); \
208
+ REGISTER_SINGLE_THREADED_INDEX_OP(16bit, idx32, short, INDEX_OP_TYPE, uint3); \
209
+ REGISTER_SINGLE_THREADED_INDEX_OP(16bit, idx64, short, INDEX_OP_TYPE, ulong3); \
210
+ REGISTER_SINGLE_THREADED_INDEX_OP(32bit, idx32, int, INDEX_OP_TYPE, uint3); \
211
+ REGISTER_SINGLE_THREADED_INDEX_OP(32bit, idx64, int, INDEX_OP_TYPE, ulong3); \
212
+ REGISTER_SINGLE_THREADED_INDEX_OP(64bit, idx32, long, INDEX_OP_TYPE, uint3); \
213
+ REGISTER_SINGLE_THREADED_INDEX_OP(64bit, idx64, long, INDEX_OP_TYPE, ulong3);
214
+
215
+ REGISTER_SINGLE_THREADED_INDEX_OP_ALL_DTYPES(put_serial);
216
+
217
+ template<typename StridesT, typename DataT>
218
+ kernel void kernel_index_offsets(constant StridesT * strides [[buffer(0)]],
219
+ device DataT * data_offsets [[buffer(1)]],
220
+ constant uint * iter_shape [[buffer(2)]],
221
+ constant uint & num_dimensions [[buffer(3)]],
222
+ uint thread_index [[thread_position_in_grid]]) {
223
+ data_offsets[thread_index] = 0;
224
+ uint32_t idx = thread_index;
225
+ for (uint32_t dim = 0; dim < num_dimensions; dim++) {
226
+ uint32_t remainder = idx % iter_shape[dim];
227
+ idx /= iter_shape[dim];
228
+
229
+ data_offsets[thread_index] += remainder * DataT(strides[dim]);
230
+ }
231
+ }
232
+
233
+ template
234
+ [[host_name("kernel_index_offsets_32")]]
235
+ kernel void kernel_index_offsets<packed_uint3, uint3>(
236
+ constant packed_uint3 * strides [[buffer(0)]],
237
+ device uint3 * data_offsets [[buffer(1)]],
238
+ constant uint * iter_shape [[buffer(2)]],
239
+ constant uint & num_dimensions [[buffer(3)]],
240
+ uint thread_index [[thread_position_in_grid]]);
241
+
242
+ template
243
+ [[host_name("kernel_index_offsets_64")]]
244
+ kernel void kernel_index_offsets<packed_uint3, ulong3>(
245
+ constant packed_uint3 * strides [[buffer(0)]],
246
+ device ulong3 * data_offsets [[buffer(1)]],
247
+ constant uint * iter_shape [[buffer(2)]],
248
+ constant uint & num_dimensions [[buffer(3)]],
249
+ uint thread_index [[thread_position_in_grid]]);
250
+
251
+ template<typename T, typename E, typename OffsetsT>
252
+ kernel void index_put_accumulate_native_dtypes(
253
+ #if __METAL_VERSION__ >= 300
254
+ constant IndexAB * indexAB [[buffer(0)]],
255
+ #else
256
+ constant IndexAB & indexAB [[buffer(0)]],
257
+ #endif
258
+ constant void * indexSizes [[buffer(1)]],
259
+ constant void * indexStrides [[buffer(2)]],
260
+ constant OffsetsT * offsets [[buffer(3)]],
261
+ constant void * inputData [[buffer(4)]],
262
+ device void * outputData [[buffer(5)]],
263
+ constant uint32_t & num_indices [[buffer(6)]],
264
+ uint thread_index [[thread_position_in_grid]]) {
265
+ constant int64_t * index_sizes = (constant int64_t *)indexSizes;
266
+ constant int64_t * index_strides = (constant int64_t *)indexStrides;
267
+ int64_t offset = 0;
268
+ for (uint32_t i = 0; i < num_indices; i++) {
269
+ #if __METAL_VERSION__ >= 300
270
+ constant int64_t* indexArray = indexAB[i].indexArray;
271
+ #else
272
+ constant int64_t* indexArray = (constant int64_t*)indexAB.indexArray[i];
273
+ #endif
274
+ int64_t index = indexArray[offsets[thread_index].z / sizeof(int64_t)];
275
+ if (index < 0) {
276
+ index += index_sizes[i];
277
+ }
278
+ offset += index * index_strides[i];
279
+ }
280
+ device T * out = (device T*)((device char*)outputData + offsets[thread_index].x + offset);
281
+ constant E * in = (constant E*)((constant char*)inputData + offsets[thread_index].y);
282
+ atomic_fetch_add_explicit(out, *in, memory_order_relaxed);
283
+ }
284
+
285
+ template<typename T>
286
+ __attribute__((__always_inline__)) void atomic_fetch_add_relaxed(device void * addr, T value) {
287
+ device atomic_uint* uintAddr = (device atomic_uint*)addr;
288
+ uint expected = atomic_load_explicit(uintAddr, memory_order_relaxed);
289
+ T updated = as_type<T>(expected) + value;
290
+ while (!atomic_compare_exchange_weak_explicit(uintAddr, &expected, as_type<uint>(updated), memory_order_relaxed, memory_order_relaxed)) {
291
+ updated = as_type<T>(expected) + value;
292
+ }
293
+ }
294
+
295
+ template<typename T, typename OffsetsT>
296
+ kernel void atomic_index_put_accumulate(
297
+ #if __METAL_VERSION__ >= 300
298
+ constant IndexAB * indexAB [[buffer(0)]],
299
+ #else
300
+ constant IndexAB & indexAB [[buffer(0)]],
301
+ #endif
302
+ constant void * indexSizes [[buffer(1)]],
303
+ constant void * indexStrides [[buffer(2)]],
304
+ constant OffsetsT * offsets [[buffer(3)]],
305
+ constant void * inputData [[buffer(4)]],
306
+ device void * outputData [[buffer(5)]],
307
+ constant uint32_t & num_indices [[buffer(6)]],
308
+ uint thread_index [[thread_position_in_grid]]) {
309
+ constant int64_t * index_sizes = (constant int64_t *)indexSizes;
310
+ constant int64_t * index_strides = (constant int64_t *)indexStrides;
311
+ int64_t offset = 0;
312
+ for (uint32_t i = 0; i < num_indices; i++) {
313
+ #if __METAL_VERSION__ >= 300
314
+ constant int64_t* indexArray = indexAB[i].indexArray;
315
+ #else
316
+ constant int64_t* indexArray = (constant int64_t*)indexAB.indexArray[i];
317
+ #endif
318
+ int64_t index = indexArray[offsets[thread_index].z / sizeof(int64_t)];
319
+ if (index < 0) {
320
+ index += index_sizes[i];
321
+ }
322
+ offset += index * index_strides[i];
323
+ }
324
+ device void * out = (device void*)((device char*)outputData + offsets[thread_index].x + offset);
325
+ constant T * in = (constant T*)((constant char*)inputData + offsets[thread_index].y);
326
+ atomic_fetch_add_relaxed<T>(out, *in);
327
+ }
328
+
329
+ template
330
+ [[host_name("index_put_accumulate_32bit_float_idx32")]]
331
+ kernel void atomic_index_put_accumulate<float, uint3>(
332
+ #if __METAL_VERSION__ >= 300
333
+ constant IndexAB * indexAB [[buffer(0)]],
334
+ #else
335
+ constant IndexAB & indexAB [[buffer(0)]],
336
+ #endif
337
+ constant void * indexSizes [[buffer(1)]],
338
+ constant void * indexStrides [[buffer(2)]],
339
+ constant uint3 * offsets [[buffer(3)]],
340
+ constant void * inputData [[buffer(4)]],
341
+ device void * outputData [[buffer(5)]],
342
+ constant uint32_t & num_indices [[buffer(6)]],
343
+ uint thread_index [[thread_position_in_grid]]);
344
+
345
+ template
346
+ [[host_name("index_put_accumulate_32bit_float_idx64")]]
347
+ kernel void atomic_index_put_accumulate<float, ulong3>(
348
+ #if __METAL_VERSION__ >= 300
349
+ constant IndexAB * indexAB [[buffer(0)]],
350
+ #else
351
+ constant IndexAB & indexAB [[buffer(0)]],
352
+ #endif
353
+ constant void * indexSizes [[buffer(1)]],
354
+ constant void * indexStrides [[buffer(2)]],
355
+ constant ulong3 * offsets [[buffer(3)]],
356
+ constant void * inputData [[buffer(4)]],
357
+ device void * outputData [[buffer(5)]],
358
+ constant uint32_t & num_indices [[buffer(6)]],
359
+ uint thread_index [[thread_position_in_grid]]);
360
+
361
+ template
362
+ [[host_name("index_put_accumulate_32bit_int_idx32")]]
363
+ kernel void index_put_accumulate_native_dtypes<atomic_int, int, uint3>(
364
+ #if __METAL_VERSION__ >= 300
365
+ constant IndexAB * indexAB [[buffer(0)]],
366
+ #else
367
+ constant IndexAB & indexAB [[buffer(0)]],
368
+ #endif
369
+ constant void * indexSizes [[buffer(1)]],
370
+ constant void * indexStrides [[buffer(2)]],
371
+ constant uint3 * offsets [[buffer(3)]],
372
+ constant void * inputData [[buffer(4)]],
373
+ device void * outputData [[buffer(5)]],
374
+ constant uint32_t & num_indices [[buffer(6)]],
375
+ uint thread_index [[thread_position_in_grid]]);
376
+
377
+ template
378
+ [[host_name("index_put_accumulate_32bit_int_idx64")]]
379
+ kernel void index_put_accumulate_native_dtypes<atomic_int, int, ulong3>(
380
+ #if __METAL_VERSION__ >= 300
381
+ constant IndexAB * indexAB [[buffer(0)]],
382
+ #else
383
+ constant IndexAB & indexAB [[buffer(0)]],
384
+ #endif
385
+ constant void * indexSizes [[buffer(1)]],
386
+ constant void * indexStrides [[buffer(2)]],
387
+ constant ulong3 * offsets [[buffer(3)]],
388
+ constant void * inputData [[buffer(4)]],
389
+ device void * outputData [[buffer(5)]],
390
+ constant uint32_t & num_indices [[buffer(6)]],
391
+ uint thread_index [[thread_position_in_grid]]);
392
+ )INDEX_METAL";
393
+
394
+ static const char *SCATTER_OPS_TEMPLATE = R"METAL_SCATTER(
395
+ struct __attribute__ ((packed)) packed_uint5{{
396
+ uint32_t x; uint32_t y; uint32_t z; uint32_t w; uint32_t u;
397
+ }};
398
+
399
+ template<typename Y, typename X>
400
+ Y cast(const X x);
401
+
402
+ template<>
403
+ {1} cast<{1}, {0}>(const {0} x) {{
404
+ return {2};
405
+ }}
406
+
407
+ kernel void scatter_kernel_5(uint linear_index [[thread_position_in_grid]],
408
+ constant void * src_ [[buffer(0)]],
409
+ device void * dst_ [[buffer(1)]],
410
+ constant packed_uint5 & size [[buffer(2)]],
411
+ constant packed_uint5 & stride [[buffer(3)]],
412
+ constant uint32_t & numel [[buffer(4)]]) {{
413
+ if (linear_index >= numel) return;
414
+
415
+ constant {0} * src = (constant {0} *)src_;
416
+   device {1} * dst = (device {1} *)dst_;
+
+   packed_uint5 local_index;
+   local_index.x = linear_index / (size.u * size.w * size.z * size.y) % size.x;
+   local_index.y = linear_index / (size.u * size.w * size.z) % size.y;
+   local_index.z = linear_index / (size.u * size.w) % size.z;
+   local_index.w = linear_index / size.u % size.w;
+   local_index.u = linear_index % size.u;
+
+   packed_uint5 strided_index;
+   strided_index.x = local_index.x * stride.x;
+   strided_index.y = local_index.y * stride.y;
+   strided_index.z = local_index.z * stride.z;
+   strided_index.w = local_index.w * stride.w;
+   strided_index.u = local_index.u * stride.u;
+
+   dst[strided_index.x + strided_index.y + strided_index.z + strided_index.w + strided_index.u] = cast<{1}>(src[linear_index]);
+ }}
+
+ kernel void scatter_kernel_4(uint linear_index [[thread_position_in_grid]],
+                              constant void * src_ [[buffer(0)]],
+                              device void * dst_ [[buffer(1)]],
+                              constant packed_uint4 & size [[buffer(2)]],
+                              constant packed_uint4 & stride [[buffer(3)]],
+                              constant uint32_t & numel [[buffer(4)]]) {{
+   if (linear_index >= numel) return;
+
+   constant {0} * src = (constant {0} *)src_;
+   device {1} * dst = (device {1} *)dst_;
+
+   packed_uint4 local_index;
+   local_index.x = linear_index / (size[3] * size[2] * size[1]) % size[0];
+   local_index.y = linear_index / (size[3] * size[2]) % size[1];
+   local_index.z = linear_index / size[3] % size[2];
+   local_index.w = linear_index % size[3];
+
+   const packed_uint4 strided_index = local_index * stride;
+   dst[strided_index.x + strided_index.y + strided_index.z + strided_index.w] = cast<{1}>(src[linear_index]);
+ }}
+
+ kernel void scatter_kernel_3(uint linear_index [[thread_position_in_grid]],
+                              constant void * src_ [[buffer(0)]],
+                              device void * dst_ [[buffer(1)]],
+                              constant packed_uint3 & size [[buffer(2)]],
+                              constant packed_uint3 & stride [[buffer(3)]],
+                              constant uint32_t & numel [[buffer(4)]]) {{
+   if (linear_index >= numel) return;
+
+   constant {0} * src = (constant {0} *)src_;
+   device {1} * dst = (device {1} *)dst_;
+
+   packed_uint3 local_index;
+   local_index.x = linear_index / (size[2] * size[1]) % size[0];
+   local_index.y = linear_index / size[2] % size[1];
+   local_index.z = linear_index % size[2];
+
+   const packed_uint3 strided_index = local_index * stride;
+   dst[strided_index.x + strided_index.y + strided_index.z] = cast<{1}>(src[linear_index]);
+ }}
+
+ kernel void scatter_kernel_2(uint linear_index [[thread_position_in_grid]],
+                              constant void * src_ [[buffer(0)]],
+                              device void * dst_ [[buffer(1)]],
+                              constant packed_uint2 & size [[buffer(2)]],
+                              constant packed_uint2 & stride [[buffer(3)]],
+                              constant uint32_t & numel [[buffer(4)]]) {{
+   if (linear_index >= numel) return;
+
+   constant {0} * src = (constant {0} *)src_;
+   device {1} * dst = (device {1} *)dst_;
+
+   packed_uint2 local_index;
+   local_index.x = linear_index / size[1] % size[0];
+   local_index.y = linear_index % size[1];
+
+   const packed_uint2 strided_index = local_index * stride;
+   dst[strided_index.x + strided_index.y] = cast<{1}>(src[linear_index]);
+ }}
+
+ kernel void scatter_kernel_1(uint linear_index [[thread_position_in_grid]],
+                              constant void * src_ [[buffer(0)]],
+                              device void * dst_ [[buffer(1)]],
+                              constant int & size [[buffer(2)]],
+                              constant int & stride [[buffer(3)]],
+                              constant uint32_t & numel [[buffer(4)]]) {{
+   if (linear_index >= numel) return;
+
+   constant {0} * src = (constant {0} *)src_;
+   device {1} * dst = (device {1} *)dst_;
+
+   const int local_index = linear_index % size;
+   const int strided_index = local_index * stride;
+   dst[strided_index] = cast<{1}>(src[linear_index]);
+ }}
+ )METAL_SCATTER";
+
+ static const char *GATHER_OPS_TEMPLATE = R"METAL_GATHER(
+ struct __attribute__ ((packed)) packed_uint5{{
+   uint32_t x; uint32_t y; uint32_t z; uint32_t w; uint32_t u;
+ }};
+
+ template<typename Y, typename X>
+ Y cast(const X x);
+
+ template<>
+ {1} cast<{1}, {0}>(const {0} x) {{
+   return {2};
+ }}
+
+ kernel void gather_kernel_5(uint linear_index [[thread_position_in_grid]],
+                             constant void * src_ [[buffer(0)]],
+                             device void * dst_ [[buffer(1)]],
+                             constant packed_uint5 & size [[buffer(2)]],
+                             constant packed_uint5 & stride [[buffer(3)]],
+                             constant uint32_t & numel [[buffer(4)]]) {{
+   if (linear_index >= numel) return;
+
+   constant {0} * src = (constant {0} *)src_;
+   device {1} * dst = (device {1} *)dst_;
+
+   packed_uint5 local_index;
+   local_index.x = linear_index / (size.u * size.w * size.z * size.y) % size.x;
+   local_index.y = linear_index / (size.u * size.w * size.z) % size.y;
+   local_index.z = linear_index / (size.u * size.w) % size.z;
+   local_index.w = linear_index / size.u % size.w;
+   local_index.u = linear_index % size.u;
+
+   packed_uint5 strided_index;
+   strided_index.x = local_index.x * stride.x;
+   strided_index.y = local_index.y * stride.y;
+   strided_index.z = local_index.z * stride.z;
+   strided_index.w = local_index.w * stride.w;
+   strided_index.u = local_index.u * stride.u;
+
+   dst[linear_index] = cast<{1}>(src[strided_index.x + strided_index.y + strided_index.z + strided_index.w + strided_index.u]);
+ }}
+
+ kernel void gather_kernel_4(uint linear_index [[thread_position_in_grid]],
+                             constant void * src_ [[buffer(0)]],
+                             device void * dst_ [[buffer(1)]],
+                             constant packed_uint4 & size [[buffer(2)]],
+                             constant packed_uint4 & stride [[buffer(3)]],
+                             constant uint32_t & numel [[buffer(4)]]) {{
+   if (linear_index >= numel) return;
+
+   constant {0} * src = (constant {0} *)src_;
+   device {1} * dst = (device {1} *)dst_;
+
+   packed_uint4 local_index;
+   local_index.x = linear_index / (size[3] * size[2] * size[1]) % size[0];
+   local_index.y = linear_index / (size[3] * size[2]) % size[1];
+   local_index.z = linear_index / size[3] % size[2];
+   local_index.w = linear_index % size[3];
+
+   const packed_uint4 strided_index = local_index * stride;
+   dst[linear_index] = cast<{1}>(src[strided_index.x + strided_index.y + strided_index.z + strided_index.w]);
+ }}
+
+ kernel void gather_kernel_3(uint linear_index [[thread_position_in_grid]],
+                             constant void * src_ [[buffer(0)]],
+                             device void * dst_ [[buffer(1)]],
+                             constant packed_uint3 & size [[buffer(2)]],
+                             constant packed_uint3 & stride [[buffer(3)]],
+                             constant uint32_t & numel [[buffer(4)]]) {{
+   if (linear_index >= numel) return;
+
+   constant {0} * src = (constant {0} *)src_;
+   device {1} * dst = (device {1} *)dst_;
+
+   packed_uint3 local_index;
+   local_index.x = linear_index / (size[2] * size[1]) % size[0];
+   local_index.y = linear_index / size[2] % size[1];
+   local_index.z = linear_index % size[2];
+
+   const packed_uint3 strided_index = local_index * stride;
+   dst[linear_index] = cast<{1}>(src[strided_index.x + strided_index.y + strided_index.z]);
+ }}
+
+ kernel void gather_kernel_2(uint linear_index [[thread_position_in_grid]],
+                             constant void * src_ [[buffer(0)]],
+                             device void * dst_ [[buffer(1)]],
+                             constant packed_uint2 & size [[buffer(2)]],
+                             constant packed_uint2 & stride [[buffer(3)]],
+                             constant uint32_t & numel [[buffer(4)]]) {{
+   if (linear_index >= numel) return;
+
+   constant {0} * src = (constant {0} *)src_;
+   device {1} * dst = (device {1} *)dst_;
+
+   packed_uint2 local_index;
+   local_index.x = linear_index / size[1] % size[0];
+   local_index.y = linear_index % size[1];
+
+   const packed_uint2 strided_index = local_index * stride;
+   dst[linear_index] = cast<{1}>(src[strided_index.x + strided_index.y]);
+ }}
+
+ kernel void gather_kernel_1(uint linear_index [[thread_position_in_grid]],
+                             constant void * src_ [[buffer(0)]],
+                             device void * dst_ [[buffer(1)]],
+                             constant int & size [[buffer(2)]],
+                             constant int & stride [[buffer(3)]],
+                             constant uint32_t & numel [[buffer(4)]]) {{
+   if (linear_index >= numel) return;
+
+   constant {0} * src = (constant {0} *)src_;
+   device {1} * dst = (device {1} *)dst_;
+
+   const int local_index = linear_index % size;
+   const int strided_index = local_index * stride;
+   dst[linear_index] = cast<{1}>(src[strided_index]);
+ }}
+ )METAL_GATHER";
+ } // namespace at::mps
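For reference, every scatter/gather kernel above uses the same addressing scheme: decompose the flat thread index into a multi-dimensional coordinate using the sizes, then re-linearize that coordinate with the tensor's strides. Below is a minimal host-side C++ sketch of that arithmetic for the 3-D case; the function name and the worked numbers are ours, added purely for illustration and not part of the header.

    #include <array>
    #include <cstdint>
    #include <cstdio>

    // Mirrors gather_kernel_3: decompose a linear index into (x, y, z) using the
    // sizes, then combine with the strides to get the offset into the strided source.
    static uint32_t strided_offset_3d(uint32_t linear_index,
                                      const std::array<uint32_t, 3>& size,
                                      const std::array<uint32_t, 3>& stride) {
      const uint32_t x = linear_index / (size[2] * size[1]) % size[0];
      const uint32_t y = linear_index / size[2] % size[1];
      const uint32_t z = linear_index % size[2];
      return x * stride[0] + y * stride[1] + z * stride[2];
    }

    int main() {
      // A 2x3x4 tensor: linear index 17 decomposes to coordinate (1, 1, 1).
      // With contiguous strides {12, 4, 1} it maps back to offset 17;
      // with transposed strides {1, 2, 6} the same coordinate lands at offset 9.
      std::printf("%u\n", strided_offset_3d(17, {2, 3, 4}, {12, 4, 1})); // 17
      std::printf("%u\n", strided_offset_3d(17, {2, 3, 4}, {1, 2, 6}));  // 9
      return 0;
    }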
venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSAllocator.h ADDED
@@ -0,0 +1,401 @@
+ // Copyright © 2022 Apple Inc.
+
+ #pragma once
+
+ #include <ATen/mps/MPSAllocatorInterface.h>
+ #include <ATen/mps/MPSEvent.h>
+ #include <ATen/mps/MPSStream.h>
+
+ #include <cstdio>
+ #include <mutex>
+ #include <set>
+ #include <unordered_set>
+ #include <mach/vm_page_size.h>
+ #include <c10/util/flat_hash_map.h>
+
+ // this implementation is based on CUDACachingAllocator.
+ // It utilizes Metal Heaps to improve the performance with buffer allocation.
+ // Do not include this header. Use MPSAllocatorInterface.h instead.
+ // TODO: Unify the logic with CUDACachingAllocator and remove redundant code.
+ namespace at::mps::HeapAllocator {
+
+ static const size_t kMaxSmallAlloc = MB(1);    // largest "small" allocation is 1 MiB
+ static const size_t kMinLargeAlloc = MB(10);   // allocations between 1 and 10 MiB may use kLargeHeap
+ static const size_t kRoundLarge    = MB(2);    // round up large allocations to 2 MiB
+ static const size_t kSmallHeap     = MB(8);    // "small" allocations are packed in 8 MiB heaps
+ static const size_t kLargeHeap     = MB(32);   // "large" allocations may be packed in 32 MiB heaps
+ static const size_t kXLargeHeapD   = MB(128);  // "extra large" allocations on Discrete devices may be packed in 128 MiB heaps
+ static const size_t kXLargeHeapU   = MB(1024); // "extra large" allocations on Unified devices may be packed in 1 GiB heaps
+ static const size_t kMaxScalarAlloc = (sizeof(int64_t)); // largest "scalar" allocation
+
+ // buffer pools could be customized with a combination of usage flags
+ enum UsageFlags : uint32_t {
+   PRIVATE = 0,
+   SMALL   = (1 << 0), // small heaps have sizes of kSmallHeap, and large ones kLargeHeap
+   SHARED  = (1 << 1), // shared pools allocated on devices with unified memory; otherwise, private between host/device
+   MANAGED = (1 << 2), // managed storage mode
+   HAZARD  = (1 << 3), // enables Automatic Hazard Tracking for the resources allocated on the pool
+   SCALAR  = (1 << 4), // used to import CPU scalar values to GPU and use them in MPS Stream
+ };
+ // debug verbosity flags
+ enum DebugVerbosity : uint32_t {
+   SILENT      = 0,
+   PROFILING   = (1 << 0), // print generic profiling data for total system memory usage
+   ALLOCATIONS = (1 << 1), // print buffer allocations
+   RECYCLES    = (1 << 2), // print buffer recycling
+   RELEASES    = (1 << 3), // print buffer releases
+   LARGE_ONLY  = (1 << 4), // only log large buffer pool transactions
+ };
+
+ struct HeapBlock;
+
+ struct BufferBlock {
+   id<MTLBuffer> buffer;
+   void* cpu_ptr = nullptr; // stores the pointer to CPU mapping of a Shared MTLBuffer
+   size_t size; // size after alignment
+   size_t requested_size; // requested size (before alignment)
+   // buffer shape is used for retrieving base of views in cached graphs
+   std::vector<int64_t> shape;
+   bool in_use = false;
+   HeapBlock* heap;
+   id_t buf_id;
+   // counter to candidate least recently used buffers for garbage collection
+   uint32_t gc_count = 0;
+   uint32_t use_count = 0;
+   // counter to assign unique ids to buffer blocks
+   static uint64_t buffer_counter;
+   // Metal events used to sync GPU/CPU operations on the shared-storage buffers
+   MPSEventPtr event;
+
+   BufferBlock(size_t Size, size_t RequestedSize = 0, const id<MTLBuffer> Buffer = nullptr,
+               HeapBlock* Heap = nullptr) :
+     buffer(Buffer), size(Size), requested_size(RequestedSize),
+     heap(Heap), buf_id(Buffer ? ++buffer_counter : 0) { }
+
+   static bool Comparator(const BufferBlock* a, const BufferBlock* b) {
+     return (a->size != b->size) ? a->size < b->size : (uintptr_t)a->buffer < (uintptr_t)b->buffer;
+   }
+   static size_t alignUp(size_t Size, size_t Alignment) {
+     assert(((Alignment - 1) & Alignment) == 0);
+     return ((Size + Alignment - 1) & ~(Alignment - 1));
+   }
+   uint32_t retainCount() const { return [buffer retainCount]; }
+ };
+ typedef bool (*BufferComparison)(const BufferBlock*, const BufferBlock*);
+
+ struct BufferPool;
+ struct AllocParams {
+   AllocParams(size_t Alloc_Size, size_t Requested_Size, BufferPool* Pool) :
+     search_key(Alloc_Size), pool(Pool), requested_size(Requested_Size) { }
+   size_t size() const { return search_key.size; }
+
+   BufferBlock search_key;
+   BufferPool* pool;
+   BufferBlock* buffer_block = nullptr;
+   size_t requested_size;
+   // true if we exceed the low watermark limit. In this case
+   // we apply strategies to relieve the pressure before allocation.
+   bool has_memory_pressure = false;
+   // true if we're allocating on a unified memory device
+   bool has_unified_memory = true;
+ };
+
+ struct HeapBlock {
+   id<MTLHeap> heap;
+   struct { size_t total, available; } size;
+   BufferPool* pool;
+   unsigned int n_buffers = 0;
+   id_t heap_id;
+   // indicates if we split this heap to sub-allocate 'several' buffers (otherwise single buffer)
+   bool is_split;
+   // counter to assign unique ids to heap blocks
+   static uint64_t heap_counter;
+
+   HeapBlock(size_t Size, const id<MTLHeap> Heap = nullptr, BufferPool *Pool = nullptr) :
+     heap(Heap), size({.total = Size, .available = Size}), pool(Pool),
+     heap_id(Heap ? ++heap_counter : 0), is_split(true) { }
+
+   static MTLResourceOptions getOptions(uint32_t usage) {
+     // TODO: check the caching performance of write-combined mode
+     MTLResourceOptions options = MTLResourceCPUCacheModeDefaultCache;
+
+     if (usage & UsageFlags::MANAGED)
+       options |= MTLResourceStorageModeManaged;
+     else if (usage & UsageFlags::SHARED)
+       options |= MTLResourceStorageModeShared;
+     else
+       options |= MTLResourceStorageModePrivate;
+
+     options |= (usage & UsageFlags::HAZARD) ? MTLResourceHazardTrackingModeTracked : MTLResourceHazardTrackingModeUntracked;
+
+     return options;
+   }
+
+   static HeapBlock* createHeapBlock(AllocParams& params, id<MTLDevice> device, uint32_t usage) {
+     HeapBlock *heapBlock = nullptr;
+     bool is_split = true;
+     const size_t size = params.size();
+     MTLHeapDescriptor *d = [MTLHeapDescriptor new];
+     if (d) {
+       const size_t kXLargeHeap = params.has_unified_memory ? kXLargeHeapU : kXLargeHeapD;
+       if (size <= kMaxSmallAlloc) {
+         d.size = kSmallHeap;
+       } else if (size < kMinLargeAlloc) {
+         d.size = kLargeHeap;
+       } else if (size < kXLargeHeap / 2 && !params.has_memory_pressure) {
+         d.size = kXLargeHeap;
+       } else {
+         d.size = kRoundLarge * ((size + kRoundLarge - 1) / kRoundLarge);
+         is_split = false;
+       }
+       d.storageMode = (usage & UsageFlags::SHARED) ? MTLStorageModeShared : MTLStorageModePrivate;
+       d.cpuCacheMode = MTLCPUCacheModeDefaultCache;
+       // this automatically handles Metal buffer access synchronizations at the
+       // cost of slightly lower performance.
+       d.hazardTrackingMode = (usage & UsageFlags::HAZARD) ? MTLHazardTrackingModeTracked : MTLHazardTrackingModeUntracked;
+       d.resourceOptions = getOptions(usage);
+       d.type = MTLHeapTypeAutomatic;
+       id<MTLHeap> heap = [device newHeapWithDescriptor: d];
+       if (heap) {
+         [heap setPurgeableState:MTLPurgeableStateNonVolatile];
+         const size_t heap_size = heapAvailableSize(heap);
+         heapBlock = new HeapBlock(heap_size, heap, params.pool);
+         if (heapBlock) {
+           heapBlock->is_split = is_split;
+         }
+       }
+       [d release];
+     }
+     return heapBlock;
+   }
+   static bool Comparator(const HeapBlock* a, const HeapBlock* b) {
+     return (a->size.available != b->size.available) ? a->size.available < b->size.available :
+                                                       (uintptr_t)a->heap < (uintptr_t)b->heap;
+   }
+   static NSUInteger heapAvailableSize(id<MTLHeap> heap, size_t Alignment = vm_page_size) {
+     return [heap maxAvailableSizeWithAlignment:Alignment];
+   }
+   NSUInteger Size() {
+     return [heap size];
+   }
+   id<MTLBuffer> newMTLBuffer(size_t length, uint32_t usage) {
+     id<MTLBuffer> buf = [heap newBufferWithLength:length options:getOptions(usage)];
+     if (buf) {
+       updateAvailableSize();
+       n_buffers++;
+     }
+     return buf;
+   }
+   // returns the retainCount before releasing the buffer
+   uint32_t releaseMTLBuffer(id<MTLBuffer>& buffer) {
+     const uint32_t retainCount = [buffer retainCount];
+     [buffer release];
+     buffer = nil;
+     updateAvailableSize();
+     n_buffers--;
+     return retainCount;
+   }
+   // returns the retainCount before releasing the heap
+   uint32_t releaseMTLHeap() {
+     const uint32_t retainCount = [heap retainCount];
+     TORCH_INTERNAL_ASSERT(!n_buffers); // assert if heap isn't empty
+     [heap setPurgeableState:MTLPurgeableStateEmpty];
+     [heap release];
+     heap = nil;
+     size.available = 0;
+     return retainCount;
+   }
+   uint32_t retainCount() const { return [heap retainCount]; }
+   void updateAvailableSize() { size.available = heapAvailableSize(heap); }
+ };
+ typedef bool (*HeapComparison)(const HeapBlock*, const HeapBlock*);
+
+ struct BufferPool {
+   enum class Kind {
+     PRIVATE_SMALL,
+     PRIVATE_LARGE,
+     SHARED_SMALL,
+     SHARED_LARGE,
+     SCALAR,
+   };
+
+   BufferPool(const id<MTLDevice> Device, uint32_t Usage) :
+     device(Device), usage(Usage),
+     heaps(HeapBlock::Comparator), available_buffers(BufferBlock::Comparator) { }
+
+   const id<MTLDevice> device;
+   // usage flags to customize the pool for various purposes (see UsageFlags enum)
+   const uint32_t usage;
+   // total number of buffers in the pool
+   uint32_t n_buffers = 0;
+   // total allocations size on this pool
+   size_t allocated_size = 0;
+   // total memory available in the pool
+   size_t available_size = 0;
+   // list of heaps ordered by their "available" (not total) memory size
+   std::set<HeapBlock*, HeapComparison> heaps;
+   // list of only "available" buffers in the pool (i.e., buffers not in-use)
+   std::set<BufferBlock*, BufferComparison> available_buffers;
+   // list of buffers that are in a state of "limbo" where they've already been freed
+   // from PyTorch-side, but were not returned to pool due to still being
+   // in-use by command buffers with retainCount > 1. In this state, the buffer is
+   // neither ready to be recycled, nor could be returned to pool as available.
+   // These buffers will be returned to pool once the command buffer's
+   // completionHandler callbacks are called.
+   std::unordered_set<BufferBlock*> buffers_pending_free;
+   // list of heaps pending size update
+   std::unordered_set<HeapBlock*> heaps_pending_update;
+ };
+
+ class MPSHeapAllocatorImpl {
+ public:
+   explicit MPSHeapAllocatorImpl() :
+     m_device(at::mps::MPSDevice::getInstance()->device()),
+     m_max_buffer_size([m_device maxBufferLength]),
+     m_stream(getDefaultMPSStream()),
+     m_event_pool(getMPSEventPool()) {
+     init_allocator();
+   }
+   ~MPSHeapAllocatorImpl() {
+     emptyCache();
+   }
+   // interface exposed to at::Allocator
+   id<MTLBuffer> malloc(size_t size, uint32_t usage);
+   // frees a buffer and returns it into buffer pool
+   void free(void* ptr);
+   // releases all the cached buffers and their associated heaps
+   void emptyCache();
+   // free inactive buffers that are pending to be freed
+   void freeInactiveBuffers();
+   // returns true if buffer was allocated from the shared pool
+   bool isSharedBuffer(const void* ptr);
+   // get the requested unaligned size of an MTLBuffer
+   ssize_t getUnalignedBufferSize(const void* ptr);
+   // set the shape of a base tensor from a view tensor
+   void setBufferShape(const void* ptr, const IntArrayRef& shape);
+   // retrieve the shape of a base tensor from a view tensor
+   IntArrayRef getBufferShape(const void* ptr);
+   // get the unique ID of the buffer
+   id_t getBufferId(const void* ptr);
+   // allocate a buffer from a specialized pool to import CPU scalars into GPU
+   id<MTLBuffer> allocScalarBufferWithValue(void* value, size_t size);
+   // returns a CPU-mapping of the input buffer and its retainCount,
+   // if only it has Shared storage-mode and allocated on MPSAllocator
+   std::pair<const void*, uint32_t> getSharedBufferPtr(const void* buffer);
+   // records events for a list of MTLBuffers (list is used to lock the mutex once)
+   // returns true if records any event (given if passed buffers exist and are shared-storage)
+   bool recordEvents(c10::ArrayRef<const void*> buffers);
+   // waits for the event to signal the completion of GPU execution
+   // on the passed shared buffers (list is used to lock the mutex once)
+   // returns true if actually waited on any event
+   bool waitForEvents(c10::ArrayRef<const void*> buffers);
+   // this indicates how far (in Megabytes) the current total allocations are from the
+   // low watermark limit which is used to detect if we're under memory pressure
+   // This returns zero if we've reached the low watermark limit
+   ssize_t getLowWatermarkValue();
+   // (see m_low_watermark_ratio for description)
+   void setLowWatermarkRatio(double ratio);
+   // (see m_high_watermark_ratio for description)
+   void setHighWatermarkRatio(double ratio);
+   // (see m_low_watermark_limit for description)
+   size_t getLowWatermarkLimit() const { return m_low_watermark_limit; }
+   // (see m_max_total_allowed_size for description)
+   size_t getHighWatermarkLimit() const { return m_max_total_allowed_size; }
+   // (see m_total_allocated_memory for description)
+   size_t getTotalAllocatedMemory() const { return m_total_allocated_memory; }
+   // (see m_current_allocated_memory for description)
+   size_t getCurrentAllocatedMemory() const { return m_current_allocated_memory; }
+   // total GPU memory allocated in the process by Metal driver; including
+   // implicit allocations from MPS/MPSGraph frameworks and MPSHeapAllocatorImpl.
+   size_t getDriverAllocatedMemory() const { return current_allocated_size(); }
+   // (see enum DebugVerbosity for description)
+   uint32_t getDebugVerbosity() const { return m_debug_verbosity; }
+   // returns the device that we allocate from
+   inline id<MTLDevice> Device() const { return m_device; }
+
+   // TODO: make a common function to do size unit conversions in PyTorch.
+   inline std::string format_size(uint64_t size) const;
+
+ private:
+   // (see m_high_watermark_ratio for description)
+   constexpr static double default_high_watermark_ratio = 1.7;
+   // we set the allowed upper bound to twice the size of recommendedMaxWorkingSetSize.
+   constexpr static double default_high_watermark_upper_bound = 2.0;
+   // (see m_low_watermark_ratio for description)
+   // on unified memory, we could allocate beyond the recommendedMaxWorkingSetSize
+   constexpr static double default_low_watermark_ratio_unified  = 1.4;
+   constexpr static double default_low_watermark_ratio_discrete = 1.0;
+
+   const id<MTLDevice> m_device;
+   std::recursive_mutex m_mutex;
+   // allocated buffers by device pointer
+   ska::flat_hash_map<const void*, BufferBlock*> m_allocated_buffers;
+   // using a container for pools to simplify iterating them
+   ska::flat_hash_map<BufferPool::Kind, std::unique_ptr<BufferPool>> m_pools;
+   // total memory allocated by HeapAllocator (including blocks in pools)
+   size_t m_total_allocated_memory = 0;
+   // currently active memory allocations in use (i.e., blocks not in pools)
+   size_t m_current_allocated_memory = 0;
+   // max buffer size allowed by Metal
+   size_t m_max_buffer_size = 0;
+   // maximum total size allowed to be allocated
+   size_t m_max_total_allowed_size = 0;
+   // high watermark ratio is a hard limit for the total allowed allocations
+   // 0. : disables high watermark limit (may cause system failure if system-wide OOM occurs)
+   // 1. : recommended maximum allocation size (i.e., device.recommendedMaxWorkingSetSize)
+   // >1.: allows limits beyond the device.recommendedMaxWorkingSetSize
+   // e.g., value 0.95 means we allocate up to 95% of recommended maximum
+   // allocation size; beyond that, the allocations would fail with OOM error.
+   double m_high_watermark_ratio;
+   // low watermark ratio is a soft limit to attempt limiting memory allocations up to the lower watermark
+   // level by garbage collection or committing command buffers more frequently (a.k.a, adaptive commit).
+   // Value between 0 to m_high_watermark_ratio (setting 0.0 disables adaptive commit and garbage collection)
+   // e.g., value 0.9 means we 'attempt' to limit allocations up to 90% of recommended maximum
+   // allocation size.
+   double m_low_watermark_ratio;
+   // low watermark size limit (in Bytes) at the time we initialize the allocator
+   size_t m_low_watermark_limit;
+   // use "PYTORCH_DEBUG_MPS_ALLOCATOR" env-var to set debug verbosity
+   uint32_t m_debug_verbosity;
+   // default MPS stream
+   MPSStream* m_stream;
+   // we hold a reference to MPSEventPool so it could get destroyed after MPSAllocator
+   std::shared_ptr<MPSEventPool> m_event_pool;
+
+   void init_allocator();
+   void init_buffer_pools();
+   HeapBlock* get_free_heap(AllocParams& params);
+   bool get_free_buffer(AllocParams& params);
+   BufferBlock* get_allocated_buffer_block(const void* ptr);
+   BufferBlock* alloc_buffer_block(size_t size, uint32_t usage);
+   bool alloc_buffer(AllocParams& params);
+   void free_buffer(BufferBlock* buffer_block);
+   // returns true if the container heap is also released
+   bool release_buffer(BufferBlock* buffer_block, bool remove_empty_heap = true);
+   void release_buffers(BufferPool& pool);
+   bool release_available_cached_buffers(AllocParams& params);
+   bool release_cached_buffers();
+   // free unused cached blocks to reclaim GPU memory if memory pressure is high
+   void garbage_collect_cached_buffers(AllocParams& params);
+   // returns the suitable buffer pool type for the usage or
+   // requested/allocated sizes
+   BufferPool& get_pool(size_t requested_size, size_t aligned_size, uint32_t usage);
+   // returns the aligned allocation size that is optimized
+   // for the buffers to get reused frequently
+   size_t get_allocation_size(size_t size, uint32_t usage) const;
+   // maximum size of device memory available for allocation in current process
+   // Note: the recommendedMaxWorkingSetSize is typically 75% of the total system memory.
+   size_t max_device_size() const { return [m_device recommendedMaxWorkingSetSize]; }
+   // there are implicit allocations from MPS backend, so we need to query the 'device' for
+   // total allocated size instead of manually tracking in MPSAllocator
+   size_t current_allocated_size() const { return [m_device currentAllocatedSize]; }
+
+   bool trigger_memory_callbacks(BufferBlock* buffer_block, IMpsAllocatorCallback::EventType event) const {
+     for (const auto& name : MPSAllocatorCallbacksRegistry()->Keys()) {
+       MPSAllocatorCallbacksRegistry()->Create(name)->executeMPSAllocatorCallback(buffer_block ? buffer_block->buffer : nullptr, event);
+     }
+     return true;
+   }
+ };
+
+ } // namespace at::mps::HeapAllocator
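The header above sizes heaps by bucket: requests up to kMaxSmallAlloc land in 8 MiB small heaps, requests below kMinLargeAlloc in 32 MiB large heaps, and anything bigger either in an XL heap or a dedicated heap rounded up to kRoundLarge. Here is a standalone C++ sketch of that selection plus the alignUp rounding, with the constants copied from the header; the helper names are ours and this is an illustration of the logic, not the library's code path (it assumes a unified-memory device with no memory pressure).

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    constexpr size_t MiB(size_t x) { return x * 1048576UL; }
    constexpr size_t kMaxSmallAlloc = MiB(1), kMinLargeAlloc = MiB(10);
    constexpr size_t kRoundLarge = MiB(2), kSmallHeap = MiB(8), kLargeHeap = MiB(32);
    constexpr size_t kXLargeHeapU = MiB(1024);

    // Same rounding as BufferBlock::alignUp (Alignment must be a power of two).
    size_t align_up(size_t size, size_t alignment) {
      assert(((alignment - 1) & alignment) == 0);
      return (size + alignment - 1) & ~(alignment - 1);
    }

    // Heap size chosen for a request, mirroring HeapBlock::createHeapBlock on a
    // unified-memory device when there is no memory pressure.
    size_t pick_heap_size(size_t size) {
      if (size <= kMaxSmallAlloc) return kSmallHeap;
      if (size < kMinLargeAlloc) return kLargeHeap;
      if (size < kXLargeHeapU / 2) return kXLargeHeapU;
      return kRoundLarge * ((size + kRoundLarge - 1) / kRoundLarge); // dedicated heap
    }

    int main() {
      std::printf("%zu\n", align_up(5000, 4096));                   // 8192
      std::printf("%zu MiB\n", pick_heap_size(MiB(4)) / MiB(1));    // 32
      std::printf("%zu MiB\n", pick_heap_size(MiB(700)) / MiB(1));  // 700 (already a 2 MiB multiple)
      return 0;
    }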
venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSAllocatorInterface.h ADDED
@@ -0,0 +1,61 @@
+ // Copyright © 2023 Apple Inc.
+
+ #pragma once
+
+ #include <c10/core/Allocator.h>
+ #include <c10/util/Registry.h>
+ #include <ATen/core/ATen_fwd.h>
+
+ #define MB(x) (x * 1048576UL)
+
+ namespace at::mps {
+
+ // this is a public interface to access MPSAllocator.
+ // Do not declare methods that would depend on MPS or Metal frameworks.
+ class IMPSAllocator : public c10::Allocator {
+ public:
+   // see the comments in MPSAllocator.h for the description of these methods.
+   virtual void emptyCache() const = 0;
+   virtual void freeInactiveBuffers() const = 0;
+   virtual ssize_t getUnalignedBufferSize(const void* ptr) const = 0;
+   virtual IntArrayRef getBufferShape(const void* ptr) const = 0;
+   virtual id_t getBufferId(const void* ptr) const = 0;
+   virtual void setBufferShape(const void* ptr, const IntArrayRef& shape) const = 0;
+   virtual bool isSharedBuffer(const void* ptr) const = 0;
+   virtual bool isSharedStorageSupported() const = 0;
+   virtual c10::DataPtr allocScalarBufferWithValue(void* value, size_t size) const = 0;
+   virtual std::string formatSize(size_t size) const = 0;
+   virtual void setLowWatermarkRatio(double ratio) const = 0;
+   virtual void setHighWatermarkRatio(double ratio) const = 0;
+   virtual ssize_t getLowWatermarkValue() const = 0;
+   virtual size_t getLowWatermarkLimit() const = 0;
+   virtual size_t getHighWatermarkLimit() const = 0;
+   virtual size_t getTotalAllocatedMemory() const = 0;
+   virtual size_t getCurrentAllocatedMemory() const = 0;
+   virtual size_t getDriverAllocatedMemory() const = 0;
+   virtual std::pair<const void*, uint32_t> getSharedBufferPtr(const void* ptr) const = 0;
+   virtual bool recordEvents(c10::ArrayRef<const void*> buffers) const = 0;
+   virtual bool waitForEvents(c10::ArrayRef<const void*> buffers) const = 0;
+ };
+
+ class IMpsAllocatorCallback {
+ public:
+   enum class EventType {
+     ALLOCATED, // buffer got allocated to be used immediately
+     RECYCLED,  // buffer pulled from free list to be reused
+     FREED,     // buffer put to free list for future recycling
+     RELEASED,  // buffer memory released
+     ALLOCATION_FAILED // buffer allocation failed
+   };
+   virtual ~IMpsAllocatorCallback() = default;
+   virtual void executeMPSAllocatorCallback(void* ptr, EventType event) = 0;
+ };
+
+ // MPS allocator will execute every registered callback when a block of memory is freed.
+ C10_DECLARE_REGISTRY(MPSAllocatorCallbacksRegistry, IMpsAllocatorCallback);
+ #define REGISTER_MPS_ALLOCATOR_CALLBACK(name, ...) \
+   C10_REGISTER_CLASS(MPSAllocatorCallbacksRegistry, name, __VA_ARGS__);
+
+ IMPSAllocator* getIMPSAllocator(bool sharedAllocator = false);
+
+ } // namespace at::mps
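The registry declared above is how outside code observes allocator events. A hedged sketch of a registration through the declared macro follows; the class name, its body, and the choice to register from inside at::mps are ours, and the snippet only shows the mechanics (the underlying C10 registry macro stringifies the key for lookup).

    #include <ATen/mps/MPSAllocatorInterface.h>
    #include <cstdio>

    namespace at::mps {

    // Hypothetical observer: logs whenever the MPS allocator puts a buffer back
    // on its free list. IMpsAllocatorCallback and the macro come from the header.
    class FreeLoggerCallback : public IMpsAllocatorCallback {
     public:
      void executeMPSAllocatorCallback(void* ptr, EventType event) override {
        if (event == EventType::FREED) {
          std::fprintf(stderr, "MPS buffer %p returned to the free list\n", ptr);
        }
      }
    };

    // Registers the callback under the key "free_logger".
    REGISTER_MPS_ALLOCATOR_CALLBACK(free_logger, FreeLoggerCallback);

    } // namespace at::mps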
venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSDevice.h ADDED
@@ -0,0 +1,85 @@
+ // Copyright © 2022 Apple Inc.
+
+ #pragma once
+ #include <c10/core/Allocator.h>
+ #include <c10/macros/Macros.h>
+ #include <c10/util/Exception.h>
+
+
+ #ifdef __OBJC__
+ #include <Foundation/Foundation.h>
+ #include <Metal/Metal.h>
+ #include <MetalPerformanceShaders/MetalPerformanceShaders.h>
+ typedef id<MTLDevice> MTLDevice_t;
+ typedef id<MTLLibrary> MTLLibrary_t;
+ typedef id<MTLComputePipelineState> MTLComputePipelineState_t;
+ typedef id<MTLLibrary> MTLLibrary_t;
+ #else
+ typedef void* MTLDevice;
+ typedef void* MTLDevice_t;
+ typedef void* MTLLibrary_t;
+ typedef void* MTLComputePipelineState_t;
+ typedef void* MTLLibrary_t;
+ #endif
+
+ using namespace std;
+
+ namespace at::mps {
+
+ // Helper enum to check if a MPSGraph op is supported in a given macOS version
+ enum class MacOSVersion : uint32_t {
+   MACOS_VER_13_0_PLUS = 0,
+   MACOS_VER_13_1_PLUS,
+   MACOS_VER_13_2_PLUS,
+   MACOS_VER_13_3_PLUS,
+   MACOS_VER_14_0_PLUS,
+ };
+
+ //-----------------------------------------------------------------
+ // MPSDevice
+ //
+ // MPSDevice is a singleton class that returns the default device
+ //-----------------------------------------------------------------
+
+ class TORCH_API MPSDevice {
+ public:
+   /**
+    * MPSDevice should not be cloneable.
+    */
+   MPSDevice(MPSDevice& other) = delete;
+   /**
+    * MPSDevice should not be assignable.
+    */
+   void operator=(const MPSDevice&) = delete;
+   /**
+    * Gets single instance of the Device.
+    */
+   static MPSDevice* getInstance();
+   /**
+    * Returns the single device.
+    */
+   MTLDevice_t device() {
+     return _mtl_device;
+   }
+   /**
+    * Returns whether running on Ventura or newer
+    */
+   bool isMacOS13Plus(MacOSVersion version) const;
+
+   MTLComputePipelineState_t metalIndexingPSO(const std::string &kernel);
+   MTLLibrary_t getMetalIndexingLibrary();
+
+   ~MPSDevice();
+
+ private:
+   static MPSDevice* _device;
+   MTLDevice_t _mtl_device;
+   MTLLibrary_t _mtl_indexing_library;
+   MPSDevice();
+ };
+
+ TORCH_API bool is_available();
+ TORCH_API bool is_macos_13_or_newer(MacOSVersion version = MacOSVersion::MACOS_VER_13_0_PLUS);
+ TORCH_API at::Allocator* GetMPSAllocator(bool useSharedAllocator = false);
+
+ } // namespace at::mps
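A short usage sketch of the free functions declared above; it assumes an ATen build with the MPS backend compiled in and only calls entry points visible in this header.

    #include <ATen/mps/MPSDevice.h>
    #include <cstdio>

    int main() {
      // is_available() reports whether a Metal device was found at runtime.
      if (!at::mps::is_available()) {
        std::puts("MPS backend not available");
        return 0;
      }
      // Feature gates for newer MPSGraph ops are expressed as MacOSVersion checks.
      const bool has_macos14 =
          at::mps::is_macos_13_or_newer(at::mps::MacOSVersion::MACOS_VER_14_0_PLUS);
      std::printf("macOS 14+ features: %s\n", has_macos14 ? "yes" : "no");

      // GetMPSAllocator() returns the private-pool allocator by default;
      // passing true selects the shared (unified-memory) pool.
      at::Allocator* alloc = at::mps::GetMPSAllocator(/*useSharedAllocator=*/false);
      std::printf("allocator: %p\n", static_cast<void*>(alloc));
      return 0;
    }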
venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSEvent.h ADDED
@@ -0,0 +1,100 @@
+ // Copyright © 2023 Apple Inc.
+
+ #pragma once
+
+ #include <ATen/mps/MPSStream.h>
+ #include <ctime>
+ #include <stack>
+
+ namespace at::mps {
+
+ // NOTE: don't create instances of this class directly.
+ // Use MPSEventPool to acquire instances of MPSEvent.
+ class MPSEvent {
+ public:
+   explicit MPSEvent(id_t ID, MPSStream* stream, bool enable_timing);
+   ~MPSEvent();
+
+   // records an event on the stream
+   void record(bool needsLock, bool syncEvent = false);
+   // makes all future work submitted to the stream wait for this event.
+   bool wait(bool needsLock, bool syncEvent = false);
+   // schedules a notifyListener callback for the event.
+   bool notify(bool needsLock, MTLSharedEventNotificationBlock block);
+   // checks if events are already signaled.
+   bool query() const;
+   // blocks the CPU thread until all the GPU work that were scheduled
+   // prior to recording this event are completed.
+   bool synchronize();
+   // resets this event with new parameters in case it gets reused from the event pool
+   void reset(MPSStream* stream, bool enable_timing);
+   // returns the unique ID of the event instance
+   id_t getID() const { return m_id; }
+   // returns the completion timestamp of the event
+   uint64_t getCompletionTime() const { return m_completion_time; }
+   // if already recorded, waits for cpu_sync_cv to be signaled
+   void waitForCpuSync();
+
+ private:
+   id_t m_id;
+   // enables measuring the completion time of the notifyListener of this event
+   bool m_enable_timing;
+   uint64_t m_signalCounter = 0;
+   MPSStream* m_stream = nullptr;
+   MTLSharedEvent_t m_event = nullptr;
+   MTLSharedEventListener* m_listener = nullptr;
+   // used to sync the events created on this Stream with CPU
+   std::mutex m_cpu_sync_mutex{};
+   std::condition_variable m_cpu_sync_cv{};
+   // CondVar predicate to sync the events created on this Stream with CPU
+   bool m_cpu_sync_completed = false;
+   // used to compute elapsed time
+   uint64_t m_completion_time = 0;
+
+   void recordLocked(bool syncEvent);
+   bool waitLocked(bool syncEvent);
+   bool notifyLocked(MTLSharedEventNotificationBlock block);
+   void notifyCpuSync();
+   static uint64_t getTime() {
+     return clock_gettime_nsec_np(CLOCK_MONOTONIC_RAW);
+   }
+ };
+
+ typedef std::unique_ptr<MPSEvent, std::function<void(MPSEvent*)>> MPSEventPtr;
+
+ class MPSEventPool {
+ public:
+   explicit MPSEventPool(MPSStream* default_stream);
+   ~MPSEventPool();
+
+   MPSEventPtr acquireEvent(bool enable_timing, MPSStream* stream);
+   void emptyCache();
+
+   // these are mainly used for MPSHooks and torch.mps.Event() bindings
+   id_t acquireEvent(bool enable_timing);
+   void releaseEvent(id_t event_id);
+   void recordEvent(id_t event_id, bool syncEvent);
+   void waitForEvent(id_t event_id, bool syncEvent);
+   void synchronizeEvent(id_t event_id);
+   bool queryEvent(id_t event_id);
+   // returns elapsed time between two recorded events in milliseconds
+   double elapsedTime(id_t start_event_id, id_t end_event_id);
+
+ private:
+   MPSStream* m_default_stream = nullptr;
+   std::recursive_mutex m_mutex;
+   std::stack<std::unique_ptr<MPSEvent>> m_pool{};
+   // dictionary to associate event IDs with event objects
+   // used to retain in-use events out of the pool
+   // for torch.mps.Event() bindings.
+   std::unordered_map<id_t, MPSEventPtr> m_in_use_events{};
+   uint64_t m_event_counter = 0;
+   std::function<void(MPSEvent*)> m_default_deleter;
+
+   MPSEvent* getInUseEvent(id_t event_id, bool locked = true);
+ };
+
+ // shared_ptr is used to get MPSEventPool destroyed after dependent instances
+ std::shared_ptr<MPSEventPool> getMPSEventPool();
+
+ } // namespace at::mps
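A sketch of how the pool's ID-based interface (the overloads used by the MPSHooks and torch.mps.Event bindings) fits together. It assumes it runs inside an ATen build where getMPSEventPool() and the default stream already exist, and the function name is ours.

    #include <ATen/mps/MPSEvent.h>

    // Times a stretch of GPU work with two pooled events. Purely illustrative;
    // the real callers of this interface are the MPSHooks/torch.mps.Event bindings.
    double time_region_ms() {
      auto pool = at::mps::getMPSEventPool();
      // enable_timing must be true for elapsedTime() to be meaningful.
      const auto start_id = pool->acquireEvent(/*enable_timing=*/true);
      const auto end_id   = pool->acquireEvent(/*enable_timing=*/true);

      pool->recordEvent(start_id, /*syncEvent=*/true);
      // ... enqueue MPS work on the default stream here ...
      pool->recordEvent(end_id, /*syncEvent=*/true);
      pool->synchronizeEvent(end_id);   // block the CPU until the GPU reached the event

      const double ms = pool->elapsedTime(start_id, end_id);
      pool->releaseEvent(start_id);     // hand both events back to the pool
      pool->releaseEvent(end_id);
      return ms;
    }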
venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSGeneratorImpl.h ADDED
@@ -0,0 +1,52 @@
+ // Copyright © 2022 Apple Inc.
+
+ #pragma once
+
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/PhiloxRNGEngine.h>
+ #include <c10/core/GeneratorImpl.h>
+ #include <c10/util/Optional.h>
+
+ namespace at {
+ namespace mps::detail {
+
+ static const uint32_t PHILOX_STATE_N = 7;
+ struct rng_data_pod {
+   std::array<uint32_t, PHILOX_STATE_N> state{1};
+   uint64_t seed = default_rng_seed_val;
+ };
+
+ TORCH_API const Generator& getDefaultMPSGenerator();
+ TORCH_API Generator createMPSGenerator(uint64_t seed_val = default_rng_seed_val);
+
+ } // namespace mps::detail
+
+ struct TORCH_API MPSGeneratorImpl : public c10::GeneratorImpl {
+   // Constructors
+   MPSGeneratorImpl(uint64_t seed_in = default_rng_seed_val);
+   ~MPSGeneratorImpl() override = default;
+
+   // MPSGeneratorImpl methods
+   std::shared_ptr<MPSGeneratorImpl> clone() const;
+   void set_current_seed(uint64_t seed) override;
+   void set_offset(uint64_t offset) override;
+   uint64_t get_offset() const override;
+   uint64_t current_seed() const override;
+   uint64_t seed() override;
+   void set_state(const c10::TensorImpl& new_state) override;
+   c10::intrusive_ptr<c10::TensorImpl> get_state() const override;
+   void update_philox_counters();
+
+   void set_engine(at::Philox4_32 engine) { engine_ = engine; };
+   at::Philox4_32 engine() { return engine_; };
+   uint32_t* state_data() { return data_.state.data(); }
+   static DeviceType device_type() { return DeviceType::MPS; };
+
+ private:
+   mps::detail::rng_data_pod data_;
+   at::Philox4_32 engine_;
+
+   MPSGeneratorImpl* clone_impl() const override;
+ };
+
+ } // namespace at
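A brief sketch of the two factory functions declared above, using only the generic at::Generator accessors; the function name is ours and this is illustrative, not the bindings' code.

    #include <ATen/mps/MPSGeneratorImpl.h>
    #include <cstdio>

    // Reseed the default MPS generator and build a second, independent one.
    // set_current_seed()/current_seed() are the standard at::Generator accessors
    // that forward to MPSGeneratorImpl; production code typically also holds
    // gen.mutex() while mutating generator state.
    void seed_mps_rng(uint64_t seed) {
      at::Generator def = at::mps::detail::getDefaultMPSGenerator();
      def.set_current_seed(seed);

      at::Generator worker = at::mps::detail::createMPSGenerator(seed + 1);
      std::printf("default=%llu worker=%llu\n",
                  (unsigned long long)def.current_seed(),
                  (unsigned long long)worker.current_seed());
    }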
venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSGuardImpl.h ADDED
@@ -0,0 +1,174 @@
+ // Copyright © 2022 Apple Inc.
+
+ #pragma once
+ #include <c10/core/impl/DeviceGuardImplInterface.h>
+ #include <c10/macros/Macros.h>
+ #include <c10/util/Exception.h>
+ #include <ATen/Context.h>
+ #include <ATen/mps/MPSStream.h>
+ #include <ATen/mps/MPSEvent.h>
+
+ #ifdef __OBJC__
+ #include <Foundation/Foundation.h>
+ #include <Metal/Metal.h>
+ #include <MetalPerformanceShaders/MetalPerformanceShaders.h>
+ #endif
+
+ #include <ATen/Tensor.h>
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorImpl.h>
+ #include <sys/_types/_size_t.h>
+ #include <memory>
+ #include <c10/core/UndefinedTensorImpl.h>
+ #include <c10/util/intrusive_ptr.h>
+
+
+ namespace at::mps {
+
+ typedef MPSEvent* mpsEvent_t;
+
+ // TODO: Move the MPSGuardImpl to inherit from NoOpDeviceGuardImpl
+ // https://github.com/pytorch/pytorch/issues/77170
+ struct TORCH_API MPSGuardImpl final : public c10::impl::DeviceGuardImplInterface {
+   static constexpr c10::DeviceType static_type = c10::DeviceType::MPS;
+
+   // constructor
+   MPSGuardImpl() {}
+   explicit MPSGuardImpl(c10::DeviceType t) {
+     TORCH_INTERNAL_ASSERT(t == c10::DeviceType::MPS);
+   }
+
+   // returns the type
+   c10::DeviceType type() const override {
+     return c10::DeviceType::MPS;
+   }
+
+   Device exchangeDevice(Device d) const override {
+     return Device(c10::DeviceType::MPS, 0);
+   }
+
+   Device getDevice() const override {
+     return Device(c10::DeviceType::MPS, 0);
+   }
+
+   c10::optional<Device> uncheckedGetDevice() const noexcept {
+     return Device(c10::DeviceType::MPS, 0);
+   }
+
+   void setDevice(Device d) const override {
+     TORCH_INTERNAL_ASSERT(d.is_mps());
+   }
+
+   void uncheckedSetDevice(Device d) const noexcept override {
+     // TODO: Currently setting only device 0
+   }
+
+   Stream getStream(Device d) const noexcept override {
+     return Stream(Stream::DEFAULT, Device(c10::DeviceType::MPS, 0));
+   }
+
+   Stream getDefaultStream(Device d) const override {
+     return Stream(Stream::DEFAULT, Device(c10::DeviceType::MPS, 0));
+   }
+
+   // NB: These do NOT set the current device
+   Stream exchangeStream(Stream s) const noexcept override {
+     return Stream(Stream::DEFAULT, Device(c10::DeviceType::MPS, 0));
+   }
+   DeviceIndex deviceCount() const noexcept override {
+     if (at::hasMPS()) {
+       // TODO: extend it for multi-device case
+       return 1;
+     } else {
+       return 0;
+     }
+   }
+
+   // Event-related functions
+   void createEvent(
+     mpsEvent_t* event,
+     const EventFlag flag) const;
+
+   void destroyEvent(
+     void* event,
+     const DeviceIndex device_index) const noexcept override;
+
+   void record(
+     void** event,
+     const Stream& stream,
+     const DeviceIndex device_index,
+     const EventFlag flag) const override;
+
+   void block(
+     void* event,
+     const Stream& stream) const override;
+
+   bool queryEvent(void* event) const override;
+
+ };
+
+ /// A variant of OptionalDeviceGuard that is specialized for MPS.
+ struct OptionalMPSGuard {
+   explicit OptionalMPSGuard() : guard_() {}
+
+   explicit OptionalMPSGuard(c10::optional<Device> device_opt)
+     : guard_(device_opt) {}
+
+   /// Set the current MPS device to the passed device index, if it is not
+   /// nullopt
+   explicit OptionalMPSGuard(c10::optional<DeviceIndex> device_index_opt)
+     : guard_(device_index_opt) {}
+
+   // Copy is not allowed
+   OptionalMPSGuard(const OptionalMPSGuard&) = delete;
+   OptionalMPSGuard& operator=(const OptionalMPSGuard&) = delete;
+   OptionalMPSGuard(OptionalMPSGuard&& other) = delete;
+   OptionalMPSGuard& operator=(OptionalMPSGuard&& other) = delete;
+
+   /// Sets the MPS device to the given device, initializing the guard if it
+   /// is not already initialized. Errors if the given device is not a MPS
+   /// device.
+   void set_device(Device device) {
+     guard_.set_device(device);
+   }
+
+   /// Sets the MPS device to the given device, initializing the guard if it is
+   /// not already initialized. Errors if the given device is not a MPS device.
+   void reset_device(Device device) {
+     guard_.reset_device(device);
+   }
+
+   /// Sets the MPS device to the given device index, initializing the guard if
+   /// it is not already initialized.
+   void set_index(DeviceIndex device_index) {
+     guard_.set_index(device_index);
+   }
+
+   /// Returns the device that was set immediately prior to initialization of the
+   /// guard, or nullopt if the guard is uninitialized.
+   c10::optional<Device> original_device() const {
+     return guard_.original_device();
+   }
+
+   /// Returns the most recent device that was set using this device guard,
+   /// either from construction, or via set_device, if the guard is initialized,
+   /// or nullopt if the guard is uninitialized.
+   c10::optional<Device> current_device() const {
+     return guard_.current_device();
+   }
+
+   /// Restore the original MPS device, resetting this guard to uninitialized
+   /// state.
+   void reset() {
+     guard_.reset();
+   }
+
+ private:
+   c10::impl::InlineOptionalDeviceGuard<MPSGuardImpl> guard_;
+ };
+
+
+ C10_REGISTER_GUARD_IMPL(MPS, MPSGuardImpl);
+
+ } // namespace at::mps
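A minimal sketch of how OptionalMPSGuard is typically used, scoping a region to a tensor's device only when the tensor is defined; the function name is ours and the pattern mirrors the other OptionalDeviceGuard variants in ATen.

    #include <ATen/mps/MPSGuardImpl.h>

    // Guard a region to the MPS device of a (possibly undefined) tensor.
    void run_on_tensor_device(const at::Tensor& t) {
      at::mps::OptionalMPSGuard guard;
      if (t.defined()) {
        guard.set_device(t.device());  // errors if the tensor is not on MPS
      }
      // ... launch MPS work here; the guard resets on scope exit ...
    }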
venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSHooks.h ADDED
@@ -0,0 +1,57 @@
+ // Copyright © 2022 Apple Inc.
+
+ #pragma once
+
+ #include <ATen/detail/MPSHooksInterface.h>
+ #include <ATen/Generator.h>
+ #include <ATen/mps/MPSEvent.h>
+ #include <c10/util/Optional.h>
+
+ namespace at::mps {
+
+ // The real implementation of MPSHooksInterface
+ struct MPSHooks : public at::MPSHooksInterface {
+   MPSHooks(at::MPSHooksArgs) {}
+   void initMPS() const override;
+
+   // MPSDevice interface
+   bool hasMPS() const override;
+   bool isOnMacOSorNewer(unsigned major, unsigned minor) const override;
+
+   // MPSGeneratorImpl interface
+   const Generator& getDefaultMPSGenerator() const override;
+
+   // MPSStream interface
+   void deviceSynchronize() const override;
+   void commitStream() const override;
+   void* getCommandBuffer() const override;
+   void* getDispatchQueue() const override;
+
+   // MPSAllocator interface
+   Allocator* getMPSDeviceAllocator() const override;
+   void emptyCache() const override;
+   size_t getCurrentAllocatedMemory() const override;
+   size_t getDriverAllocatedMemory() const override;
+   void setMemoryFraction(double ratio) const override;
+
+   // MPSProfiler interface
+   void profilerStartTrace(const std::string& mode, bool waitUntilCompleted) const override;
+   void profilerStopTrace() const override;
+
+   // MPSEvent interface
+   uint32_t acquireEvent(bool enable_timing) const override;
+   void releaseEvent(uint32_t event_id) const override;
+   void recordEvent(uint32_t event_id) const override;
+   void waitForEvent(uint32_t event_id) const override;
+   void synchronizeEvent(uint32_t event_id) const override;
+   bool queryEvent(uint32_t event_id) const override;
+   double elapsedTimeOfEvents(uint32_t start_event_id, uint32_t end_event_id) const override;
+
+   // Compatibility with Accelerator API
+   bool hasPrimaryContext(DeviceIndex device_index) const override {
+     // When MPS is available, it is always in use for the one device.
+     return true;
+   }
+ };
+
+ } // namespace at::mps
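The hooks struct is the bridge that lets device-agnostic ATen code reach the MPS backend without linking against Metal headers. A hedged sketch of that indirection follows; treat at::detail::getMPSHooks() as an assumption about the interface-side accessor, and the function name as ours.

    #include <ATen/detail/MPSHooksInterface.h>
    #include <cstdio>

    // Query the backend only through the hooks interface, so the caller has no
    // Objective-C or Metal dependency. Assumes getMPSHooks() returns the
    // registered MPSHooks instance when the MPS backend is built in.
    void report_mps_memory() {
      const auto& hooks = at::detail::getMPSHooks();
      if (!hooks.hasMPS()) {
        std::puts("MPS not available");
        return;
      }
      std::printf("current=%zu bytes, driver=%zu bytes\n",
                  hooks.getCurrentAllocatedMemory(),
                  hooks.getDriverAllocatedMemory());
    }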
venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSProfiler.h ADDED
@@ -0,0 +1,393 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright © 2022 Apple Inc.
2
+
3
+ #pragma once
4
+
5
+ #include <ATen/Tensor.h>
6
+ #include <ATen/mps/MPSStream.h>
7
+ #include <ATen/mps/MPSAllocatorInterface.h>
8
+
9
+ #include <os/signpost.h>
10
+ #include <os/log.h>
11
+
12
+ #include <sstream>
13
+ #include <string>
14
+ #include <atomic>
15
+ #include <unordered_map>
16
+ #include <utility>
17
+ #include <ctime>
18
+
19
+ namespace at::mps {
20
+
21
+ namespace Profiler {
22
+
23
+ struct BaseInfo {
24
+ // profiling info types
25
+ enum class Type {
26
+ GRAPH,
27
+ KERNEL,
28
+ COPY,
29
+ CPU_FALLBACK,
30
+ };
31
+
32
+ BaseInfo(Type infoType, uint64_t Id, const uintptr_t Handle) :
33
+ type(infoType), profileId(Id), handle(Handle) { }
34
+ virtual ~BaseInfo() = default;
35
+
36
+ // type of profiling info
37
+ Type type;
38
+ // unique profile ID for execution instances of operations or copies
39
+ uint64_t profileId;
40
+ // ID generated by os_signpost
41
+ // since it's possible to use event and interval-based signposts at the
42
+ // same time, we need separate IDs for each.
43
+ os_signpost_id_t eventSignpostId = 0, intervalSignpostId = 0;
44
+ // accumulated GPU time in ms (obtained from CompletionHandler's "GPUEndTime - GPUStartTime")
45
+ std::atomic<double> totalGpuTime{0.0};
46
+ // accumulated Scheduling time in ms (obtained from CompletionHandler's "KernelEndTime - KernelStartTime")
47
+ std::atomic<double> totalSchedulingTime{0.0};
48
+ // indicates if the operation or copy execution has completed
49
+ std::atomic_bool completed{false};
50
+ // handle used to identify the profile info's instance (usually the pointer)
51
+ const uintptr_t handle;
52
+
53
+ virtual const std::string toString(double gpuTime = 0, double schedulingTime = 0) const;
54
+ // builds a string for a tensor (format: Device:ScalarType[tensor.sizes()])
55
+ static std::string buildTensorString(const Tensor& tensor, bool includeBufferId = false) {
56
+ if (tensor.defined()) {
57
+ std::stringstream tensorStr;
58
+ auto deviceType = tensor.device().type();
59
+ tensorStr << c10::DeviceTypeName(deviceType);
60
+ // see comments for INCLUDE_BUFFER_ID
61
+ if (includeBufferId && deviceType == at::kMPS) {
62
+ id<MTLBuffer> buffer = __builtin_bit_cast(id<MTLBuffer>, tensor.storage().data());
63
+ tensorStr << "(buf#" << (getIMPSAllocator()->getBufferId(buffer))
64
+ << ":" << buffer.retainCount << ")";
65
+ }
66
+ tensorStr << ":"
67
+ << tensor.scalar_type() << tensor.sizes();
68
+ return tensorStr.str();
69
+ } else {
70
+ return "undefined";
71
+ }
72
+ }
73
+ static uint64_t getTime() {
74
+ return clock_gettime_nsec_np(CLOCK_MONOTONIC_RAW);
75
+ }
76
+ };
77
+
78
+ struct OperationInfo : BaseInfo {
79
+ OperationInfo(const void* Handle, bool IsGraph, uint64_t Id, const std::string& StrKey) :
80
+ BaseInfo(IsGraph ? Type::GRAPH : Type::KERNEL, Id, uintptr_t(Handle)), strKey(StrKey) { }
81
+
82
+ uint64_t runCount = 0;
83
+ std::string strKey;
84
+
85
+ const std::string toString(double gpuTime = 0, double schedulingTime = 0) const override;
86
+
87
+ // builds a string for a kernel
88
+ static std::string buildKernelString(const std::string& kernelName,
89
+ const TensorList& tensors,
90
+ bool includeBufferId = false) {
91
+ std::stringstream kernelStr;
92
+ kernelStr << kernelName;
93
+ for (const Tensor& tensor: tensors) {
94
+ kernelStr << ":" << BaseInfo::buildTensorString(tensor, includeBufferId);
95
+ }
96
+ return kernelStr.str();
97
+ }
98
+ };
99
+
100
+ struct CpuFbInfo : BaseInfo {
101
+ CpuFbInfo(uint64_t Id, const std::string& OpName) :
102
+ BaseInfo(Type::CPU_FALLBACK, Id, 0), opName(OpName) { }
103
+
104
+ uint64_t runCount = 0;
105
+ // the current and total overhead of copies in bytes required to convert the Op's
106
+ // input tensors from MPS to CPU and then output from CPU back to MPS
107
+ size_t currentCopyOverhead = 0;
108
+ size_t totalCopyOverhead = 0;
109
+ std::string opName;
110
+ std::string strKey;
111
+ uint64_t startTime = 0;
112
+
113
+ const std::string toString(double gpuTime = 0, double schedulingTime = 0) const override;
114
+
115
+ void updateCopyOverhead(const TensorList& tensors) {
116
+ currentCopyOverhead = 0;
117
+ for (const Tensor& tensor: tensors) {
118
+ if (tensor.defined()) {
119
+ currentCopyOverhead += tensor.nbytes();
120
+ }
121
+ }
122
+ totalCopyOverhead += currentCopyOverhead;
123
+ }
124
+ };
125
+
126
+ struct CopyInfo : BaseInfo {
127
+ enum class Kind {
128
+ MPS_TO_MPS,
129
+ MPS_TO_CPU,
130
+ CPU_TO_MPS,
131
+ };
132
+
133
+ CopyInfo(const void* Handle, size_t Length, uint64_t Id, bool IsNonBlocking, bool UsesBlitter) :
134
+ BaseInfo(Type::COPY, Id, uintptr_t(Handle)), kind(Kind::MPS_TO_MPS),
135
+ length(Length), isNonBlocking(IsNonBlocking), usesBlitter(UsesBlitter) { }
136
+
137
+ Kind kind;
138
+ size_t length;
139
+ bool isNonBlocking;
140
+ bool usesBlitter;
141
+ std::string srcStrKey;
142
+ std::string dstStrKey;
143
+ // for copies that don't use blitters, we measure CPU time
144
+ uint64_t startTime = 0;
145
+
146
+ const std::string toString(double gpuTime = 0, double schedulingTime = 0) const override;
147
+
148
+ static std::string buildTensorString(const void* buffer, const OptionalTensorRef tensor, bool includeBufferId = false);
149
+
150
+ static bool isStorageOnMPS(const void* buffer, const OptionalTensorRef tensor) {
151
+ if (tensor.has_value()) {
152
+ return tensor->device().type() == at::kMPS;
153
+ }
154
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(buffer);
155
+ // getUnalignedBufferSize() returns -1 if input buffer is not on MPS device
156
+ return getIMPSAllocator()->getUnalignedBufferSize(buffer) >= 0;
157
+ }
158
+
159
+ static Kind getCopyKind(const void* srcBuffer, const void* dstBuffer,
160
+ const OptionalTensorRef srcTensor, const OptionalTensorRef dstTensor) {
161
+ const bool isSrcOnMPS = isStorageOnMPS(srcBuffer, srcTensor);
162
+ const bool isDstOnMPS = isStorageOnMPS(dstBuffer, dstTensor);
163
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(isSrcOnMPS || isDstOnMPS);
164
+ if (isSrcOnMPS && !isDstOnMPS) {
165
+ return Kind::MPS_TO_CPU;
166
+ } else if (!isSrcOnMPS && isDstOnMPS) {
167
+ return Kind::CPU_TO_MPS;
168
+ }
169
+ return Kind::MPS_TO_MPS;
170
+ }
171
+ };
172
+
173
+ struct CopyStat : CopyInfo {
174
+ explicit CopyStat(std::string CopyKindStr) :
175
+ CopyInfo(nullptr, 0, 0, false, false), kindStr(std::move(CopyKindStr)) {}
176
+ // total number of copies
177
+ size_t totalCount = 0;
178
+ // number of Scalar copies (i.e., less than sizeof(int64))
179
+ size_t scalarsCount = 0;
180
+ // number of blocking copies (i.e., require syncing to GPU)
181
+ size_t blockingCount = 0;
182
+ // number of copies that used memcpy(), instead of Metal Blit Encoder
183
+ size_t memcpyCount = 0;
184
+ // accumulated GPU time in ms for the scalar copies
185
+ std::atomic<double> scalarsGpuTime{0.0};
186
+ // copy kind in string type
187
+ std::string kindStr;
188
+ };
189
+
190
+ class MPSProfiler {
191
+ public:
192
+ // lower 16 bits used for profiler options
193
+ enum ProfileOptions : uint32_t {
194
+ OPTIONS_NONE = 0,
195
+ // ALL_* means, all signpost types (RUN_OPERATION|BLIT_COPY|CPU_FALLBACK, etc.)
196
+ // (used for convenience to not compute bit flags by OR-ing manually)
197
+ // trace all signpost types using events
198
+ ALL_SIGNPOST_EVENTS = (1 << 0),
199
+ // trace all signpost types using intervals
200
+ ALL_SIGNPOST_INTERVALS = (1 << 1),
201
+ // always wait for command buffer to finish executing after each commit
202
+ WAIT_UNTIL_COMPLETED = (1 << 2),
203
+ // for interval-based signposts, include the scheduling portion of
204
+ // Graph/Kernel/Copy executions as well.
205
+ // if flag is disable, only "GPU run time" is included in interval,
206
+ // and not schedule time.
207
+ INCLUDE_SCHEDULE_INTERVAL = (1 << 3),
208
+
209
+ // use these if you need to trace signposts types individually (rarely required)
210
+ // trace signpost using intervals
211
+ USE_INTERVALS = (1 << 4),
212
+ // trace signpost by emitting events
213
+ USE_EVENTS = (1 << 5),
214
+ // used for sanity check (Change this when new option added)
215
+ OPTIONS_COUNT = (USE_EVENTS << 1) - 1,
216
+ };
217
+
218
+ // when adding new types, #define the type string in MPSProfiler.mm as well.
219
+ // upper 16 bits used for event types
220
+ enum SignpostTypes : uint32_t {
221
+ SIGNPOST_NONE = 0,
222
+ // trace signposts for PyTorch operation executions
223
+ RUN_OPERATION = (1 << 16),
224
+ // trace signposts for blitter copies
225
+ BLIT_COPY = (1 << 17),
226
+ // trace signposts for ops that fall back on CPU
227
+ CPU_FALLBACK = (1 << 18),
228
+ // used for sanity check (Change this when new type added)
229
+ SIGNPOST_COUNT = (CPU_FALLBACK << 1) - 1,
230
+ };
231
+
232
+ enum LogOptions : uint32_t {
233
+ LOG_NONE = 0,
234
+
235
+ // Info logging options during execution
236
+ // -------------------------------------
237
+ // prints operation info (id/key/run_count) during execution
238
+ OPERATION_INFO = (1 << 0),
239
+ // prints copy info (src/dst tensors/buffers, size, etc.) during execution
240
+ COPY_INFO = (1 << 1),
241
+ // prints CPU Fallback info (id/runCount/opName/copyOverhead) during execution
242
+ CPU_FALLBACK_INFO = (1 << 2),
243
+
244
+ // Profiling Statistics logging options when process terminates
245
+ // ------------------------------------------------------------
246
+ // prints all stats (OPERATION_STATS, COPY_STATS, CPU_FALLBACK_STATS) before process terminates
247
+ // this is convenient to not combine following stats bit flags manually
248
+ ALL_STATS = (1 << 3),
249
+ // prints operation stats (GPU times, run count, etc.) before process terminates
250
+ OPERATION_STATS = (1 << 4),
251
+ // prints copies stats (GPU times, copy kinds, sizes, etc.) before process terminates
252
+ COPY_STATS = (1 << 5),
253
+ // prints CPU Fallback stats (CPU times, run times, size of MPS<->CPU copies
254
+ // for tensors, etc.) before process terminates
255
+ CPU_FALLBACK_STATS = (1 << 6),
256
+
257
+ // Metadata format options when logging the info
258
+ // ---------------------------------------------
259
+ // if enabled, includes GPU run time in metadata (i.e., GPUEndTime-GPUStartTime
260
+ // from Metal Command Buffers) (e.g., [GPU=0.324 ms])
261
+ INCLUDE_GPU_TIME = (1 << 7),
262
+ // if enabled, includes GPU scheduling time in metadata separately
263
+ // (i.e., KernelEndTime-KernelStartTime from Metal Command Buffers)
264
+ // e.g., [GPU=0.324 ms, KRNL=0.036 ms]
265
+ INCLUDE_KERNEL_TIME = (1 << 8),
266
+ // if enabled, includes the unique buffer ID in metadata for the storage
267
+ // of a tensor that was allocated on MPSAllocator. This is useful (along with
268
+ // the EV "PYTORCH_DEBUG_MPS_ALLOCATOR") to identify buffers that are involved
269
+ // with various operations.
270
+ INCLUDE_BUFFER_ID = (1 << 9),
271
+
272
+ // used for sanity check (Change this when new option added)
273
+ LOG_COUNT = (INCLUDE_BUFFER_ID << 1) - 1,
274
+ };
275
+
276
+ explicit MPSProfiler();
277
+ ~MPSProfiler();
278
+
279
+ // the handle is either "MPSGraph*" or "id<MTLComputePipelineState>" for Metal Kernels
280
+ // the beginProfile*() functions return a profileId which is unique per graph/kernel/copy
281
+ uint64_t beginProfileKernel(const void* handle, const std::string& strKey, bool isGraph);
282
+ uint64_t beginProfileKernel(const void* handle, const std::string& kernelName, const TensorList& tensors);
283
+ uint64_t beginProfileCopy(const void* srcBuffer, const void* dstBuffer,
284
+ const OptionalTensorRef srcTensor,
285
+ const OptionalTensorRef dstTensor,
286
+ size_t length, bool isNonBlocking, bool usesBlitter = true);
287
+ uint64_t beginProfileCPUFallback(const std::string& opName, const TensorList& tensors);
288
+ void beginProfileGPUInterval(const void* handle);
289
+
290
+ void endProfileCopy(uint64_t profileId, SyncType syncType);
291
+ void endProfileKernel(const void* handle, SyncType syncType = SyncType::NONE);
292
+ void endProfileCPUFallback(const std::string& opName);
293
+
294
+ // these are used to hook into Python bindings for torch.mps.profiler module.
295
+ // this enables generating OS Signpost traces from MPSProfiler on-demand
296
+ // during runtime (instead of environment variables).
297
+ // The "mode" could be either "interval", "event", or both "interval,event"
298
+ // for interval-based and/or event-based signpost tracing.
299
+ void StartTrace(const string& mode, bool waitUntilCompleted);
300
+ void StopTrace();
301
+
302
+ // convenience functions to indicate whether signpost tracing or
303
+ // logging are enabled for the SignpostTypes
304
+ bool isOperationProfilingEnabled() const {
305
+ return (m_signpost_types & SignpostTypes::RUN_OPERATION) ||
306
+ (m_log_options & (LogOptions::OPERATION_INFO | LogOptions::OPERATION_STATS));
307
+ }
308
+ bool isCopyProfilingEnabled() const {
309
+ return (m_signpost_types & SignpostTypes::BLIT_COPY) ||
310
+ (m_log_options & (LogOptions::COPY_INFO | LogOptions::COPY_STATS));
311
+ }
312
+ bool isCPUFallbackProfilingEnabled() const {
313
+ return (m_signpost_types & SignpostTypes::CPU_FALLBACK) ||
314
+ (m_log_options & (LogOptions::CPU_FALLBACK_INFO | LogOptions::CPU_FALLBACK_STATS));
315
+ }
316
+ bool isSignpostTracingEnabled() const {
317
+ return (m_signpost_types != SignpostTypes::SIGNPOST_NONE);
318
+ }
319
+
320
+ private:
321
+ // indicates which signpost types are enabled and traced by the MPS profiler.
322
+ uint32_t m_signpost_types = 0;
323
+ uint32_t m_profile_options = 0;
324
+ uint32_t m_log_options = 0;
325
+ uint64_t m_kernel_counter = 0;
326
+ uint64_t m_graph_counter = 0;
327
+ uint64_t m_cpu_fb_counter = 0;
328
+ uint64_t m_copy_counter = 0;
329
+ // technically, it's possible to trace both events and intervals at the same time
330
+ // so we use separate os_log categories for them
331
+ os_log_t m_os_log_events;
332
+ os_log_t m_os_log_intervals;
333
+ // stats logging could run either from destructor or signal handler
334
+ // so this is used to check if logging has already started.
335
+ std::atomic_bool hasLoggedStats{false};
336
+ // indicates there are pending completionHandler callbacks that haven't been called yet.
337
+ std::atomic_bool hasPendingCompletionHandlers{false};
338
+ // used to capture sigint signal to log profiling stats
339
+ static struct sigaction currentSigint, previousSigint;
340
+
341
+ // We use the following lists for two reasons:
342
+ // 1- for interval-based signposts, the "begin" point won't be in the same function
343
+ // as the "end" point where we need to be able to retrieve signpost's info
344
+ // 2- operation info may need to be logged when the process ends (via LogOptions::OPERATION_INFO).
345
+
346
+ // the pointer key for this map is either "MPSGraph*" or "id<MTLComputePipelineState>" for Metal Kernels
347
+ // this list is retained and could be logged along with aggregate profiling numbers when the process ends.
348
+ std::unordered_map<uintptr_t, std::unique_ptr<OperationInfo>> m_op_info_list{};
349
+ // the string key for this map is the op name that we fall back to execute on CPU
350
+ // this list is retained and could be logged along with aggregate profiling numbers when the process ends.
351
+ std::unordered_map<std::string, std::unique_ptr<CpuFbInfo>> m_cpu_fb_info_list{};
352
+ // this list contains the info for copies, and its key is the unique profileId
353
+ // which is generated from m_copy_counter
354
+ // The copyInfo list is not retained.
355
+ std::unordered_map<uint64_t, std::unique_ptr<CopyInfo>> m_copy_info_list{};
356
+ // a short list that contains copy stats
357
+ std::unordered_map<CopyInfo::Kind, std::unique_ptr<CopyStat>> m_copy_stat_list{};
358
+
359
+ void initialize();
360
+ void beginProfileExecution(BaseInfo& info, bool cpuExecution = false);
361
+ void endProfileExecution(BaseInfo& info, os_signpost_id_t event_signpost_id,
362
+ os_signpost_id_t interval_signpost_id,
363
+ double gpuTime, double schedulingTime);
364
+ void addProfilerScheduledHandler(BaseInfo& info);
365
+ void addProfilerCompletedHandler(BaseInfo& info, SyncType syncType);
366
+ void emitSignpostEvent(SignpostTypes signpost_type, os_signpost_id_t signpost_id,
367
+ const std::string& msg) const;
368
+ void beginSignpostInterval(SignpostTypes signpost_type, os_signpost_id_t signpost_id,
369
+ const std::string& msg) const;
370
+ void endSignpostInterval(SignpostTypes signpost_type, os_signpost_id_t signpost_id) const;
371
+
372
+ void updateCopyStats(const CopyInfo& copyInfo, double gpuTime, double schedulingTime);
373
+ // returns true if logging the profiling info "during the execution" is enabled
374
+ bool isProfileInfoLoggingEnabled(BaseInfo::Type infoType, bool isExecutionEnded);
375
+ // logs all the profiling stats that are enabled
376
+ void logProfilingStats();
377
+ // logs kernel profiling stats when the process ends.
378
+ void logOperationsProfilingStats(std::FILE* f) const;
379
+ // logs CPU Fallback profiling stats when the process ends.
380
+ void logCPUFallbackProfilingStats(std::FILE* f) const;
381
+ // logs copy profiling stats when the process ends.
382
+ void logCopyProfilingStats(std::FILE* f) const;
383
+
384
+ os_signpost_id_t generateSignpostId(os_signpost_type_t signpostType, const void* ptr = nullptr);
385
+ static SignpostTypes getSignpostType(BaseInfo::Type infoType);
386
+ static void handleIntSignal(int signal);
387
+ };
388
+
389
+ } // namespace Profiler
390
+
391
+ Profiler::MPSProfiler& getMPSProfiler();
392
+
393
+ } // namespace at::mps
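As an editor's illustration of how this header is meant to be used, here is a minimal sketch that exercises only the API declared above (getMPSProfiler, StartTrace/StopTrace, and the is*ProfilingEnabled queries). The helper name and the elided workload are assumptions, and the code would only compile as Objective-C++ inside a PyTorch build with the MPS backend enabled.

// Hedged sketch -- drives the profiler declared in MPSProfiler.h above.
#include <ATen/mps/MPSProfiler.h>

void profileSomeMpsWork() {  // hypothetical helper, not part of the header
  auto& profiler = at::mps::getMPSProfiler();

  // Per the comment on StartTrace(), "interval,event" requests both
  // interval-based and event-based signposts; `true` asks the profiler to
  // wait until each command buffer completes.
  profiler.StartTrace("interval,event", /*waitUntilCompleted=*/true);

  // ... run MPS graphs/kernels here (omitted) ...

  if (profiler.isOperationProfilingEnabled()) {
    // operation signposts and/or operation logging are active for this run
  }

  profiler.StopTrace();
}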
venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSStream.h ADDED
@@ -0,0 +1,133 @@
1
+ // Copyright © 2022 Apple Inc.
2
+
3
+ #pragma once
4
+
5
+ #include <cstdint>
6
+ #include <utility>
7
+
8
+ #include <c10/core/DeviceGuard.h>
9
+ #include <c10/util/Exception.h>
10
+ #include <c10/core/Stream.h>
11
+ #include <ATen/mps/MPSDevice.h>
12
+
13
+ #ifdef __OBJC__
14
+ #include <Foundation/Foundation.h>
15
+ #include <Metal/Metal.h>
16
+ #include <MetalPerformanceShaders/MetalPerformanceShaders.h>
17
+ #include <MetalPerformanceShadersGraph/MetalPerformanceShadersGraph.h>
18
+ typedef id<MTLCommandQueue> MTLCommandQueue_t;
19
+ typedef id<MTLCommandBuffer> MTLCommandBuffer_t;
20
+ typedef id<MTLComputeCommandEncoder> MTLComputeCommandEncoder_t;
21
+ typedef id<MTLSharedEvent> MTLSharedEvent_t;
22
+ typedef id<MTLDevice> MTLDevice_t;
23
+ #else
24
+ typedef void* MTLCommandQueue_t;
25
+ typedef void* MTLCommandQueue;
26
+ typedef void* MTLCommandBuffer_t;
27
+ typedef void* MTLCommandBuffer;
28
+ typedef void* MTLComputeCommandEncoder_t;
29
+ typedef void* MTLSharedEvent_t;
30
+ typedef void* dispatch_queue_t;
31
+ typedef void* MTLDevice_t;
32
+ #define nil NULL;
33
+ #endif
34
+
35
+
36
+ namespace at::mps {
37
+
38
+ //-----------------------------------------------------------------
39
+ // MPSStream
40
+ //-----------------------------------------------------------------
41
+
42
+ enum class SyncType {
43
+ NONE, // no commit to command buffer
44
+ COMMIT, // commit and flush the command buffer
45
+ COMMIT_AND_WAIT, // flush and wait for command buffer execution to finish
46
+ COMMIT_AND_CONTINUE,// commit and continue with a new underlying command buffer
47
+ COMMIT_ADAPTIVE, // commit adaptively based on available memory
48
+ };
49
+
50
+ class TORCH_API MPSStream
51
+ {
52
+ public:
53
+ enum Unchecked { UNCHECKED };
54
+
55
+ /// Construct a MPSStream from a Stream. This construction is checked,
56
+ /// and will raise an error if the Stream is not, in fact, a MPS stream.
57
+ explicit MPSStream(Stream stream);
58
+
59
+ ~MPSStream();
60
+ MTLCommandQueue_t commandQueue() const { return _commandQueue; };
61
+ dispatch_queue_t queue() const { return _serialQueue; }
62
+
63
+ MPSCommandBuffer* commandBuffer();
64
+ MTLComputeCommandEncoder_t commandEncoder();
65
+ void endKernelCoalescing();
66
+ void synchronize(SyncType syncType);
67
+ void fill(id<MTLBuffer> buffer, uint8_t value, size_t length, size_t offset, SyncType syncType = SyncType::NONE);
68
+ void copy(id<MTLBuffer> srcBuffer, id<MTLBuffer> dstBuffer,
69
+ size_t length, size_t srcOffset, size_t dstOffset,
70
+ uint64_t profileId, SyncType syncType = SyncType::NONE);
71
+ void copy_and_sync(id<MTLBuffer> srcBuffer, id<MTLBuffer> dstBuffer,
72
+ size_t length, size_t srcOffset, size_t dstOffset,
73
+ bool non_blocking, uint64_t profileId);
74
+ void executeMPSGraph(MPSGraph* mpsGraph, NSDictionary* feeds, NSDictionary* results, SyncType syncType = SyncType::NONE);
75
+ void addCompletedHandler(MTLCommandBufferHandler block);
76
+
77
+ /// Get the MPS device index that this stream is associated with.
78
+ c10::DeviceIndex device_index() const { return _stream.device_index(); }
79
+
80
+ MTLCommandQueue_t stream() const { return _commandQueue; };
81
+
82
+ MTLDevice_t device() const { return [_commandQueue device];}
83
+
84
+ /// Explicit conversion to Stream.
85
+ Stream unwrap() const { return _stream; }
86
+
87
+ private:
88
+ Stream _stream;
89
+ MTLCommandQueue_t _commandQueue = nil;
90
+ MPSCommandBuffer* _commandBuffer = nil;
91
+ MPSCommandBuffer* _prevCommandBuffer = nil;
92
+ MTLComputeCommandEncoder_t _commandEncoder = nil;
93
+ MPSGraphExecutionDescriptor *_executionDescriptor = nil;
94
+ MPSGraphCompilationDescriptor *_compilationDescriptor = nil;
95
+ dispatch_queue_t _serialQueue = nullptr;
96
+ // CommitAndContinue is enabled by default
97
+ bool _enableCommitAndContinue = true;
98
+
99
+ // use synchronize() to access any of these commit functions outside MPSStream
100
+ void commit();
101
+ void commitAndWait();
102
+ void commitAndContinue();
103
+ void flush();
104
+ };
105
+
106
+ /**
107
+ * Get the current MPS stream
108
+ */
109
+ TORCH_API MPSStream* getCurrentMPSStream();
110
+
111
+ /**
112
+ * Get the default MPS stream
113
+ */
114
+ TORCH_API MPSStream* getDefaultMPSStream();
115
+
116
+ //-----------------------------------------------------------------
117
+ // MPSStreamImpl
118
+ //-----------------------------------------------------------------
119
+
120
+ class TORCH_API MPSStreamImpl
121
+ {
122
+ public:
123
+ /**
124
+ * Gets single instance of the MPSStream.
125
+ */
126
+ static MPSStream* getInstance();
127
+
128
+ private:
129
+ static MPSStream* _stream;
130
+ MPSStreamImpl();
131
+ };
132
+
133
+ } // namespace at::mps
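A companion sketch for the stream API above, again using only names declared in this header (getCurrentMPSStream, synchronize, SyncType). The helper name and the elided encoding work are assumptions; the SyncType chosen simply mirrors the comment next to COMMIT_AND_WAIT.

#include <ATen/mps/MPSStream.h>

void flushCurrentMpsStream() {  // hypothetical helper, not part of the header
  at::mps::MPSStream* stream = at::mps::getCurrentMPSStream();

  // ... encode work via stream->commandBuffer() / stream->commandEncoder() ...

  // COMMIT_AND_WAIT flushes the command buffer and blocks until the GPU has
  // finished executing it, per the SyncType enum above.
  stream->synchronize(at::mps::SyncType::COMMIT_AND_WAIT);
}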
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_physical.h ADDED
@@ -0,0 +1,39 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_conj_physical_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_conj_physical(Tensor self) -> Tensor
26
+ inline at::Tensor _conj_physical(const at::Tensor & self) {
27
+ return at::_ops::_conj_physical::call(self);
28
+ }
29
+
30
+ // aten::_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
31
+ inline at::Tensor & _conj_physical_out(at::Tensor & out, const at::Tensor & self) {
32
+ return at::_ops::_conj_physical_out::call(self, out);
33
+ }
34
+ // aten::_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
35
+ inline at::Tensor & _conj_physical_outf(const at::Tensor & self, at::Tensor & out) {
36
+ return at::_ops::_conj_physical_out::call(self, out);
37
+ }
38
+
39
+ }
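The three inline wrappers above differ only in where the output tensor goes in the argument list; a small usage sketch (the helper name and the use of at::empty_like are my additions, not part of this header):

#include <ATen/ATen.h>

at::Tensor conjPhysicalExample(const at::Tensor& self) {
  // functional form: allocates and returns a new tensor
  at::Tensor result = at::_conj_physical(self);

  // out variants: _out takes `out` first, _outf takes it last, and both
  // forward to the same at::_ops::_conj_physical_out::call(self, out).
  at::Tensor out = at::empty_like(self);
  at::_conj_physical_out(out, self);
  at::_conj_physical_outf(self, out);

  return result;
}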
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_r2c_cuda_dispatch.h ADDED
@@ -0,0 +1,25 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor _fft_r2c(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided);
21
+ TORCH_API at::Tensor & _fft_r2c_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided);
22
+ TORCH_API at::Tensor & _fft_r2c_outf(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided, at::Tensor & out);
23
+
24
+ } // namespace cuda
25
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cos_cpu_dispatch.h ADDED
@@ -0,0 +1,24 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API ::std::vector<at::Tensor> _foreach_cos(at::TensorList self);
21
+ TORCH_API void _foreach_cos_(at::TensorList self);
22
+
23
+ } // namespace cpu
24
+ } // namespace at
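Dispatch-key headers like this one expose a backend-specific entry point directly under a backend namespace (here at::cpu), bypassing the dispatcher. A hedged sketch of calling the two declarations above; the helper name and the at::rand inputs are assumptions:

#include <vector>
#include <ATen/ATen.h>
#include <ATen/ops/_foreach_cos_cpu_dispatch.h>

std::vector<at::Tensor> foreachCosCpuExample() {
  std::vector<at::Tensor> tensors = {at::rand({4}), at::rand({8})};

  // out-of-place: returns new tensors; the trailing-underscore variant
  // mutates the tensors in `tensors` in place.
  std::vector<at::Tensor> cosines = at::cpu::_foreach_cos(tensors);
  at::cpu::_foreach_cos_(tensors);

  return cosines;
}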
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_erfc_ops.h ADDED
@@ -0,0 +1,50 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _foreach_erfc {
18
+ using schema = ::std::vector<at::Tensor> (at::TensorList);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_erfc")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_erfc(Tensor[] self) -> Tensor[]")
24
+ static ::std::vector<at::Tensor> call(at::TensorList self);
25
+ static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
26
+ };
27
+
28
+ struct TORCH_API _foreach_erfc_ {
29
+ using schema = void (at::TensorList);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_erfc_")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_erfc_(Tensor(a!)[] self) -> ()")
35
+ static void call(at::TensorList self);
36
+ static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
37
+ };
38
+
39
+ struct TORCH_API _foreach_erfc_out {
40
+ using schema = void (at::TensorList, at::TensorList);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_erfc")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
46
+ static void call(at::TensorList self, at::TensorList out);
47
+ static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
48
+ };
49
+
50
+ }} // namespace at::_ops
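These Operator.h structs are the low-level call sites that the convenience wrappers route through. As an illustration only (helper name and inputs assumed), invoking them directly looks like this; ::redispatch takes an explicit DispatchKeySet and is normally used by the dispatcher itself, so it is omitted here:

#include <vector>
#include <ATen/ATen.h>
#include <ATen/ops/_foreach_erfc_ops.h>

std::vector<at::Tensor> foreachErfcExample() {
  std::vector<at::Tensor> xs = {at::rand({3})};

  // ::call goes through full dispatch, matching the schema strings above.
  std::vector<at::Tensor> ys = at::_ops::_foreach_erfc::call(xs);
  at::_ops::_foreach_erfc_::call(xs);  // in-place variant; schema returns ()

  return ys;
}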
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_sym_constrain_range_ops.h ADDED
@@ -0,0 +1,28 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _functional_sym_constrain_range {
18
+ using schema = at::Tensor (const at::Scalar &, c10::optional<int64_t>, c10::optional<int64_t>, const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_functional_sym_constrain_range")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_functional_sym_constrain_range(Scalar size, int? min, int? max, Tensor dep_token) -> Tensor")
24
+ static at::Tensor call(const at::Scalar & size, c10::optional<int64_t> min, c10::optional<int64_t> max, const at::Tensor & dep_token);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & size, c10::optional<int64_t> min, c10::optional<int64_t> max, const at::Tensor & dep_token);
26
+ };
27
+
28
+ }} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_reshape.h ADDED
@@ -0,0 +1,39 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_mkldnn_reshape_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_mkldnn_reshape(Tensor self, int[] shape) -> Tensor
26
+ inline at::Tensor _mkldnn_reshape(const at::Tensor & self, at::IntArrayRef shape) {
27
+ return at::_ops::_mkldnn_reshape::call(self, shape);
28
+ }
29
+
30
+ // aten::_mkldnn_reshape.out(Tensor self, int[] shape, *, Tensor(a!) out) -> Tensor(a!)
31
+ inline at::Tensor & _mkldnn_reshape_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef shape) {
32
+ return at::_ops::_mkldnn_reshape_out::call(self, shape, out);
33
+ }
34
+ // aten::_mkldnn_reshape.out(Tensor self, int[] shape, *, Tensor(a!) out) -> Tensor(a!)
35
+ inline at::Tensor & _mkldnn_reshape_outf(const at::Tensor & self, at::IntArrayRef shape, at::Tensor & out) {
36
+ return at::_ops::_mkldnn_reshape_out::call(self, shape, out);
37
+ }
38
+
39
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_bsc_ops.h ADDED
@@ -0,0 +1,39 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _to_sparse_bsc {
18
+ using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional<int64_t>);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_to_sparse_bsc")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim);
26
+ };
27
+
28
+ struct TORCH_API _to_sparse_bsc_out {
29
+ using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional<int64_t>, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_to_sparse_bsc")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_to_sparse_bsc.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward_compositeexplicitautogradnonfunctional_dispatch.h ADDED
@@ -0,0 +1,24 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautogradnonfunctional {
19
+
20
+ TORCH_API at::Tensor _upsample_nearest_exact1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales=c10::nullopt);
21
+ TORCH_API at::Tensor _upsample_nearest_exact1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales=c10::nullopt);
22
+
23
+ } // namespace compositeexplicitautogradnonfunctional
24
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_csc_tensor_args.h ADDED
@@ -0,0 +1,30 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_validate_sparse_csc_tensor_args_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_validate_sparse_csc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
26
+ inline void _validate_sparse_csc_tensor_args(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
27
+ return at::_ops::_validate_sparse_csc_tensor_args::call(ccol_indices, row_indices, values, size);
28
+ }
29
+
30
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool2d_ops.h ADDED
@@ -0,0 +1,39 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API adaptive_avg_pool2d_out {
18
+ using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_avg_pool2d")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)")
24
+ static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out);
25
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out);
26
+ };
27
+
28
+ struct TORCH_API adaptive_avg_pool2d {
29
+ using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_avg_pool2d")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor")
35
+ static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef output_size);
36
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size);
37
+ };
38
+
39
+ }} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm.h ADDED
@@ -0,0 +1,39 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/addbmm_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
26
+ inline at::Tensor & addbmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
27
+ return at::_ops::addbmm_out::call(self, batch1, batch2, beta, alpha, out);
28
+ }
29
+ // aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
30
+ inline at::Tensor & addbmm_outf(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
31
+ return at::_ops::addbmm_out::call(self, batch1, batch2, beta, alpha, out);
32
+ }
33
+
34
+ // aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
35
+ inline at::Tensor addbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
36
+ return at::_ops::addbmm::call(self, batch1, batch2, beta, alpha);
37
+ }
38
+
39
+ }
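The out/outf pair above differs in argument order and in how the defaulted beta/alpha scalars are handled; a short sketch (the helper name and at::empty_like are my additions):

#include <ATen/ATen.h>

at::Tensor addbmmExample(const at::Tensor& self,
                         const at::Tensor& batch1,
                         const at::Tensor& batch2) {
  // functional form, using the defaulted beta=1, alpha=1
  at::Tensor r = at::addbmm(self, batch1, batch2);

  // addbmm_out keeps the defaults and takes `out` first;
  // addbmm_outf spells beta/alpha out explicitly and takes `out` last.
  at::Tensor out = at::empty_like(r);
  at::addbmm_out(out, self, batch1, batch2);
  at::addbmm_outf(self, batch1, batch2, /*beta=*/1, /*alpha=*/1, out);

  return out;
}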
venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_depthwise3d_cuda_dispatch.h ADDED
@@ -0,0 +1,24 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor conv_depthwise3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation);
21
+ TORCH_API at::Tensor conv_depthwise3d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation);
22
+
23
+ } // namespace cuda
24
+ } // namespace at
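This last header shows the IntArrayRef/SymIntArrayRef pairing that SymInt-enabled operators expose on the CUDA dispatch key. A hedged sketch of the non-symbolic entry point, assuming a CUDA build and inputs already on the GPU (the helper name and the concrete kernel/stride/padding values are illustrative only):

#include <ATen/ATen.h>
#include <ATen/ops/conv_depthwise3d_cuda_dispatch.h>

at::Tensor convDepthwise3dExample(const at::Tensor& input,    // NCDHW, on CUDA
                                  const at::Tensor& weight) { // depthwise weight, on CUDA
  // conv_depthwise3d_symint is the same call with c10::SymIntArrayRef
  // arguments, used when shapes are symbolic (e.g. under torch.compile).
  return at::cuda::conv_depthwise3d(
      input, weight,
      /*kernel_size=*/{3, 3, 3},
      /*bias=*/c10::nullopt,
      /*stride=*/{1, 1, 1},
      /*padding=*/{1, 1, 1},
      /*dilation=*/{1, 1, 1});
}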