applied-ai-018 committed on
Commit 65cf362 · verified · 1 Parent(s): 6f419cc

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. ckpts/universal/global_step120/zero/11.input_layernorm.weight/fp32.pt +3 -0
  2. ckpts/universal/global_step120/zero/8.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step20/zero/22.attention.dense.weight/fp32.pt +3 -0
  4. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/ATenCUDAGeneral.h +9 -0
  5. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/AsmUtils.cuh +149 -0
  6. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAConfig.h +19 -0
  7. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparse.h +76 -0
  8. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseBlas.h +318 -0
  9. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDATensorMethods.cuh +15 -0
  10. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAUtils.h +20 -0
  11. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/DeviceUtils.cuh +121 -0
  12. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/EmptyTensor.h +44 -0
  13. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/PhiloxCudaState.h +5 -0
  14. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/PhiloxUtils.cuh +4 -0
  15. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/ScanUtils.cuh +78 -0
  16. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.h +87 -0
  17. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/CUDAHooks.h +54 -0
  18. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/DeviceThreadHandles.h +151 -0
  19. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/IndexUtils.cuh +36 -0
  20. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/IntegerDivider.cuh +124 -0
  21. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/KernelUtils.h +37 -0
  22. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/LazyNVRTC.h +11 -0
  23. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/OffsetCalculator.cuh +119 -0
  24. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/PhiloxCudaStateRaw.cuh +43 -0
  25. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/TensorInfo.cuh +116 -0
  26. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/UnpackRaw.cuh +28 -0
  27. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator.h +40 -0
  28. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/GemmCommon.h +174 -0
  29. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/GemmHipblaslt.h +379 -0
  30. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/GemmRocblas.h +275 -0
  31. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/StreamTimer.h +34 -0
  32. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/Tunable.h +205 -0
  33. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/TunableGemm.h +278 -0
  34. venv/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/TunableOp.h +242 -0
  35. venv/lib/python3.10/site-packages/torch/include/ATen/detail/AcceleratorHooksInterface.h +21 -0
  36. venv/lib/python3.10/site-packages/torch/include/ATen/detail/CUDAHooksInterface.h +201 -0
  37. venv/lib/python3.10/site-packages/torch/include/ATen/detail/FunctionTraits.h +102 -0
  38. venv/lib/python3.10/site-packages/torch/include/ATen/detail/HIPHooksInterface.h +70 -0
  39. venv/lib/python3.10/site-packages/torch/include/ATen/detail/MPSHooksInterface.h +106 -0
  40. venv/lib/python3.10/site-packages/torch/include/ATen/detail/MTIAHooksInterface.h +61 -0
  41. venv/lib/python3.10/site-packages/torch/include/ATen/detail/ORTHooksInterface.h +36 -0
  42. venv/lib/python3.10/site-packages/torch/include/ATen/detail/PrivateUse1HooksInterface.h +61 -0
  43. venv/lib/python3.10/site-packages/torch/include/ATen/detail/XPUHooksInterface.h +80 -0
  44. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_is_all_true_ops.h +28 -0
  45. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_native_multi_head_attention_cpu_dispatch.h +23 -0
  46. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer.h +30 -0
  47. venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool2d_cuda_dispatch.h +26 -0
  48. venv/lib/python3.10/site-packages/torch/include/ATen/ops/addcdiv_compositeexplicitautogradnonfunctional_dispatch.h +24 -0
  49. venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_elemt.h +39 -0
  50. venv/lib/python3.10/site-packages/torch/include/ATen/ops/block_diag.h +39 -0
ckpts/universal/global_step120/zero/11.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aab089410b83bae4ed8a2351513c07674533c42f932cbdd99a9c1e6b044d8d39
+ size 9293
ckpts/universal/global_step120/zero/8.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:22d1b42b6497b6d821f374aaba1e0877983148d7a49811c05daaffcd42add6f6
+ size 33555627
ckpts/universal/global_step20/zero/22.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d2fe52c4fb3a32acfa0cca5048c37a51e2d0c342707b618aab5a6d36c95e908d
+ size 16778317
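The three checkpoint entries above are Git LFS pointer files rather than the tensors themselves: each records only the pointer-spec version, the SHA-256 of the actual blob, and its size in bytes. A minimal host-side sketch of reading that format (the struct, function, and local path are illustrative, not part of this commit):

// lfs_pointer.cpp -- parse a Git LFS pointer file (illustrative sketch).
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>

struct LfsPointer {
  std::string version;  // e.g. https://git-lfs.github.com/spec/v1
  std::string oid;      // hex SHA-256 of the real blob
  long long size = 0;   // blob size in bytes
};

LfsPointer parse_lfs_pointer(std::istream& in) {
  LfsPointer p;
  std::string line;
  while (std::getline(in, line)) {
    std::istringstream ls(line);
    std::string key;
    ls >> key;
    if (key == "version") ls >> p.version;
    else if (key == "oid") { std::string v; ls >> v; p.oid = v.substr(v.find(':') + 1); }
    else if (key == "size") ls >> p.size;
  }
  return p;
}

int main() {
  std::ifstream f("fp32.pt");  // hypothetical local path to one of the pointer files
  LfsPointer p = parse_lfs_pointer(f);
  std::cout << p.oid << " (" << p.size << " bytes)\n";
}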
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/ATenCUDAGeneral.h ADDED
@@ -0,0 +1,9 @@
+ #pragma once
+
+ #include <cuda.h>
+ #include <cuda_runtime.h>
+ #include <cuda_fp16.h>
+
+ #include <c10/macros/Export.h>
+
+ // Use TORCH_CUDA_CPP_API or TORCH_CUDA_CU_API for exports from this folder
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/AsmUtils.cuh ADDED
@@ -0,0 +1,149 @@
+ #pragma once
+ #include <cstdint>
+
+ // Collection of direct PTX functions
+
+ namespace at::cuda {
+
+ template <typename T>
+ struct Bitfield {};
+
+ template <>
+ struct Bitfield<unsigned int> {
+ static __device__ __host__ __forceinline__
+ unsigned int getBitfield(unsigned int val, int pos, int len) {
+ #if !defined(__CUDA_ARCH__)
+ pos &= 0xff;
+ len &= 0xff;
+
+ unsigned int m = (1u << len) - 1u;
+ return (val >> pos) & m;
+ #else
+ unsigned int ret;
+ asm("bfe.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(val), "r"(pos), "r"(len));
+ return ret;
+ #endif
+ }
+
+ static __device__ __host__ __forceinline__
+ unsigned int setBitfield(unsigned int val, unsigned int toInsert, int pos, int len) {
+ #if !defined(__CUDA_ARCH__)
+ pos &= 0xff;
+ len &= 0xff;
+
+ unsigned int m = (1u << len) - 1u;
+ toInsert &= m;
+ toInsert <<= pos;
+ m <<= pos;
+
+ return (val & ~m) | toInsert;
+ #else
+ unsigned int ret;
+ asm("bfi.b32 %0, %1, %2, %3, %4;" :
+ "=r"(ret) : "r"(toInsert), "r"(val), "r"(pos), "r"(len));
+ return ret;
+ #endif
+ }
+ };
+
+ template <>
+ struct Bitfield<uint64_t> {
+ static __device__ __host__ __forceinline__
+ uint64_t getBitfield(uint64_t val, int pos, int len) {
+ #if !defined(__CUDA_ARCH__)
+ pos &= 0xff;
+ len &= 0xff;
+
+ uint64_t m = (1u << len) - 1u;
+ return (val >> pos) & m;
+ #else
+ uint64_t ret;
+ asm("bfe.u64 %0, %1, %2, %3;" : "=l"(ret) : "l"(val), "r"(pos), "r"(len));
+ return ret;
+ #endif
+ }
+
+ static __device__ __host__ __forceinline__
+ uint64_t setBitfield(uint64_t val, uint64_t toInsert, int pos, int len) {
+ #if !defined(__CUDA_ARCH__)
+ pos &= 0xff;
+ len &= 0xff;
+
+ uint64_t m = (1u << len) - 1u;
+ toInsert &= m;
+ toInsert <<= pos;
+ m <<= pos;
+
+ return (val & ~m) | toInsert;
+ #else
+ uint64_t ret;
+ asm("bfi.b64 %0, %1, %2, %3, %4;" :
+ "=l"(ret) : "l"(toInsert), "l"(val), "r"(pos), "r"(len));
+ return ret;
+ #endif
+ }
+ };
+
+ __device__ __forceinline__ int getLaneId() {
+ #if defined(USE_ROCM)
+ return __lane_id();
+ #else
+ int laneId;
+ asm("mov.s32 %0, %%laneid;" : "=r"(laneId) );
+ return laneId;
+ #endif
+ }
+
+ #if defined(USE_ROCM)
+ __device__ __forceinline__ unsigned long long int getLaneMaskLt() {
+ const std::uint64_t m = (1ull << getLaneId()) - 1ull;
+ return m;
+ }
+ #else
+ __device__ __forceinline__ unsigned getLaneMaskLt() {
+ unsigned mask;
+ asm("mov.u32 %0, %%lanemask_lt;" : "=r"(mask));
+ return mask;
+ }
+ #endif
+
+ #if defined (USE_ROCM)
+ __device__ __forceinline__ unsigned long long int getLaneMaskLe() {
+ std::uint64_t m = UINT64_MAX >> (sizeof(std::uint64_t) * CHAR_BIT - (getLaneId() + 1));
+ return m;
+ }
+ #else
+ __device__ __forceinline__ unsigned getLaneMaskLe() {
+ unsigned mask;
+ asm("mov.u32 %0, %%lanemask_le;" : "=r"(mask));
+ return mask;
+ }
+ #endif
+
+ #if defined(USE_ROCM)
+ __device__ __forceinline__ unsigned long long int getLaneMaskGt() {
+ const std::uint64_t m = getLaneMaskLe();
+ return m ? ~m : m;
+ }
+ #else
+ __device__ __forceinline__ unsigned getLaneMaskGt() {
+ unsigned mask;
+ asm("mov.u32 %0, %%lanemask_gt;" : "=r"(mask));
+ return mask;
+ }
+ #endif
+
+ #if defined(USE_ROCM)
+ __device__ __forceinline__ unsigned long long int getLaneMaskGe() {
+ const std::uint64_t m = getLaneMaskLt();
+ return ~m;
+ }
+ #else
+ __device__ __forceinline__ unsigned getLaneMaskGe() {
+ unsigned mask;
+ asm("mov.u32 %0, %%lanemask_ge;" : "=r"(mask));
+ return mask;
+ }
+ #endif
+
+ } // namespace at::cuda
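The host branches of Bitfield<unsigned int> above are the plain shift-and-mask equivalents of the bfe.u32/bfi.b32 PTX instructions used on the device. A quick host-only sanity check of that equivalence (a standalone sketch, not part of the header):

// bitfield_check.cpp -- host-side check of the shift-and-mask fallback
// used by Bitfield<unsigned int> above (illustrative, not part of the commit).
#include <cassert>
#include <cstdio>

unsigned int getBitfield(unsigned int val, int pos, int len) {
  unsigned int m = (1u << len) - 1u;   // len low bits set
  return (val >> pos) & m;             // what bfe.u32 computes on device
}

unsigned int setBitfield(unsigned int val, unsigned int toInsert, int pos, int len) {
  unsigned int m = (1u << len) - 1u;
  toInsert = (toInsert & m) << pos;    // field aligned to its position
  m <<= pos;
  return (val & ~m) | toInsert;        // what bfi.b32 computes on device
}

int main() {
  unsigned int v = 0xABCD1234u;
  assert(getBitfield(v, 8, 8) == 0x12u);               // extract byte 1
  assert(setBitfield(v, 0xFFu, 8, 8) == 0xABCDFF34u);  // overwrite byte 1
  std::puts("bitfield fallback behaves like bfe/bfi");
}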
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAConfig.h ADDED
@@ -0,0 +1,19 @@
+ #pragma once
+
+ // Test these using #if AT_CUDNN_ENABLED(), not #ifdef, so that it's
+ // obvious if you forgot to include Config.h
+ // c.f. https://stackoverflow.com/questions/33759787/generating-an-error-if-checked-boolean-macro-is-not-defined
+ //
+ // NB: This header MUST NOT be included from other headers; it should
+ // only be included from C++ files.
+ #define AT_CUDNN_ENABLED() 1
+ #define AT_CUSPARSELT_ENABLED() 1
+ #define AT_ROCM_ENABLED() 0
+ #define AT_MAGMA_ENABLED() 1
+
+ // Needed for hipMAGMA to correctly identify implementation
+ #if (AT_ROCM_ENABLED() && AT_MAGMA_ENABLED())
+ #define HAVE_HIP 1
+ #endif
+
+ #define NVCC_FLAGS_EXTRA "-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86;-gencode;arch=compute_90,code=sm_90"
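The header's comment is the whole point of the function-like macro style: if a source file forgets the include, `#if AT_CUDNN_ENABLED()` preprocesses to `0()`, which is not a valid constant expression, so the build fails loudly instead of silently taking the disabled branch. A usage sketch, with placeholder bodies that are not PyTorch code:

// config_guard_sketch.cpp -- how the function-like macros above are meant
// to be tested (illustrative; the guarded bodies are placeholders).
#include <ATen/cuda/CUDAConfig.h>   // per the header's NB: include from .cpp files only

void pick_backend() {
#if AT_CUDNN_ENABLED()              // without the include this becomes "0()" -> hard error
  // cuDNN-backed path would go here.
#else
  // Fallback path would go here.
#endif

// #ifdef AT_CUDNN_ENABLED          // would silently skip the block if the include
//                                  // were missing -- the failure mode the comment warns about
}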
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparse.h ADDED
@@ -0,0 +1,76 @@
+ #pragma once
+
+ #include <ATen/cuda/CUDAContext.h>
+ #if defined(USE_ROCM)
+ #include <hipsparse/hipsparse-version.h>
+ #define HIPSPARSE_VERSION ((hipsparseVersionMajor*100000) + (hipsparseVersionMinor*100) + hipsparseVersionPatch)
+ #endif
+
+ // cuSparse Generic API added in CUDA 10.1
+ // Windows support added in CUDA 11.0
+ #if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && ((CUSPARSE_VERSION >= 10300) || (CUSPARSE_VERSION >= 11000 && defined(_WIN32)))
+ #define AT_USE_CUSPARSE_GENERIC_API() 1
+ #else
+ #define AT_USE_CUSPARSE_GENERIC_API() 0
+ #endif
+
+ // cuSparse Generic API descriptor pointers were changed to const in CUDA 12.0
+ #if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && \
+ (CUSPARSE_VERSION < 12000)
+ #define AT_USE_CUSPARSE_NON_CONST_DESCRIPTORS() 1
+ #else
+ #define AT_USE_CUSPARSE_NON_CONST_DESCRIPTORS() 0
+ #endif
+
+ #if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && \
+ (CUSPARSE_VERSION >= 12000)
+ #define AT_USE_CUSPARSE_CONST_DESCRIPTORS() 1
+ #else
+ #define AT_USE_CUSPARSE_CONST_DESCRIPTORS() 0
+ #endif
+
+ #if defined(USE_ROCM)
+ // hipSparse const API added in v2.4.0
+ #if HIPSPARSE_VERSION >= 200400
+ #define AT_USE_HIPSPARSE_CONST_DESCRIPTORS() 1
+ #define AT_USE_HIPSPARSE_NON_CONST_DESCRIPTORS() 0
+ #define AT_USE_HIPSPARSE_GENERIC_API() 1
+ #else
+ #define AT_USE_HIPSPARSE_CONST_DESCRIPTORS() 0
+ #define AT_USE_HIPSPARSE_NON_CONST_DESCRIPTORS() 1
+ #define AT_USE_HIPSPARSE_GENERIC_API() 1
+ #endif
+ #else // USE_ROCM
+ #define AT_USE_HIPSPARSE_CONST_DESCRIPTORS() 0
+ #define AT_USE_HIPSPARSE_NON_CONST_DESCRIPTORS() 0
+ #define AT_USE_HIPSPARSE_GENERIC_API() 0
+ #endif // USE_ROCM
+
+ // cuSparse Generic API spsv function was added in CUDA 11.3.0
+ #if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && (CUSPARSE_VERSION >= 11500)
+ #define AT_USE_CUSPARSE_GENERIC_SPSV() 1
+ #else
+ #define AT_USE_CUSPARSE_GENERIC_SPSV() 0
+ #endif
+
+ // cuSparse Generic API spsm function was added in CUDA 11.3.1
+ #if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && (CUSPARSE_VERSION >= 11600)
+ #define AT_USE_CUSPARSE_GENERIC_SPSM() 1
+ #else
+ #define AT_USE_CUSPARSE_GENERIC_SPSM() 0
+ #endif
+
+ // cuSparse Generic API sddmm function was added in CUDA 11.2.1 (cuSparse version 11400)
+ #if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && (CUSPARSE_VERSION >= 11400)
+ #define AT_USE_CUSPARSE_GENERIC_SDDMM() 1
+ #else
+ #define AT_USE_CUSPARSE_GENERIC_SDDMM() 0
+ #endif
+
+ // BSR triangular solve functions were added in hipSPARSE 1.11.2 (ROCm 4.5.0)
+ #if defined(CUDART_VERSION) || \
+ (defined(USE_ROCM) && ROCM_VERSION >= 40500 )
+ #define AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() 1
+ #else
+ #define AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() 0
+ #endif
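Like CUDAConfig.h, these are function-like macros, so call sites version-gate cuSPARSE features with #if at compile time. A small sketch of such a guard (the function and its comments are placeholders, not PyTorch code):

// spsv_guard_sketch.cpp -- version-gating a cuSPARSE generic-API code path
// with the macros above (illustrative placeholder).
#include <ATen/cuda/CUDASparse.h>

void triangular_solve_dispatch() {
#if AT_USE_CUSPARSE_GENERIC_SPSV()
  // Toolkits >= CUDA 11.3: the cusparseSpSV_* generic API would be called here.
#else
  // Older toolkits: fall back to a legacy routine or raise a runtime error.
#endif
}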
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseBlas.h ADDED
@@ -0,0 +1,318 @@
+ #pragma once
+
+ /*
+ Provides a subset of cuSPARSE functions as templates:
+
+ csrgeam2<scalar_t>(...)
+
+ where scalar_t is double, float, c10::complex<double> or c10::complex<float>.
+ The functions are available in at::cuda::sparse namespace.
+ */
+
+ #include <ATen/cuda/CUDAContext.h>
+ #include <ATen/cuda/CUDASparse.h>
+
+ namespace at::cuda::sparse {
+
+ #define CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(scalar_t) \
+ cusparseHandle_t handle, int m, int n, const scalar_t *alpha, \
+ const cusparseMatDescr_t descrA, int nnzA, \
+ const scalar_t *csrSortedValA, const int *csrSortedRowPtrA, \
+ const int *csrSortedColIndA, const scalar_t *beta, \
+ const cusparseMatDescr_t descrB, int nnzB, \
+ const scalar_t *csrSortedValB, const int *csrSortedRowPtrB, \
+ const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \
+ const scalar_t *csrSortedValC, const int *csrSortedRowPtrC, \
+ const int *csrSortedColIndC, size_t *pBufferSizeInBytes
+
+ template <typename scalar_t>
+ inline void csrgeam2_bufferSizeExt(
+ CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(scalar_t)) {
+ TORCH_INTERNAL_ASSERT(
+ false,
+ "at::cuda::sparse::csrgeam2_bufferSizeExt: not implemented for ",
+ typeid(scalar_t).name());
+ }
+
+ template <>
+ void csrgeam2_bufferSizeExt<float>(
+ CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(float));
+ template <>
+ void csrgeam2_bufferSizeExt<double>(
+ CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(double));
+ template <>
+ void csrgeam2_bufferSizeExt<c10::complex<float>>(
+ CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(c10::complex<float>));
+ template <>
+ void csrgeam2_bufferSizeExt<c10::complex<double>>(
+ CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(c10::complex<double>));
+
+ #define CUSPARSE_CSRGEAM2_NNZ_ARGTYPES() \
+ cusparseHandle_t handle, int m, int n, const cusparseMatDescr_t descrA, \
+ int nnzA, const int *csrSortedRowPtrA, const int *csrSortedColIndA, \
+ const cusparseMatDescr_t descrB, int nnzB, const int *csrSortedRowPtrB, \
+ const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \
+ int *csrSortedRowPtrC, int *nnzTotalDevHostPtr, void *workspace
+
+ template <typename scalar_t>
+ inline void csrgeam2Nnz(CUSPARSE_CSRGEAM2_NNZ_ARGTYPES()) {
+ TORCH_CUDASPARSE_CHECK(cusparseXcsrgeam2Nnz(
+ handle,
+ m,
+ n,
+ descrA,
+ nnzA,
+ csrSortedRowPtrA,
+ csrSortedColIndA,
+ descrB,
+ nnzB,
+ csrSortedRowPtrB,
+ csrSortedColIndB,
+ descrC,
+ csrSortedRowPtrC,
+ nnzTotalDevHostPtr,
+ workspace));
+ }
+
+ #define CUSPARSE_CSRGEAM2_ARGTYPES(scalar_t) \
+ cusparseHandle_t handle, int m, int n, const scalar_t *alpha, \
+ const cusparseMatDescr_t descrA, int nnzA, \
+ const scalar_t *csrSortedValA, const int *csrSortedRowPtrA, \
+ const int *csrSortedColIndA, const scalar_t *beta, \
+ const cusparseMatDescr_t descrB, int nnzB, \
+ const scalar_t *csrSortedValB, const int *csrSortedRowPtrB, \
+ const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \
+ scalar_t *csrSortedValC, int *csrSortedRowPtrC, int *csrSortedColIndC, \
+ void *pBuffer
+
+ template <typename scalar_t>
+ inline void csrgeam2(CUSPARSE_CSRGEAM2_ARGTYPES(scalar_t)) {
+ TORCH_INTERNAL_ASSERT(
+ false,
+ "at::cuda::sparse::csrgeam2: not implemented for ",
+ typeid(scalar_t).name());
+ }
+
+ template <>
+ void csrgeam2<float>(CUSPARSE_CSRGEAM2_ARGTYPES(float));
+ template <>
+ void csrgeam2<double>(CUSPARSE_CSRGEAM2_ARGTYPES(double));
+ template <>
+ void csrgeam2<c10::complex<float>>(
+ CUSPARSE_CSRGEAM2_ARGTYPES(c10::complex<float>));
+ template <>
+ void csrgeam2<c10::complex<double>>(
+ CUSPARSE_CSRGEAM2_ARGTYPES(c10::complex<double>));
+
+ #define CUSPARSE_BSRMM_ARGTYPES(scalar_t) \
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
+ cusparseOperation_t transA, cusparseOperation_t transB, int mb, int n, \
+ int kb, int nnzb, const scalar_t *alpha, \
+ const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \
+ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
+ const scalar_t *B, int ldb, const scalar_t *beta, scalar_t *C, int ldc
+
+ template <typename scalar_t>
+ inline void bsrmm(CUSPARSE_BSRMM_ARGTYPES(scalar_t)) {
+ TORCH_INTERNAL_ASSERT(
+ false,
+ "at::cuda::sparse::bsrmm: not implemented for ",
+ typeid(scalar_t).name());
+ }
+
+ template <>
+ void bsrmm<float>(CUSPARSE_BSRMM_ARGTYPES(float));
+ template <>
+ void bsrmm<double>(CUSPARSE_BSRMM_ARGTYPES(double));
+ template <>
+ void bsrmm<c10::complex<float>>(CUSPARSE_BSRMM_ARGTYPES(c10::complex<float>));
+ template <>
+ void bsrmm<c10::complex<double>>(CUSPARSE_BSRMM_ARGTYPES(c10::complex<double>));
+
+ #define CUSPARSE_BSRMV_ARGTYPES(scalar_t) \
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
+ cusparseOperation_t transA, int mb, int nb, int nnzb, \
+ const scalar_t *alpha, const cusparseMatDescr_t descrA, \
+ const scalar_t *bsrValA, const int *bsrRowPtrA, const int *bsrColIndA, \
+ int blockDim, const scalar_t *x, const scalar_t *beta, scalar_t *y
+
+ template <typename scalar_t>
+ inline void bsrmv(CUSPARSE_BSRMV_ARGTYPES(scalar_t)) {
+ TORCH_INTERNAL_ASSERT(
+ false,
+ "at::cuda::sparse::bsrmv: not implemented for ",
+ typeid(scalar_t).name());
+ }
+
+ template <>
+ void bsrmv<float>(CUSPARSE_BSRMV_ARGTYPES(float));
+ template <>
+ void bsrmv<double>(CUSPARSE_BSRMV_ARGTYPES(double));
+ template <>
+ void bsrmv<c10::complex<float>>(CUSPARSE_BSRMV_ARGTYPES(c10::complex<float>));
+ template <>
+ void bsrmv<c10::complex<double>>(CUSPARSE_BSRMV_ARGTYPES(c10::complex<double>));
+
+ #if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE()
+
+ #define CUSPARSE_BSRSV2_BUFFER_ARGTYPES(scalar_t) \
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
+ cusparseOperation_t transA, int mb, int nnzb, \
+ const cusparseMatDescr_t descrA, scalar_t *bsrValA, \
+ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
+ bsrsv2Info_t info, int *pBufferSizeInBytes
+
+ template <typename scalar_t>
+ inline void bsrsv2_bufferSize(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(scalar_t)) {
+ TORCH_INTERNAL_ASSERT(
+ false,
+ "at::cuda::sparse::bsrsv2_bufferSize: not implemented for ",
+ typeid(scalar_t).name());
+ }
+
+ template <>
+ void bsrsv2_bufferSize<float>(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(float));
+ template <>
+ void bsrsv2_bufferSize<double>(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(double));
+ template <>
+ void bsrsv2_bufferSize<c10::complex<float>>(
+ CUSPARSE_BSRSV2_BUFFER_ARGTYPES(c10::complex<float>));
+ template <>
+ void bsrsv2_bufferSize<c10::complex<double>>(
+ CUSPARSE_BSRSV2_BUFFER_ARGTYPES(c10::complex<double>));
+
+ #define CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(scalar_t) \
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
+ cusparseOperation_t transA, int mb, int nnzb, \
+ const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \
+ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
+ bsrsv2Info_t info, cusparseSolvePolicy_t policy, void *pBuffer
+
+ template <typename scalar_t>
+ inline void bsrsv2_analysis(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(scalar_t)) {
+ TORCH_INTERNAL_ASSERT(
+ false,
+ "at::cuda::sparse::bsrsv2_analysis: not implemented for ",
+ typeid(scalar_t).name());
+ }
+
+ template <>
+ void bsrsv2_analysis<float>(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(float));
+ template <>
+ void bsrsv2_analysis<double>(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(double));
+ template <>
+ void bsrsv2_analysis<c10::complex<float>>(
+ CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(c10::complex<float>));
+ template <>
+ void bsrsv2_analysis<c10::complex<double>>(
+ CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(c10::complex<double>));
+
+ #define CUSPARSE_BSRSV2_SOLVE_ARGTYPES(scalar_t) \
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
+ cusparseOperation_t transA, int mb, int nnzb, const scalar_t *alpha, \
+ const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \
+ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
+ bsrsv2Info_t info, const scalar_t *x, scalar_t *y, \
+ cusparseSolvePolicy_t policy, void *pBuffer
+
+ template <typename scalar_t>
+ inline void bsrsv2_solve(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(scalar_t)) {
+ TORCH_INTERNAL_ASSERT(
+ false,
+ "at::cuda::sparse::bsrsv2_solve: not implemented for ",
+ typeid(scalar_t).name());
+ }
+
+ template <>
+ void bsrsv2_solve<float>(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(float));
+ template <>
+ void bsrsv2_solve<double>(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(double));
+ template <>
+ void bsrsv2_solve<c10::complex<float>>(
+ CUSPARSE_BSRSV2_SOLVE_ARGTYPES(c10::complex<float>));
+ template <>
+ void bsrsv2_solve<c10::complex<double>>(
+ CUSPARSE_BSRSV2_SOLVE_ARGTYPES(c10::complex<double>));
+
+ #define CUSPARSE_BSRSM2_BUFFER_ARGTYPES(scalar_t) \
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
+ cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \
+ int nnzb, const cusparseMatDescr_t descrA, scalar_t *bsrValA, \
+ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
+ bsrsm2Info_t info, int *pBufferSizeInBytes
+
+ template <typename scalar_t>
+ inline void bsrsm2_bufferSize(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(scalar_t)) {
+ TORCH_INTERNAL_ASSERT(
+ false,
+ "at::cuda::sparse::bsrsm2_bufferSize: not implemented for ",
+ typeid(scalar_t).name());
+ }
+
+ template <>
+ void bsrsm2_bufferSize<float>(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(float));
+ template <>
+ void bsrsm2_bufferSize<double>(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(double));
+ template <>
+ void bsrsm2_bufferSize<c10::complex<float>>(
+ CUSPARSE_BSRSM2_BUFFER_ARGTYPES(c10::complex<float>));
+ template <>
+ void bsrsm2_bufferSize<c10::complex<double>>(
+ CUSPARSE_BSRSM2_BUFFER_ARGTYPES(c10::complex<double>));
+
+ #define CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(scalar_t) \
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
+ cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \
+ int nnzb, const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \
+ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
+ bsrsm2Info_t info, cusparseSolvePolicy_t policy, void *pBuffer
+
+ template <typename scalar_t>
+ inline void bsrsm2_analysis(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(scalar_t)) {
+ TORCH_INTERNAL_ASSERT(
+ false,
+ "at::cuda::sparse::bsrsm2_analysis: not implemented for ",
+ typeid(scalar_t).name());
+ }
+
+ template <>
+ void bsrsm2_analysis<float>(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(float));
+ template <>
+ void bsrsm2_analysis<double>(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(double));
+ template <>
+ void bsrsm2_analysis<c10::complex<float>>(
+ CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(c10::complex<float>));
+ template <>
+ void bsrsm2_analysis<c10::complex<double>>(
+ CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(c10::complex<double>));
+
+ #define CUSPARSE_BSRSM2_SOLVE_ARGTYPES(scalar_t) \
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
+ cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \
+ int nnzb, const scalar_t *alpha, const cusparseMatDescr_t descrA, \
+ const scalar_t *bsrValA, const int *bsrRowPtrA, const int *bsrColIndA, \
+ int blockDim, bsrsm2Info_t info, const scalar_t *B, int ldb, \
+ scalar_t *X, int ldx, cusparseSolvePolicy_t policy, void *pBuffer
+
+ template <typename scalar_t>
+ inline void bsrsm2_solve(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(scalar_t)) {
+ TORCH_INTERNAL_ASSERT(
+ false,
+ "at::cuda::sparse::bsrsm2_solve: not implemented for ",
+ typeid(scalar_t).name());
+ }
+
+ template <>
+ void bsrsm2_solve<float>(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(float));
+ template <>
+ void bsrsm2_solve<double>(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(double));
+ template <>
+ void bsrsm2_solve<c10::complex<float>>(
+ CUSPARSE_BSRSM2_SOLVE_ARGTYPES(c10::complex<float>));
+ template <>
+ void bsrsm2_solve<c10::complex<double>>(
+ CUSPARSE_BSRSM2_SOLVE_ARGTYPES(c10::complex<double>));
+
+ #endif // AT_USE_HIPSPARSE_TRIANGULAR_SOLVE
+
+ } // namespace at::cuda::sparse
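The header uses one dispatch pattern throughout: the primary template asserts at runtime for unsupported scalar types, while the specializations that actually wrap cuSPARSE are only declared here and defined in a separate translation unit compiled against cuSPARSE. A toy, self-contained sketch of that pattern (the axpy names are illustrative, with no cuSPARSE dependency; the original uses TORCH_INTERNAL_ASSERT where this sketch throws):

// specialization_pattern.cpp -- the declare-in-header / define-elsewhere
// pattern used by csrgeam2 and friends above, shown with toy types.
#include <cstdio>
#include <stdexcept>
#include <string>
#include <typeinfo>

// Primary template: reject scalar types that have no backing library call.
template <typename scalar_t>
inline void axpy(int n, scalar_t alpha, const scalar_t* x, scalar_t* y) {
  throw std::runtime_error(std::string("axpy: not implemented for ") +
                           typeid(scalar_t).name());
}

// Declarations only; the definitions would normally live in one .cpp
// (the analogue of the file that links against cuSPARSE).
template <> void axpy<float>(int n, float alpha, const float* x, float* y);
template <> void axpy<double>(int n, double alpha, const double* x, double* y);

// Definition for float, shown inline here for a runnable example.
template <> void axpy<float>(int n, float alpha, const float* x, float* y) {
  for (int i = 0; i < n; ++i) y[i] += alpha * x[i];
}

int main() {
  float x[3] = {1, 2, 3}, y[3] = {0, 0, 0};
  axpy<float>(3, 2.0f, x, y);                       // resolves to the real specialization
  std::printf("%g %g %g\n", y[0], y[1], y[2]);      // prints: 2 4 6
  // axpy<long>(3, 2L, nullptr, nullptr);           // would throw: no specialization
}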
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDATensorMethods.cuh ADDED
@@ -0,0 +1,15 @@
+ #pragma once
+
+ #include <ATen/Tensor.h>
+ #include <c10/util/Half.h>
+
+ #include <cuda.h>
+ #include <cuda_runtime.h>
+ #include <cuda_fp16.h>
+
+ namespace at {
+ template <>
+ inline __half* Tensor::data() const {
+ return reinterpret_cast<__half*>(data<Half>());
+ }
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAUtils.h ADDED
@@ -0,0 +1,20 @@
+ #pragma once
+
+ #include <ATen/cuda/CUDAContext.h>
+
+ namespace at::cuda {
+
+ // Check if every tensor in a list of tensors matches the current
+ // device.
+ inline bool check_device(ArrayRef<Tensor> ts) {
+ if (ts.empty()) {
+ return true;
+ }
+ Device curDevice = Device(kCUDA, current_device());
+ for (const Tensor& t : ts) {
+ if (t.device() != curDevice) return false;
+ }
+ return true;
+ }
+
+ } // namespace at::cuda
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/DeviceUtils.cuh ADDED
@@ -0,0 +1,121 @@
+ #pragma once
+
+ #include <cuda.h>
+ #include <c10/util/complex.h>
+ #include <c10/util/Half.h>
+
+ __device__ __forceinline__ unsigned int ACTIVE_MASK()
+ {
+ #if !defined(USE_ROCM)
+ return __activemask();
+ #else
+ // will be ignored anyway
+ return 0xffffffff;
+ #endif
+ }
+
+ __device__ __forceinline__ void WARP_SYNC(unsigned mask = 0xffffffff) {
+ #if !defined(USE_ROCM)
+ return __syncwarp(mask);
+ #endif
+ }
+
+ #if defined(USE_ROCM)
+ __device__ __forceinline__ unsigned long long int WARP_BALLOT(int predicate)
+ {
+ return __ballot(predicate);
+ }
+ #else
+ __device__ __forceinline__ unsigned int WARP_BALLOT(int predicate, unsigned int mask = 0xffffffff)
+ {
+ #if !defined(USE_ROCM)
+ return __ballot_sync(mask, predicate);
+ #else
+ return __ballot(predicate);
+ #endif
+ }
+ #endif
+
+ template <typename T>
+ __device__ __forceinline__ T WARP_SHFL_XOR(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff)
+ {
+ #if !defined(USE_ROCM)
+ return __shfl_xor_sync(mask, value, laneMask, width);
+ #else
+ return __shfl_xor(value, laneMask, width);
+ #endif
+ }
+
+ template <typename T>
+ __device__ __forceinline__ T WARP_SHFL(T value, int srcLane, int width = warpSize, unsigned int mask = 0xffffffff)
+ {
+ #if !defined(USE_ROCM)
+ return __shfl_sync(mask, value, srcLane, width);
+ #else
+ return __shfl(value, srcLane, width);
+ #endif
+ }
+
+ template <typename T>
+ __device__ __forceinline__ T WARP_SHFL_UP(T value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff)
+ {
+ #if !defined(USE_ROCM)
+ return __shfl_up_sync(mask, value, delta, width);
+ #else
+ return __shfl_up(value, delta, width);
+ #endif
+ }
+
+ template <typename T>
+ __device__ __forceinline__ T WARP_SHFL_DOWN(T value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff)
+ {
+ #if !defined(USE_ROCM)
+ return __shfl_down_sync(mask, value, delta, width);
+ #else
+ return __shfl_down(value, delta, width);
+ #endif
+ }
+
+ #if defined(USE_ROCM)
+ template<>
+ __device__ __forceinline__ int64_t WARP_SHFL_DOWN<int64_t>(int64_t value, unsigned int delta, int width , unsigned int mask)
+ {
+ //(HIP doesn't support int64_t). Trick from https://devblogs.nvidia.com/faster-parallel-reductions-kepler/
+ int2 a = *reinterpret_cast<int2*>(&value);
+ a.x = __shfl_down(a.x, delta);
+ a.y = __shfl_down(a.y, delta);
+ return *reinterpret_cast<int64_t*>(&a);
+ }
+ #endif
+
+ template<>
+ __device__ __forceinline__ c10::Half WARP_SHFL_DOWN<c10::Half>(c10::Half value, unsigned int delta, int width, unsigned int mask)
+ {
+ return c10::Half(WARP_SHFL_DOWN<unsigned short>(value.x, delta, width, mask), c10::Half::from_bits_t{});
+ }
+
+ template <typename T>
+ __device__ __forceinline__ c10::complex<T> WARP_SHFL_DOWN(c10::complex<T> value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff)
+ {
+ #if !defined(USE_ROCM)
+ return c10::complex<T>(
+ __shfl_down_sync(mask, value.real_, delta, width),
+ __shfl_down_sync(mask, value.imag_, delta, width));
+ #else
+ return c10::complex<T>(
+ __shfl_down(value.real_, delta, width),
+ __shfl_down(value.imag_, delta, width));
+ #endif
+ }
+
+ /**
+ * For CC 3.5+, perform a load using __ldg
+ */
+ template <typename T>
+ __device__ __forceinline__ T doLdg(const T* p) {
+ #if __CUDA_ARCH__ >= 350 && !defined(USE_ROCM)
+ return __ldg(p);
+ #else
+ return *p;
+ #endif
+ }
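The WARP_SHFL_* wrappers exist so the same reduction code compiles against __shfl_*_sync on CUDA and the legacy __shfl_* builtins on ROCm. A minimal warp-sum kernel built on WARP_SHFL_DOWN (an illustrative sketch, launched as warp_sum<<<1, 32>>>(d_in, d_out)):

// warp_sum_sketch.cu -- warp-level reduction on top of WARP_SHFL_DOWN above
// (illustrative kernel, not part of the commit).
#include <ATen/cuda/DeviceUtils.cuh>

__global__ void warp_sum(const float* in, float* out) {
  float v = in[threadIdx.x];                 // one warp's worth of data
  for (int offset = warpSize / 2; offset > 0; offset /= 2) {
    v += WARP_SHFL_DOWN(v, offset);          // lane i adds lane i + offset
  }
  if (threadIdx.x == 0) *out = v;            // lane 0 now holds the warp-wide sum
}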
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/EmptyTensor.h ADDED
@@ -0,0 +1,44 @@
+ #pragma once
+ #include <ATen/core/TensorBase.h>
+
+ namespace at::detail {
+
+ TORCH_CUDA_CPP_API TensorBase empty_cuda(
+ IntArrayRef size,
+ ScalarType dtype,
+ c10::optional<Device> device_opt,
+ c10::optional<c10::MemoryFormat> memory_format_opt);
+
+ TORCH_CUDA_CPP_API TensorBase empty_cuda(
+ IntArrayRef size,
+ c10::optional<ScalarType> dtype_opt,
+ c10::optional<Layout> layout_opt,
+ c10::optional<Device> device_opt,
+ c10::optional<bool> pin_memory_opt,
+ c10::optional<c10::MemoryFormat> memory_format_opt);
+
+ TORCH_CUDA_CPP_API TensorBase empty_cuda(
+ IntArrayRef size,
+ const TensorOptions &options);
+
+ TORCH_CUDA_CPP_API TensorBase empty_strided_cuda(
+ IntArrayRef size,
+ IntArrayRef stride,
+ ScalarType dtype,
+ c10::optional<Device> device_opt);
+
+ TORCH_CUDA_CPP_API TensorBase empty_strided_cuda(
+ IntArrayRef size,
+ IntArrayRef stride,
+ c10::optional<ScalarType> dtype_opt,
+ c10::optional<Layout> layout_opt,
+ c10::optional<Device> device_opt,
+ c10::optional<bool> pin_memory_opt);
+
+ TORCH_CUDA_CPP_API TensorBase empty_strided_cuda(
+ IntArrayRef size,
+ IntArrayRef stride,
+ const TensorOptions &options);
+
+
+ } // namespace at::detail
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/PhiloxCudaState.h ADDED
@@ -0,0 +1,5 @@
+ #pragma once
+
+ #include <cstdint>
+
+ #include <ATen/cuda/detail/PhiloxCudaStateRaw.cuh>
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/PhiloxUtils.cuh ADDED
@@ -0,0 +1,4 @@
+ #pragma once
+
+ #include <ATen/cuda/PhiloxCudaState.h>
+ #include <ATen/cuda/detail/UnpackRaw.cuh>
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/ScanUtils.cuh ADDED
@@ -0,0 +1,78 @@
+ #pragma once
+
+ #include <ATen/ceil_div.h>
+ #include <ATen/cuda/DeviceUtils.cuh>
+ #include <ATen/cuda/AsmUtils.cuh>
+ #include <c10/macros/Macros.h>
+
+ // Collection of in-kernel scan / prefix sum utilities
+
+ namespace at::cuda {
+
+ // Inclusive prefix sum for binary vars using intra-warp voting +
+ // shared memory
+ template <typename T, bool KillWARDependency, class BinaryFunction>
+ __device__ void inclusiveBinaryPrefixScan(T* smem, bool in, T* out, BinaryFunction binop) {
+ // Within-warp, we use warp voting.
+ #if defined (USE_ROCM)
+ unsigned long long int vote = WARP_BALLOT(in);
+ T index = __popcll(getLaneMaskLe() & vote);
+ T carry = __popcll(vote);
+ #else
+ T vote = WARP_BALLOT(in);
+ T index = __popc(getLaneMaskLe() & vote);
+ T carry = __popc(vote);
+ #endif
+
+ int warp = threadIdx.x / C10_WARP_SIZE;
+
+ // Per each warp, write out a value
+ if (getLaneId() == 0) {
+ smem[warp] = carry;
+ }
+
+ __syncthreads();
+
+ // Sum across warps in one thread. This appears to be faster than a
+ // warp shuffle scan for CC 3.0+
+ if (threadIdx.x == 0) {
+ int current = 0;
+ for (int i = 0; i < blockDim.x / C10_WARP_SIZE; ++i) {
+ T v = smem[i];
+ smem[i] = binop(smem[i], current);
+ current = binop(current, v);
+ }
+ }
+
+ __syncthreads();
+
+ // load the carry from the preceding warp
+ if (warp >= 1) {
+ index = binop(index, smem[warp - 1]);
+ }
+
+ *out = index;
+
+ if (KillWARDependency) {
+ __syncthreads();
+ }
+ }
+
+ // Exclusive prefix sum for binary vars using intra-warp voting +
+ // shared memory
+ template <typename T, bool KillWARDependency, class BinaryFunction>
+ __device__ void exclusiveBinaryPrefixScan(T* smem, bool in, T* out, T* carry, BinaryFunction binop) {
+ inclusiveBinaryPrefixScan<T, false, BinaryFunction>(smem, in, out, binop);
+
+ // Inclusive to exclusive
+ *out -= (T) in;
+
+ // The outgoing carry for all threads is the last warp's sum
+ *carry = smem[at::ceil_div<int>(blockDim.x, C10_WARP_SIZE) - 1];
+
+ if (KillWARDependency) {
+ __syncthreads();
+ }
+ }
+
+ } // namespace at::cuda
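A typical use of exclusiveBinaryPrefixScan is block-level stream compaction: each thread's boolean becomes its output slot, and the returned carry is the count of kept elements. A hedged single-block sketch (the kernel, functor, and buffer names are illustrative, and the block size is assumed to be a multiple of the warp size):

// compact_sketch.cu -- single-block stream compaction built on
// exclusiveBinaryPrefixScan above (illustrative, not part of the commit).
#include <ATen/cuda/ScanUtils.cuh>
#include <c10/macros/Macros.h>

struct AddOp {
  __device__ int operator()(int a, int b) const { return a + b; }
};

// Assumes gridDim.x == 1 and blockDim.x is a multiple of the warp size;
// out must have room for n floats.
__global__ void compact_positive(const float* in, float* out, int* out_count, int n) {
  __shared__ int smem[C10_WARP_SIZE];   // one scan slot per warp of the block

  int i = threadIdx.x;
  bool keep = (i < n) && (in[i] > 0.0f);

  int pos = 0, total = 0;
  at::cuda::exclusiveBinaryPrefixScan<int, true>(smem, keep, &pos, &total, AddOp{});

  if (keep) out[pos] = in[i];           // pos = number of kept elements before thread i
  if (threadIdx.x == 0) *out_count = total;
}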
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.h ADDED
@@ -0,0 +1,87 @@
+ #pragma once
+ #include <cstdint>
+ #include <c10/core/ScalarType.h>
+ #include <ATen/cuda/CUDAConfig.h>
+
+ // NOTE: These templates are intentionally not defined in this header,
+ // which avoids re-compiling them for each translation unit. If you get
+ // a link error, you need to add an explicit instantiation for your
+ // types in cub.cu
+
+ namespace at::cuda::cub {
+
+ inline int get_num_bits(uint64_t max_key) {
+ int num_bits = 1;
+ while (max_key > 1) {
+ max_key >>= 1;
+ num_bits++;
+ }
+ return num_bits;
+ }
+
+ namespace detail {
+
+ // radix_sort_pairs doesn't interact with value_t other than to copy
+ // the data, so we can save template instantiations by reinterpreting
+ // it as an opaque type.
+ template <int N> struct alignas(N) OpaqueType { char data[N]; };
+
+ template<typename key_t, int value_size>
+ void radix_sort_pairs_impl(
+ const key_t *keys_in, key_t *keys_out,
+ const OpaqueType<value_size> *values_in, OpaqueType<value_size> *values_out,
+ int64_t n, bool descending, int64_t begin_bit, int64_t end_bit);
+
+ } // namespace detail
+
+ template<typename key_t, typename value_t>
+ void radix_sort_pairs(
+ const key_t *keys_in, key_t *keys_out,
+ const value_t *values_in, value_t *values_out,
+ int64_t n, bool descending=false, int64_t begin_bit=0, int64_t end_bit=sizeof(key_t)*8) {
+ static_assert(std::is_trivially_copyable<value_t>::value ||
+ AT_ROCM_ENABLED(), // ROCm incorrectly fails this check for vector types
+ "radix_sort_pairs value type must be trivially copyable");
+ // Make value type opaque, so all inputs of a certain size use the same template instantiation
+ using opaque_t = detail::OpaqueType<sizeof(value_t)>;
+ static_assert(sizeof(value_t) <= 8 && (sizeof(value_t) & (sizeof(value_t) - 1)) == 0,
+ "This size of value_t is not instantiated. Please instantiate it in cub.cu"
+ " and modify this check.");
+ static_assert(sizeof(value_t) == alignof(value_t), "Expected value_t to be size-aligned");
+ detail::radix_sort_pairs_impl(
+ keys_in, keys_out,
+ reinterpret_cast<const opaque_t*>(values_in),
+ reinterpret_cast<opaque_t*>(values_out),
+ n, descending, begin_bit, end_bit);
+ }
+
+ template<typename key_t>
+ void radix_sort_keys(
+ const key_t *keys_in, key_t *keys_out,
+ int64_t n, bool descending=false, int64_t begin_bit=0, int64_t end_bit=sizeof(key_t)*8);
+
+ // NOTE: Intermediate sums will be truncated to input_t precision
+ template <typename input_t, typename output_t>
+ void inclusive_sum_truncating(const input_t *input, output_t *output, int64_t n);
+
+ template <typename scalar_t>
+ void inclusive_sum(const scalar_t *input, scalar_t *output, int64_t n) {
+ return inclusive_sum_truncating(input, output, n);
+ }
+
+ // NOTE: Sums are done in common_type<input_t, output_t>
+ template <typename input_t, typename output_t>
+ void exclusive_sum_in_common_type(const input_t *input, output_t *output, int64_t n);
+
+ template <typename scalar_t>
+ void exclusive_sum(const scalar_t *input, scalar_t *output, int64_t n) {
+ return exclusive_sum_in_common_type(input, output, n);
+ }
+
+ void mask_exclusive_sum(const uint8_t *mask, int64_t *output_idx, int64_t n);
+ inline void mask_exclusive_sum(const bool *mask, int64_t *output_idx, int64_t n) {
+ return mask_exclusive_sum(
+ reinterpret_cast<const uint8_t*>(mask), output_idx, n);
+ }
+
+ } // namespace at::cuda::cub
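get_num_bits exists so callers can pass a tight end_bit to radix_sort_pairs/radix_sort_keys and skip radix passes over bits that are always zero. A host-only check of the bit-count logic and of how it would be used (the comments about a hypothetical caller are assumptions, not PyTorch call sites):

// radix_bits_sketch.cpp -- how get_num_bits narrows a radix sort
// (standalone host-side check; device buffers are hypothetical).
#include <cassert>
#include <cstdint>

// Same logic as at::cuda::cub::get_num_bits above.
int get_num_bits(uint64_t max_key) {
  int num_bits = 1;
  while (max_key > 1) { max_key >>= 1; num_bits++; }
  return num_bits;
}

int main() {
  // Keys known to be < 1000 only need 10 significant bits, so a caller could
  // pass end_bit = get_num_bits(999) to radix_sort_pairs and skip the
  // remaining passes over always-zero high bits.
  assert(get_num_bits(999) == 10);
  assert(get_num_bits(1) == 1);
  assert(get_num_bits(1ull << 40) == 41);
}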
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/CUDAHooks.h ADDED
@@ -0,0 +1,54 @@
+ #pragma once
+
+ #include <ATen/detail/CUDAHooksInterface.h>
+
+ #include <ATen/Generator.h>
+ #include <c10/util/Optional.h>
+
+ // TODO: No need to have this whole header, we can just put it all in
+ // the cpp file
+
+ namespace at::cuda::detail {
+
+ // Set the callback to initialize Magma, which is set by
+ // torch_cuda_cu. This indirection is required so magma_init is called
+ // in the same library where Magma will be used.
+ TORCH_CUDA_CPP_API void set_magma_init_fn(void (*magma_init_fn)());
+
+
+ // The real implementation of CUDAHooksInterface
+ struct CUDAHooks : public at::CUDAHooksInterface {
+ CUDAHooks(at::CUDAHooksArgs) {}
+ void initCUDA() const override;
+ Device getDeviceFromPtr(void* data) const override;
+ bool isPinnedPtr(const void* data) const override;
+ const Generator& getDefaultCUDAGenerator(DeviceIndex device_index = -1) const override;
+ bool hasCUDA() const override;
+ bool hasMAGMA() const override;
+ bool hasCuDNN() const override;
+ bool hasCuSOLVER() const override;
+ bool hasROCM() const override;
+ const at::cuda::NVRTC& nvrtc() const override;
+ DeviceIndex current_device() const override;
+ bool hasPrimaryContext(DeviceIndex device_index) const override;
+ Allocator* getCUDADeviceAllocator() const override;
+ Allocator* getPinnedMemoryAllocator() const override;
+ bool compiledWithCuDNN() const override;
+ bool compiledWithMIOpen() const override;
+ bool supportsDilatedConvolutionWithCuDNN() const override;
+ bool supportsDepthwiseConvolutionWithCuDNN() const override;
+ bool supportsBFloat16ConvolutionWithCuDNNv8() const override;
+ bool hasCUDART() const override;
+ long versionCUDART() const override;
+ long versionCuDNN() const override;
+ std::string showConfig() const override;
+ double batchnormMinEpsilonCuDNN() const override;
+ int64_t cuFFTGetPlanCacheMaxSize(DeviceIndex device_index) const override;
+ void cuFFTSetPlanCacheMaxSize(DeviceIndex device_index, int64_t max_size) const override;
+ int64_t cuFFTGetPlanCacheSize(DeviceIndex device_index) const override;
+ void cuFFTClearPlanCache(DeviceIndex device_index) const override;
+ int getNumGPUs() const override;
+ void deviceSynchronize(DeviceIndex device_index) const override;
+ };
+
+ } // at::cuda::detail
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/DeviceThreadHandles.h ADDED
@@ -0,0 +1,151 @@
+ // Some stateful GPU libraries, such as cuDNN, cuBLAS, use handles to store states.
+ // These handles are tied to device, and these libraries require/recommend not to
+ // share handles across host threads.
+ //
+ // These libraries recommend using one handle per host thread. We may not want to do
+ // this because threads are relatively light-weight, but creating and destroying
+ // handles is expensive (destroying the handle causes synchronizations). DataParallel,
+ // for example, creates new threads for each forward pass.
+ //
+ // This file implements a handle pool mechanism. The handle pool returns handles on
+ // demand as threads request them. If all existing handles in the pool are in use,
+ // it creates a new one. As threads terminate, they release handles back into the pool.
+ // In this way, the handle pool never creates more handles than the high-water mark of
+ // active threads, so it's efficient with DataParallel.
+
+ #pragma once
+
+ #include <unordered_map>
+ #include <vector>
+ #include <utility>
+ #include <mutex>
+ #include <memory>
+
+ #include <c10/util/Exception.h>
+
+ namespace at::cuda { namespace {
+
+ template <typename Handle_t, void Create(Handle_t *), void Destroy(Handle_t)>
+ struct DeviceThreadHandlePool : public std::enable_shared_from_this<DeviceThreadHandlePool<Handle_t, Create, Destroy>> {
+
+ struct Handle {
+ Handle_t handle;
+ Handle(bool create = false) : handle(nullptr)
+ {
+ if(create) Create(&handle);
+ }
+ // std::vector.emplace() and push_back() may route through temporaries and call
+ // copy/move constructors along the way. If this is the case, we don't want
+ // the destructors of temporaries to call cudnnDestroy on the handle.
+ // We can achieve safety (for the narrow case of stashing within std::vectors)
+ // by making Handle moveable but not copyable, and transferring handle ownership
+ // to the latest constructed object. This is not a substitute for full-blown
+ // reference counting, but reference counting may be overkill here.
+ // Another alternative is to wrap the saved Handles in unique_ptrs, i.e.,
+ // unordered_map<int, vector<unique_ptr<Handle>>> created_handles;
+ Handle(const Handle& rhs) = delete;
+ // Following https://stackoverflow.com/questions/3279543/what-is-the-copy-and-swap-idiom
+ Handle(Handle&& rhs) : Handle() { std::swap(handle, rhs.handle); }
+ // operator= takes argument by value
+ Handle& operator=(Handle rhs) { std::swap(handle, rhs.handle); return *this; }
+ ~Handle() {
+ if(handle) Destroy(handle);
+ }
+ };
+
+ std::mutex mutex;
+
+ // Handles are lazily created as different threads request them,
+ // but are never destroyed until the end of the process.
+ // The maximum number of handles this process will create for each device is equal
+ // to the high-water mark of the number of concurrently active threads that request
+ // handles for that device.
+ // When threads terminate, they release their handles back into the pool for reuse.
+ // Otherwise, new handles would be created every time new threads were spawned,
+ // resulting in poor performance for Python modules that repeatedly or frequently
+ // spawned new sets of threads (like DataParallel, which creates a new set of threads
+ // for each forward pass).
+ //
+ // To prevent potential deadlocks, we explicitly choose not to cap the number
+ // of handles that are created per device.
+ // Example of danger: If we cap the max handles at 4, and 5 threads are sharing a device,
+ // only 4 can make forward progress at any time. The other 4 will not release their
+ // handles until they exit, so the fifth cannot make progress until then. This is
+ // not a problem...UNLESS all 5 threads attempt some sort of synchronization at an
+ // intermediate point (ie, before any of them have exited). We have no way to anticipate
+ // or enforce that user threads will not attempt such intermediate synchronization.
+ // The only way to ensure safety is to avoid imposing a cap on the number of handles.
+ std::unordered_map<int, std::vector<Handle>> created_handles;
+ std::unordered_map<int, std::vector<Handle_t>> available_handles;
+
+ // PoolWindow lazily creates and caches the handles that a particular thread is using,
+ // so in the common case handle access doesn't incur either handle creation or a mutex lock.
+ class PoolWindow
+ {
+ public:
+ PoolWindow(std::shared_ptr<DeviceThreadHandlePool> parent): weak_parent(std::move(parent)) {}
+ ~PoolWindow(){ release(); }
+
+ Handle_t reserve(int device)
+ {
+ // If this thread already has a handle for this device, return it
+ if(my_handles.find(device) != my_handles.end())
+ return my_handles[device];
+
+ // otherwise, either grab a handle from the pool if one is available,
+ // or if not, create a new one.
+ auto parent = weak_parent.lock();
+ TORCH_CHECK(parent, "Cannot create handle during program termination");
+ std::lock_guard<std::mutex> guard(parent->mutex);
+
+ if(parent->available_handles[device].size() > 0)
+ {
+ my_handles[device] = parent->available_handles[device].back();
+ parent->available_handles[device].pop_back();
+ }
+ else
+ {
+ // In local testing, I do observe that emplace_back sometimes routes through temporaries
+ // that incur move-constructor and destructor calls. See comments in Handle above.
+ parent->created_handles[device].emplace_back(true /*create*/);
+ my_handles[device] = parent->created_handles[device].back().handle;
+ }
+
+ return my_handles[device];
+ }
+
+ private:
+ // Stores the per-device handles currently owned by this thread
+ std::unordered_map<int, Handle_t> my_handles;
+
+ std::weak_ptr<DeviceThreadHandlePool> weak_parent;
+
+ // Called by the destructor. Releases this thread's handles back into the pool.
+ void release() {
+ if(my_handles.size() > 0) {
+ auto parent = weak_parent.lock();
+ if (!parent) {
+ // If this thread exits after atexit handlers have completed, the
+ // cuda context itself may be invalid, so we must leak the handles.
+ return;
+ }
+
+ std::lock_guard<std::mutex> guard(parent->mutex);
+ for(auto d_h : my_handles)
+ parent->available_handles[d_h.first].push_back(d_h.second);
+ }
+ }
+ };
+
+ // Warning:
+ // If you want to change this function, be aware that this function will be called
+ // by multiple threads and there is no mutex guarding the call of this function, so
+ // make sure your implementation is thread-safe.
+ PoolWindow *newPoolWindow() {
+ // The returned pointer will be owned by a thread local variable
+ // so that different threads do not share the same PoolWindow.
+ return new PoolWindow(this->shared_from_this());
+ }
+ };
+
+ }} // namespace at::cuda::detail::<anonymous>
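A sketch of how a backend would instantiate this pool, following the newPoolWindow() comment: one shared pool, one thread_local PoolWindow per thread. The fooHandle_t type and fooCreate/fooDestroy functions are placeholders, not a real library API:

// handle_pool_sketch.cpp -- instantiating DeviceThreadHandlePool for a
// hypothetical "fooHandle_t" (placeholder API, not cuBLAS/cuDNN).
#include <ATen/cuda/detail/DeviceThreadHandles.h>
#include <memory>

using fooHandle_t = void*;
void fooCreate(fooHandle_t* h) { *h = new int(0); }        // stand-in for a real create call
void fooDestroy(fooHandle_t h) { delete static_cast<int*>(h); }

using FooPool = at::cuda::DeviceThreadHandlePool<fooHandle_t, fooCreate, fooDestroy>;

fooHandle_t getCurrentFooHandle(int device) {
  // One pool shared by all threads; one window owned per thread via thread_local,
  // exactly as the header's newPoolWindow() comment prescribes.
  static auto pool = std::make_shared<FooPool>();
  thread_local std::unique_ptr<FooPool::PoolWindow> window(pool->newPoolWindow());
  return window->reserve(device);
}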
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/IndexUtils.cuh ADDED
@@ -0,0 +1,36 @@
+ #pragma once
+
+ #include <ATen/core/TensorBase.h>
+ #include <ATen/cuda/detail/TensorInfo.cuh>
+ #include <ATen/native/CanUse32BitIndexMath.h>
+
+ namespace at::cuda::detail {
+
+ TORCH_CUDA_CU_API bool maybeOverlappingIndices(const at::TensorBase &t);
+ using at::native::canUse32BitIndexMath;
+
+ template <typename scalar, typename IndexType>
+ TensorInfo<scalar, IndexType>
+ getTensorInfo(const at::TensorBase &t) {
+ IndexType sz[MAX_TENSORINFO_DIMS];
+ IndexType st[MAX_TENSORINFO_DIMS];
+
+ int dims = t.dim();
+ for (int i = 0; i < dims; ++i) {
+ sz[i] = t.size(i);
+ st[i] = t.stride(i);
+ }
+
+ scalar* data_ptr = nullptr;
+
+ if constexpr (std::is_const<scalar>::value) {
+ data_ptr = t.const_data_ptr<scalar>();
+ } else {
+ data_ptr = t.mutable_data_ptr<scalar>();
+ }
+
+ return TensorInfo<scalar, IndexType>(
+ data_ptr, dims, sz, st);
+ }
+
+ } // namespace at::cuda::detail
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/IntegerDivider.cuh ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <assert.h>
4
+ #if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
5
+ #include <cuda_runtime.h>
6
+ #endif
7
+
8
+ namespace at::cuda::detail {
9
+
10
+ // A utility class to implement integer division by multiplication, given a fixed
11
+ // divisor.
12
+ //
13
+ // WARNING: The fast divider algorithm is only implemented for unsigned int;
14
+ // otherwise we default to plain integer division. For unsigned int,
15
+ // we further assume that the dividend is at most INT32_MAX. Thus,
16
+ // IntDivider must NOT be used for general integer division.
17
+ //
18
+ // This reduced range is enough for our purpose, and it allows us to
19
+ // slightly simplify the computation.
20
+ //
21
+ // (NOTE: Below, "2^k" denotes exponentiation, i.e., 1<<k.)
22
+ //
23
+ // For any N-bit unsigned integer d (> 0), we can find a "magic number" m (2^N
24
+ // <= m < 2^(N+1)) and shift s such that:
25
+ //
26
+ // \floor(n / d) = \floor((m * n) / 2^(N+s)).
27
+ //
28
+ // Given such m and s, the integer division can be then implemented as:
29
+ //
30
+ // let m' = m - 2^N // 0 <= m' < 2^N
31
+ //
32
+ // fast_integer_division(n):
33
+ // // Multiply two N-bit unsigned integers: the result is a 2N-bit unsigned
34
+ // // integer. Then take the higher N bits.
35
+ // t = (m' * n) >> N
36
+ //
37
+ // // Here we use the fact that n is less than 2^(N-1): otherwise the value
38
+ // // of (t + n) may not fit in an N-bit integer.
39
+ // return (t + n) >> s
40
+ //
41
+ // Finding such a magic number is surprisingly easy:
42
+ //
43
+ // s = \ceil(\log_2 d)
44
+ // m' = \floor(2^N * (2^s - d) / d) + 1 // Need 2N-bit integer arithmetic.
45
+ //
46
+ // See also:
47
+ // - Division by Invariant Integers Using Multiplication,
48
+ // Torbjörn Granlund and Peter L. Montgomery, 1994.
49
+ //
50
+ // - http://www.hackersdelight.org/magic.htm
51
+ //
52
+ // - http://ridiculousfish.com/blog/posts/labor-of-division-episode-i.html
53
+
54
+ // Result of div/mod operation stored together.
55
+ template <typename Value>
56
+ struct DivMod {
57
+ Value div, mod;
58
+
59
+ C10_HOST_DEVICE DivMod(Value div, Value mod) : div(div), mod(mod) { }
60
+ };
61
+
62
+ // Base case: we only have an implementation for uint32_t for now. For
63
+ // everything else, we use plain division.
64
+ template <typename Value>
65
+ struct IntDivider {
66
+ IntDivider() = default;
67
+ IntDivider(Value d) : divisor(d) { }
68
+
69
+ C10_HOST_DEVICE inline Value div(Value n) const { return n / divisor; }
70
+ C10_HOST_DEVICE inline Value mod(Value n) const { return n % divisor; }
71
+ C10_HOST_DEVICE inline DivMod<Value> divmod(Value n) const {
72
+ return DivMod<Value>(n / divisor, n % divisor);
73
+ }
74
+
75
+ Value divisor;
76
+ };
77
+
78
+ // Implement fast integer division.
79
+ template <>
80
+ struct IntDivider<unsigned int> {
81
+ static_assert(sizeof(unsigned int) == 4, "Assumes 32-bit unsigned int.");
82
+
83
+ IntDivider() = default;
84
+
85
+ IntDivider(unsigned int d) : divisor(d) {
86
+ assert(divisor >= 1 && divisor <= INT32_MAX);
87
+
88
+ // TODO: gcc/clang has __builtin_clz() but it's not portable.
89
+ for (shift = 0; shift < 32; shift++) if ((1U << shift) >= divisor) break;
90
+
91
+ uint64_t one = 1;
92
+ uint64_t magic = ((one << 32) * ((one << shift) - divisor)) / divisor + 1;
93
+ m1 = magic;
94
+ assert(m1 > 0 && m1 == magic); // m1 must fit in 32 bits.
95
+ }
96
+
97
+ C10_HOST_DEVICE inline unsigned int div(unsigned int n) const {
98
+ #if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
99
+ // 't' is the higher 32-bits of unsigned 32-bit multiplication of 'n' and
100
+ // 'm1'.
101
+ unsigned int t = __umulhi(n, m1);
102
+ return (t + n) >> shift;
103
+ #else
104
+ // Using uint64_t so that the addition does not overflow.
105
+ uint64_t t = ((uint64_t) n * m1) >> 32;
106
+ return (t + n) >> shift;
107
+ #endif
108
+ }
109
+
110
+ C10_HOST_DEVICE inline unsigned int mod(unsigned int n) const {
111
+ return n - div(n) * divisor;
112
+ }
113
+
114
+ C10_HOST_DEVICE inline DivMod<unsigned int> divmod(unsigned int n) const {
115
+ unsigned int q = div(n);
116
+ return DivMod<unsigned int>(q, n - q * divisor);
117
+ }
118
+
119
+ unsigned int divisor; // d above.
120
+ unsigned int m1; // Magic number: m' above.
121
+ unsigned int shift; // Shift amount.
122
+ };
123
+
124
+ } // namespace at::cuda::detail
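For reference, the recipe above can be checked with a short host-only program. The sketch below recomputes m' and s exactly as the IntDivider<unsigned int> constructor does, for an arbitrarily chosen divisor (d = 7), and compares the fast path against plain division; it is a standalone illustration and does not depend on this header.

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <initializer_list>

int main() {
  const uint32_t d = 7;                          // divisor, 1 <= d <= INT32_MAX
  uint32_t s = 0;                                // s = ceil(log2(d))
  while ((1ULL << s) < d) ++s;
  const uint64_t one = 1;                        // m' = floor(2^32 * (2^s - d) / d) + 1
  const uint64_t m1 = ((one << 32) * ((one << s) - d)) / d + 1;
  assert(m1 <= UINT32_MAX);                      // m' must fit in 32 bits

  for (uint32_t n : {0u, 1u, 6u, 7u, 8u, 12345u, 2147483647u}) {   // n <= INT32_MAX
    const uint64_t t = (n * m1) >> 32;           // high half of the 32x32 product
    const uint32_t q = static_cast<uint32_t>((t + n) >> s);
    assert(q == n / d);
    std::printf("%u / %u = %u\n", n, d, q);
  }
  return 0;
}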
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/KernelUtils.h ADDED
@@ -0,0 +1,37 @@
1
+ #pragma once
2
+
3
+ #include <limits>
4
+ #include <c10/util/Exception.h>
5
+
6
+ namespace at::cuda::detail {
7
+
8
+ // CUDA: grid stride looping
9
+ //
10
+ // int64_t _i_n_d_e_x specifically prevents overflow in the loop increment.
11
+ // If input.numel() < INT_MAX, _i_n_d_e_x < INT_MAX, except after the final
12
+ // iteration of the loop where _i_n_d_e_x += blockDim.x * gridDim.x can be
13
+ // greater than INT_MAX. But in that case _i_n_d_e_x >= n, so there are no
14
+ // further iterations and the overflowed value in i=_i_n_d_e_x is not used.
15
+ #define CUDA_KERNEL_LOOP_TYPE(i, n, index_type) \
16
+ int64_t _i_n_d_e_x = blockIdx.x * blockDim.x + threadIdx.x; \
17
+ for (index_type i=_i_n_d_e_x; _i_n_d_e_x < (n); _i_n_d_e_x+=blockDim.x * gridDim.x, i=_i_n_d_e_x)
18
+
19
+ #define CUDA_KERNEL_LOOP(i, n) CUDA_KERNEL_LOOP_TYPE(i, n, int)
20
+
21
+
22
+ // Use 1024 threads per block, which requires cuda sm_2x or above
23
+ constexpr int CUDA_NUM_THREADS = 1024;
24
+
25
+ // CUDA: number of blocks for threads.
26
+ inline int GET_BLOCKS(const int64_t N, const int64_t max_threads_per_block=CUDA_NUM_THREADS) {
27
+ TORCH_INTERNAL_ASSERT(N > 0, "CUDA kernel launch blocks must be positive, but got N=", N);
28
+ constexpr int64_t max_int = std::numeric_limits<int>::max();
29
+
30
+ // Round up division for positive number that cannot cause integer overflow
31
+ auto block_num = (N - 1) / max_threads_per_block + 1;
32
+ TORCH_INTERNAL_ASSERT(block_num <= max_int, "Can't schedule too many blocks on CUDA device");
33
+
34
+ return static_cast<int>(block_num);
35
+ }
36
+
37
+ } // namespace at::cuda::detail
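A minimal usage sketch of these helpers follows; the kernel and its scaling operation are made up for illustration, but the launch pattern (GET_BLOCKS with CUDA_NUM_THREADS on the host, CUDA_KERNEL_LOOP inside the kernel) is the one these macros are designed for.

#include <ATen/cuda/detail/KernelUtils.h>
#include <c10/cuda/CUDAException.h>

__global__ void scale_kernel(float* out, const float* in, float alpha, int64_t n) {
  // i stays an int for cheap indexing; the hidden int64_t counter in the macro
  // prevents the overflow described in the comment above.
  CUDA_KERNEL_LOOP(i, n) {
    out[i] = alpha * in[i];
  }
}

void launch_scale(float* out, const float* in, float alpha, int64_t n, cudaStream_t stream) {
  const int blocks = at::cuda::detail::GET_BLOCKS(n);
  scale_kernel<<<blocks, at::cuda::detail::CUDA_NUM_THREADS, 0, stream>>>(out, in, alpha, n);
  C10_CUDA_KERNEL_LAUNCH_CHECK();   // surfaces launch-time errors
}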
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/LazyNVRTC.h ADDED
@@ -0,0 +1,11 @@
1
+ #pragma once
2
+ #include <ATen/detail/CUDAHooksInterface.h>
3
+ namespace at::cuda {
4
+ // Forward-declares at::cuda::NVRTC
5
+ struct NVRTC;
6
+
7
+ namespace detail {
8
+ extern NVRTC lazyNVRTC;
9
+ } // namespace detail
10
+
11
+ } // namespace at::cuda
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/OffsetCalculator.cuh ADDED
@@ -0,0 +1,119 @@
1
+ #pragma once
2
+
3
+ #include <array>
4
+ #include <cstdint>
5
+ #include <type_traits>
6
+ #include <c10/macros/Macros.h>
7
+ #include <ATen/core/Array.h>
8
+ #include <ATen/native/TensorIterator.h>
9
+ #include <ATen/cuda/detail/IntegerDivider.cuh>
10
+
11
+ // If element_sizes is nullptr, then the strides will be in bytes, otherwise
12
+ // the strides will be in # of elements.
13
+ // Operands that share the same shape, but may have different strides.
14
+ // OffsetCalculator iterates the tensor in a column-major order
15
+
16
+ #if defined(USE_ROCM)
17
+ constexpr int MAX_DIMS = 16;
18
+ #else
19
+ constexpr int MAX_DIMS = 25;
20
+ #endif
21
+
22
+ template <int NARGS, typename index_t = uint32_t, bool signed_strides = false>
23
+ struct OffsetCalculator {
24
+ // We allow having negative strides to implement some operations like torch.flip
25
+ using stride_t = std::conditional_t<signed_strides,
26
+ std::make_signed_t<index_t>,
27
+ index_t>;
28
+ // The offset for each argument. Wrapper around fixed-size array.
29
+ // On CUDA, zero sized array is not allowed, so when we are handling nullary
30
+ // operators, we need to create a size 1 offset to avoid compiler failure.
31
+ // This size 1 offset is just a placeholder, and we will not use it.
32
+ using offset_type = at::detail::Array<stride_t, std::max<int>(NARGS, 1)>;
33
+
34
+ // if element_sizes is nullptr, then the strides will be in bytes, otherwise
35
+ // the strides will be in # of elements.
36
+ OffsetCalculator(int dims, const int64_t* sizes, const int64_t* const* strides, const int64_t* element_sizes=nullptr) : dims(dims) {
37
+ TORCH_CHECK(dims <= MAX_DIMS, "tensor has too many (>", MAX_DIMS, ") dims");
38
+ for (int i=0; i < dims; i++){
39
+ sizes_[i] = at::cuda::detail::IntDivider<index_t>(sizes[i]);
40
+ for (int arg = 0; arg < NARGS; arg++) {
41
+ int64_t element_size = (element_sizes == nullptr ? 1LL : element_sizes[arg]);
42
+ strides_[i][arg] = strides[arg][i] / element_size;
43
+ }
44
+ }
45
+ }
46
+
47
+ C10_HOST_DEVICE offset_type get(index_t linear_idx) const {
48
+ offset_type offsets;
49
+ #pragma unroll
50
+ for (int arg = 0; arg < NARGS; arg++) {
51
+ offsets[arg] = 0;
52
+ }
53
+
54
+ #pragma unroll
55
+ for (int dim = 0; dim < MAX_DIMS; ++dim) {
56
+ if (dim == dims) {
57
+ break;
58
+ }
59
+ auto divmod = sizes_[dim].divmod(linear_idx);
60
+ linear_idx = divmod.div;
61
+
62
+ #pragma unroll
63
+ for (int arg = 0; arg < NARGS; arg++) {
64
+ offsets[arg] += divmod.mod * strides_[dim][arg];
65
+ }
66
+
67
+ }
68
+ return offsets;
69
+ }
70
+
71
+ int dims;
72
+ at::cuda::detail::IntDivider<index_t> sizes_[MAX_DIMS];
73
+ stride_t strides_[MAX_DIMS][std::max<int>(NARGS, 1)];
74
+ };
75
+
76
+ template <int NARGS, typename index_t = uint32_t>
77
+ struct TrivialOffsetCalculator {
78
+ // The offset for each argument. Wrapper around fixed-size array.
79
+ // The offsets are in # of elements, not in bytes.
80
+ // On CUDA, zero sized array is not allowed, so when we are handling nullary
81
+ // operators, we need to create a size 1 offset to avoid compiler failure.
82
+ // This size 1 offset is just a placeholder, and we will not use it.
83
+ using offset_type = at::detail::Array<index_t, std::max<int>(NARGS, 1)>;
84
+
85
+ C10_HOST_DEVICE offset_type get(index_t linear_idx) const {
86
+ offset_type offsets;
87
+ #pragma unroll
88
+ for (int arg = 0; arg < NARGS; arg++) {
89
+ offsets[arg] = linear_idx;
90
+ }
91
+ return offsets;
92
+ }
93
+ };
94
+
95
+ // Make an OffsetCalculator with byte offsets
96
+ template<int N, bool signed_strides = false>
97
+ static OffsetCalculator<N, uint32_t, signed_strides> make_offset_calculator(const at::TensorIteratorBase& iter) {
98
+ TORCH_INTERNAL_ASSERT(N <= iter.ntensors());
99
+ std::array<const int64_t*, N> strides;
100
+ for (int i = 0; i < N; i++) {
101
+ strides[i] = iter.strides(i).data();
102
+ }
103
+ return OffsetCalculator<N, uint32_t, signed_strides>(iter.ndim(), iter.shape().data(), strides.data());
104
+ }
105
+
106
+ // Make an OffsetCalculator with element offsets
107
+ template<int N, bool signed_strides = false>
108
+ static OffsetCalculator<N, uint32_t, signed_strides> make_element_offset_calculator(
109
+ const at::TensorIteratorBase& iter) {
110
+ TORCH_INTERNAL_ASSERT(N <= iter.ntensors());
111
+ std::array<const int64_t*, N> strides;
112
+ std::array<int64_t, N> element_sizes;
113
+ for (int i = 0; i < N; i++) {
114
+ strides[i] = iter.strides(i).data();
115
+ element_sizes[i] = iter.element_size(i);
116
+ }
117
+ return OffsetCalculator<N, uint32_t, signed_strides>(
118
+ iter.ndim(), iter.shape().data(), strides.data(), element_sizes.data());
119
+ }
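The divmod walk in get() can be reproduced on the host to see how one linear index fans out into per-operand byte offsets. The shape and strides below are made up (a contiguous 3x4 float operand alongside a transposed view); the real class does the same walk using IntDivider instead of plain % and /.

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t sizes[2]   = {3, 4};    // fastest-moving dimension first
  const uint32_t stride0[2] = {4, 12};   // contiguous floats: 4 bytes, then 3*4 bytes
  const uint32_t stride1[2] = {16, 4};   // transposed view of a 4x3 tensor

  for (uint32_t linear = 0; linear < 12; ++linear) {
    uint32_t idx = linear, off0 = 0, off1 = 0;
    for (int dim = 0; dim < 2; ++dim) {
      const uint32_t mod = idx % sizes[dim];   // sizes_[dim].divmod(idx) in the real code
      idx /= sizes[dim];
      off0 += mod * stride0[dim];
      off1 += mod * stride1[dim];
    }
    std::printf("linear=%2u -> arg0 offset %3u bytes, arg1 offset %3u bytes\n",
                linear, off0, off1);
  }
  return 0;
}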
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/PhiloxCudaStateRaw.cuh ADDED
@@ -0,0 +1,43 @@
1
+ // No "#pragma once" because this is a raw definition that can be copied by jit codegen.
2
+ // Eager mode clients should not include this file directly, instead,
3
+ // they should #include <ATen/cuda/PhiloxCudaState.h>, which has a #pragma once.
4
+
5
+ // Stores RNG state values. Passed as a kernel argument.
6
+ // See Note [CUDA Graph-safe RNG states].
7
+ //
8
+ // The raw definition lives in its own file so jit codegen can easily copy it.
9
+ namespace at {
10
+
11
+ struct PhiloxCudaState {
12
+ PhiloxCudaState() = default;
13
+ // Called if graph capture is not underway
14
+ PhiloxCudaState(uint64_t seed,
15
+ uint64_t offset) {
16
+ seed_.val = seed;
17
+ offset_.val = offset;
18
+ }
19
+ // Called if graph capture is underway
20
+ PhiloxCudaState(int64_t* seed,
21
+ int64_t* offset_extragraph,
22
+ uint32_t offset_intragraph) {
23
+ seed_.ptr = seed;
24
+ offset_.ptr = offset_extragraph;
25
+ offset_intragraph_ = offset_intragraph;
26
+ captured_ = true;
27
+ }
28
+
29
+ // Public members, directly accessible by at::cuda::philox::unpack.
30
+ // If we made them private with getters/setters, the getters/setters
31
+ // would have to be __device__, and we can't declare __device__ in ATen.
32
+ union Payload {
33
+ uint64_t val;
34
+ int64_t* ptr;
35
+ };
36
+
37
+ Payload seed_;
38
+ Payload offset_;
39
+ uint32_t offset_intragraph_ = 0;
40
+ bool captured_ = false;
41
+ };
42
+
43
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/TensorInfo.cuh ADDED
@@ -0,0 +1,116 @@
1
+ #pragma once
2
+
3
+ #include <ATen/CollapseDims.h>
4
+
5
+ namespace at::cuda::detail {
6
+
7
+ #define MAX_TENSORINFO_DIMS 25
8
+
9
+ // CUDA kernel argument that defines tensor layout
10
+ template <typename T, typename IndexType>
11
+ struct TensorInfo {
12
+ TensorInfo();
13
+ TensorInfo(T* p,
14
+ int dim,
15
+ IndexType sz[MAX_TENSORINFO_DIMS],
16
+ IndexType st[MAX_TENSORINFO_DIMS]);
17
+
18
+ // Set the size of the given dimension to 1, as if it were a
19
+ // reduction dim (allows you to calculate offsets of the reduction
20
+ // slice)
21
+ void reduceDim(int dim);
22
+
23
+ // See note on [collapse dims].
24
+ int collapseDims(const int excludeDim = -1);
25
+
26
+ // Contiguous tensors of more than one dimension are collapsed down
27
+ // to one tensor
28
+ __host__ __device__ inline bool isContiguous() const {
29
+ return (dims == 1 && strides[0] == 1);
30
+ }
31
+
32
+ T* data;
33
+ IndexType sizes[MAX_TENSORINFO_DIMS];
34
+ IndexType strides[MAX_TENSORINFO_DIMS];
35
+ int dims;
36
+ };
37
+
38
+ template <typename T, typename IndexType>
39
+ TensorInfo<T, IndexType>::TensorInfo() {
40
+ data = nullptr;
41
+ dims = 0;
42
+ }
43
+
44
+ template <typename T, typename IndexType>
45
+ TensorInfo<T, IndexType>::TensorInfo(T* p,
46
+ int dim,
47
+ IndexType sz[MAX_TENSORINFO_DIMS],
48
+ IndexType st[MAX_TENSORINFO_DIMS]) {
49
+ data = p;
50
+ dims = dim;
51
+ TORCH_CHECK(dims < MAX_TENSORINFO_DIMS, "CUDA Tensors cannot have more than 25 dimensions");
52
+
53
+ for (int i = 0; i < dim; ++i) {
54
+ sizes[i] = sz[i];
55
+ strides[i] = st[i];
56
+ }
57
+ }
58
+
59
+ template <typename T, typename IndexType>
60
+ void
61
+ TensorInfo<T, IndexType>::reduceDim(int dim) {
62
+ TORCH_CHECK(dim < dims && dim >= 0, "expected dim between 0 and dims - 1");
63
+ sizes[dim] = 1;
64
+ }
65
+
66
+ template <typename T, typename IndexType>
67
+ int
68
+ TensorInfo<T, IndexType>::collapseDims(const int excludeDim) {
69
+ auto result = at::collapse_dims(sizes, strides, dims, excludeDim);
70
+ dims = std::get<1>(result);
71
+ return std::get<0>(result);
72
+ }
73
+
74
+ // Translate a linear index for the apply to a T* offset;
75
+ // specialized on `Dims` to reduce nvcc compilation time
76
+ template <typename T, typename IndexType, int Dims>
77
+ struct IndexToOffset {
78
+ static __host__ __device__ IndexType get(
79
+ IndexType linearId,
80
+ const TensorInfo<T, IndexType>& info) {
81
+
82
+ IndexType offset = 0;
83
+
84
+ // Uses static dims
85
+ for (int i = Dims - 1; i > 0; --i) {
86
+ IndexType curDimIndex = linearId % info.sizes[i];
87
+ IndexType curDimOffset = curDimIndex * info.strides[i];
88
+ offset += curDimOffset;
89
+ linearId /= info.sizes[i];
90
+ }
91
+
92
+ return offset + linearId * info.strides[0];
93
+ }
94
+ };
95
+
96
+ // Uses dynamic (runtime) instead of static (compiletime) dims
97
+ template <typename T, typename IndexType>
98
+ struct IndexToOffset<T, IndexType, -1> {
99
+ static inline __host__ __device__ IndexType get(
100
+ IndexType linearId,
101
+ const TensorInfo<T, IndexType>& info) {
102
+
103
+ IndexType offset = 0;
104
+
105
+ for (int i = info.dims - 1; i > 0; --i) {
106
+ IndexType curDimIndex = linearId % info.sizes[i];
107
+ IndexType curDimOffset = curDimIndex * info.strides[i];
108
+ offset += curDimOffset;
109
+ linearId /= info.sizes[i];
110
+ }
111
+
112
+ return offset + linearId * info.strides[0];
113
+ }
114
+ };
115
+
116
+ } // namespace at::cuda::detail
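As a concrete illustration of collapseDims(), the sketch below builds a TensorInfo by hand for a contiguous 2x3x4 float tensor (strides in elements) and collapses it to a single dimension, after which isContiguous() holds and kernels can take the fast 1-D path. The data pointer is a placeholder; in practice TensorInfo instances come from getTensorInfo() in IndexUtils.cuh, and this must be compiled as CUDA/HIP code.

#include <ATen/cuda/detail/TensorInfo.cuh>

void collapse_example(float* data) {
  using at::cuda::detail::TensorInfo;
  unsigned int sizes[MAX_TENSORINFO_DIMS]   = {2, 3, 4};
  unsigned int strides[MAX_TENSORINFO_DIMS] = {12, 4, 1};   // contiguous, in elements
  TensorInfo<float, unsigned int> info(data, 3, sizes, strides);

  info.collapseDims();   // adjacent contiguous dims merge: sizes -> {24}, strides -> {1}
  // info.isContiguous() is now true.
}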
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/UnpackRaw.cuh ADDED
@@ -0,0 +1,28 @@
1
+ // No "#pragma once" because this is a raw definition that can be copied by jit codegen.
2
+ // Eager mode clients should not include this file directly, instead,
3
+ // they should #include <ATen/cuda/PhiloxUtils.cuh>, which has a #pragma once.
4
+
5
+ namespace at::cuda::philox {
6
+
7
+ // In-kernel call to retrieve philox seed and offset from a PhiloxCudaState instance whether
8
+ // that instance was created with graph capture underway or not.
9
+ // See Note [CUDA Graph-safe RNG states].
10
+ //
11
+ // We can't write a __device__ function in CUDAGeneratorImpl.h, because it's in ATen.
12
+ // Also, whatever call unpacks PhiloxCudaState in consumer kernels must be inlineable.
13
+ // Easiest thing that comes to mind is, define a __device__ unpack helper here, in ATen/cuda.
14
+ //
15
+ // The raw definition lives in its own file so jit codegen can easily copy it.
16
+ __host__ __device__ __forceinline__ std::tuple<uint64_t, uint64_t>
17
+ unpack(at::PhiloxCudaState arg) {
18
+ if (arg.captured_) {
19
+ // static_cast avoids "warning: invalid narrowing conversion from "long" to "unsigned long".
20
+ // *(arg.offset_.ptr) is a broadcast load of a single int64_t to the entire kernel.
21
+ // For most threads' reads it will hit in cache, so it shouldn't hurt performance.
22
+ return std::make_tuple(static_cast<uint64_t>(*arg.seed_.ptr), static_cast<uint64_t>(*(arg.offset_.ptr) + arg.offset_intragraph_));
23
+ } else {
24
+ return std::make_tuple(arg.seed_.val, arg.offset_.val);
25
+ }
26
+ }
27
+
28
+ } // namespace at::cuda::philox
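A sketch of the consuming side: a kernel receives PhiloxCudaState by value, unpacks it, and seeds a per-thread Philox curand state. This mirrors the pattern used by ATen's RNG kernels; the kernel itself and its launch details are illustrative.

#include <ATen/cuda/PhiloxCudaState.h>
#include <ATen/cuda/PhiloxUtils.cuh>
#include <curand_kernel.h>

__global__ void randn_fill_kernel(float* out, int64_t n, at::PhiloxCudaState philox_args) {
  // Works whether or not the state was created while a CUDA graph was being captured.
  auto seeds = at::cuda::philox::unpack(philox_args);
  const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= n) return;

  curandStatePhilox4_32_10_t state;
  // seed, per-thread subsequence, offset
  curand_init(std::get<0>(seeds), idx, std::get<1>(seeds), &state);
  out[idx] = curand_normal(&state);
}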
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator.h ADDED
@@ -0,0 +1,40 @@
1
+ #pragma once
2
+ #include <ATen/jit_macros.h>
3
+
4
+ #if AT_USE_JITERATOR()
5
+
6
+ #include <c10/macros/Export.h>
7
+ #include <c10/util/SmallVector.h>
8
+ #include <ATen/core/Tensor.h>
9
+
10
+ #include <string>
11
+ #include <vector>
12
+
13
+ namespace at::cuda {
14
+
15
+ TORCH_CUDA_CPP_API c10::SmallVector<at::Tensor> CompileAndLaunchKernel(
16
+ const std::string& code_string,
17
+ const std::string& kernel_name,
18
+ const int num_outputs,
19
+ const c10::SmallVector<at::Tensor>& tensors,
20
+ const c10::SmallVector<at::Scalar>& extra_args,
21
+ bool return_by_ref);
22
+
23
+ } // namespace at::cuda
24
+
25
+ #else
26
+
27
+ namespace at::cuda {
28
+
29
+ TORCH_CUDA_CPP_API c10::SmallVector<at::Tensor> CompileAndLaunchKernel(
30
+ const std::string& code_string,
31
+ const std::string& kernel_name,
32
+ const int num_outputs,
33
+ const c10::SmallVector<at::Tensor>& tensors,
34
+ const c10::SmallVector<at::Scalar>& extra_args,
35
+ bool return_by_ref) {
36
+ TORCH_CHECK(false, "Jiterator is not supported");
37
+ }
38
+ } // namespace at::cuda
39
+
40
+ #endif // AT_USE_JITERATOR()
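A hypothetical usage sketch follows. The entry point and its signature are exactly the ones declared above, but the code-string format (a templated free function whose name matches kernel_name) is an assumption carried over from the Python-facing jiterator, not something this header specifies.

#include <ATen/core/Tensor.h>
#include <ATen/cuda/jiterator.h>

c10::SmallVector<at::Tensor> double_it(const at::Tensor& x) {
  // Assumed convention: a single free function templated over the element type.
  const std::string code = R"JIT(
    template <typename T> T double_it(T a) { return a + a; }
  )JIT";
  c10::SmallVector<at::Tensor> inputs{x};
  c10::SmallVector<at::Scalar> extra_args;   // no extra scalar arguments
  return at::cuda::CompileAndLaunchKernel(code, "double_it", /*num_outputs=*/1,
                                          inputs, extra_args, /*return_by_ref=*/false);
}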
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/GemmCommon.h ADDED
@@ -0,0 +1,174 @@
1
+ // Original TunableOp is from onnxruntime.
2
+ // https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/core/framework/tunable.h
3
+ // https://github.com/microsoft/onnxruntime/tree/main/onnxruntime/core/providers/rocm/tunable
4
+ // Copyright (c) Microsoft Corporation.
5
+ // Licensed under the MIT license.
6
+ //
7
+ // Adapting TunableOp into PyTorch
8
+ // Copyright (c) Advanced Micro Devices, Inc.
9
+ //
10
+ #pragma once
11
+
12
+ #include <string>
13
+
14
+ #include <ATen/cuda/tunable/TunableOp.h>
15
+ #include <ATen/cuda/Exceptions.h>
16
+ #include <c10/util/StringUtil.h>
17
+
18
+ namespace at::cuda::tunable {
19
+
20
+ enum class BlasOp {
21
+ N = 0,
22
+ T = 1
23
+ };
24
+
25
+ inline std::string BlasOpToString(BlasOp op) {
26
+ switch (op) {
27
+ case BlasOp::N:
28
+ return "N";
29
+ case BlasOp::T:
30
+ return "T";
31
+ }
32
+ TORCH_CHECK(false, "unrecognized BlasOp");
33
+ return "N";
34
+ }
35
+
36
+ template <typename T>
37
+ struct GemmParams : OpParams {
38
+ std::string Signature() const override {
39
+ return c10::str(transa, transb, "_", m, "_", n, "_", k);
40
+ }
41
+
42
+ GemmParams* DeepCopy() const {
43
+ GemmParams* copy = new GemmParams;
44
+ *copy = *this;
45
+ c10::DeviceIndex device = 0;
46
+ AT_CUDA_CHECK(c10::cuda::GetDevice(&device));
47
+ size_t c_size = m * n * sizeof(T);
48
+ copy->c = static_cast<T*>(c10::cuda::CUDACachingAllocator::raw_alloc(c_size));
49
+ AT_CUDA_CHECK(c10::cuda::CUDACachingAllocator::memcpyAsync(
50
+ copy->c, device, c, device, c_size, getCurrentCUDAStream(device), true));
51
+ return copy;
52
+ }
53
+
54
+ // only call on object returned by DeepCopy
55
+ void Delete() {
56
+ c10::cuda::CUDACachingAllocator::raw_delete(c);
57
+ }
58
+
59
+ TuningStatus NumericalCheck(GemmParams<T> *other) {
60
+ auto options = at::TensorOptions().dtype(c10::CppTypeToScalarType<T>::value).device(at::kCUDA);
61
+ // comparison done as 1D tensor
62
+ at::Tensor ref = at::from_blob(c, {m*n}, options);
63
+ at::Tensor oth = at::from_blob(other->c, {m*n}, options);
64
+ at::Tensor ref_float = ref.to(at::kFloat);
65
+ at::Tensor oth_float = oth.to(at::kFloat);
66
+ std::vector<double> atols{1e-1, 1e-2, 1e-3, 1e-4, 1e-5};
67
+ std::vector<double> rtols{1e-1, 1e-2, 1e-3, 1e-4, 1e-5};
68
+ double last_succeed_atol = 1;
69
+ double last_succeed_rtol = 1;
70
+ for (auto& atol : atols) {
71
+ for (auto& rtol : rtols) {
72
+ if (at::allclose(ref_float, oth_float, rtol, atol)) {
73
+ last_succeed_atol = atol;
74
+ last_succeed_rtol = rtol;
75
+ }
76
+ }
77
+ }
78
+ if (last_succeed_atol == 1) {
79
+ return FAIL;
80
+ }
81
+ else {
82
+ TUNABLE_LOG("├──verify numerics: atol=", last_succeed_atol, ", rtol=", last_succeed_rtol);
83
+ }
84
+
85
+ return OK;
86
+ }
87
+
88
+ char transa;
89
+ char transb;
90
+ int64_t m;
91
+ int64_t n;
92
+ int64_t k;
93
+ at::opmath_type<T> alpha;
94
+ const T* a;
95
+ int64_t lda;
96
+ const T* b;
97
+ int64_t ldb;
98
+ at::opmath_type<T> beta;
99
+ T* c;
100
+ int64_t ldc;
101
+ };
102
+
103
+ template <typename T>
104
+ struct GemmStridedBatchedParams : OpParams {
105
+ std::string Signature() const override {
106
+ return c10::str(transa, transb, "_", m, "_", n, "_", k, "_B_", batch);
107
+ }
108
+
109
+ GemmStridedBatchedParams* DeepCopy() const {
110
+ GemmStridedBatchedParams* copy = new GemmStridedBatchedParams;
111
+ *copy = *this;
112
+ c10::DeviceIndex device = 0;
113
+ AT_CUDA_CHECK(c10::cuda::GetDevice(&device));
114
+ size_t c_size = batch * stride_c * sizeof(T);
115
+ copy->c = static_cast<T*>(c10::cuda::CUDACachingAllocator::raw_alloc(c_size));
116
+ AT_CUDA_CHECK(c10::cuda::CUDACachingAllocator::memcpyAsync(
117
+ copy->c, device, c, device, c_size, getCurrentCUDAStream(device), true));
118
+ return copy;
119
+ }
120
+
121
+ // only call on object returned by DeepCopy
122
+ void Delete() {
123
+ c10::cuda::CUDACachingAllocator::raw_delete(c);
124
+ }
125
+
126
+ TuningStatus NumericalCheck(GemmStridedBatchedParams<T> *other) {
127
+ auto options = at::TensorOptions().dtype(c10::CppTypeToScalarType<T>::value).device(at::kCUDA);
128
+ // comparison done as 1D tensor
129
+ at::Tensor ref = at::from_blob(c, {batch*stride_c}, options);
130
+ at::Tensor oth = at::from_blob(other->c, {batch*stride_c}, options);
131
+ at::Tensor ref_float = ref.to(at::kFloat);
132
+ at::Tensor oth_float = oth.to(at::kFloat);
133
+ std::vector<double> atols{1e-1, 1e-2, 1e-3, 1e-4, 1e-5};
134
+ std::vector<double> rtols{1e-1, 1e-2, 1e-3, 1e-4, 1e-5};
135
+ double last_succeed_atol = 1;
136
+ double last_succeed_rtol = 1;
137
+ for (auto& atol : atols) {
138
+ for (auto& rtol : rtols) {
139
+ if (at::allclose(ref_float, oth_float, rtol, atol)) {
140
+ last_succeed_atol = atol;
141
+ last_succeed_rtol = rtol;
142
+ }
143
+ }
144
+ }
145
+ if (last_succeed_atol == 1) {
146
+ return FAIL;
147
+ }
148
+ else {
149
+ TUNABLE_LOG("├──verify numerics: atol=", last_succeed_atol, ", rtol=", last_succeed_rtol);
150
+ }
151
+
152
+ return OK;
153
+ }
154
+
155
+ char transa;
156
+ char transb;
157
+ int64_t m;
158
+ int64_t n;
159
+ int64_t k;
160
+ at::opmath_type<T> alpha;
161
+ const T* a;
162
+ int64_t lda;
163
+ int64_t stride_a;
164
+ const T* b;
165
+ int64_t ldb;
166
+ int64_t stride_b;
167
+ at::opmath_type<T> beta;
168
+ T* c;
169
+ int64_t ldc;
170
+ int64_t stride_c;
171
+ int64_t batch;
172
+ };
173
+
174
+ } // namespace at::cuda::tunable
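To make the field meanings concrete, here is a hedged sketch that fills a GemmParams<float> for C(m x n) = alpha * A(m x k) * B(k x n) + beta * C with untransposed, column-major operands; the leading dimensions shown assume tightly packed columns and are illustrative, not a requirement.

#include <ATen/cuda/tunable/GemmCommon.h>

at::cuda::tunable::GemmParams<float> make_params(
    const float* A, const float* B, float* C, int64_t m, int64_t n, int64_t k) {
  at::cuda::tunable::GemmParams<float> params;
  params.transa = 'N';
  params.transb = 'N';
  params.m = m;  params.n = n;  params.k = k;
  params.alpha = 1.0f;
  params.a = A;  params.lda = m;   // column-major: leading dimension = rows of A
  params.b = B;  params.ldb = k;   // rows of B
  params.beta = 0.0f;
  params.c = C;  params.ldc = m;   // rows of C
  return params;
}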
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/GemmHipblaslt.h ADDED
@@ -0,0 +1,379 @@
1
+ // Copyright (c) Microsoft Corporation. All rights reserved.
2
+ // Licensed under the MIT License.
3
+
4
+ #pragma once
5
+
6
+ #include <ATen/cuda/CUDAContext.h>
7
+ #include <ATen/cuda/tunable/TunableOp.h>
8
+ #include <ATen/cuda/tunable/GemmCommon.h>
9
+ #include <c10/cuda/CUDACachingAllocator.h>
10
+ #include <c10/util/StringUtil.h>
11
+
12
+ #include <hipblaslt/hipblaslt.h>
13
+ #include <hipblaslt/hipblaslt-ext.hpp>
14
+
15
+ #define TORCH_HIPBLASLT_CHECK(EXPR) \
16
+ do { \
17
+ hipblasStatus_t __err = EXPR; \
18
+ TORCH_CHECK(__err == HIPBLAS_STATUS_SUCCESS, \
19
+ "hipblaslt error: ", \
20
+ hipblasStatusToString(__err), \
21
+ " when calling `" #EXPR "`"); \
22
+ } while (0)
23
+
24
+ namespace at::cuda::tunable {
25
+
26
+ #ifdef HIPBLASLT_HAS_GETINDEXFROMALGO
27
+ #define GETINDEXFROMALGO(algo) hipblaslt_ext::getIndexFromAlgo(algo)
28
+ #else
29
+ static int getIndexFromAlgo(hipblasLtMatmulAlgo_t& algo) {
30
+ int* algo_ptr = (int*)algo.data;
31
+ if(*algo_ptr < 0) {
32
+ return -1;
33
+ }
34
+ return *algo_ptr;
35
+ }
36
+ #define GETINDEXFROMALGO(algo) getIndexFromAlgo(algo)
37
+ #endif
38
+
39
+ #ifdef HIPBLASLT_CUSTOM_COMPUTE_TYPE
40
+ #define COMPUTE_TYPE_32 HIPBLASLT_COMPUTE_F32
41
+ #else
42
+ #define COMPUTE_TYPE_32 HIPBLAS_COMPUTE_32F
43
+ #endif
44
+
45
+ #ifdef HIPBLASLT_CUSTOM_DATA_TYPE
46
+
47
+ template <typename T>
48
+ constexpr hipblasltDatatype_t HipBlasDataTypeFor();
49
+
50
+ template <>
51
+ constexpr hipblasltDatatype_t HipBlasDataTypeFor<float>() {
52
+ return HIPBLASLT_R_32F;
53
+ }
54
+
55
+ template <>
56
+ constexpr hipblasltDatatype_t HipBlasDataTypeFor<Half>() {
57
+ return HIPBLASLT_R_16F;
58
+ }
59
+
60
+ template <>
61
+ constexpr hipblasltDatatype_t HipBlasDataTypeFor<BFloat16>() {
62
+ return HIPBLASLT_R_16B;
63
+ }
64
+
65
+ template <>
66
+ constexpr hipblasltDatatype_t HipBlasDataTypeFor<double>() {
67
+ return HIPBLASLT_R_64F;
68
+ }
69
+
70
+ #define DATA_TYPE_R_32 HIPBLASLT_R_32F
71
+
72
+ #else
73
+
74
+ template <typename T>
75
+ constexpr hipblasDatatype_t HipBlasDataTypeFor();
76
+
77
+ template <>
78
+ constexpr hipblasDatatype_t HipBlasDataTypeFor<float>() {
79
+ return HIPBLAS_R_32F;
80
+ }
81
+
82
+ template <>
83
+ constexpr hipblasDatatype_t HipBlasDataTypeFor<Half>() {
84
+ return HIPBLAS_R_16F;
85
+ }
86
+
87
+ template <>
88
+ constexpr hipblasDatatype_t HipBlasDataTypeFor<BFloat16>() {
89
+ return HIPBLAS_R_16B;
90
+ }
91
+
92
+ template <>
93
+ constexpr hipblasDatatype_t HipBlasDataTypeFor<double>() {
94
+ return HIPBLAS_R_64F;
95
+ }
96
+
97
+ #ifdef HIPBLAS_V2
98
+ #define DATA_TYPE_R_32 HIP_R_32F
99
+ #else
100
+ #define DATA_TYPE_R_32 HIPBLAS_R_32F
101
+ #endif
102
+
103
+ #endif
104
+
105
+ template <typename T, typename ParamsT>
106
+ int GetBatchFromParams(const ParamsT* params) {
107
+ return 1;
108
+ }
109
+
110
+ template <typename T>
111
+ int GetBatchFromParams(const GemmStridedBatchedParams<T>* params) {
112
+ return params->batch;
113
+ }
114
+
115
+ template <typename T, typename ParamsT>
116
+ int GetStrideAFromParams(const ParamsT* params) {
117
+ return 1;
118
+ }
119
+
120
+ template <typename T>
121
+ int GetStrideAFromParams(const GemmStridedBatchedParams<T>* params) {
122
+ return params->stride_a;
123
+ }
124
+
125
+ template <typename T, typename ParamsT>
126
+ int GetStrideBFromParams(const ParamsT* params) {
127
+ return 1;
128
+ }
129
+
130
+ template <typename T>
131
+ int GetStrideBFromParams(const GemmStridedBatchedParams<T>* params) {
132
+ return params->stride_b;
133
+ }
134
+
135
+ template <typename T, typename ParamsT>
136
+ int GetStrideCFromParams(const ParamsT* params) {
137
+ return 1;
138
+ }
139
+
140
+ template <typename T>
141
+ int GetStrideCFromParams(const GemmStridedBatchedParams<T>* params) {
142
+ return params->stride_c;
143
+ }
144
+
145
+ static hipblasOperation_t _hipblasOpFromChar(char op) {
146
+ switch (op) {
147
+ case 'n':
148
+ case 'N':
149
+ return HIPBLAS_OP_N;
150
+ case 't':
151
+ case 'T':
152
+ return HIPBLAS_OP_T;
153
+ case 'c':
154
+ case 'C':
155
+ return HIPBLAS_OP_C;
156
+ }
157
+ AT_ERROR(
158
+ "_hipblasOpFromChar input should be 't', 'n' or 'c' but got `", op, "`");
159
+ }
160
+
161
+ static char _charFromhipblasOp(hipblasOperation_t op) {
162
+ switch (op) {
163
+ case HIPBLAS_OP_N:
164
+ return 'N';
165
+ case HIPBLAS_OP_T:
166
+ return 'T';
167
+ case HIPBLAS_OP_C:
168
+ return 'C';
169
+ }
170
+ AT_ERROR(
171
+ "_charFromhipblasOp input should be HIPBLAS_OP_N/T/C but got `", op, "`");
172
+ }
173
+
174
+ static hipblasOperation_t MapLayoutToHipBlasLt(BlasOp layout) {
175
+ if (layout == BlasOp::N) {
176
+ return HIPBLAS_OP_N;
177
+ }
178
+ return HIPBLAS_OP_T;
179
+ }
180
+
181
+ static size_t GetHipblasltWorkspaceSize() {
182
+ static const char * env = getenv("HIPBLASLT_WORKSPACE_SIZE");
183
+ // 256MB is max workspace size allowed for hipblaslt
184
+ // hipblaslt-bench uses 32MB
185
+ // recommendation from hipblaslt author was 76MB
186
+ size_t workspace_size = 2*128*1024*1024; // default 256MB
187
+ if (env) {
188
+ try {
189
+ workspace_size = std::stoi(env);
190
+ } catch(std::invalid_argument const& e) {
191
+ TORCH_WARN("invalid HIPBLASLT_WORKSPACE_SIZE,",
192
+ " using default workspace size of ", workspace_size, " bytes.");
193
+ } catch(std::out_of_range const& e) {
194
+ TORCH_WARN("HIPBLASLT_WORKSPACE_SIZE out of range,",
195
+ " using default workspace size of ", workspace_size, " bytes.");
196
+ }
197
+ }
198
+ return workspace_size;
199
+ }
200
+
201
+ template <typename T, BlasOp ALayout, BlasOp BLayout, typename ParamsT>
202
+ class HipblasltGemmOp : public Callable<ParamsT> {
203
+ public:
204
+ HipblasltGemmOp(hipblasLtMatmulAlgo_t algo) : algo_{algo} {}
205
+
206
+ TuningStatus Call(const ParamsT* params) override {
207
+ hipblasOperation_t transa_outer = MapLayoutToHipBlasLt(ALayout);
208
+ hipblasOperation_t transb_outer = MapLayoutToHipBlasLt(BLayout);
209
+ auto in_out_datatype = HipBlasDataTypeFor<T>();
210
+ auto opa = _hipblasOpFromChar(params->transa);
211
+ auto opb = _hipblasOpFromChar(params->transb);
212
+
213
+ TORCH_CHECK(transa_outer == opa && transb_outer == opb, "trans mismatch, shouldn't happen");
214
+
215
+ float alpha = static_cast<float>(params->alpha);
216
+ float beta = static_cast<float>(params->beta);
217
+
218
+ hipblasLtMatrixLayout_t mat_a, mat_b, mat_c;
219
+ hipblasLtMatmulDesc_t matmul;
220
+ if (opa == HIPBLAS_OP_N) {
221
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutCreate(&mat_a, in_out_datatype, params->m, params->k, params->lda));
222
+ }
223
+ else {
224
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutCreate(&mat_a, in_out_datatype, params->k, params->m, params->lda));
225
+ }
226
+ if (opb == HIPBLAS_OP_N) {
227
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutCreate(&mat_b, in_out_datatype, params->k, params->n, params->ldb));
228
+ }
229
+ else {
230
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutCreate(&mat_b, in_out_datatype, params->n, params->k, params->ldb));
231
+ }
232
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutCreate(&mat_c, in_out_datatype, params->m, params->n, params->ldc));
233
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatmulDescCreate(&matmul, COMPUTE_TYPE_32, DATA_TYPE_R_32));
234
+
235
+ int batch = GetBatchFromParams<T>(params);
236
+ if (batch > 1) {
237
+ int64_t stride_a = GetStrideAFromParams<T>(params);
238
+ int64_t stride_b = GetStrideBFromParams<T>(params);
239
+ int64_t stride_c = GetStrideCFromParams<T>(params);
240
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutSetAttribute(
241
+ mat_a, HIPBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &batch, sizeof(batch)));
242
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutSetAttribute(
243
+ mat_a, HIPBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &stride_a, sizeof(stride_a)));
244
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutSetAttribute(
245
+ mat_b, HIPBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &batch, sizeof(batch)));
246
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutSetAttribute(
247
+ mat_b, HIPBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &stride_b, sizeof(stride_b)));
248
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutSetAttribute(
249
+ mat_c, HIPBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &batch, sizeof(batch)));
250
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutSetAttribute(
251
+ mat_c, HIPBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &stride_c, sizeof(stride_c)));
252
+ }
253
+
254
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatmulDescSetAttribute(
255
+ matmul, HIPBLASLT_MATMUL_DESC_TRANSA, &opa, sizeof(int32_t)));
256
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatmulDescSetAttribute(
257
+ matmul, HIPBLASLT_MATMUL_DESC_TRANSB, &opb, sizeof(int32_t)));
258
+
259
+ size_t workspace_size = GetHipblasltWorkspaceSize();
260
+
261
+ auto op_handle = at::cuda::getCurrentCUDABlasLtHandle();
262
+
263
+ size_t ret_workspace_size = 0;
264
+ auto status = hipblaslt_ext::matmulIsAlgoSupported(op_handle,
265
+ matmul,
266
+ &alpha,
267
+ mat_a,
268
+ mat_b,
269
+ &beta,
270
+ mat_c,
271
+ mat_c,
272
+ algo_,
273
+ ret_workspace_size);
274
+
275
+ if (status == HIPBLAS_STATUS_SUCCESS) {
276
+ if (ret_workspace_size >= workspace_size) {
277
+ //TUNABLE_LOG("[hipBLASLt] Solution #", algo_index, " workspace too large");
278
+ return FAIL;
279
+ }
280
+ }
281
+ else {
282
+ //TUNABLE_LOG("[hipBLASLt] Solution #", algo_index, " not supported");
283
+ return FAIL;
284
+ }
285
+
286
+ void* workspace_buffer = nullptr;
287
+ if (workspace_size > 0) {
288
+ workspace_buffer = c10::cuda::CUDACachingAllocator::raw_alloc(workspace_size);
289
+ }
290
+
291
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatmul(op_handle,
292
+ matmul,
293
+ &alpha,
294
+ params->a,
295
+ mat_a,
296
+ params->b,
297
+ mat_b,
298
+ &beta,
299
+ params->c,
300
+ mat_c,
301
+ params->c,
302
+ mat_c,
303
+ &algo_,
304
+ workspace_buffer,
305
+ workspace_size,
306
+ at::cuda::getCurrentCUDAStream()));
307
+
308
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatmulDescDestroy(matmul));
309
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutDestroy(mat_a));
310
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutDestroy(mat_b));
311
+ TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutDestroy(mat_c));
312
+ if (workspace_size > 0) {
313
+ c10::cuda::CUDACachingAllocator::raw_delete(workspace_buffer);
314
+ }
315
+ return OK;
316
+ }
317
+
318
+ private:
319
+ hipblasLtMatmulAlgo_t algo_;
320
+ };
321
+
322
+ template <typename T, BlasOp ALayout, BlasOp BLayout, typename ParamsT>
323
+ auto GetHipBlasLtTypeStringAndOps() {
324
+ hipblasOperation_t transa_outer = MapLayoutToHipBlasLt(ALayout);
325
+ hipblasOperation_t transb_outer = MapLayoutToHipBlasLt(BLayout);
326
+ auto in_out_datatype = HipBlasDataTypeFor<T>();
327
+ std::vector<hipblasLtMatmulHeuristicResult_t> heuristic_result;
328
+
329
+ hipblasLtHandle_t handle;
330
+ TORCH_HIPBLASLT_CHECK(hipblasLtCreate(&handle));
331
+ TORCH_HIPBLASLT_CHECK(hipblaslt_ext::getAllAlgos(handle,
332
+ hipblaslt_ext::GemmType::HIPBLASLT_GEMM,
333
+ transa_outer,
334
+ transb_outer,
335
+ in_out_datatype,
336
+ in_out_datatype,
337
+ in_out_datatype,
338
+ in_out_datatype,
339
+ COMPUTE_TYPE_32,
340
+ heuristic_result));
341
+ TORCH_HIPBLASLT_CHECK(hipblasLtDestroy(handle));
342
+
343
+ // Sort heuristic_result by algo index to make sure the order of returned algos is deterministic.
344
+ std::sort(heuristic_result.begin(),
345
+ heuristic_result.end(),
346
+ [](hipblasLtMatmulHeuristicResult_t& a, hipblasLtMatmulHeuristicResult_t& b) {
347
+ return GETINDEXFROMALGO(a.algo) < GETINDEXFROMALGO(b.algo);
348
+ });
349
+
350
+ int returned_algo_count = heuristic_result.size();
351
+ std::vector<std::pair<std::string, std::unique_ptr<Callable<ParamsT>>>> ret;
352
+ for (int i = 0; i < returned_algo_count; i++) {
353
+ auto algo = heuristic_result[i].algo;
354
+ int algo_index = GETINDEXFROMALGO(algo);
355
+ auto callable = std::make_unique<HipblasltGemmOp<T, ALayout, BLayout, ParamsT>>(algo);
356
+ std::string type_string = c10::str(
357
+ "Gemm_Hipblaslt_", _charFromhipblasOp(transa_outer), _charFromhipblasOp(transb_outer), "_", algo_index);
358
+ ret.emplace_back(type_string, std::move(callable));
359
+ }
360
+
361
+ return ret;
362
+ }
363
+
364
+ template <typename T, BlasOp ALayout, BlasOp BLayout>
365
+ auto GetHipBlasLtGemmTypeStringAndOps() {
366
+ return GetHipBlasLtTypeStringAndOps<T, ALayout, BLayout, GemmParams<T>>();
367
+ }
368
+
369
+ template <typename T, BlasOp ALayout, BlasOp BLayout>
370
+ auto GetHipBlasLtGemmStridedBatchedTypeStringAndOps() {
371
+ return GetHipBlasLtTypeStringAndOps<T, ALayout, BLayout, GemmStridedBatchedParams<T>>();
372
+ }
373
+
374
+ #undef TORCH_HIPBLASLT_CHECK
375
+ #undef GETINDEXFROMALGO
376
+ #undef COMPUTE_TYPE_32
377
+ #undef DATA_TYPE_R_32
378
+
379
+ } // namespace at::cuda::tunable
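One practical note on GetHipblasltWorkspaceSize(): because the environment variable is read once through a function-local static and parsed with std::stoi as a byte count, it has to be set before the first tunable hipBLASLt GEMM runs in the process. A minimal sketch (the 32 MiB figure is arbitrary):

#include <cstdlib>

void cap_hipblaslt_workspace() {
  // 32 * 1024 * 1024 bytes; must happen before the first hipBLASLt tunable call.
  setenv("HIPBLASLT_WORKSPACE_SIZE", "33554432", /*overwrite=*/1);
}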
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/GemmRocblas.h ADDED
@@ -0,0 +1,275 @@
1
+ // Copyright (c) Microsoft Corporation. All rights reserved.
2
+ // Licensed under the MIT License.
3
+
4
+ #pragma once
5
+
6
+ #include <ATen/cuda/CUDAContext.h>
7
+ #include <ATen/cuda/tunable/TunableOp.h>
8
+ #include <ATen/cuda/tunable/GemmCommon.h>
9
+ #include <c10/util/StringUtil.h>
10
+
11
+ #define ROCBLAS_BETA_FEATURES_API
12
+ #include <rocblas/rocblas.h>
13
+
14
+ #define TORCH_ROCBLAS_CHECK(EXPR) \
15
+ do { \
16
+ rocblas_status __err = EXPR; \
17
+ TORCH_CHECK(__err == rocblas_status_success, \
18
+ "rocblas error: ", \
19
+ rocblas_status_to_string(__err), \
20
+ " when calling `" #EXPR "`"); \
21
+ } while (0)
22
+
23
+ namespace at::cuda::tunable {
24
+
25
+ template <typename T>
26
+ constexpr rocblas_datatype RocBlasDataTypeFor();
27
+
28
+ template <>
29
+ constexpr rocblas_datatype RocBlasDataTypeFor<float>() {
30
+ return rocblas_datatype_f32_r;
31
+ }
32
+
33
+ template <>
34
+ constexpr rocblas_datatype RocBlasDataTypeFor<double>() {
35
+ return rocblas_datatype_f64_r;
36
+ }
37
+
38
+ template <>
39
+ constexpr rocblas_datatype RocBlasDataTypeFor<Half>() {
40
+ return rocblas_datatype_f16_r;
41
+ }
42
+
43
+ template <>
44
+ constexpr rocblas_datatype RocBlasDataTypeFor<BFloat16>() {
45
+ return rocblas_datatype_bf16_r;
46
+ }
47
+
48
+ template <>
49
+ constexpr rocblas_datatype RocBlasDataTypeFor<c10::complex<float>>() {
50
+ return rocblas_datatype_f32_c;
51
+ }
52
+
53
+ template <>
54
+ constexpr rocblas_datatype RocBlasDataTypeFor<c10::complex<double>>() {
55
+ return rocblas_datatype_f64_c;
56
+ }
57
+
58
+ template <typename T>
59
+ constexpr rocblas_datatype RocBlasComputeTypeFor();
60
+
61
+ template <>
62
+ constexpr rocblas_datatype RocBlasComputeTypeFor<float>() {
63
+ return rocblas_datatype_f32_r;
64
+ }
65
+
66
+ template <>
67
+ constexpr rocblas_datatype RocBlasComputeTypeFor<double>() {
68
+ return rocblas_datatype_f64_r;
69
+ }
70
+
71
+ template <>
72
+ constexpr rocblas_datatype RocBlasComputeTypeFor<Half>() {
73
+ // Note that we're returning the _compute_ type for a given datatype.
74
+ // As of 12/2022, using compute type FP16 for 16-bit floats was much
75
+ // slower than using compute type FP32. So we use FP32 compute even for
76
+ // FP16 datatypes. This is how GEMM is implemented even in the function
77
+ // rocblasGemmHelper (see fpgeneric.h)
78
+ return rocblas_datatype_f32_r;
79
+ }
80
+
81
+ template <>
82
+ constexpr rocblas_datatype RocBlasComputeTypeFor<BFloat16>() {
83
+ // Note that we're returning the _compute_ type for a given datatype.
84
+ // As of 12/2022, using compute type FP16 for 16-bit floats was much
85
+ // slower than using compute type FP32. So we use FP32 compute even for
86
+ // BF16 datatypes. This is how GEMM is implemented even in the function
87
+ // rocblasGemmHelper (see fpgeneric.h)
88
+ return rocblas_datatype_f32_r;
89
+ }
90
+
91
+ template <>
92
+ constexpr rocblas_datatype RocBlasComputeTypeFor<c10::complex<float>>() {
93
+ return rocblas_datatype_f32_c;
94
+ }
95
+
96
+ template <>
97
+ constexpr rocblas_datatype RocBlasComputeTypeFor<c10::complex<double>>() {
98
+ return rocblas_datatype_f64_c;
99
+ }
100
+
101
+ template <typename T>
102
+ auto DoCastForHalfOrBfloat16(const T fp) {
103
+ return fp;
104
+ }
105
+
106
+ template <>
107
+ inline auto DoCastForHalfOrBfloat16<Half>(const Half fp) {
108
+ // alpha and beta should be the same as compute_type, in Half case it is float.
109
+ float h = fp;
110
+ return h;
111
+ }
112
+
113
+ template <>
114
+ inline auto DoCastForHalfOrBfloat16<BFloat16>(const BFloat16 fp) {
115
+ // alpha and beta should be the same as compute_type, in bfloat16 case it is float.
116
+ float h = fp;
117
+ return h;
118
+ }
119
+
120
+ static rocblas_operation _rocblasOpFromChar(char op) {
121
+ switch (op) {
122
+ case 'n':
123
+ case 'N':
124
+ return rocblas_operation_none;
125
+ case 't':
126
+ case 'T':
127
+ return rocblas_operation_transpose;
128
+ case 'c':
129
+ case 'C':
130
+ return rocblas_operation_conjugate_transpose;
131
+ }
132
+ AT_ERROR(
133
+ "_rocblasOpFromChar input should be 't', 'n' or 'c' but got `", op, "`");
134
+ }
135
+
136
+ template <typename T>
137
+ class RocblasGemmOp : public Callable<GemmParams<T>> {
138
+ public:
139
+ RocblasGemmOp(int solution) : solution_{solution} {}
140
+
141
+ TuningStatus Call(const GemmParams<T>* params) override {
142
+ auto input_output_type = RocBlasDataTypeFor<T>();
143
+ auto compute_type = RocBlasComputeTypeFor<T>();
144
+ auto h_a = DoCastForHalfOrBfloat16(params->alpha);
145
+ auto h_b = DoCastForHalfOrBfloat16(params->beta);
146
+ auto status = rocblas_gemm_ex(
147
+ (rocblas_handle)at::cuda::getCurrentCUDABlasHandle(),
148
+ _rocblasOpFromChar(params->transa),
149
+ _rocblasOpFromChar(params->transb),
150
+ params->m, params->n, params->k,
151
+ &h_a,
152
+ params->a, input_output_type, params->lda,
153
+ params->b, input_output_type, params->ldb,
154
+ &h_b,
155
+ params->c, input_output_type, params->ldc,
156
+ params->c, input_output_type, params->ldc,
157
+ compute_type,
158
+ rocblas_gemm_algo_solution_index,
159
+ solution_,
160
+ rocblas_gemm_flags_none);
161
+ if (status != rocblas_status_success) {
162
+ return FAIL;
163
+ }
164
+ return OK;
165
+ }
166
+
167
+ private:
168
+ int solution_;
169
+ };
170
+
171
+ template <typename T>
172
+ auto GetRocBlasGemmTypeStringAndOps() {
173
+ rocblas_handle handle = (rocblas_handle)at::cuda::getCurrentCUDABlasHandle();
174
+ int solution_size;
175
+ auto input_output_type = RocBlasDataTypeFor<T>();
176
+ auto compute_type = RocBlasComputeTypeFor<T>();
177
+ // Get the number of available solutions
178
+ TORCH_ROCBLAS_CHECK(rocblas_gemm_ex_get_solutions_by_type(handle,
179
+ input_output_type,
180
+ input_output_type,
181
+ compute_type,
182
+ rocblas_gemm_flags_none,
183
+ nullptr,
184
+ &solution_size));
185
+ std::vector<int> solutions(solution_size);
186
+ // Get the list of available solutions
187
+ TORCH_ROCBLAS_CHECK(rocblas_gemm_ex_get_solutions_by_type(handle,
188
+ input_output_type,
189
+ input_output_type,
190
+ compute_type,
191
+ rocblas_gemm_flags_none,
192
+ solutions.data(),
193
+ &solution_size));
194
+ // Sort the solutions in ascending order to make the solution vector deterministic across runs
195
+ std::sort(solutions.begin(), solutions.end());
196
+
197
+ std::vector<std::pair<std::string, std::unique_ptr<Callable<GemmParams<T>>>>> ret;
198
+ for (size_t i = 0; i < solutions.size(); ++i) {
199
+ auto callable = std::make_unique<RocblasGemmOp<T>>(solutions[i]);
200
+ ret.emplace_back(std::make_pair(c10::str("Gemm_Rocblas_", solutions[i]), std::move(callable)));
201
+ }
202
+ return ret;
203
+ }
204
+
205
+ template <typename T>
206
+ class RocblasGemmStridedBatchedOp : public Callable<GemmStridedBatchedParams<T>> {
207
+ public:
208
+ RocblasGemmStridedBatchedOp(int solution) : solution_{solution} {}
209
+
210
+ TuningStatus Call(const GemmStridedBatchedParams<T>* params) override {
211
+ auto input_output_type = RocBlasDataTypeFor<T>();
212
+ auto compute_type = RocBlasComputeTypeFor<T>();
213
+ auto h_a = DoCastForHalfOrBfloat16(params->alpha);
214
+ auto h_b = DoCastForHalfOrBfloat16(params->beta);
215
+ auto status = rocblas_gemm_strided_batched_ex(
216
+ (rocblas_handle)at::cuda::getCurrentCUDABlasHandle(),
217
+ _rocblasOpFromChar(params->transa),
218
+ _rocblasOpFromChar(params->transb),
219
+ params->m, params->n, params->k,
220
+ &h_a,
221
+ params->a, input_output_type, params->lda, params->stride_a,
222
+ params->b, input_output_type, params->ldb, params->stride_b,
223
+ &h_b,
224
+ params->c, input_output_type, params->ldc, params->stride_c,
225
+ params->c, input_output_type, params->ldc, params->stride_c,
226
+ params->batch,
227
+ compute_type,
228
+ rocblas_gemm_algo_solution_index,
229
+ solution_,
230
+ rocblas_gemm_flags_none);
231
+ if (status != rocblas_status_success) {
232
+ return FAIL;
233
+ }
234
+ return OK;
235
+ }
236
+
237
+ private:
238
+ int solution_;
239
+ };
240
+
241
+ template <typename T>
242
+ auto GetRocBlasGemmStridedBatchedTypeStringAndOps() {
243
+ rocblas_handle handle = (rocblas_handle)at::cuda::getCurrentCUDABlasHandle();
244
+ int solution_size;
245
+ auto input_output_type = RocBlasDataTypeFor<T>();
246
+ auto compute_type = RocBlasComputeTypeFor<T>();
247
+ // Get the number of available solutions
248
+ TORCH_ROCBLAS_CHECK(rocblas_gemm_ex_get_solutions_by_type(handle,
249
+ input_output_type,
250
+ input_output_type,
251
+ compute_type,
252
+ rocblas_gemm_flags_none,
253
+ nullptr,
254
+ &solution_size));
255
+ std::vector<int> solutions(solution_size);
256
+ // Get the list of available solutions
257
+ TORCH_ROCBLAS_CHECK(rocblas_gemm_ex_get_solutions_by_type(handle,
258
+ input_output_type,
259
+ input_output_type,
260
+ compute_type,
261
+ rocblas_gemm_flags_none,
262
+ solutions.data(),
263
+ &solution_size));
264
+ // Sort the solutions in ascending order to make the solution vector deterministic across runs
265
+ std::sort(solutions.begin(), solutions.end());
266
+
267
+ std::vector<std::pair<std::string, std::unique_ptr<Callable<GemmStridedBatchedParams<T>>>>> ret;
268
+ for (size_t i = 0; i < solutions.size(); ++i) {
269
+ auto callable = std::make_unique<RocblasGemmStridedBatchedOp<T>>(solutions[i]);
270
+ ret.emplace_back(std::make_pair(c10::str("Gemm_Rocblas_", solutions[i]), std::move(callable)));
271
+ }
272
+ return ret;
273
+ }
274
+
275
+ } // namespace at::cuda::tunable
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/StreamTimer.h ADDED
@@ -0,0 +1,34 @@
1
+ // Original TunableOp is from onnxruntime.
2
+ // https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/core/framework/tunable.h
3
+ // https://github.com/microsoft/onnxruntime/tree/main/onnxruntime/core/providers/rocm/tunable
4
+ // Copyright (c) Microsoft Corporation.
5
+ // Licensed under the MIT license.
6
+ //
7
+ // Adapting TunableOp into PyTorch
8
+ // Copyright (c) Advanced Micro Devices, Inc.
9
+ //
10
+ #pragma once
11
+
12
+ #include <cuda_runtime.h>
13
+
14
+ #include <ATen/cuda/tunable/Tunable.h>
15
+
16
+ namespace at::cuda::tunable {
17
+
18
+ class StreamTimer : public ITimer {
19
+ public:
20
+ StreamTimer();
21
+ virtual ~StreamTimer();
22
+
23
+ void Start() override;
24
+
25
+ void End() override;
26
+
27
+ float Duration() override;
28
+
29
+ private:
30
+ cudaEvent_t start_;
31
+ cudaEvent_t end_;
32
+ };
33
+
34
+ } // namespace at::cuda::tunable
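Only the interface is declared here; a common way to implement such a timer is with CUDA events recorded on the current stream. The class below is a sketch of that approach, not the StreamTimer implementation from this diff.

#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>
#include <cuda_runtime.h>

class EventTimer {
 public:
  EventTimer() {
    AT_CUDA_CHECK(cudaEventCreate(&start_));
    AT_CUDA_CHECK(cudaEventCreate(&end_));
  }
  ~EventTimer() {
    cudaEventDestroy(start_);
    cudaEventDestroy(end_);
  }
  void Start() {
    AT_CUDA_CHECK(cudaEventRecord(start_, at::cuda::getCurrentCUDAStream()));
  }
  void End() {
    AT_CUDA_CHECK(cudaEventRecord(end_, at::cuda::getCurrentCUDAStream()));
    AT_CUDA_CHECK(cudaEventSynchronize(end_));   // make the elapsed time observable
  }
  float Duration() {
    float ms = 0.0f;
    AT_CUDA_CHECK(cudaEventElapsedTime(&ms, start_, end_));   // milliseconds
    return ms;
  }
 private:
  cudaEvent_t start_{};
  cudaEvent_t end_{};
};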
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/Tunable.h ADDED
@@ -0,0 +1,205 @@
1
+ // Original TunableOp is from onnxruntime.
2
+ // https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/core/framework/tunable.h
3
+ // https://github.com/microsoft/onnxruntime/tree/main/onnxruntime/core/providers/rocm/tunable
4
+ // Copyright (c) Microsoft Corporation.
5
+ // Licensed under the MIT license.
6
+ //
7
+ // Adapting TunableOp into PyTorch
8
+ // Copyright (c) Advanced Micro Devices, Inc.
9
+ //
10
+ #pragma once
11
+
12
+ #include <c10/util/CallOnce.h>
13
+
14
+ #include <functional>
15
+ #include <iostream>
16
+ #include <memory>
17
+ #include <mutex>
18
+ #include <string>
19
+ #include <type_traits>
20
+ #include <unordered_map>
21
+ #include <utility>
22
+ #include <vector>
23
+
24
+ namespace at::cuda::tunable {
25
+
26
+ static void TunableLog(const std::string& msg) {
27
+ static const char *env = getenv("PYTORCH_TUNABLEOP_VERBOSE");
28
+ if (env != nullptr && strcmp(env, "1") == 0) {
29
+ std::cerr << msg << std::endl;
30
+ }
31
+ }
32
+ #define TUNABLE_LOG(...) TunableLog(c10::str(__VA_ARGS__))
33
+
34
+ enum TuningStatus {
35
+ OK = 0,
36
+ FAIL = 1,
37
+ UNSUPPORTED = 2,
38
+ };
39
+
40
+ // Mapping from params signature to kernel id
41
+ class ResultEntry {
42
+ public:
43
+ explicit ResultEntry(const std::string& key, double time) : key_(key), time_(time) {}
44
+ bool operator==(const ResultEntry& other) { return key_ == other.key_; }
45
+ bool operator!=(const ResultEntry& other) { return key_ != other.key_; }
46
+ operator std::string () { return key_; }
47
+ friend std::ostream& operator<<(std::ostream& stream, const ResultEntry& entry);
48
+ static ResultEntry Null() { return ResultEntry("Null", 0.0); }
49
+ static ResultEntry Default() { return ResultEntry("Default", 0.0); }
50
+
51
+ private:
52
+ std::string key_;
53
+ double time_;
54
+ };
55
+
56
+ typedef std::unordered_map<std::string, ResultEntry> KernelMap;
57
+ typedef std::unordered_map<std::string, KernelMap> ResultsMap;
58
+
59
+ struct TuningResults {
60
+ // Validates if these results are compatible with the libraries
61
+ std::unordered_map<std::string, std::string> validators;
62
+
63
+ // Mapping from Callable signature to Callable's tuning result
64
+ ResultsMap results;
65
+ };
66
+
67
+ class TuningResultsManager {
68
+ public:
69
+ TuningResultsManager() = default;
70
+ ~TuningResultsManager() = default;
71
+
72
+ KernelMap Lookup(const std::string& op_signature);
73
+
74
+ ResultEntry Lookup(const std::string& op_signature, const std::string& params_signature);
75
+
76
+ inline void AddImpl(const std::string& op_signature,
77
+ const std::string& params_signature,
78
+ ResultEntry best,
79
+ KernelMap& kernel_map);
80
+
81
+ void Add(const std::string& op_signature,
82
+ const std::string& params_signature,
83
+ ResultEntry best);
84
+
85
+ void Delete(const std::string& op_signature, const std::string& params_signature);
86
+
87
+ inline void DisjointMergeImpl(
88
+ const std::string& op_signature,
89
+ const KernelMap& kernel_map,
90
+ /*out*/ ResultsMap& results);
91
+
92
+ void Load(const ResultsMap& results_to_load);
93
+
94
+ ResultsMap Dump();
95
+
96
+ void DisjointMerge(const std::string& op_signature, const KernelMap& kernel_map);
97
+
98
+ size_t GetSize();
99
+
100
+ private:
101
+ std::mutex lock_;
102
+ ResultsMap results_;
103
+ };
104
+
105
+ class TuningResultsValidator {
106
+ public:
107
+ using GetFunc = std::function<std::string()>;
108
+ using ValidateFunc = std::function<TuningStatus(const std::string&)>;
109
+ using GetValidateFuncs = std::unordered_map<std::string, std::pair<GetFunc, ValidateFunc>>;
110
+
111
+ TuningResultsValidator();
112
+ ~TuningResultsValidator() = default;
113
+
114
+ std::unordered_map<std::string, std::string> GetAllValidators() const;
115
+ TuningStatus ValidateAll(const std::unordered_map<std::string, std::string>& to_validate) const;
116
+ void RegisterValidator(const std::string& key, const GetFunc& gf, const ValidateFunc& vf);
117
+
118
+ protected:
119
+ std::string GetPyTorchVersion() const;
120
+ TuningStatus ValidatePyTorchVersion(const std::string& value) const;
121
+
122
+ public:
123
+ static constexpr const std::array mandatory_keys{"PT_VERSION"};
124
+
125
+ private:
126
+ GetValidateFuncs validators_;
127
+ };
128
+
129
+ class TuningContext {
130
+ public:
131
+ TuningContext();
132
+ ~TuningContext();
133
+ TuningContext(TuningContext &) = delete;
134
+ TuningContext(TuningContext &&) = delete;
135
+ TuningContext &operator=(TuningContext &) = delete;
136
+ TuningContext &operator=(TuningContext &&) = delete;
137
+
138
+ void EnableTunableOp();
139
+ void DisableTunableOp();
140
+ bool IsTunableOpEnabled() const;
141
+
142
+ void EnableTuning();
143
+ void DisableTuning();
144
+ bool IsTuningEnabled() const;
145
+
146
+ void SetMaxTuningDurationMs(int max_duration_ms);
147
+ int GetMaxTuningDurationMs() const;
148
+
149
+ void SetMaxTuningIterations(int max_iter);
150
+ int GetMaxTuningIterations() const;
151
+
152
+ void SetMaxWarmupDurationMs(int max_duration_ms);
153
+ int GetMaxWarmupDurationMs() const;
154
+
155
+ void SetMaxWarmupIterations(int max_iter);
156
+ int GetMaxWarmupIterations() const;
157
+
158
+ void EnableTunableOpAndTuning();
159
+ void DisableTunableOpAndTuning();
160
+
161
+ TuningResultsManager& GetTuningResultsManager();
162
+
163
+ TuningResultsValidator& GetTuningResultsValidator();
164
+
165
+ TuningResults GetTuningResults();
166
+
167
+ TuningStatus LoadTuningResults(const TuningResults& tr);
168
+
169
+ void SetFilename(const std::string& filename);
170
+ std::string GetFilename() const;
171
+
172
+ protected:
173
+ bool ReadFile(const std::string& filename);
174
+ bool WriteFile(const std::string& filename);
175
+
176
+ private:
177
+ bool enable_;
178
+ bool tuning_enable_;
179
+ bool manager_initialized_;
180
+ int max_tuning_duration_ms_;
181
+ int max_tuning_iterations_;
182
+ int max_warmup_duration_ms_;
183
+ int max_warmup_iterations_;
184
+ mutable TuningResultsManager manager_;
185
+ mutable c10::once_flag manager_init_once_;
186
+ TuningResultsValidator validator_;
187
+ std::string filename_;
188
+ size_t results_count_from_input_file_;
189
+ };
190
+
191
+ TuningContext* getTuningContext();
192
+
193
+ class ITimer {
194
+ public:
195
+ ITimer() = default;
196
+ virtual ~ITimer() = default;
197
+
198
+ virtual void Start() = 0;
199
+ virtual void End() = 0;
200
+
201
+ /// Computes the elapsed time in milliseconds between Start() and End()
202
+ virtual float Duration() = 0;
203
+ };
204
+
205
+ } // namespace at::cuda::tunable
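For orientation, a sketch of driving this context programmatically using only the accessors declared above; the iteration caps and filename are arbitrary example values.

#include <ATen/cuda/tunable/Tunable.h>

void configure_tunableop() {
  auto* ctx = at::cuda::tunable::getTuningContext();
  ctx->EnableTunableOp();                      // use tuned results when available
  ctx->EnableTuning();                         // allow new tuning runs
  ctx->SetMaxTuningIterations(30);             // cap benchmarking per kernel
  ctx->SetMaxWarmupIterations(3);
  ctx->SetFilename("tunableop_results.csv");   // consumed by ReadFile/WriteFile
}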
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/TunableGemm.h ADDED
@@ -0,0 +1,278 @@
1
+ // Original TunableOp is from onnxruntime.
2
+ // https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/core/framework/tunable.h
3
+ // https://github.com/microsoft/onnxruntime/tree/main/onnxruntime/core/providers/rocm/tunable
4
+ // Copyright (c) Microsoft Corporation.
5
+ // Licensed under the MIT license.
6
+ //
7
+ // Adapting TunableOp into PyTorch
8
+ // Copyright (c) Advanced Micro Devices, Inc.
9
+ //
10
+ #pragma once
11
+
12
+ #include <ATen/cuda/tunable/GemmCommon.h>
13
+ #ifdef USE_ROCM
14
+ #if ROCM_VERSION >= 50700
15
+ #include <ATen/cuda/tunable/GemmHipblaslt.h>
16
+ #endif
17
+ #include <ATen/cuda/tunable/GemmRocblas.h>
18
+ #endif
19
+ #include <ATen/cuda/tunable/StreamTimer.h>
20
+ #include <ATen/cuda/tunable/TunableOp.h>
21
+ #include <c10/cuda/CUDACachingAllocator.h>
22
+ #include <c10/util/StringUtil.h>
23
+
24
+ #ifdef USE_ROCM
25
+ #include <rocm-core/rocm_version.h>
26
+ #endif
27
+
28
+ #define STRINGIFY(s) #s
29
+ #define XSTRINGIFY(s) STRINGIFY(s)
30
+
31
+ namespace at::cuda::tunable {
32
+
33
+ template <typename T>
34
+ class DefaultGemmOp : public Callable<GemmParams<T>> {
35
+ public:
36
+ TuningStatus Call(const GemmParams<T>* params) override {
37
+ at::cuda::blas::gemm_internal<T>(
38
+ params->transa, params->transb,
39
+ params->m, params->n, params->k,
40
+ params->alpha,
41
+ params->a, params->lda,
42
+ params->b, params->ldb,
43
+ params->beta,
44
+ params->c, params->ldc);
45
+ return OK;
46
+ }
47
+ };
48
+
49
+ template <typename T>
50
+ class DefaultGemmStridedBatchedOp : public Callable<GemmStridedBatchedParams<T>> {
51
+ public:
52
+ TuningStatus Call(const GemmStridedBatchedParams<T>* params) override {
53
+ at::cuda::blas::bgemm_internal<T>(
54
+ params->transa, params->transb,
55
+ params->m, params->n, params->k,
56
+ params->alpha,
57
+ params->a, params->lda, params->stride_a,
58
+ params->b, params->ldb, params->stride_b,
59
+ params->beta,
60
+ params->c, params->ldc, params->stride_c,
61
+ params->batch);
62
+ return OK;
63
+ }
64
+ };
65
+
66
+ template <typename T>
67
+ bool IsZero(T v) {
68
+ return v == 0.0f;
69
+ }
70
+
71
+ template <>
72
+ bool IsZero(BFloat16 v) {
73
+ return v.x == 0;
74
+ }
75
+
76
+ template <>
77
+ bool IsZero(Half v) {
78
+ return float(v) == 0.0f;
79
+ }
80
+
81
+ template <>
82
+ bool IsZero(c10::complex<double> v) {
83
+ return v == 0.0;
84
+ }
85
+
86
+ template <>
87
+ bool IsZero(c10::complex<float> v) {
88
+ return v == 0.0f;
89
+ }
90
+
91
+ template <typename T>
92
+ std::string TypeName(T v) {
93
+ return "unknown";
94
+ }
95
+
96
+ template <>
97
+ std::string TypeName(float v) {
98
+ return "float";
99
+ }
100
+
101
+ template <>
102
+ std::string TypeName(double v) {
103
+ return "double";
104
+ }
105
+
106
+ template <>
107
+ std::string TypeName(BFloat16 v) {
108
+ return "BFloat16";
109
+ }
110
+
111
+ template <>
112
+ std::string TypeName(Half v) {
113
+ return "Half";
114
+ }
115
+
116
+ template <>
117
+ std::string TypeName(c10::complex<double> v) {
118
+ return "c10::complex<double>";
119
+ }
120
+
121
+ template <>
122
+ std::string TypeName(c10::complex<float> v) {
123
+ return "c10::complex<float>";
124
+ }
125
+
126
+
127
+ template <typename T, BlasOp ALayout, BlasOp BLayout>
128
+ class GemmTunableOp : public TunableOp<GemmParams<T>, StreamTimer> {
129
+ public:
130
+ GemmTunableOp() {
131
+ this->RegisterOp(std::string("Default"), std::make_unique<DefaultGemmOp<T>>());
132
+
133
+ auto validators = getTuningContext()->GetTuningResultsValidator().GetAllValidators();
134
+
135
+ #ifdef USE_ROCM
136
+ for (auto&& [name, op] : GetRocBlasGemmTypeStringAndOps<T>()) {
137
+ this->RegisterOp(std::move(name), std::move(op));
138
+ }
139
+
140
+ if (validators.find("ROCM_VERSION") == validators.end()) {
141
+ std::string rocm_version = ROCM_BUILD_INFO;
142
+ getTuningContext()->GetTuningResultsValidator().RegisterValidator(
143
+ "ROCM_VERSION",
144
+ [rocm_version]() { return rocm_version; },
145
+ [rocm_version](auto&& k) { return rocm_version == k ? OK : FAIL; });
146
+ }
147
+
148
+ if (validators.find("GCN_ARCH_NAME") == validators.end()) {
149
+ std::string gcn_arch_name = at::cuda::getCurrentDeviceProperties()->gcnArchName;
150
+ getTuningContext()->GetTuningResultsValidator().RegisterValidator(
151
+ "GCN_ARCH_NAME",
152
+ [gcn_arch_name]() { return gcn_arch_name; },
153
+ [gcn_arch_name](auto&& k) { return gcn_arch_name == k ? OK : FAIL; });
154
+ }
155
+
156
+ if (validators.find("ROCBLAS_VERSION") == validators.end()) {
157
+ std::string rocblas_version = c10::str(
158
+ XSTRINGIFY(ROCBLAS_VERSION_MAJOR), ".",
159
+ XSTRINGIFY(ROCBLAS_VERSION_MINOR), ".",
160
+ XSTRINGIFY(ROCBLAS_VERSION_PATCH), "-",
161
+ XSTRINGIFY(ROCBLAS_VERSION_TWEAK));
162
+ getTuningContext()->GetTuningResultsValidator().RegisterValidator(
163
+ "ROCBLAS_VERSION",
164
+ [rocblas_version]() { return rocblas_version; },
165
+ [rocblas_version](auto&& k) { return rocblas_version == k ? OK : FAIL; });
166
+ }
167
+ #endif
168
+
169
+ #if defined(USE_ROCM) && ROCM_VERSION >= 50700
170
+ static const char *env = std::getenv("PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED");
171
+ if (env == nullptr || strcmp(env, "1") == 0) {
172
+ // disallow tuning of hipblaslt with c10::complex
173
+ if constexpr (
174
+ !std::is_same_v<T, c10::complex<float>> &&
175
+ !std::is_same_v<T, c10::complex<double>>) {
176
+ for (auto&& [name, op] : GetHipBlasLtGemmTypeStringAndOps<T, ALayout, BLayout>()) {
177
+ this->RegisterOp(std::move(name), std::move(op));
178
+ }
179
+ }
180
+
181
+ if (validators.find("HIPBLASLT_VERSION") == validators.end()) {
182
+ std::string hipblaslt_version = c10::str(
183
+ XSTRINGIFY(HIPBLASLT_VERSION_MAJOR), ".",
184
+ XSTRINGIFY(HIPBLASLT_VERSION_MINOR), ".",
185
+ XSTRINGIFY(HIPBLASLT_VERSION_PATCH), "-",
186
+ XSTRINGIFY(HIPBLASLT_VERSION_TWEAK));
187
+ getTuningContext()->GetTuningResultsValidator().RegisterValidator(
188
+ "HIPBLASLT_VERSION",
189
+ [hipblaslt_version]() { return hipblaslt_version; },
190
+ [hipblaslt_version](auto&& k) { return hipblaslt_version == k ? OK : FAIL; });
191
+ }
192
+ }
193
+ #endif
194
+ }
195
+
196
+ std::string Signature() override {
197
+ return c10::str("GemmTunableOp_", TypeName<T>(T{}), "_", BlasOpToString(ALayout), BlasOpToString(BLayout));
198
+ }
199
+ };
200
+
201
+ template <typename T, BlasOp ALayout, BlasOp BLayout>
202
+ class GemmStridedBatchedTunableOp : public TunableOp<GemmStridedBatchedParams<T>, StreamTimer> {
203
+ public:
204
+ GemmStridedBatchedTunableOp() {
205
+ this->RegisterOp(std::string("Default"), std::make_unique<DefaultGemmStridedBatchedOp<T>>());
206
+
207
+ auto validators = getTuningContext()->GetTuningResultsValidator().GetAllValidators();
208
+
209
+ #ifdef USE_ROCM
210
+ for (auto&& [name, op] : GetRocBlasGemmStridedBatchedTypeStringAndOps<T>()) {
211
+ this->RegisterOp(std::move(name), std::move(op));
212
+ }
213
+
214
+ if (validators.find("ROCM_VERSION") == validators.end()) {
215
+ std::string rocm_version = ROCM_BUILD_INFO;
216
+ getTuningContext()->GetTuningResultsValidator().RegisterValidator(
217
+ "ROCM_VERSION",
218
+ [rocm_version]() { return rocm_version; },
219
+ [rocm_version](auto&& k) { return rocm_version == k ? OK : FAIL; });
220
+ }
221
+
222
+ if (validators.find("GCN_ARCH_NAME") == validators.end()) {
223
+ std::string gcn_arch_name = at::cuda::getCurrentDeviceProperties()->gcnArchName;
224
+ getTuningContext()->GetTuningResultsValidator().RegisterValidator(
225
+ "GCN_ARCH_NAME",
226
+ [gcn_arch_name]() { return gcn_arch_name; },
227
+ [gcn_arch_name](auto&& k) { return gcn_arch_name == k ? OK : FAIL; });
228
+ }
229
+
230
+ if (validators.find("ROCBLAS_VERSION") == validators.end()) {
231
+ std::string rocblas_version = c10::str(
232
+ XSTRINGIFY(ROCBLAS_VERSION_MAJOR), ".",
233
+ XSTRINGIFY(ROCBLAS_VERSION_MINOR), ".",
234
+ XSTRINGIFY(ROCBLAS_VERSION_PATCH), "-",
235
+ XSTRINGIFY(ROCBLAS_VERSION_TWEAK));
236
+ getTuningContext()->GetTuningResultsValidator().RegisterValidator(
237
+ "ROCBLAS_VERSION",
238
+ [rocblas_version]() { return rocblas_version; },
239
+ [rocblas_version](auto&& k) { return rocblas_version == k ? OK : FAIL; });
240
+ }
241
+ #endif
242
+
243
+ #if defined(USE_ROCM) && ROCM_VERSION >= 50700
244
+ static const char *env = std::getenv("PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED");
245
+ if (env == nullptr || strcmp(env, "1") == 0) {
246
+ // disallow tuning of hipblaslt with c10::complex
247
+ if constexpr (
248
+ !std::is_same_v<T, c10::complex<float>> &&
249
+ !std::is_same_v<T, c10::complex<double>>) {
250
+ for (auto&& [name, op] : GetHipBlasLtGemmStridedBatchedTypeStringAndOps<T, ALayout, BLayout>()) {
251
+ this->RegisterOp(std::move(name), std::move(op));
252
+ }
253
+ }
254
+
255
+ if (validators.find("HIPBLASLT_VERSION") == validators.end()) {
256
+ std::string hipblaslt_version = c10::str(
257
+ XSTRINGIFY(HIPBLASLT_VERSION_MAJOR), ".",
258
+ XSTRINGIFY(HIPBLASLT_VERSION_MINOR), ".",
259
+ XSTRINGIFY(HIPBLASLT_VERSION_PATCH), "-",
260
+ XSTRINGIFY(HIPBLASLT_VERSION_TWEAK));
261
+ getTuningContext()->GetTuningResultsValidator().RegisterValidator(
262
+ "HIPBLASLT_VERSION",
263
+ [hipblaslt_version]() { return hipblaslt_version; },
264
+ [hipblaslt_version](auto&& k) { return hipblaslt_version == k ? OK : FAIL; });
265
+ }
266
+ }
267
+ #endif
268
+ }
269
+
270
+ std::string Signature() override {
271
+ return c10::str("GemmStridedBatchedTunableOp_", TypeName<T>(T{}), "_", BlasOpToString(ALayout), BlasOpToString(BLayout));
272
+ }
273
+ };
274
+
275
+ #undef XSTRINGIFY
276
+ #undef STRINGIFY
277
+
278
+ } // namespace at::cuda::tunable
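
A rough sketch of how a caller might route a single-precision GEMM through the tunable op above. The GemmParams fields are the ones consumed by DefaultGemmOp; treating BlasOp::N as the non-transposed layout is an assumption here, since the enum lives in GemmCommon.h, which is not shown.

#include <ATen/cuda/tunable/TunableGemm.h>

void tunable_sgemm(at::cuda::tunable::GemmParams<float>* params) {
  // One long-lived op per (dtype, layout) combination; tuning results are
  // cached by the TuningResultsManager keyed on params->Signature().
  static at::cuda::tunable::GemmTunableOp<
      float, at::cuda::tunable::BlasOp::N, at::cuda::tunable::BlasOp::N>
      gemm;
  gemm(params);  // looks up the fastest registered candidate (or tunes), then calls it
}
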
venv/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/TunableOp.h ADDED
@@ -0,0 +1,242 @@
1
+ // Original TunableOp is from onnxruntime.
2
+ // https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/core/framework/tunable.h
3
+ // https://github.com/microsoft/onnxruntime/tree/main/onnxruntime/core/providers/rocm/tunable
4
+ // Copyright (c) Microsoft Corporation.
5
+ // Licensed under the MIT license.
6
+ //
7
+ // Adapting TunableOp into PyTorch
8
+ // Copyright (c) Advanced Micro Devices, Inc.
9
+ //
10
+ #pragma once
11
+
12
+ #include <ATen/cuda/tunable/Tunable.h>
13
+ #include <c10/cuda/CUDACachingAllocator.h>
14
+
15
+ #ifndef _WIN32
16
+ #include <cxxabi.h>
17
+ #endif
18
+
19
+ #include <string>
20
+ #include <type_traits>
21
+ #include <unordered_map>
22
+ #include <vector>
23
+
24
+ namespace at::cuda::tunable {
25
+
26
+ template <typename ParamsT>
27
+ class Callable {
28
+ public:
29
+ Callable() = default;
30
+ Callable(Callable&&) = default;
31
+ virtual ~Callable() = default;
32
+ virtual TuningStatus Call(const ParamsT*) {
33
+ return FAIL;
34
+ }
35
+ virtual TuningStatus IsSupported(const ParamsT* params) {
36
+ return Call(params);
37
+ }
38
+ };
39
+
40
+ template <typename ParamsT, typename TimerT>
41
+ class TunableOp {
42
+ public:
43
+ TunableOp() = default;
44
+ TunableOp(TunableOp&&) = default;
45
+ virtual ~TunableOp() = default;
46
+
47
+ TuningStatus operator()(const ParamsT* params) {
48
+ ResultEntry result = ResultEntry::Null();
49
+ TuningContext* ctx = getTuningContext();
50
+ if (ctx->IsTunableOpEnabled()) {
51
+ auto& mgr = ctx->GetTuningResultsManager();
52
+ auto op_sig = Signature();
53
+ auto params_sig = params->Signature();
54
+ result = mgr.Lookup(op_sig, params_sig);
55
+ // If no previous tuning result is found, do the tuning only if tuning is enabled
56
+ if (result == ResultEntry::Null() && ctx->IsTuningEnabled()) {
57
+ result = FindFastest(params);
58
+ mgr.Add(op_sig, params_sig, result);
59
+ }
60
+ }
61
+ else {
62
+ result = ResultEntry::Default();
63
+ }
64
+ if (result == ResultEntry::Null()) {
65
+ TUNABLE_LOG("no result, using default");
66
+ result = ResultEntry::Default();
67
+ }
68
+ auto iter = ops_.find(result);
69
+ TORCH_CHECK(iter != ops_.end());
70
+ return iter->second->Call(params);
71
+ }
72
+
73
+ virtual std::string Signature() {
74
+ // According to C++17 standard https://wg21.link/n4659 section 15.7.4
75
+ // > if the operand of typeid refers to the
76
+ // > object under construction or destruction, typeid yields the std::type_info object representing the constructor
77
+ // > or destructor’s class.
78
+ // So delay the op signature generation.
79
+ c10::call_once(signature_init_once_, [this]() { signature_ = CreateSignature(); });
80
+ return signature_;
81
+ }
82
+
83
+ protected:
84
+ void RegisterOp(const std::string& name, std::unique_ptr<Callable<ParamsT>> op) {
85
+ this->op_names_.emplace_back(name);
86
+ this->ops_.emplace(name, std::move(op));
87
+ }
88
+
89
+ private:
90
+ static void WarmUp(Callable<ParamsT> *op, ParamsT* param, size_t num_iter) {
91
+ for (size_t i = 0; i < num_iter; i++) {
92
+ TORCH_CHECK(op->Call(param) == OK);
93
+ }
94
+ }
95
+
96
+ static double Profile(Callable<ParamsT> *op, ParamsT* param, size_t num_iter) {
97
+ TimerT timer{};
98
+ timer.Start();
99
+ for (size_t i = 0; i < num_iter; i++) {
100
+ TORCH_CHECK(op->Call(param) == OK);
101
+ }
102
+ timer.End();
103
+ return timer.Duration() / num_iter;
104
+ }
105
+
106
+ protected:
107
+ bool IsNumericsCheckEnabled() {
108
+ static const char *env = getenv("PYTORCH_TUNABLEOP_NUMERICAL_CHECK");
109
+ if (env != nullptr && strcmp(env, "0") == 0) {
110
+ return false;
111
+ }
112
+ return true;
113
+ }
114
+
115
+ virtual ResultEntry FindFastest(const ParamsT* params) {
116
+ TuningContext* ctx = getTuningContext();
117
+ auto op_sig = Signature();
118
+ auto params_sig = params->Signature();
119
+ TUNABLE_LOG("finding fastest for ", op_sig, '(', params_sig, ')', " out of ", op_names_.size(), " candidates");
120
+ auto min_duration_ms = std::numeric_limits<double>::infinity();
121
+ std::string id_name = "Default";
122
+
123
+ // calculate a reference answer for the numerical check
124
+ ParamsT* reference_params = params->DeepCopy();
125
+ TORCH_CHECK(ops_[ResultEntry::Default()]->Call(reference_params) == OK);
126
+
127
+ // need a copy of params to reuse
128
+ ParamsT* reusable_params = params->DeepCopy();
129
+
130
+ for (size_t i = 0; i < op_names_.size(); i++) {
131
+ auto* candidate = ops_[op_names_[i]].get(); // borrow pointer
132
+ auto status = candidate->Call(reusable_params);
133
+ if (status != OK) {
134
+ TUNABLE_LOG("├──unsupported id=", i, ", ", op_sig, '(', params_sig, ") ", op_names_[i]);
135
+ continue;
136
+ }
137
+
138
+ if (IsNumericsCheckEnabled()) {
139
+ ParamsT* numerical_params = params->DeepCopy();
140
+ WarmUp(candidate, numerical_params, 1);
141
+ status = reference_params->NumericalCheck(numerical_params);
142
+ numerical_params->Delete();
143
+ if (status != OK) {
144
+ TUNABLE_LOG("├──numerics check failed for id=", i, ", ", op_sig, '(', params_sig, ") ", op_names_[i]);
145
+ continue;
146
+ }
147
+ }
148
+
149
+ // collect a small profile
150
+ constexpr const int approx_num_iter = 3;
151
+ auto approx_duration = Profile(candidate, reusable_params, approx_num_iter);
152
+ // bail if too slow
153
+ if (approx_duration > 2 * min_duration_ms) {
154
+ TUNABLE_LOG("├──skip slow instance id=", i, ", ", op_sig, '(', params_sig, ") ", op_names_[i]);
155
+ continue;
156
+ }
157
+
158
+ // for warmup: did the user set a max duration, max iterations, or both?
159
+ double max_warmup_duration = ctx->GetMaxWarmupDurationMs();
160
+ int max_warmup_iter = ctx->GetMaxWarmupIterations();
161
+ int warmup_iter = 1; // default
162
+ if (max_warmup_duration > 0) {
163
+ int duration_iters = max_warmup_duration / approx_duration;
164
+ if (max_warmup_iter > 0) {
165
+ warmup_iter = std::min(max_warmup_iter, duration_iters);
166
+ }
167
+ else {
168
+ warmup_iter = duration_iters;
169
+ }
170
+ }
171
+ else if (max_warmup_iter > 0) {
172
+ warmup_iter = max_warmup_iter;
173
+ }
174
+
175
+ // for tuning: did the user set a max duration, max iterations, or both?
176
+ double max_tuning_duration = ctx->GetMaxTuningDurationMs();
177
+ int max_tuning_iter = ctx->GetMaxTuningIterations();
178
+ int tuning_iter = 100; // default
179
+ if (max_tuning_duration > 0) {
180
+ int duration_iters = max_tuning_duration / approx_duration;
181
+ if (max_tuning_iter > 0) {
182
+ tuning_iter = std::min(max_tuning_iter, duration_iters);
183
+ }
184
+ else {
185
+ tuning_iter = duration_iters;
186
+ }
187
+ }
188
+ else if (max_tuning_iter > 0) {
189
+ tuning_iter = max_tuning_iter;
190
+ }
191
+
192
+ // do the full warmup followed by tuning
193
+ double warmup_ms = warmup_iter * approx_duration;
194
+ double tuning_ms = tuning_iter * approx_duration;
195
+ TUNABLE_LOG("├──tuning using "
196
+ "warmup iters ", warmup_iter, " [", warmup_ms, " ms] "
197
+ "and tuning iters ", tuning_iter, " [", tuning_ms, " ms] ",
198
+ "instance id=", i, ", ", op_sig, "(", params_sig, ") ", op_names_[i]);
199
+ WarmUp(candidate, reusable_params, warmup_iter);
200
+ auto duration_ms = Profile(candidate, reusable_params, tuning_iter);
201
+ if (duration_ms < min_duration_ms) {
202
+ TUNABLE_LOG("├──found better instance id=", i, ". " , duration_ms, "ms. ", op_names_[i]);
203
+ min_duration_ms = duration_ms;
204
+ id_name = op_names_[i];
205
+ }
206
+ }
207
+
208
+ reusable_params->Delete();
209
+ reference_params->Delete();
210
+
211
+ TUNABLE_LOG("└──found fastest for ", op_sig, '(', params_sig, ") ", id_name);
212
+ return ResultEntry(id_name, min_duration_ms);
213
+ }
214
+
215
+ private:
216
+ std::string CreateSignature() {
217
+ #ifndef _WIN32
218
+ const auto* name = typeid(*this).name();
219
+ char buf[256];
220
+ size_t buf_len = 256;
221
+ abi::__cxa_demangle(name, buf, &buf_len, nullptr);
222
+ buf[255] = '\0';
223
+ return buf;
224
+ #else
225
+ return typeid(*this).name();
226
+ #endif
227
+ }
228
+
229
+ mutable c10::once_flag signature_init_once_;
230
+ std::string signature_;
231
+
232
+ std::unordered_map<std::string, std::unique_ptr<Callable<ParamsT>>> ops_;
233
+ std::vector<std::string> op_names_;
234
+ };
235
+
236
+ struct OpParams {
237
+ OpParams() {}
238
+ virtual ~OpParams() = default;
239
+ virtual std::string Signature() const = 0;
240
+ };
241
+
242
+ } // namespace at::cuda::tunable
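
TunableOp is generic over the params type and timer, so backends other than GEMM can reuse the same tune-once-then-dispatch machinery. The following is a hypothetical sketch (VecScaleParams and NaiveVecScale are invented for illustration); note that FindFastest() also relies on the params type providing DeepCopy(), Delete(), and NumericalCheck(), which are assumed signatures here.

#include <ATen/cuda/tunable/StreamTimer.h>
#include <ATen/cuda/tunable/TunableOp.h>
#include <cstdint>
#include <memory>
#include <string>

using namespace at::cuda::tunable;

struct VecScaleParams : OpParams {
  std::string Signature() const override { return "vecscale_" + std::to_string(n); }
  // Needed by FindFastest(): copy, free, and compare against the reference result.
  VecScaleParams* DeepCopy() const { return new VecScaleParams(*this); }
  void Delete() { delete this; }
  TuningStatus NumericalCheck(VecScaleParams*) { return OK; }
  float* x;
  float alpha;
  int64_t n;
};

class NaiveVecScale : public Callable<VecScaleParams> {
 public:
  TuningStatus Call(const VecScaleParams* p) override {
    for (int64_t i = 0; i < p->n; ++i) {
      p->x[i] *= p->alpha;
    }
    return OK;
  }
};

class VecScaleTunableOp : public TunableOp<VecScaleParams, StreamTimer> {
 public:
  VecScaleTunableOp() {
    // "Default" is the fallback entry used when tuning is disabled or yields no result.
    RegisterOp("Default", std::make_unique<NaiveVecScale>());
  }
};
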
venv/lib/python3.10/site-packages/torch/include/ATen/detail/AcceleratorHooksInterface.h ADDED
@@ -0,0 +1,21 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Device.h>
4
+
5
+ namespace at {
6
+
7
+ // AcceleratorHooksInterface is a shared interface provided by all
8
+ // accelerators to allow generic code.
9
+ // This interface is hook-based as it corresponds to all the functions
10
+ // that are going to be called in a generic way from the CPU code.
11
+
12
+ struct TORCH_API AcceleratorHooksInterface {
13
+ // This should never actually be implemented, but it is used to
14
+ // squelch -Werror=non-virtual-dtor
15
+ virtual ~AcceleratorHooksInterface() = default;
16
+
17
+ // Whether the device at device_index is fully initialized or not.
18
+ virtual bool hasPrimaryContext(DeviceIndex device_index) const = 0;
19
+ };
20
+
21
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/detail/CUDAHooksInterface.h ADDED
@@ -0,0 +1,201 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Allocator.h>
4
+ #include <c10/util/Exception.h>
5
+ #include <c10/util/Registry.h>
6
+
7
+ #include <ATen/detail/AcceleratorHooksInterface.h>
8
+
9
+ // Forward-declares at::Generator and at::cuda::NVRTC
10
+ namespace at {
11
+ struct Generator;
12
+ namespace cuda {
13
+ struct NVRTC;
14
+ } // namespace cuda
15
+ } // namespace at
16
+
17
+ // NB: Class must live in `at` due to limitations of Registry.h.
18
+ namespace at {
19
+
20
+ #ifdef _MSC_VER
21
+ constexpr const char* CUDA_HELP =
22
+ "PyTorch splits its backend into two shared libraries: a CPU library "
23
+ "and a CUDA library; this error has occurred because you are trying "
24
+ "to use some CUDA functionality, but the CUDA library has not been "
25
+ "loaded by the dynamic linker for some reason. The CUDA library MUST "
26
+ "be loaded, EVEN IF you don't directly use any symbols from the CUDA library! "
27
+ "One common culprit is a lack of -INCLUDE:?warp_size@cuda@at@@YAHXZ "
28
+ "in your link arguments; many dynamic linkers will delete dynamic library "
29
+ "dependencies if you don't depend on any of their symbols. You can check "
30
+ "if this has occurred by using link on your binary to see if there is a "
31
+ "dependency on *_cuda.dll library.";
32
+ #else
33
+ constexpr const char* CUDA_HELP =
34
+ "PyTorch splits its backend into two shared libraries: a CPU library "
35
+ "and a CUDA library; this error has occurred because you are trying "
36
+ "to use some CUDA functionality, but the CUDA library has not been "
37
+ "loaded by the dynamic linker for some reason. The CUDA library MUST "
38
+ "be loaded, EVEN IF you don't directly use any symbols from the CUDA library! "
39
+ "One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many "
40
+ "dynamic linkers will delete dynamic library dependencies if you don't "
41
+ "depend on any of their symbols. You can check if this has occurred by "
42
+ "using ldd on your binary to see if there is a dependency on *_cuda.so "
43
+ "library.";
44
+ #endif
45
+
46
+ // The CUDAHooksInterface is an omnibus interface for any CUDA functionality
47
+ // which we may want to call into from CPU code (and thus must be dynamically
48
+ // dispatched, to allow for separate compilation of CUDA code). How do I
49
+ // decide if a function should live in this class? There are two tests:
50
+ //
51
+ // 1. Does the *implementation* of this function require linking against
52
+ // CUDA libraries?
53
+ //
54
+ // 2. Is this function *called* from non-CUDA ATen code?
55
+ //
56
+ // (2) should filter out many ostensible use-cases, since many times a CUDA
57
+ // function provided by ATen is only really ever used by actual CUDA code.
58
+ //
59
+ // TODO: Consider putting the stub definitions in another class, so that one
60
+ // never forgets to implement each virtual function in the real implementation
61
+ // in CUDAHooks. This probably doesn't buy us much though.
62
+ struct TORCH_API CUDAHooksInterface : AcceleratorHooksInterface {
63
+ // This should never actually be implemented, but it is used to
64
+ // squelch -Werror=non-virtual-dtor
65
+ virtual ~CUDAHooksInterface() override = default;
66
+
67
+ // Initialize THCState and, transitively, the CUDA state
68
+ virtual void initCUDA() const {
69
+ TORCH_CHECK(false, "Cannot initialize CUDA without ATen_cuda library. ", CUDA_HELP);
70
+ }
71
+
72
+ virtual const Generator& getDefaultCUDAGenerator(C10_UNUSED DeviceIndex device_index = -1) const {
73
+ TORCH_CHECK(false, "Cannot get default CUDA generator without ATen_cuda library. ", CUDA_HELP);
74
+ }
75
+
76
+ virtual Device getDeviceFromPtr(void* /*data*/) const {
77
+ TORCH_CHECK(false, "Cannot get device of pointer on CUDA without ATen_cuda library. ", CUDA_HELP);
78
+ }
79
+
80
+ virtual bool isPinnedPtr(const void* /*data*/) const {
81
+ return false;
82
+ }
83
+
84
+ virtual bool hasCUDA() const {
85
+ return false;
86
+ }
87
+
88
+ virtual bool hasCUDART() const {
89
+ return false;
90
+ }
91
+
92
+ virtual bool hasMAGMA() const {
93
+ return false;
94
+ }
95
+
96
+ virtual bool hasCuDNN() const {
97
+ return false;
98
+ }
99
+
100
+ virtual bool hasCuSOLVER() const {
101
+ return false;
102
+ }
103
+
104
+ virtual bool hasROCM() const {
105
+ return false;
106
+ }
107
+
108
+ virtual const at::cuda::NVRTC& nvrtc() const {
109
+ TORCH_CHECK(false, "NVRTC requires CUDA. ", CUDA_HELP);
110
+ }
111
+
112
+ virtual bool hasPrimaryContext(DeviceIndex device_index) const override {
113
+ TORCH_CHECK(false, "Cannot call hasPrimaryContext(", device_index, ") without ATen_cuda library. ", CUDA_HELP);
114
+ }
115
+
116
+ virtual DeviceIndex current_device() const {
117
+ return -1;
118
+ }
119
+
120
+ virtual Allocator* getPinnedMemoryAllocator() const {
121
+ TORCH_CHECK(false, "Pinned memory requires CUDA. ", CUDA_HELP);
122
+ }
123
+
124
+ virtual Allocator* getCUDADeviceAllocator() const {
125
+ TORCH_CHECK(false, "CUDADeviceAllocator requires CUDA. ", CUDA_HELP);
126
+ }
127
+
128
+ virtual bool compiledWithCuDNN() const {
129
+ return false;
130
+ }
131
+
132
+ virtual bool compiledWithMIOpen() const {
133
+ return false;
134
+ }
135
+
136
+ virtual bool supportsDilatedConvolutionWithCuDNN() const {
137
+ return false;
138
+ }
139
+
140
+ virtual bool supportsDepthwiseConvolutionWithCuDNN() const {
141
+ return false;
142
+ }
143
+
144
+ virtual bool supportsBFloat16ConvolutionWithCuDNNv8() const {
145
+ return false;
146
+ }
147
+
148
+ virtual long versionCuDNN() const {
149
+ TORCH_CHECK(false, "Cannot query cuDNN version without ATen_cuda library. ", CUDA_HELP);
150
+ }
151
+
152
+ virtual long versionCUDART() const {
153
+ TORCH_CHECK(false, "Cannot query CUDART version without ATen_cuda library. ", CUDA_HELP);
154
+ }
155
+
156
+ virtual std::string showConfig() const {
157
+ TORCH_CHECK(false, "Cannot query detailed CUDA version without ATen_cuda library. ", CUDA_HELP);
158
+ }
159
+
160
+ virtual double batchnormMinEpsilonCuDNN() const {
161
+ TORCH_CHECK(false,
162
+ "Cannot query batchnormMinEpsilonCuDNN() without ATen_cuda library. ", CUDA_HELP);
163
+ }
164
+
165
+ virtual int64_t cuFFTGetPlanCacheMaxSize(DeviceIndex /*device_index*/) const {
166
+ TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP);
167
+ }
168
+
169
+ virtual void cuFFTSetPlanCacheMaxSize(DeviceIndex /*device_index*/, int64_t /*max_size*/) const {
170
+ TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP);
171
+ }
172
+
173
+ virtual int64_t cuFFTGetPlanCacheSize(DeviceIndex /*device_index*/) const {
174
+ TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP);
175
+ }
176
+
177
+ virtual void cuFFTClearPlanCache(DeviceIndex /*device_index*/) const {
178
+ TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP);
179
+ }
180
+
181
+ virtual int getNumGPUs() const {
182
+ return 0;
183
+ }
184
+
185
+ virtual void deviceSynchronize(DeviceIndex /*device_index*/) const {
186
+ TORCH_CHECK(false, "Cannot synchronize CUDA device without ATen_cuda library. ", CUDA_HELP);
187
+ }
188
+ };
189
+
190
+ // NB: dummy argument to suppress "ISO C++11 requires at least one argument
191
+ // for the "..." in a variadic macro"
192
+ struct TORCH_API CUDAHooksArgs {};
193
+
194
+ TORCH_DECLARE_REGISTRY(CUDAHooksRegistry, CUDAHooksInterface, CUDAHooksArgs);
195
+ #define REGISTER_CUDA_HOOKS(clsname) \
196
+ C10_REGISTER_CLASS(CUDAHooksRegistry, clsname, clsname)
197
+
198
+ namespace detail {
199
+ TORCH_API const CUDAHooksInterface& getCUDAHooks();
200
+ } // namespace detail
201
+ } // namespace at
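
The registry declared at the bottom is how the separately compiled CUDA library plugs its real implementation in at load time. A hypothetical sketch of the provider side follows (FakeCUDAHooks is invented here); generic CPU code then reaches whichever implementation is registered through at::detail::getCUDAHooks().

#include <ATen/detail/CUDAHooksInterface.h>

namespace at {

struct FakeCUDAHooks : CUDAHooksInterface {
  // The registry constructs hooks with a CUDAHooksArgs instance.
  FakeCUDAHooks(CUDAHooksArgs) {}
  bool hasCUDA() const override { return true; }
  DeviceIndex current_device() const override { return 0; }
  bool hasPrimaryContext(DeviceIndex) const override { return true; }
};

// Registers the class so detail::getCUDAHooks() can construct and return it.
REGISTER_CUDA_HOOKS(FakeCUDAHooks);

} // namespace at
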
venv/lib/python3.10/site-packages/torch/include/ATen/detail/FunctionTraits.h ADDED
@@ -0,0 +1,102 @@
1
+ #pragma once
2
+
3
+ #include <tuple>
4
+
5
+ // Modified from https://stackoverflow.com/questions/7943525/is-it-possible-to-figure-out-the-parameter-type-and-return-type-of-a-lambda
6
+
7
+ // Fallback, anything with an operator()
8
+ template <typename T>
9
+ struct function_traits : public function_traits<decltype(&T::operator())> {
10
+ };
11
+
12
+ // Pointers to class members that are themselves functors.
13
+ // For example, in the following code:
14
+ // template <typename func_t>
15
+ // struct S {
16
+ // func_t f;
17
+ // };
18
+ // template <typename func_t>
19
+ // S<func_t> make_s(func_t f) {
20
+ // return S<func_t> { .f = f };
21
+ // }
22
+ //
23
+ // auto s = make_s([] (int, float) -> double { /* ... */ });
24
+ //
25
+ // function_traits<decltype(&s::f)> traits;
26
+ template <typename ClassType, typename T>
27
+ struct function_traits<T ClassType::*> : public function_traits<T> {
28
+ };
29
+
30
+ // Const class member functions
31
+ template <typename ClassType, typename ReturnType, typename... Args>
32
+ struct function_traits<ReturnType(ClassType::*)(Args...) const> : public function_traits<ReturnType(Args...)> {
33
+ };
34
+
35
+ // Reference types
36
+ template <typename T>
37
+ struct function_traits<T&> : public function_traits<T> {};
38
+ template <typename T>
39
+ struct function_traits<T*> : public function_traits<T> {};
40
+
41
+ // Free functions
42
+ template <typename ReturnType, typename... Args>
43
+ struct function_traits<ReturnType(Args...)> {
44
+ // arity is the number of arguments.
45
+ enum { arity = sizeof...(Args) };
46
+
47
+ typedef std::tuple<Args...> ArgsTuple;
48
+ typedef ReturnType result_type;
49
+
50
+ template <size_t i>
51
+ struct arg
52
+ {
53
+ typedef typename std::tuple_element<i, std::tuple<Args...>>::type type;
54
+ // the i-th argument is equivalent to the i-th tuple element of a tuple
55
+ // composed of those arguments.
56
+ };
57
+ };
58
+
59
+ template <typename T>
60
+ struct nullary_function_traits {
61
+ using traits = function_traits<T>;
62
+ using result_type = typename traits::result_type;
63
+ };
64
+
65
+ template <typename T>
66
+ struct unary_function_traits {
67
+ using traits = function_traits<T>;
68
+ using result_type = typename traits::result_type;
69
+ using arg1_t = typename traits::template arg<0>::type;
70
+ };
71
+
72
+ template <typename T>
73
+ struct binary_function_traits {
74
+ using traits = function_traits<T>;
75
+ using result_type = typename traits::result_type;
76
+ using arg1_t = typename traits::template arg<0>::type;
77
+ using arg2_t = typename traits::template arg<1>::type;
78
+ };
79
+
80
+
81
+ // Traits for calling with c10::guts::invoke, where member_functions have a first argument of ClassType
82
+ template <typename T>
83
+ struct invoke_traits : public function_traits<T>{
84
+ };
85
+
86
+ template <typename T>
87
+ struct invoke_traits<T&> : public invoke_traits<T>{
88
+ };
89
+
90
+ template <typename T>
91
+ struct invoke_traits<T&&> : public invoke_traits<T>{
92
+ };
93
+
94
+ template <typename ClassType, typename ReturnType, typename... Args>
95
+ struct invoke_traits<ReturnType(ClassType::*)(Args...)> :
96
+ public function_traits<ReturnType(ClassType&, Args...)> {
97
+ };
98
+
99
+ template <typename ClassType, typename ReturnType, typename... Args>
100
+ struct invoke_traits<ReturnType(ClassType::*)(Args...) const> :
101
+ public function_traits<ReturnType(const ClassType&, Args...)> {
102
+ };
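
For reference, a small compile-time usage sketch of the traits above (illustrative, not part of the header):

#include <ATen/detail/FunctionTraits.h>
#include <type_traits>

void function_traits_example() {
  auto f = [](int, float) -> double { return 0.0; };
  using traits = function_traits<decltype(f)>;
  // Arity, return type, and per-argument types are all available at compile time.
  static_assert(traits::arity == 2, "two arguments");
  static_assert(std::is_same_v<traits::result_type, double>, "returns double");
  static_assert(std::is_same_v<traits::arg<1>::type, float>, "second arg is float");
}
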
venv/lib/python3.10/site-packages/torch/include/ATen/detail/HIPHooksInterface.h ADDED
@@ -0,0 +1,70 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Allocator.h>
4
+ #include <c10/core/GeneratorImpl.h>
5
+ #include <c10/util/Exception.h>
6
+
7
+ #include <c10/util/Registry.h>
8
+
9
+ #include <cstddef>
10
+ #include <memory>
11
+
12
+ namespace at {
13
+ class Context;
14
+ }
15
+
16
+ // NB: Class must live in `at` due to limitations of Registry.h.
17
+ namespace at {
18
+
19
+ // The HIPHooksInterface is an omnibus interface for any HIP functionality
20
+ // which we may want to call into from CPU code (and thus must be dynamically
21
+ // dispatched, to allow for separate compilation of HIP code). See
22
+ // CUDAHooksInterface for more detailed motivation.
23
+ struct TORCH_API HIPHooksInterface {
24
+ // This should never actually be implemented, but it is used to
25
+ // squelch -Werror=non-virtual-dtor
26
+ virtual ~HIPHooksInterface() = default;
27
+
28
+ // Initialize the HIP library state
29
+ virtual void initHIP() const {
30
+ AT_ERROR("Cannot initialize HIP without ATen_hip library.");
31
+ }
32
+
33
+ virtual std::unique_ptr<c10::GeneratorImpl> initHIPGenerator(Context*) const {
34
+ AT_ERROR("Cannot initialize HIP generator without ATen_hip library.");
35
+ }
36
+
37
+ virtual bool hasHIP() const {
38
+ return false;
39
+ }
40
+
41
+ virtual c10::DeviceIndex current_device() const {
42
+ return -1;
43
+ }
44
+
45
+ virtual Allocator* getPinnedMemoryAllocator() const {
46
+ AT_ERROR("Pinned memory requires HIP.");
47
+ }
48
+
49
+ virtual void registerHIPTypes(Context*) const {
50
+ AT_ERROR("Cannot registerHIPTypes() without ATen_hip library.");
51
+ }
52
+
53
+ virtual int getNumGPUs() const {
54
+ return 0;
55
+ }
56
+ };
57
+
58
+ // NB: dummy argument to suppress "ISO C++11 requires at least one argument
59
+ // for the "..." in a variadic macro"
60
+ struct TORCH_API HIPHooksArgs {};
61
+
62
+ TORCH_DECLARE_REGISTRY(HIPHooksRegistry, HIPHooksInterface, HIPHooksArgs);
63
+ #define REGISTER_HIP_HOOKS(clsname) \
64
+ C10_REGISTER_CLASS(HIPHooksRegistry, clsname, clsname)
65
+
66
+ namespace detail {
67
+ TORCH_API const HIPHooksInterface& getHIPHooks();
68
+
69
+ } // namespace detail
70
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/detail/MPSHooksInterface.h ADDED
@@ -0,0 +1,106 @@
1
+ // Copyright © 2022 Apple Inc.
2
+
3
+ #pragma once
4
+
5
+ #include <c10/core/Allocator.h>
6
+ #include <ATen/core/Generator.h>
7
+ #include <ATen/detail/AcceleratorHooksInterface.h>
8
+ #include <c10/util/Exception.h>
9
+ #include <c10/util/Registry.h>
10
+
11
+ #include <cstddef>
12
+
13
+ namespace at {
14
+
15
+ struct TORCH_API MPSHooksInterface : AcceleratorHooksInterface {
16
+ // This macro makes the call fail if an MPSHooks function is invoked but the
17
+ // MPS backend is not present.
18
+ #define FAIL_MPSHOOKS_FUNC(func) \
19
+ TORCH_CHECK(false, "Cannot execute ", func, "() without MPS backend.");
20
+
21
+ virtual ~MPSHooksInterface() override = default;
22
+
23
+ // Initialize the MPS library state
24
+ virtual void initMPS() const {
25
+ FAIL_MPSHOOKS_FUNC(__func__);
26
+ }
27
+ virtual bool hasMPS() const {
28
+ return false;
29
+ }
30
+ virtual bool isOnMacOSorNewer(unsigned major = 13, unsigned minor = 0) const {
31
+ FAIL_MPSHOOKS_FUNC(__func__);
32
+ }
33
+ virtual const Generator& getDefaultMPSGenerator() const {
34
+ FAIL_MPSHOOKS_FUNC(__func__);
35
+ }
36
+ virtual Allocator* getMPSDeviceAllocator() const {
37
+ FAIL_MPSHOOKS_FUNC(__func__);
38
+ }
39
+ virtual void deviceSynchronize() const {
40
+ FAIL_MPSHOOKS_FUNC(__func__);
41
+ }
42
+ virtual void commitStream() const {
43
+ FAIL_MPSHOOKS_FUNC(__func__);
44
+ }
45
+ virtual void* getCommandBuffer() const {
46
+ FAIL_MPSHOOKS_FUNC(__func__);
47
+ }
48
+ virtual void* getDispatchQueue() const {
49
+ FAIL_MPSHOOKS_FUNC(__func__);
50
+ }
51
+ virtual void emptyCache() const {
52
+ FAIL_MPSHOOKS_FUNC(__func__);
53
+ }
54
+ virtual size_t getCurrentAllocatedMemory() const {
55
+ FAIL_MPSHOOKS_FUNC(__func__);
56
+ }
57
+ virtual size_t getDriverAllocatedMemory() const {
58
+ FAIL_MPSHOOKS_FUNC(__func__);
59
+ }
60
+ virtual void setMemoryFraction(double /*ratio*/) const {
61
+ FAIL_MPSHOOKS_FUNC(__func__);
62
+ }
63
+ virtual void profilerStartTrace(const std::string& mode, bool waitUntilCompleted) const {
64
+ FAIL_MPSHOOKS_FUNC(__func__);
65
+ }
66
+ virtual void profilerStopTrace() const {
67
+ FAIL_MPSHOOKS_FUNC(__func__);
68
+ }
69
+ virtual uint32_t acquireEvent(bool enable_timing) const {
70
+ FAIL_MPSHOOKS_FUNC(__func__);
71
+ }
72
+ virtual void releaseEvent(uint32_t event_id) const {
73
+ FAIL_MPSHOOKS_FUNC(__func__);
74
+ }
75
+ virtual void recordEvent(uint32_t event_id) const {
76
+ FAIL_MPSHOOKS_FUNC(__func__);
77
+ }
78
+ virtual void waitForEvent(uint32_t event_id) const {
79
+ FAIL_MPSHOOKS_FUNC(__func__);
80
+ }
81
+ virtual void synchronizeEvent(uint32_t event_id) const {
82
+ FAIL_MPSHOOKS_FUNC(__func__);
83
+ }
84
+ virtual bool queryEvent(uint32_t event_id) const {
85
+ FAIL_MPSHOOKS_FUNC(__func__);
86
+ }
87
+ virtual double elapsedTimeOfEvents(uint32_t start_event_id, uint32_t end_event_id) const {
88
+ FAIL_MPSHOOKS_FUNC(__func__);
89
+ }
90
+ virtual bool hasPrimaryContext(DeviceIndex device_index) const override {
91
+ FAIL_MPSHOOKS_FUNC(__func__);
92
+ }
93
+ #undef FAIL_MPSHOOKS_FUNC
94
+ };
95
+
96
+ struct TORCH_API MPSHooksArgs {};
97
+
98
+ TORCH_DECLARE_REGISTRY(MPSHooksRegistry, MPSHooksInterface, MPSHooksArgs);
99
+ #define REGISTER_MPS_HOOKS(clsname) \
100
+ C10_REGISTER_CLASS(MPSHooksRegistry, clsname, clsname)
101
+
102
+ namespace detail {
103
+ TORCH_API const MPSHooksInterface& getMPSHooks();
104
+
105
+ } // namespace detail
106
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/detail/MTIAHooksInterface.h ADDED
@@ -0,0 +1,61 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/Exception.h>
4
+
5
+ #include <c10/util/Registry.h>
6
+
7
+ #include <ATen/detail/AcceleratorHooksInterface.h>
8
+
9
+ #include <string>
10
+
11
+ namespace at {
12
+ class Context;
13
+ }
14
+
15
+ namespace at {
16
+
17
+ constexpr const char* MTIA_HELP =
18
+ "The MTIA backend requires MTIA extension for PyTorch;"
19
+ "this error has occurred because you are trying "
20
+ "to use some MTIA's functionality without MTIA extension included.";
21
+
22
+ struct TORCH_API MTIAHooksInterface : AcceleratorHooksInterface {
23
+ virtual ~MTIAHooksInterface() override = default;
24
+
25
+ virtual void initMTIA() const {
26
+ TORCH_CHECK(
27
+ false,
28
+ "Cannot initialize MTIA without MTIA Extension for PyTorch.",
29
+ MTIA_HELP);
30
+ }
31
+
32
+ virtual bool hasMTIA() const {
33
+ return false;
34
+ }
35
+
36
+ virtual std::string showConfig() const {
37
+ TORCH_CHECK(
38
+ false,
39
+ "Cannot query detailed MTIA version without MTIA Extension for PyTorch.",
40
+ MTIA_HELP);
41
+ }
42
+
43
+ virtual bool hasPrimaryContext(DeviceIndex device_index) const override {
44
+ TORCH_CHECK(
45
+ false,
46
+ "Cannot check MTIA primary context without MTIA Extension for PyTorch.",
47
+ MTIA_HELP);
48
+ }
49
+
50
+ };
51
+
52
+ struct TORCH_API MTIAHooksArgs {};
53
+
54
+ C10_DECLARE_REGISTRY(MTIAHooksRegistry, MTIAHooksInterface, MTIAHooksArgs);
55
+ #define REGISTER_MTIA_HOOKS(clsname) \
56
+ C10_REGISTER_CLASS(MTIAHooksRegistry, clsname, clsname)
57
+
58
+ namespace detail {
59
+ TORCH_API const MTIAHooksInterface& getMTIAHooks();
60
+ } // namespace detail
61
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/detail/ORTHooksInterface.h ADDED
@@ -0,0 +1,36 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/Exception.h>
4
+ #include <c10/util/Registry.h>
5
+
6
+ constexpr const char* ORT_HELP =
7
+ " You need to 'import torch_ort' to use the 'ort' device in PyTorch. "
8
+ "The 'torch_ort' module is provided by the ONNX Runtime itself "
9
+ "(https://onnxruntime.ai).";
10
+
11
+ // NB: Class must live in `at` due to limitations of Registry.h.
12
+ namespace at {
13
+
14
+ struct TORCH_API ORTHooksInterface {
15
+ // This should never actually be implemented, but it is used to
16
+ // squelch -Werror=non-virtual-dtor
17
+ virtual ~ORTHooksInterface() = default;
18
+
19
+ virtual std::string showConfig() const {
20
+ TORCH_CHECK(false, "Cannot query detailed ORT version information.", ORT_HELP);
21
+ }
22
+ };
23
+
24
+ // NB: dummy argument to suppress "ISO C++11 requires at least one argument
25
+ // for the "..." in a variadic macro"
26
+ struct TORCH_API ORTHooksArgs {};
27
+
28
+ TORCH_DECLARE_REGISTRY(ORTHooksRegistry, ORTHooksInterface, ORTHooksArgs);
29
+ #define REGISTER_ORT_HOOKS(clsname) \
30
+ C10_REGISTER_CLASS(ORTHooksRegistry, clsname, clsname)
31
+
32
+ namespace detail {
33
+ TORCH_API const ORTHooksInterface& getORTHooks();
34
+ } // namespace detail
35
+
36
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/detail/PrivateUse1HooksInterface.h ADDED
@@ -0,0 +1,61 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Generator.h>
4
+ #include <ATen/detail/AcceleratorHooksInterface.h>
5
+ #include <c10/core/Allocator.h>
6
+ #include <c10/core/Device.h>
7
+ #include <c10/core/Storage.h>
8
+ #include <c10/util/Exception.h>
9
+ namespace at {
10
+
11
+ struct TORCH_API PrivateUse1HooksInterface : AcceleratorHooksInterface {
12
+ virtual ~PrivateUse1HooksInterface() override = default;
13
+ virtual const at::Generator& getDefaultGenerator(
14
+ c10::DeviceIndex device_index) {
15
+ TORCH_CHECK_NOT_IMPLEMENTED(
16
+ false,
17
+ "You should register `PrivateUse1HooksInterface` for PrivateUse1 before call `getDefaultGenerator`.");
18
+ }
19
+
20
+ virtual at::Device getDeviceFromPtr(void* data) const {
21
+ TORCH_CHECK_NOT_IMPLEMENTED(
22
+ false,
23
+ "You should register `PrivateUse1HooksInterface` for PrivateUse1 before call `getDeviceFromPtr`.");
24
+ }
25
+
26
+ virtual Allocator* getPinnedMemoryAllocator() const {
27
+ TORCH_CHECK(
28
+ false,
29
+ "You should register `PrivateUse1HooksInterface` for PrivateUse1 before call `getPinnedMemoryAllocator`.");
30
+ }
31
+
32
+ virtual bool hasPrimaryContext(DeviceIndex device_index) const override {
33
+ TORCH_CHECK_NOT_IMPLEMENTED(
34
+ false,
35
+ "You should register `PrivateUse1HooksInterface` for PrivateUse1 before call `hasPrimaryContext`.");
36
+ }
37
+
38
+ virtual void initPrivateUse1() const {}
39
+ virtual void resizePrivateUse1Bytes(const c10::Storage &storage, size_t newsize) const {
40
+ TORCH_CHECK_NOT_IMPLEMENTED(
41
+ false,
42
+ "You should register `PrivateUse1HooksInterface` for PrivateUse1 before call `resizePrivateUse1Bytes`.");
43
+ }
44
+ };
45
+
46
+ struct TORCH_API PrivateUse1HooksArgs {};
47
+
48
+ TORCH_API void RegisterPrivateUse1HooksInterface(
49
+ at::PrivateUse1HooksInterface* hook_);
50
+
51
+ TORCH_API at::PrivateUse1HooksInterface* GetPrivateUse1HooksInterface();
52
+
53
+ TORCH_API bool isPrivateUse1HooksRegistered();
54
+
55
+ namespace detail {
56
+
57
+ TORCH_API const at::PrivateUse1HooksInterface& getPrivateUse1Hooks();
58
+
59
+ } // namespace detail
60
+
61
+ } // namespace at
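
Unlike the registry-based hooks above, PrivateUse1 hooks are handed over explicitly through RegisterPrivateUse1HooksInterface. A hypothetical sketch of what an out-of-tree backend would do (MyBackendHooks is invented here):

#include <ATen/detail/PrivateUse1HooksInterface.h>

struct MyBackendHooks : at::PrivateUse1HooksInterface {
  void initPrivateUse1() const override { /* bring up the device runtime */ }
  bool hasPrimaryContext(at::DeviceIndex /*device_index*/) const override {
    return true;
  }
};

void register_my_backend_hooks() {
  static MyBackendHooks hooks;  // must outlive every use of the hooks
  at::RegisterPrivateUse1HooksInterface(&hooks);
}
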
venv/lib/python3.10/site-packages/torch/include/ATen/detail/XPUHooksInterface.h ADDED
@@ -0,0 +1,80 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Device.h>
4
+ #include <c10/util/Exception.h>
5
+ #include <ATen/core/Generator.h>
6
+ #include <c10/util/Registry.h>
7
+
8
+ #include <cstddef>
9
+ #include <functional>
10
+ #include <memory>
11
+
12
+ namespace at {
13
+
14
+ constexpr const char* XPU_HELP =
15
+ "The XPU backend requires Intel Extension for Pytorch;"
16
+ "this error has occurred because you are trying "
17
+ "to use some XPU's functionality, but the Intel Extension for Pytorch has not been "
18
+ "loaded for some reason. The Intel Extension for Pytorch MUST "
19
+ "be loaded, EVEN IF you don't directly use any symbols from that!";
20
+
21
+ struct TORCH_API XPUHooksInterface {
22
+ virtual ~XPUHooksInterface() {}
23
+
24
+ virtual void initXPU() const {
25
+ TORCH_CHECK(
26
+ false,
27
+ "Cannot initialize XPU without Intel Extension for Pytorch.",
28
+ XPU_HELP);
29
+ }
30
+
31
+ virtual bool hasXPU() const {
32
+ return false;
33
+ }
34
+
35
+ virtual std::string showConfig() const {
36
+ TORCH_CHECK(
37
+ false,
38
+ "Cannot query detailed XPU version without Intel Extension for Pytorch. ",
39
+ XPU_HELP);
40
+ }
41
+
42
+ virtual int32_t getGlobalIdxFromDevice(const Device& device) const {
43
+ TORCH_CHECK(false, "Cannot get XPU global device index without ATen_xpu library.");
44
+ }
45
+
46
+ virtual Generator getXPUGenerator(C10_UNUSED DeviceIndex device_index = -1) const {
47
+ TORCH_CHECK(false, "Cannot get XPU generator without Intel Extension for Pytorch. ", XPU_HELP);
48
+ }
49
+
50
+ virtual const Generator& getDefaultXPUGenerator(C10_UNUSED DeviceIndex device_index = -1) const {
51
+ TORCH_CHECK(false, "Cannot get default XPU generator without Intel Extension for Pytorch. ", XPU_HELP);
52
+ }
53
+
54
+ virtual DeviceIndex getNumGPUs() const {
55
+ return 0;
56
+ }
57
+
58
+ virtual DeviceIndex current_device() const {
59
+ TORCH_CHECK(false, "Cannot get current device on XPU without ATen_xpu library.");
60
+ }
61
+
62
+ virtual Device getDeviceFromPtr(void* /*data*/) const {
63
+ TORCH_CHECK(false, "Cannot get device of pointer on XPU without ATen_xpu library.");
64
+ }
65
+
66
+ virtual void deviceSynchronize(DeviceIndex /*device_index*/) const {
67
+ TORCH_CHECK(false, "Cannot synchronize XPU device without ATen_xpu library.");
68
+ }
69
+ };
70
+
71
+ struct TORCH_API XPUHooksArgs {};
72
+
73
+ C10_DECLARE_REGISTRY(XPUHooksRegistry, XPUHooksInterface, XPUHooksArgs);
74
+ #define REGISTER_XPU_HOOKS(clsname) \
75
+ C10_REGISTER_CLASS(XPUHooksRegistry, clsname, clsname)
76
+
77
+ namespace detail {
78
+ TORCH_API const XPUHooksInterface& getXPUHooks();
79
+ } // namespace detail
80
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_is_all_true_ops.h ADDED
@@ -0,0 +1,28 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _is_all_true {
18
+ using schema = at::Tensor (const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_is_all_true")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_is_all_true(Tensor self) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
26
+ };
27
+
28
+ }} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_native_multi_head_attention_cpu_dispatch.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor> _native_multi_head_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask={}, bool need_weights=true, bool average_attn_weights=true, c10::optional<int64_t> mask_type=c10::nullopt);
21
+
22
+ } // namespace cpu
23
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer.h ADDED
@@ -0,0 +1,30 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_nested_view_from_buffer_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_nested_view_from_buffer(Tensor(a) self, Tensor nested_size, Tensor nested_strides, Tensor offsets) -> Tensor(a)
26
+ inline at::Tensor _nested_view_from_buffer(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) {
27
+ return at::_ops::_nested_view_from_buffer::call(self, nested_size, nested_strides, offsets);
28
+ }
29
+
30
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool2d_cuda_dispatch.h ADDED
@@ -0,0 +1,26 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor & adaptive_avg_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size);
21
+ TORCH_API at::Tensor & adaptive_avg_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out);
22
+ TORCH_API at::Tensor & adaptive_avg_pool2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size);
23
+ TORCH_API at::Tensor & adaptive_avg_pool2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out);
24
+
25
+ } // namespace cuda
26
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/addcdiv_compositeexplicitautogradnonfunctional_dispatch.h ADDED
@@ -0,0 +1,24 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautogradnonfunctional {
19
+
20
+ TORCH_API at::Tensor addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
21
+ TORCH_API at::Tensor & addcdiv_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
22
+
23
+ } // namespace compositeexplicitautogradnonfunctional
24
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_elemt.h ADDED
@@ -0,0 +1,39 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/batch_norm_backward_elemt_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor sum_dy, Tensor sum_dy_xmu, Tensor count) -> Tensor
26
+ inline at::Tensor batch_norm_backward_elemt(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, const at::Tensor & sum_dy, const at::Tensor & sum_dy_xmu, const at::Tensor & count) {
27
+ return at::_ops::batch_norm_backward_elemt::call(grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count);
28
+ }
29
+
30
+ // aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor sum_dy, Tensor sum_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!)
31
+ inline at::Tensor & batch_norm_backward_elemt_out(at::Tensor & out, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, const at::Tensor & sum_dy, const at::Tensor & sum_dy_xmu, const at::Tensor & count) {
32
+ return at::_ops::batch_norm_backward_elemt_out::call(grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count, out);
33
+ }
34
+ // aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor sum_dy, Tensor sum_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!)
35
+ inline at::Tensor & batch_norm_backward_elemt_outf(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, const at::Tensor & sum_dy, const at::Tensor & sum_dy_xmu, const at::Tensor & count, at::Tensor & out) {
36
+ return at::_ops::batch_norm_backward_elemt_out::call(grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count, out);
37
+ }
38
+
39
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/ops/block_diag.h ADDED
@@ -0,0 +1,39 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/block_diag_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::block_diag(Tensor[] tensors) -> Tensor
26
+ inline at::Tensor block_diag(at::TensorList tensors) {
27
+ return at::_ops::block_diag::call(tensors);
28
+ }
29
+
30
+ // aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
31
+ inline at::Tensor & block_diag_out(at::Tensor & out, at::TensorList tensors) {
32
+ return at::_ops::block_diag_out::call(tensors, out);
33
+ }
34
+ // aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
35
+ inline at::Tensor & block_diag_outf(at::TensorList tensors, at::Tensor & out) {
36
+ return at::_ops::block_diag_out::call(tensors, out);
37
+ }
38
+
39
+ }
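
A small usage sketch of the wrapper declared above (illustrative only; assumes the full ATen library is linked):

#include <ATen/ATen.h>

at::Tensor block_diag_example() {
  at::Tensor a = at::ones({2, 2});
  at::Tensor b = at::ones({3, 3});
  // The result is a 5x5 tensor with a and b placed on the block diagonal
  // and zeros elsewhere.
  return at::block_diag({a, b});
}
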