diff --git a/ckpts/universal/global_step120/zero/12.attention.dense.weight/fp32.pt b/ckpts/universal/global_step120/zero/12.attention.dense.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..01205c5ffdf2e0de23e604a5ad5c7af5f7045955 --- /dev/null +++ b/ckpts/universal/global_step120/zero/12.attention.dense.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d1e1de7da32d031f5d674826636ab1772d88fa39dc3da0721f360a1d0f17ae8 +size 16778317 diff --git a/ckpts/universal/global_step120/zero/14.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/14.mlp.dense_h_to_4h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..8f524d2463e39d0eb11dd2e648afae410506ab28 --- /dev/null +++ b/ckpts/universal/global_step120/zero/14.mlp.dense_h_to_4h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ea2c853697ca65cd3f0efcbcf1140f0a964462dcd606ee35cb19f3f4de6f754 +size 33555627 diff --git a/ckpts/universal/global_step120/zero/8.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/8.mlp.dense_4h_to_h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..00689c3f2e78f2898fbb754f5c5f21eabfd7b672 --- /dev/null +++ b/ckpts/universal/global_step120/zero/8.mlp.dense_4h_to_h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7fb268dfd9df89bfa25046edb21c47bc6a4852c3c2901a3fb650801b1fe63a5 +size 33555612 diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/ApplyGridUtils.cuh b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/ApplyGridUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..b0b1412298d7b65bd0354d234bbddd09f19031a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/ApplyGridUtils.cuh @@ -0,0 +1,47 @@ +#include + +#include + +namespace at::cuda { + +/** + Computes ceil(a / b) +*/ +template +__host__ __device__ __forceinline__ T ATenCeilDiv(T a, T b) { + return (a + b - 1) / b; +} + +namespace { + +// Threads per block for our apply kernel +// FIXME: use occupancy calculator instead +constexpr uint32_t AT_APPLY_THREADS_PER_BLOCK = 512; +constexpr uint32_t AT_APPLY_BLOCKS_PER_SM = 4; + +template +inline bool getApplyGrid(uint64_t totalElements, dim3& grid, c10::DeviceIndex curDevice, int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) { + if (curDevice == -1) return false; + uint64_t numel_per_thread = static_cast(max_threads_per_block) * static_cast(step); + uint64_t numBlocks = ATenCeilDiv(totalElements, numel_per_thread); + uint64_t maxGridX = at::cuda::getDeviceProperties(curDevice)->maxGridSize[0]; + if (numBlocks > maxGridX) + numBlocks = maxGridX; + grid = dim3(numBlocks); + return true; +} + +constexpr int getApplyBlocksPerSM() { + return AT_APPLY_BLOCKS_PER_SM; +} + +constexpr int getApplyBlockSize() { + return AT_APPLY_THREADS_PER_BLOCK; +} + +inline dim3 getApplyBlock(int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) { + return dim3(max_threads_per_block); +} + +} // anonymous namespace +} // namespace at::cuda diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/Atomic.cuh b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/Atomic.cuh new file mode 100644 index 0000000000000000000000000000000000000000..56ee8f87e253049421c1268a21258f52a729c3b2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/Atomic.cuh @@ 
-0,0 +1,508 @@ +#pragma once + +#include +#include +#include + +#include + +#if !(defined(USE_ROCM) || ((defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800)))) +#include +#endif + +template +struct AtomicFPOp; + +template <> +struct AtomicFPOp { + template + inline __device__ at::Half operator() (at::Half *address, at::Half val, const func_t& func) { + unsigned int * address_as_ui = + (unsigned int *) ((char *)address - ((size_t)address & 2)); + unsigned int old = *address_as_ui; + unsigned int assumed; + + at::Half hsum; + do { + assumed = old; + hsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff); + hsum = func(hsum, val); + old = (size_t)address & 2 ? (old & 0xffff) | (hsum.x << 16) : (old & 0xffff0000) | hsum.x; + old = atomicCAS(address_as_ui, assumed, old); + } while (assumed != old); + hsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff); + return hsum; + } +}; + +template <> +struct AtomicFPOp { + template + inline __device__ at::BFloat16 operator() (at::BFloat16 *address, at::BFloat16 val, const func_t& func) { + unsigned int * address_as_ui = + (unsigned int *) ((char *)address - ((size_t)address & 2)); + unsigned int old = *address_as_ui; + unsigned int assumed; + + at::BFloat16 bsum; + do { + assumed = old; + bsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff); + bsum = func(bsum, val); + old = (size_t)address & 2 ? (old & 0xffff) | (bsum.x << 16) : (old & 0xffff0000) | bsum.x; + old = atomicCAS(address_as_ui, assumed, old); + } while (assumed != old); + bsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff); + return bsum.x; + } +}; + +template <> +struct AtomicFPOp { + template + inline __device__ double operator() (double * address, double val, const func_t& func) { + unsigned long long int* address_as_ull = (unsigned long long int*)address; + unsigned long long int old = *address_as_ull; + unsigned long long int assumed; + + do { + assumed = old; + old = atomicCAS(address_as_ull, assumed, func(val, assumed)); + // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) + } while (assumed != old); + + return __longlong_as_double(old); + } +}; + +#define ATOMIC_INTEGER_IMPL(NAME) \ +template \ +struct Atomic##NAME##IntegerImpl; \ + \ +template \ +struct Atomic##NAME##IntegerImpl { \ + template \ + inline __device__ void operator()(T *address, T val, const func_t& func) { \ + size_t offset = (size_t)address & 3; \ + uint32_t * address_as_ui = (uint32_t *)((char *)address - offset); \ + uint32_t old = *address_as_ui; \ + uint32_t shift = offset * 8; \ + uint32_t old_byte; \ + uint32_t newval; \ + uint32_t assumed; \ + \ + do { \ + assumed = old; \ + old_byte = (old >> shift) & 0xff; \ + newval = static_cast(func(val, static_cast(old_byte))); \ + newval = (old & ~(0x000000ff << shift)) | (newval << shift); \ + old = atomicCAS(address_as_ui, assumed, newval); \ + } while (assumed != old); \ + } \ +}; \ + \ +template \ +struct Atomic##NAME##IntegerImpl { \ + template \ + inline __device__ void operator()(T *address, T val, const func_t& func) { \ + size_t offset = (size_t)address & 2; \ + uint32_t * address_as_ui = (uint32_t *)((char *)address - offset); \ + bool is_32_align = offset; \ + uint32_t old = *address_as_ui; \ + uint32_t old_bytes; \ + uint32_t newval; \ + uint32_t assumed; \ + \ + do { \ + assumed = old; \ + old_bytes = is_32_align ? old >> 16 : old & 0xffff; \ + newval = static_cast(func(val, static_cast(old_bytes))); \ + newval = is_32_align ? 
(old & 0xffff) | (newval << 16) : (old & 0xffff0000) | newval; \ + old = atomicCAS(address_as_ui, assumed, newval); \ + } while (assumed != old); \ + } \ +}; \ + \ +template \ +struct Atomic##NAME##IntegerImpl { \ + template \ + inline __device__ void operator()(T *address, T val, const func_t& func) { \ + uint32_t * address_as_ui = (uint32_t *) (address); \ + uint32_t old = *address_as_ui; \ + uint32_t newval; \ + uint32_t assumed; \ + \ + do { \ + assumed = old; \ + newval = static_cast(func(val, static_cast(old))); \ + old = atomicCAS(address_as_ui, assumed, newval); \ + } while (assumed != old); \ + } \ +}; \ + \ +template \ +struct Atomic##NAME##IntegerImpl { \ + template \ + inline __device__ void operator()(T *address, T val, const func_t& func) { \ + unsigned long long * address_as_ui = (unsigned long long *) (address); \ + unsigned long long old = *address_as_ui; \ + unsigned long long newval; \ + unsigned long long assumed; \ + \ + do { \ + assumed = old; \ + newval = static_cast(func(val, static_cast(old))); \ + old = atomicCAS(address_as_ui, assumed, newval); \ + } while (assumed != old); \ + } \ +}; + + +# define GPU_ATOMIC_INTEGER(NAME, OP, DTYPE) \ +static inline __device__ void gpuAtomic##NAME(DTYPE *address, DTYPE val) { \ +Atomic##NAME##IntegerImpl()(address, \ + val, \ + [](DTYPE a, DTYPE b) { \ + return OP; \ + }); \ +} \ + +ATOMIC_INTEGER_IMPL(Add) +GPU_ATOMIC_INTEGER(Add, a || b, bool) + +// Don't instantiate gpuAtomicAdd with the macro as it seems non-standard (see int32, int64) +static inline __device__ void gpuAtomicAdd(uint8_t *address, uint8_t val) { + AtomicAddIntegerImpl()(address, + val, + [](uint8_t a, uint8_t b) { + return a + b; + }); +} + +static inline __device__ void gpuAtomicAdd(int8_t *address, int8_t val) { + AtomicAddIntegerImpl()(address, + val, + [](int8_t a, int8_t b) { + return a + b; + }); +} + +static inline __device__ void gpuAtomicAdd(int16_t *address, int16_t val) { + AtomicAddIntegerImpl()(address, + val, + [](int16_t a, int16_t b) { + return a + b; + }); +} + +static inline __device__ int32_t gpuAtomicAdd(int32_t *address, int32_t val) { + return atomicAdd(address, val); +} + +static inline __device__ void gpuAtomicAdd(int64_t *address, int64_t val) { +#if defined(USE_ROCM) + __atomic_fetch_add(address, val, __ATOMIC_RELAXED); +#else + static_assert(sizeof(unsigned long long int) == sizeof(int64_t), "bitwidth change is not allowed"); + atomicAdd(reinterpret_cast(address), static_cast(val)); +#endif +} + +static inline __device__ at::Half gpuAtomicAdd(at::Half *address, at::Half val) { +#if defined(USE_ROCM) || ((defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 700))) + return AtomicFPOp()(address, val, + [](at::Half hsum, at::Half val) { + return hsum + val; + }); +#else + return atomicAdd(reinterpret_cast<__half*>(address), val); +#endif +} + +static inline __device__ at::BFloat16 gpuAtomicAdd(at::BFloat16 *address, at::BFloat16 val) { +#if defined(USE_ROCM) || ((defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800))) +return AtomicFPOp()(address, val, + [](at::BFloat16 bsum, at::BFloat16 val) { + return bsum + val; + }); +#else + __nv_bfloat16 r = atomicAdd(reinterpret_cast<__nv_bfloat16*>(address), *reinterpret_cast<__nv_bfloat16*>(&val)); + return *reinterpret_cast(&r); +#endif +} + +#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 600) +// from CUDA C Programmic Guide +static inline __device__ double atomicAdd(double* address, double val) +#if defined(__clang__) && defined(__CUDA__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored 
"-Wgcc-compat" + __attribute__((enable_if(true, ""))) +#pragma GCC diagnostic pop +#endif +{ + + return AtomicFPOp()(address, val, + [](double val, unsigned long long int assumed) { + return __double_as_longlong(val + __longlong_as_double(assumed)); + }); +} +#elif defined(USE_ROCM) || !(defined(__CUDA_ARCH__)) + +/* Note [hip-clang differences to hcc] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * The upcoming hip-clang compiler for ROCm differs from hcc in a few details. + * It exports the __HIP__ macro, we can hence differentiate between hcc and + * hip-clang. In the below, hcc only received support for atomicAdd with double + * typing after work week 18312. hip-clang had support from the first version. + * In general, the code-visible differences between hip-clang and hcc will be + * minimal. + */ + +#if defined(USE_ROCM) && __hcc_workweek__ < 18312 && !__HIP__ + // This needs to be defined for the host side pass + static inline __device__ double atomicAdd(double *address, double val) { } +#endif +#endif + +static inline __device__ double gpuAtomicAdd(double *address, double val) { + return atomicAdd(address, val); +} + +static inline __device__ float gpuAtomicAdd(float *address, float val) { + return atomicAdd(address, val); +} + +template +static inline __device__ void gpuAtomicAdd(c10::complex *address, c10::complex val) { + gpuAtomicAdd(&address->real_, val.real_); + gpuAtomicAdd(&address->imag_, val.imag_); +} + +/* Note [gpuAtomicAdd vs atomicAdd] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * Some extensions such as torchvision call atomicAdd() + * directly and require non-library provided data type support. Only for these, we + * continue to provide atomicAdd overloads. + */ +static inline __device__ at::Half atomicAdd(at::Half *address, at::Half val) { + return gpuAtomicAdd(address, val); +} + +static inline __device__ at::BFloat16 atomicAdd(at::BFloat16 *address, at::BFloat16 val) { + return gpuAtomicAdd(address, val); +} + +static inline __device__ void atomicAdd(uint8_t *address, uint8_t val) { + gpuAtomicAdd(address, val); +} + +static inline __device__ void atomicAdd(int8_t *address, int8_t val) { + gpuAtomicAdd(address, val); +} + +static inline __device__ void atomicAdd(int16_t *address, int16_t val) { + gpuAtomicAdd(address, val); +} + +static inline __device__ void atomicAdd(int64_t *address, int64_t val) { + gpuAtomicAdd(address, val); +} + +static inline __device__ void atomicAdd(bool *address, bool val) { + gpuAtomicAdd(address, val); +} + +/* Note [explicitly non-returning atomics] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * AMD's MI100 (gfx908) provides an optimized fp32 atomicAdd, exposed via atomicAddNoRet(). + * Due to compiler limitations, callers must opt-in to guarantee the optimized instruction. + * This non-returning atomicAddNoRet cannot be used to implement the returning atomicAdd, + * therefore we need a new API 'gpuAtomicAddNoReturn'. 
+ */ +template +static inline __device__ void gpuAtomicAddNoReturn(c10::complex *address, c10::complex val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(uint8_t *address, uint8_t val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(int8_t *address, int8_t val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(int16_t *address, int16_t val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(int32_t *address, int32_t val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(int64_t *address, int64_t val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(bool *address, bool val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(at::Half *address, at::Half val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(at::BFloat16 *address, at::BFloat16 val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(double *address, double val) { gpuAtomicAdd(address, val); } + +/* Special case fp32 atomic. */ +#if defined(USE_ROCM) +static inline __device__ void gpuAtomicAddNoReturn(float *address, float val) { atomicAddNoRet(address, val); } +#else +static inline __device__ void gpuAtomicAddNoReturn(float *address, float val) { gpuAtomicAdd(address, val); } +#endif + +// Atomic multiplication implementation. + +ATOMIC_INTEGER_IMPL(Mul) +GPU_ATOMIC_INTEGER(Mul, a * b, uint8_t) +GPU_ATOMIC_INTEGER(Mul, a * b, int8_t) +GPU_ATOMIC_INTEGER(Mul, a * b, int16_t) +GPU_ATOMIC_INTEGER(Mul, a * b, int32_t) +GPU_ATOMIC_INTEGER(Mul, a * b, int64_t) + +inline __device__ at::Half gpuAtomicMul(at::Half * address, at::Half val) { + return AtomicFPOp()(address, val, + [](at::Half bsum, at::Half val) { + return bsum * val; + }); +} + +inline __device__ at::BFloat16 gpuAtomicMul(at::BFloat16 * address, at::BFloat16 val) { + return AtomicFPOp()(address, val, + [](at::BFloat16 bsum, at::BFloat16 val) { + return bsum * val; + }); +} + +inline __device__ double gpuAtomicMul(double * address, double val) { + return AtomicFPOp()(address, val, + [](double val, unsigned long long int assumed) { + return __double_as_longlong(val * __longlong_as_double(assumed)); + }); +} + +// Dont use a templated function for this since the addition function defaults to the CUDA built-in. +inline __device__ float gpuAtomicMul (float * address, float val) { + unsigned int* address_as_ull = (unsigned int*)address; + unsigned int old = *address_as_ull; + unsigned int assumed; + + do { + assumed = old; + old = atomicCAS(address_as_ull, assumed, + __float_as_int(val * + __int_as_float(assumed))); + + // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) + } while (assumed != old); + + return __int_as_float(old); +} + +// Atomic maximum implementation. + +template +__host__ __device__ T safe_max(T a, T b) { + #if defined(__HIPCC__) + // TODO: remove this special case for HIP when issue is fixed: + // https://github.com/ROCm-Developer-Tools/HIP/issues/2209 + T max = at::_isnan(a) ? a : (at::_isnan(b) ? b : std::max(a, b)); + #else + T max = at::_isnan(b) ? 
b : std::max(a, b); + #endif + + return max; +} + +ATOMIC_INTEGER_IMPL(Max) +GPU_ATOMIC_INTEGER(Max, safe_max(a, b), uint8_t) +GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int8_t) +GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int16_t) +GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int32_t) +GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int64_t) + +inline __device__ at::Half gpuAtomicMax(at::Half * address, at::Half val) { + return AtomicFPOp()(address, val, + [](at::Half bsum, at::Half val) { + return safe_max(bsum, val); + }); +} + +inline __device__ at::BFloat16 gpuAtomicMax(at::BFloat16 * address, at::BFloat16 val) { + return AtomicFPOp()(address, val, + [](at::BFloat16 bsum, at::BFloat16 val) { + return safe_max(bsum, val); + }); +} + +inline __device__ double gpuAtomicMax(double * address, double val) { + return AtomicFPOp()(address, val, + [](double val, unsigned long long int assumed) { + return __double_as_longlong(safe_max(val, __longlong_as_double(assumed))); + }); +} + +// Dont use a templated function for this since the addition function defaults to the CUDA built-in. +inline __device__ float gpuAtomicMax(float * address, float val) { + unsigned int* address_as_ull = (unsigned int*)address; + unsigned int old = *address_as_ull; + unsigned int assumed; + + do { + assumed = old; + old = atomicCAS(address_as_ull, assumed, + __float_as_int(safe_max(val, __int_as_float(assumed)))); + + // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) + } while (assumed != old); + + return __int_as_float(old); +} + +// Atomic minimum implementation. + +template +__host__ __device__ T safe_min(T a, T b) { + #if defined(__HIPCC__) + // TODO: remove this special case for HIP when issue is fixed: + // https://github.com/ROCm-Developer-Tools/HIP/issues/2209 + T min = at::_isnan(a) ? a : (at::_isnan(b) ? b : std::min(a, b)); + #else + T min = at::_isnan(b) ? b : std::min(a, b); + #endif + + return min; +} + +ATOMIC_INTEGER_IMPL(Min) +GPU_ATOMIC_INTEGER(Min, safe_min(a, b), uint8_t) +GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int8_t) +GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int16_t) +GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int32_t) +GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int64_t) + +inline __device__ at::Half gpuAtomicMin(at::Half * address, at::Half val) { + return AtomicFPOp()(address, val, + [](at::Half bsum, at::Half val) { + return safe_min(bsum, val); + }); +} + +inline __device__ at::BFloat16 gpuAtomicMin(at::BFloat16 * address, at::BFloat16 val) { + return AtomicFPOp()(address, val, + [](at::BFloat16 bsum, at::BFloat16 val) { + return safe_min(bsum, val); + }); +} + +inline __device__ double gpuAtomicMin(double * address, double val) { + return AtomicFPOp()(address, val, + [](double val, unsigned long long int assumed) { + return __double_as_longlong(safe_min(val, __longlong_as_double(assumed))); + }); +} + +// Dont use a templated function for this since the addition function defaults to the CUDA built-in. 
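The `safe_max`/`safe_min` helpers above exist so that the atomic reductions propagate NaN the same way ATen's non-atomic reductions do. As a minimal usage sketch (hypothetical kernel and buffer names, not part of this header), assuming `ATen/cuda/Atomic.cuh` is on the include path:

```cuda
// Hypothetical kernel: per-bin running maximum using gpuAtomicMax(float*, float).
// Because the update goes through safe_max(), a NaN input poisons its bin,
// matching ATen's NaN-propagating max semantics (unlike plain fmaxf()).
#include <ATen/cuda/Atomic.cuh>

__global__ void bin_max_kernel(const float* values, const int* bins,
                               float* bin_max, int n) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    gpuAtomicMax(&bin_max[bins[i]], values[i]);
  }
}
```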
+inline __device__ float gpuAtomicMin(float * address, float val) { + unsigned int* address_as_ull = (unsigned int*)address; + unsigned int old = *address_as_ull; + unsigned int assumed; + + do { + assumed = old; + old = atomicCAS(address_as_ull, assumed, + __float_as_int(safe_min(val, __int_as_float(assumed)))); + + // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) + } while (assumed != old); + + return __int_as_float(old); +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAApplyUtils.cuh b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAApplyUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..4dcdabf17b3b90ad598db48f7210e3932142aaad --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAApplyUtils.cuh @@ -0,0 +1,537 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +// +// This file contains pointwise operation functions and kernels that +// work on both contiguous and non-contiguous tensor arguments of +// arbitrary (up to MAX_CUTORCH_DIMS) dimensioned arguments without +// copying or temporary storage. +// + +/* + NOTE [ CUDA_tensor_applyN helpers ] + + The following CUDA_tensor_applyN (where N currently can be 1, 2, 3, or 4) + functions apply a pointwise operator to N tensor(s). + + The calling convention is + + 1. The template arguments should be, sequentially, + - First N typename args specify the scalar types of each of the N tensors. + - (Optional) `int step` arg specifies the number of elements processed + together at the same time. + Default is 1. + - A usually omitted (i.e., inferred) typename arg specifies the type of the + function/functor applied on `N * step` values in each iteration of each + CUDA thread. + 2. The arguments should be, sequentially, + - N tensors + - op: a function/functor that processes `N * step` values at the same time. + - If `step == 1`, it must have signature + `void(*)(scalar1_t&, scalar2_t&, ..., scalarN_t&)`, where + `scalar*_t`s are the first N typename template args, and the inputs + are the `N` values from the `N` tensors retrieved at a common index. + - Otherwise, it must must have signature + void(*)(int n, scalar1_t&, scalar1_t&, ..., scalar1_t&, // repeat `step` times + scalar2_t&, scalar2_t&, ..., scalar2_t&, // repeat `step` times + ..., + scalarN_t&, scalarN_t&, ..., scalarN_t&) // repeat `step` times + Different from `step == 1` case, it processes `N * step` values taken + from `step` common indices. Moreover, the first input `n` represents the + number of valid indices (it will always have `0 < n <= step`). It will + almost always be `step`, but at the boundary we may not have full `step` + elements and `n` can be a lesser value. + + E.g., if `step == 4` and `N == 2`, `op` could be + + [](int n, scalar1_t &u1, scalar1_t &u2, scalar1_t &u3, scalar1_t &u4, + scalar2_t &v1, scalar2_t &v2, scalar2_t &v3, scalar2_t &v4) { + // Only process u1, ..., un and v1, ..., vn. + // So if `n == 3`, `u4` and `v4` need not to be considered. + } + + In both cases, the references can actually be const, but at least one of + them should be non-const in order to write the output. + - (Optional, but recommended) N TensorArgType args that specify for each + tensor whether `op` reads AND writes ] (i.e., TensorArgType::ReadWrite), + or only reads (i.e., TensorArgType::ReadOnly). 
+ Default is TensorArgType::ReadWrite for first Tensor, and + TensorArgType::ReadOnly for the rest. + + E.g., + + to compute a = b^2 for a and b of same dtype, we can call + + CUDA_tensor_apply2( + a, b, + [] __device__ (scalar &a_val, const scalar &b_val) { a_val = b_val * b_val; } + ); + + to work on 2 values at the same time, we can call + + CUDA_tensor_apply2( + a, b, + [] __device__ (int n, scalar1 &a_val1, scalar1 &a_val2, + const scalar2 &b_val1, const scalar2 &b_val2) { + // call special vectorized op here, or just do elementwise and enjoy unrolling... + // if n == 1, only process a_val1 and b_val1 + } + ); +*/ + +namespace at::cuda { + +// TODO: combine with TensorArg? So far that's been for debugging, and this is functional... +enum class TensorArgType { ReadWrite, ReadOnly }; + +namespace { + +// Rearrange dimensions for pointwise operations so that strides are in +// decreasing order as much as possible, so that kernels have better memory +// access patterns. +// +// For example, consider a binary operation on two "transposed" 2-dim tensors: +// sizes: 256 512 +// aInfo->strides: 1 256 +// bInfo->strides: 1 256 +// +// Given this, each concurrent memory access inside kernelPointwiseApply2() is +// exactly 256 elements apart, resulting in poor performance. +// +// This function exchanges dimensions so that memory access is contiguous: +// sizes: 512 256 +// aInfo->strides: 256 1 +// bInfo->strides: 256 1 +// +// (Actually, it becomes even better because now collapseDims() can turn each +// input into one contiguous array.) +// +// In general, given M (<=4) TensorInfo's with N dimensions, we can view each +// strides[i] (0 <= i < N) as an M-tuple. Given each pair i < j, we exchange +// strides[i] and [j] if +// (1) strides[i][k] < strides[j][k] for some k (0 <= k < M) +// (exchanging them will benefit input #k), and +// (2) strides[i][k] <= strieds[j][k] for all k +// (exchanging them will not make any input worse). +template +inline void rearrangeDims(detail::TensorInfo* aInfo, + detail::TensorInfo* bInfo = nullptr, + detail::TensorInfo* cInfo = nullptr, + detail::TensorInfo* dInfo = nullptr) { + int numInfos = 1; + int dims = aInfo->dims; + IndexType *sizes[4] = { aInfo->sizes, }; + IndexType *strides[4] = { aInfo->strides, }; + + if (bInfo != nullptr) { + ++numInfos; + if (bInfo->dims != dims) return; + sizes[1] = bInfo->sizes; + strides[1] = bInfo->strides; + } + + if (cInfo != nullptr) { + ++numInfos; + if (cInfo->dims != dims) return; + sizes[2] = cInfo->sizes; + strides[2] = cInfo->strides; + } + + if (dInfo != nullptr) { + ++numInfos; + if (dInfo->dims != dims) return; + sizes[3] = dInfo->sizes; + strides[3] = dInfo->strides; + } + + // Bail out if sizes do not match: we are using "deprecated pointwise + // behavior" among tensors of different shapes but same number of elements. + for (int i = 1; i < numInfos; ++i) { + for (int j = 0; j < dims; ++j) { + if (sizes[i][j] != sizes[0][j]) return; + } + } + + for (int i = 0; i < dims - 1; ++i) { + // No need to consider dimensions of size 1. + if (sizes[0][i] == 1) continue; + + for (int j = i + 1; j < dims; ++j) { + if (sizes[0][j] == 1) continue; + + // Compare the relative sizes of strides between dim #i and dim #j. 
+ bool hasIncreasingStrides = false; + bool hasDecreasingStrides = false; + + for (int k = 0; k < numInfos; k++) { + IndexType stride_i = strides[k][i]; + IndexType stride_j = strides[k][j]; + if (stride_i < stride_j) { + hasIncreasingStrides = true; + } else if (stride_i > stride_j) { + hasDecreasingStrides = true; + } + } + + if (hasIncreasingStrides && !hasDecreasingStrides) { + for (int k = 0; k < numInfos; k++) { + IndexType size = sizes[k][i]; + sizes[k][i] = sizes[k][j]; + sizes[k][j] = size; + + IndexType stride = strides[k][i]; + strides[k][i] = strides[k][j]; + strides[k][j] = stride; + } + } + } + } +} + +// The `remaining_steps` argument is used to support Op that operates on +// multiple elements at the same time. Generally, the strategy of ApplyOpN is to +// 1. Initialize `remaining_steps = step`, where `step` is the template arg of +// CUDA_tensor_applyN helpers. The input arg `n` to `apply()` represents the +// number of elements in bound for this call. It will almost always equal to +// `step` except at boundaries. +// 2. If `remaining_steps > 0` convert the current linearIndex to offset (if in +// bound), and recursively call `ApplyOpN` with `remaining_steps - 1`. +// 3. At `remaining_steps = 0`, +// if `step = 1`, call `op(tensor1_val, tensor2_val, ...)`; +// if `step > 1`, call `op(n, tensor1_val1, tensor1_val2, ..., tesor1_valstep, +// tensor2_val1, tensor2_val2, ..., tesor2_valstep, +// ... +// tensorN_val1, tensorN_val2, ..., tesorN_valstep);` +// +// See NOTE [ CUDA_tensor_applyN helpers ] above for how Op may look like. + +template +struct ApplyOp1 { +__device__ __forceinline__ +static void apply(detail::TensorInfo &a, const Op &op, int n, + IndexType linearIndex, Offsets... aOffsets) { + // Convert `linearIndex` into an offset of `a` + const IndexType aOffset = sizeof...(Offsets) < n ? + detail::IndexToOffset::get(linearIndex, a) : 0; + + ApplyOp1::apply( + a, op, n, linearIndex + 1, aOffsets..., aOffset + ); +} +}; + +// Specialize `step=1` case (i.e., `remaining_steps=0` and `len(Offsets)=1`). +// We don't need to pass in how many elements need to processed in this case. +template +struct ApplyOp1 { +__device__ __forceinline__ +static void apply(detail::TensorInfo &a, const Op &op, + int n, IndexType linearIndex, Offset offset) { + op(a.data[offset]); +} +}; + +template +struct ApplyOp1 { +__device__ __forceinline__ +static void apply(detail::TensorInfo &a, const Op &op, int n, + IndexType linearIndex, Offsets... offsets) { + op(n, a.data[offsets]...); +} +}; + +template +#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) +C10_LAUNCH_BOUNDS_2(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM) +#endif +__global__ void kernelPointwiseApply1(detail::TensorInfo a, + IndexType totalElements, const Op op) { + for (IndexType linearIndex = (blockIdx.x * blockDim.x + threadIdx.x) * step; + linearIndex < totalElements; + linearIndex += gridDim.x * blockDim.x * step) { + ApplyOp1::apply( + a, op, ::min(step, static_cast(totalElements - linearIndex)), linearIndex); + } +} + + +template +struct ApplyOp2 { +__device__ __forceinline__ +static void apply(detail::TensorInfo &a, + detail::TensorInfo &b, + const Op &op, int64_t n, IndexType linearIndex, + Offsets... aOffsets, Offsets... bOffsets) { + // Convert `linearIndex` into an offset of `a` + const IndexType aOffset = static_cast(sizeof...(Offsets)) < n ? + detail::IndexToOffset::get(linearIndex, a) : 0; + + // Convert `linearIndex` into an offset of `b` + const IndexType bOffset = static_cast(sizeof...(Offsets)) < n ? 
+ detail::IndexToOffset::get(linearIndex, b) : 0; + + ApplyOp2::apply( + a, b, op, n, linearIndex + 1, aOffsets..., aOffset, bOffsets..., bOffset + ); +} +}; + +// Specialize `step=1` case (i.e., `remaining_steps=0` and `len(Offsets)=1`). +// We don't need to pass in how many elements need to processed in this case. +template +struct ApplyOp2 { +__device__ __forceinline__ +static void apply(detail::TensorInfo &a, + detail::TensorInfo &b, + const Op &op, int /*n*/, IndexType /*linearIndex*/, + Offset aOffset, Offset bOffset) { + op(a.data[aOffset], b.data[bOffset]); +} +}; + +template +struct ApplyOp2 { +__device__ __forceinline__ +static void apply(detail::TensorInfo &a, + detail::TensorInfo &b, + const Op &op, int n, IndexType linearIndex, + Offsets... aOffsets, Offsets... bOffsets) { + op(n, a.data[aOffsets]..., b.data[bOffsets]...); +} +}; + +template +#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) +C10_LAUNCH_BOUNDS_2(max_threads_per_block, min_blocks_per_sm) +#endif +__global__ void +kernelPointwiseApply2(detail::TensorInfo a, + detail::TensorInfo b, + IndexType totalElements, + const Op op) { + for (IndexType linearIndex = (blockIdx.x * blockDim.x + threadIdx.x) * step; + linearIndex < totalElements; + linearIndex += gridDim.x * blockDim.x * step) { + ApplyOp2::apply( + a, b, op, ::min(step, static_cast(totalElements - linearIndex)), + linearIndex); + } +} + +} // anonymous namespace + +template +inline bool CUDA_tensor_apply2(at::TensorBase a, + at::TensorBase b, + const Op op, + TensorArgType aType = TensorArgType::ReadWrite, + TensorArgType bType = TensorArgType::ReadOnly) { + TORCH_CHECK(a.device().is_cuda() && b.device().is_cuda(), + "CUDA_tensor_apply2: Expected tensors to have CUDA DeviceType, but got " + "tensors with type ", a.device().type(), " and ", b.device().type()); + int64_t totalElements = a.numel(); + + if (totalElements != b.numel()) { + return false; + } + + if (a.dim() > MAX_TENSORINFO_DIMS || + b.dim() > MAX_TENSORINFO_DIMS) { + return false; + } + + if (a.numel() == 0) { + // Empty tensor; do nothing + return true; + } + const dim3 block = getApplyBlock(max_threads_per_block); + + dim3 grid; + auto curDevice = current_device(); + if (curDevice == -1) return false; + if (!getApplyGrid(totalElements, grid, curDevice, max_threads_per_block)) { + return false; + } + + /* + Expands readable/writable tensors whose indices may be "overlapped." + This ensures that each element of the tensor is operated on once and only + once. + */ + TensorBase oldA; + TensorBase oldB; + + if (aType == TensorArgType::ReadWrite && detail::maybeOverlappingIndices(a)) { + // Must perform in contiguous space + oldA = std::exchange(a, a.contiguous()); + } + if (bType == TensorArgType::ReadWrite && detail::maybeOverlappingIndices(b)) { + // Must perform in contiguous space + oldB = std::exchange(b, b.contiguous()); + } + + // It is possible that the tensor dimensions are able to be collapsed, + // and thus we can reduce the actual code complexity of the copy by + // exploiting this knowledge statically, since the div/mod is the + // most expensive part of the operation, more so than memory accesses. + // For instance, when copying a non-contiguous to a contiguous tensor + // (or vice versa), the contiguous tensor can be collapsed to one + // dimension, and the loop to translate the linear index to the array + // index can be similarly collapsed. That is what this unrolling is for. 
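For orientation, here is a call-site sketch of the two-tensor apply helper defined below, mirroring the `a = b^2` example from the NOTE above (hypothetical wrapper name; assumes float CUDA tensors and an nvcc build with `--extended-lambda`):

```cpp
// Hypothetical sketch: elementwise a = b * b via CUDA_tensor_apply2 (step = 1).
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>

void square_into(const at::TensorBase& a, const at::TensorBase& b) {
  at::cuda::CUDA_tensor_apply2<float, float>(
      a, b,
      // `a_val` is written, `b_val` is only read (ReadWrite/ReadOnly defaults).
      [] __device__(float& a_val, const float& b_val) { a_val = b_val * b_val; });
}
```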
+ +#define HANDLE_CASE(TYPE, A, B) \ + kernelPointwiseApply2 \ + <<>>( \ + aInfo, bInfo, static_cast(totalElements), op); \ + C10_CUDA_KERNEL_LAUNCH_CHECK(); + +#define HANDLE_B_CASE(TYPE, A, B) { \ + switch (B) { \ + case 1: \ + HANDLE_CASE(TYPE, A, 1); \ + break; \ + case 2: \ + HANDLE_CASE(TYPE, A, 2); \ + break; \ + default: \ + HANDLE_CASE(TYPE, A, -1); \ + break; \ + } \ +} + +#define HANDLE_A_CASE(TYPE, A, B) { \ + switch (A) { \ + case 1: \ + HANDLE_B_CASE(TYPE, 1, B); \ + break; \ + case 2: \ + HANDLE_B_CASE(TYPE, 2, B); \ + break; \ + default: \ + HANDLE_B_CASE(TYPE, -1, B); \ + break; \ + } \ +} + + if (detail::canUse32BitIndexMath(a) && + detail::canUse32BitIndexMath(b)) { + detail::TensorInfo aInfo = + detail::getTensorInfo(a); + + detail::TensorInfo bInfo = + detail::getTensorInfo(b); + rearrangeDims(&aInfo, &bInfo); + aInfo.collapseDims(); + bInfo.collapseDims(); + + HANDLE_A_CASE(unsigned int, aInfo.dims, bInfo.dims); + } else { + detail::TensorInfo aInfo = + detail::getTensorInfo(a); + + detail::TensorInfo bInfo = + detail::getTensorInfo(b); + rearrangeDims(&aInfo, &bInfo); + aInfo.collapseDims(); + bInfo.collapseDims(); + + /* + Only instantiates the all 1D special case and the fallback all nD case for + large (64-bit indexed) tensors to reduce compilation time. + */ + if (aInfo.dims == 1 && bInfo.dims == 1) { + HANDLE_CASE(uint64_t, 1, 1); + } else { + HANDLE_CASE(uint64_t, -1, -1); + } + } +#undef HANDLE_CASE +#undef HANDLE_B_CASE +#undef HANDLE_A_CASE + + if (oldA.defined()) { + at::native::copy_ignoring_overlaps(oldA, a); + } + + if (oldB.defined()) { + at::native::copy_ignoring_overlaps(oldB, b); + } + + return true; +} + +/* Provides default step = 1 to CUDA_tensor_apply2. */ +template +inline bool CUDA_tensor_apply2(const at::TensorBase &a, + const at::TensorBase &b, + const Op op, + TensorArgType aType = TensorArgType::ReadWrite, + TensorArgType bType = TensorArgType::ReadOnly) { + return CUDA_tensor_apply2(a, b, op, aType, bType); +} + +} // namespace at::cuda diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDABlas.h b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDABlas.h new file mode 100644 index 0000000000000000000000000000000000000000..eb12bb350c5987187b85aaf68b9228b1919f6e49 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDABlas.h @@ -0,0 +1,375 @@ +#pragma once +/* + Provides a subset of CUDA BLAS functions as templates: + + gemm(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, + ldc) + + gemv(transa, m, n, alpha, a, lda, x, incx, beta, y, incy) + + dot(n, x, incx, y, incy, result) + + where Dtype is double, float, at::Half or at::BFloat16 (ROCm, NOT for dot). + The functions are available in at::cuda::blas namespace. 
+ */ + +#include +#include + +namespace at::cuda::blas { + +// RAII guard that sets the CuBLAS pointer mode and restores it to +// its previous value when the guard is destroyed +class PointerModeGuard { +public: + PointerModeGuard(cublasHandle_t handle, cublasPointerMode_t mode) : + handle(handle) { + TORCH_CUDABLAS_CHECK(cublasGetPointerMode(handle, &previous_mode)); + TORCH_CUDABLAS_CHECK(cublasSetPointerMode(handle, mode)); + } + + ~PointerModeGuard() { + cublasSetPointerMode(handle, previous_mode); + } + +private: + cublasHandle_t handle; + cublasPointerMode_t previous_mode; +}; + +/* LEVEL 3 BLAS FUNCTIONS */ + +#define CUDABLAS_GEMM_ARGTYPES(Dtype) \ + char transa, char transb, int64_t m, int64_t n, int64_t k, at::opmath_type alpha, \ + const Dtype *a, int64_t lda, const Dtype *b, int64_t ldb, at::opmath_type beta,\ + Dtype *c, int64_t ldc + +#define CUDABLAS_GEMM_ARGS(Dtype) transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc + +template +inline void gemm(CUDABLAS_GEMM_ARGTYPES(Dtype)) { + AT_ERROR("at::cuda::blas::gemm: not implemented for ", typeid(Dtype).name()); +} + +template <> +void gemm(CUDABLAS_GEMM_ARGTYPES(double)); +template <> +void gemm(CUDABLAS_GEMM_ARGTYPES(float)); +template <> +void gemm>(CUDABLAS_GEMM_ARGTYPES(c10::complex)); +template <> +void gemm>(CUDABLAS_GEMM_ARGTYPES(c10::complex)); +template <> +void gemm(CUDABLAS_GEMM_ARGTYPES(at::Half)); +template <> +void gemm(CUDABLAS_GEMM_ARGTYPES(at::BFloat16)); + +template +inline void gemm_internal(CUDABLAS_GEMM_ARGTYPES(Dtype)) { + AT_ERROR("at::cuda::blas::gemm_internal: not implemented for ", typeid(Dtype).name()); +} + +template <> +void gemm_internal(CUDABLAS_GEMM_ARGTYPES(double)); +template <> +void gemm_internal(CUDABLAS_GEMM_ARGTYPES(float)); +template <> +void gemm_internal>(CUDABLAS_GEMM_ARGTYPES(c10::complex)); +template <> +void gemm_internal>(CUDABLAS_GEMM_ARGTYPES(c10::complex)); +template <> +void gemm_internal(CUDABLAS_GEMM_ARGTYPES(at::Half)); +template <> +void gemm_internal(CUDABLAS_GEMM_ARGTYPES(at::BFloat16)); + +#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700) +enum GEMMAndBiasActivationEpilogue { + None, + RELU, + GELU, +}; + +// NOTE: GELU activation is not supported prior to CUDA 11.4 and will +// do nothing if passed in that case. 
+template +void gemm_and_bias( + bool transpose_mat1, + bool transpose_mat2, + int64_t m, + int64_t n, + int64_t k, + at::opmath_type alpha_val, + const Dtype* mat1_ptr, + int64_t mat1_ld, + const Dtype* mat2_ptr, + int64_t mat2_ld, + const Dtype* bias, + Dtype* result_ptr, + int64_t result_ld, + GEMMAndBiasActivationEpilogue activation = GEMMAndBiasActivationEpilogue::None); + +void int8_gemm( + bool transpose_mat1, + bool transpose_mat2, + int64_t m, + int64_t n, + int64_t k, + const int8_t* mat1_ptr, + int64_t mat1_ld, + const int8_t* mat2_ptr, + int64_t mat2_ld, + int32_t* result_ptr, + int64_t result_ld); + +void scaled_gemm( + char transa, + char transb, + int64_t m, + int64_t n, + int64_t k, + const void* mat1_ptr, + const void* mat1_scale_ptr, + int64_t mat1_ld, + ScalarType mat1_dtype, + const void* mat2_ptr, + const void* mat2_scale_ptr, + int64_t mat2_ld, + ScalarType mat2_dtype, + const void* bias_ptr, + ScalarType bias_dtype, + void* result_ptr, + const void* result_scale_ptr, + int64_t result_ld, + ScalarType result_dtype, + void* amax_ptr, + bool use_fast_accum); +#endif + +#define CUDABLAS_BGEMM_ARGTYPES(Dtype) \ + char transa, char transb, int64_t m, int64_t n, int64_t k, at::opmath_type alpha, \ + const Dtype *a, int64_t lda, int64_t stridea, \ + const Dtype *b, int64_t ldb, int64_t strideb, \ + at::opmath_type beta, Dtype *c, int64_t ldc, int64_t stridec, int64_t num_batches + +#define CUDABLAS_BGEMM_ARGS(Dtype) \ + transa, transb, m, n, k, alpha, a, lda, stridea, b, ldb, strideb, beta, c, ldc, stridec, num_batches + +template +inline void bgemm(CUDABLAS_BGEMM_ARGTYPES(Dtype)) { + AT_ERROR("at::cuda::blas::bgemm: not implemented for ", typeid(Dtype).name()); +} + +template <> +void bgemm(CUDABLAS_BGEMM_ARGTYPES(double)); +template <> +void bgemm(CUDABLAS_BGEMM_ARGTYPES(float)); +template <> +void bgemm>(CUDABLAS_BGEMM_ARGTYPES(c10::complex)); +template <> +void bgemm>(CUDABLAS_BGEMM_ARGTYPES(c10::complex)); +template <> +void bgemm(CUDABLAS_BGEMM_ARGTYPES(at::Half)); +template <> +void bgemm(CUDABLAS_BGEMM_ARGTYPES(at::BFloat16)); + +template +inline void bgemm_internal(CUDABLAS_BGEMM_ARGTYPES(Dtype)) { + AT_ERROR("at::cuda::blas::bgemm_internal: not implemented for ", typeid(Dtype).name()); +} + +template <> +void bgemm_internal(CUDABLAS_BGEMM_ARGTYPES(double)); +template <> +void bgemm_internal(CUDABLAS_BGEMM_ARGTYPES(float)); +template <> +void bgemm_internal>(CUDABLAS_BGEMM_ARGTYPES(c10::complex)); +template <> +void bgemm_internal>(CUDABLAS_BGEMM_ARGTYPES(c10::complex)); +template <> +void bgemm_internal(CUDABLAS_BGEMM_ARGTYPES(at::Half)); +template <> +void bgemm_internal(CUDABLAS_BGEMM_ARGTYPES(at::BFloat16)); + +#if defined(USE_ROCM) && ROCM_VERSION <= 50500 +// ROCm 5.6 hipblas matches the const Dtype *A API, but prior hipblas does not. 
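The strided-batched entry point declared above follows the same column-major conventions, with per-batch strides given in elements. A hypothetical call sketch (names and layout assumptions are mine, not from this header):

```cpp
// Hypothetical sketch: C_i = A_i * B_i for i in [0, batches) via bgemm.
// Batches are packed contiguously; strides count elements between batches.
#include <ATen/cuda/CUDABlas.h>

void batched_sgemm_example(const float* A, const float* B, float* C,
                           int64_t m, int64_t n, int64_t k, int64_t batches) {
  at::cuda::blas::bgemm<float>(
      'n', 'n', m, n, k, /*alpha=*/1.0f,
      A, /*lda=*/m, /*stridea=*/m * k,
      B, /*ldb=*/k, /*strideb=*/k * n,
      /*beta=*/0.0f,
      C, /*ldc=*/m, /*stridec=*/m * n,
      /*num_batches=*/batches);
}
```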
+#define CUDABLAS_TRSM_ARGTYPES(Dtype) \ + hipblasHandle_t handle, hipblasSideMode_t side, hipblasFillMode_t uplo, \ + hipblasOperation_t trans, hipblasDiagType_t diag, int m, int n, \ + const Dtype *alpha, Dtype *A, int lda, Dtype *B, int ldb +#else +#define CUDABLAS_TRSM_ARGTYPES(Dtype) \ + cublasHandle_t handle, cublasSideMode_t side, cublasFillMode_t uplo, \ + cublasOperation_t trans, cublasDiagType_t diag, int m, int n, \ + const Dtype *alpha, const Dtype *A, int lda, Dtype *B, int ldb +#endif + +template +inline void trsm(CUDABLAS_TRSM_ARGTYPES(Dtype)) { + TORCH_INTERNAL_ASSERT(false, "at::cuda::blas::trsm: not implemented for ", typeid(Dtype).name()); +} + +template <> +TORCH_CUDA_CU_API void trsm(CUDABLAS_TRSM_ARGTYPES(float)); +template <> +TORCH_CUDA_CU_API void trsm(CUDABLAS_TRSM_ARGTYPES(double)); +template <> +TORCH_CUDA_CU_API void trsm>(CUDABLAS_TRSM_ARGTYPES(c10::complex)); +template <> +TORCH_CUDA_CU_API void trsm>(CUDABLAS_TRSM_ARGTYPES(c10::complex)); + +#define CUDABLAS_TRSM_BATCHED_ARGTYPES(Dtype) \ + cublasHandle_t handle, cublasSideMode_t side, cublasFillMode_t uplo, \ + cublasOperation_t trans, cublasDiagType_t diag, int m, int n, \ + const Dtype *alpha, Dtype *A[], int lda, Dtype *B[], int ldb, \ + int batchCount + +template +inline void trsmBatched(CUDABLAS_TRSM_BATCHED_ARGTYPES(Dtype)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::blas::trsmBatched: not implemented for ", + typeid(Dtype).name()); +} + +template <> +TORCH_CUDA_CU_API void trsmBatched(CUDABLAS_TRSM_BATCHED_ARGTYPES(float)); +template <> +TORCH_CUDA_CU_API void trsmBatched(CUDABLAS_TRSM_BATCHED_ARGTYPES(double)); +template <> +TORCH_CUDA_CU_API void trsmBatched>(CUDABLAS_TRSM_BATCHED_ARGTYPES(c10::complex)); +template <> +TORCH_CUDA_CU_API void trsmBatched>(CUDABLAS_TRSM_BATCHED_ARGTYPES(c10::complex)); + +/* LEVEL 2 BLAS FUNCTIONS */ + +#define CUDABLAS_GEMV_ARGTYPES(Dtype) \ + char trans, int64_t m, int64_t n, Dtype alpha, const Dtype *a, int64_t lda, \ + const Dtype *x, int64_t incx, Dtype beta, Dtype *y, int64_t incy + +template +inline void gemv(CUDABLAS_GEMV_ARGTYPES(Dtype)) { + AT_ERROR("at::cuda::blas::gemv: not implemented for ", typeid(Dtype).name()); +} + +template <> +void gemv(CUDABLAS_GEMV_ARGTYPES(double)); +template <> +void gemv(CUDABLAS_GEMV_ARGTYPES(float)); +template <> +void gemv>(CUDABLAS_GEMV_ARGTYPES(c10::complex)); +template <> +void gemv>(CUDABLAS_GEMV_ARGTYPES(c10::complex)); +template <> +void gemv(CUDABLAS_GEMV_ARGTYPES(at::Half)); +template <> +void gemv(CUDABLAS_GEMV_ARGTYPES(at::BFloat16)); + +/* LEVEL 1 BLAS FUNCTIONS */ + +#define CUDABLAS_DOT_ARGTYPES(Dtype) \ + cublasHandle_t handle, int n, const Dtype *x, int incx, const Dtype *y, \ + int incy, Dtype *result + +template +inline void dot(CUDABLAS_DOT_ARGTYPES(Dtype)) { + AT_ERROR("at::cuda::blas::dot: not implemented for ", typeid(Dtype).name()); +} + +template <> +void dot(CUDABLAS_DOT_ARGTYPES(double)); +template <> +void dot(CUDABLAS_DOT_ARGTYPES(float)); +template <> +void dot(CUDABLAS_DOT_ARGTYPES(at::Half)); +template <> +void dot(CUDABLAS_DOT_ARGTYPES(at::BFloat16)); +template <> +void dot>(CUDABLAS_DOT_ARGTYPES(c10::complex)); +template <> +void dot>(CUDABLAS_DOT_ARGTYPES(c10::complex)); + +template +inline void vdot(CUDABLAS_DOT_ARGTYPES(Dtype)) { + AT_ERROR("at::cuda::blas::vdot: not implemented for ", typeid(Dtype).name()); +} + +template <> +void vdot>(CUDABLAS_DOT_ARGTYPES(c10::complex)); +template <> +void vdot>(CUDABLAS_DOT_ARGTYPES(c10::complex)); + +#define CUDABLAS_GETRS_ARGTYPES(Dtype) \ + 
cublasHandle_t handle, cublasOperation_t trans, \ + int n, int nrhs, Dtype** dA_array, int lda, int* ipiv_array, \ + Dtype** dB_array, int ldb, int* info_array, int batchsize + +template +void getrsBatched(CUDABLAS_GETRS_ARGTYPES(Dtype)) { + TORCH_INTERNAL_ASSERT(false, "at::cuda::blas::getrsBatched: not implemented for ", + typeid(Dtype).name()); +} +template<> +TORCH_CUDA_CU_API void getrsBatched(CUDABLAS_GETRS_ARGTYPES(float)); +template<> +TORCH_CUDA_CU_API void getrsBatched(CUDABLAS_GETRS_ARGTYPES(double)); +template<> +TORCH_CUDA_CU_API void getrsBatched>(CUDABLAS_GETRS_ARGTYPES(c10::complex)); +template<> +TORCH_CUDA_CU_API void getrsBatched>(CUDABLAS_GETRS_ARGTYPES(c10::complex)); + +#define CUDABLAS_GEQRF_BATCHED_ARGTYPES(Dtype) \ + cublasHandle_t handle, int m, int n, Dtype **A_array, int lda, \ + Dtype **tau_array, int *info, int batchsize + +template +void geqrfBatched(CUDABLAS_GEQRF_BATCHED_ARGTYPES(Dtype)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::blas::geqrfBatched: not implemented for ", + typeid(Dtype).name()); +} +template <> +TORCH_CUDA_CU_API void geqrfBatched(CUDABLAS_GEQRF_BATCHED_ARGTYPES(float)); +template <> +TORCH_CUDA_CU_API void geqrfBatched(CUDABLAS_GEQRF_BATCHED_ARGTYPES(double)); +template <> +TORCH_CUDA_CU_API void geqrfBatched>( + CUDABLAS_GEQRF_BATCHED_ARGTYPES(c10::complex)); +template <> +TORCH_CUDA_CU_API void geqrfBatched>( + CUDABLAS_GEQRF_BATCHED_ARGTYPES(c10::complex)); + +#define CUDABLAS_GETRF_ARGTYPES(Dtype) \ + int n, Dtype** dA_array, int ldda, int* ipiv_array, int* info_array, int batchsize + +template +void getrfBatched(CUDABLAS_GETRF_ARGTYPES(Dtype)) { + TORCH_CHECK(false, "at::cuda::blas::getrfBatched: not implemented for ", typeid(Dtype).name()); +} +template<> +TORCH_CUDA_CU_API void getrfBatched(CUDABLAS_GETRF_ARGTYPES(float)); +template<> +TORCH_CUDA_CU_API void getrfBatched(CUDABLAS_GETRF_ARGTYPES(double)); +template<> +TORCH_CUDA_CU_API void getrfBatched>(CUDABLAS_GETRF_ARGTYPES(c10::complex)); +template<> +TORCH_CUDA_CU_API void getrfBatched>(CUDABLAS_GETRF_ARGTYPES(c10::complex)); + +#define CUDABLAS_GELS_BATCHED_ARGTYPES(Dtype) \ + cublasHandle_t handle, cublasOperation_t trans, int m, int n, int nrhs, Dtype** dA_array, int ldda, Dtype** dC_array, int lddc, int* info, int *devInfoArray, int batchSize + +template +void gelsBatched(CUDABLAS_GELS_BATCHED_ARGTYPES(Dtype)) { + TORCH_INTERNAL_ASSERT(false, "at::cuda::blas::gelsBatched: not implemented for ", typeid(Dtype).name()); +} + +template<> +TORCH_CUDA_CU_API void gelsBatched(CUDABLAS_GELS_BATCHED_ARGTYPES(double)); +template<> +TORCH_CUDA_CU_API void gelsBatched(CUDABLAS_GELS_BATCHED_ARGTYPES(float)); +template<> +TORCH_CUDA_CU_API void gelsBatched>(CUDABLAS_GELS_BATCHED_ARGTYPES(c10::complex)); +template<> +TORCH_CUDA_CU_API void gelsBatched>(CUDABLAS_GELS_BATCHED_ARGTYPES(c10::complex)); + +} // namespace at::cuda::blas diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContext.h b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContext.h new file mode 100644 index 0000000000000000000000000000000000000000..0cb024dd701b284502965cba681f1f9beb214592 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContext.h @@ -0,0 +1,9 @@ +#pragma once + +#include + +// Preserved for BC, as many files depend on these includes +#include +#include +#include +#include diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContextLight.h 
b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContextLight.h new file mode 100644 index 0000000000000000000000000000000000000000..4ec35f59a210860054ecf84807d173b1c0766a58 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContextLight.h @@ -0,0 +1,95 @@ +#pragma once +// Light-weight version of CUDAContext.h with fewer transitive includes + +#include + +#include +#include +#include + +// cublasLT was introduced in CUDA 10.1 but we enable only for 11.1 that also +// added bf16 support +#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700) +#include +#endif + +#ifdef CUDART_VERSION +#include +#endif + +#if defined(USE_ROCM) && ROCM_VERSION >= 50300 +#include +#endif + +#include +#include + +namespace c10 { +struct Allocator; +} + +namespace at::cuda { + +/* +A common CUDA interface for ATen. + +This interface is distinct from CUDAHooks, which defines an interface that links +to both CPU-only and CUDA builds. That interface is intended for runtime +dispatch and should be used from files that are included in both CPU-only and +CUDA builds. + +CUDAContext, on the other hand, should be preferred by files only included in +CUDA builds. It is intended to expose CUDA functionality in a consistent +manner. + +This means there is some overlap between the CUDAContext and CUDAHooks, but +the choice of which to use is simple: use CUDAContext when in a CUDA-only file, +use CUDAHooks otherwise. + +Note that CUDAContext simply defines an interface with no associated class. +It is expected that the modules whose functions compose this interface will +manage their own state. There is only a single CUDA context/state. +*/ + +/** + * DEPRECATED: use device_count() instead + */ +inline int64_t getNumGPUs() { + return c10::cuda::device_count(); +} + +/** + * CUDA is available if we compiled with CUDA, and there are one or more + * devices. If we compiled with CUDA but there is a driver problem, etc., + * this function will report CUDA is not available (rather than raise an error.) 
+ */ +inline bool is_available() { + return c10::cuda::device_count() > 0; +} + +TORCH_CUDA_CPP_API cudaDeviceProp* getCurrentDeviceProperties(); + +TORCH_CUDA_CPP_API int warp_size(); + +TORCH_CUDA_CPP_API cudaDeviceProp* getDeviceProperties(c10::DeviceIndex device); + +TORCH_CUDA_CPP_API bool canDeviceAccessPeer( + c10::DeviceIndex device, + c10::DeviceIndex peer_device); + +TORCH_CUDA_CPP_API c10::Allocator* getCUDADeviceAllocator(); + +/* Handles */ +TORCH_CUDA_CPP_API cusparseHandle_t getCurrentCUDASparseHandle(); +TORCH_CUDA_CPP_API cublasHandle_t getCurrentCUDABlasHandle(); +#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700) +TORCH_CUDA_CPP_API cublasLtHandle_t getCurrentCUDABlasLtHandle(); +#endif + +TORCH_CUDA_CPP_API void clearCublasWorkspaces(); + +#if defined(CUDART_VERSION) || defined(USE_ROCM) && ROCM_VERSION >= 50300 +TORCH_CUDA_CPP_API cusolverDnHandle_t getCurrentCUDASolverDnHandle(); +#endif + +} // namespace at::cuda diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADataType.h b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADataType.h new file mode 100644 index 0000000000000000000000000000000000000000..7b3f5c391f2ac074b2a50961fafc1b42dc1a2320 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADataType.h @@ -0,0 +1,115 @@ +#pragma once + +#include + +#include +#include + +namespace at::cuda { + +template +cudaDataType getCudaDataType() { + TORCH_INTERNAL_ASSERT(false, "Cannot convert type ", typeid(scalar_t).name(), " to cudaDataType.") +} + +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_16F; +} +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_32F; +} +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_64F; +} +template<> inline cudaDataType getCudaDataType>() { + return CUDA_C_16F; +} +template<> inline cudaDataType getCudaDataType>() { + return CUDA_C_32F; +} +template<> inline cudaDataType getCudaDataType>() { + return CUDA_C_64F; +} + +// HIP doesn't define integral types +#ifndef USE_ROCM +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_8U; +} +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_8I; +} +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_32I; +} +#endif + +#if !defined(USE_ROCM) +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_16I; +} +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_64I; +} +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_16BF; +} +#endif + +inline cudaDataType ScalarTypeToCudaDataType(const c10::ScalarType& scalar_type) { + switch (scalar_type) { +// HIP doesn't define integral types +#ifndef USE_ROCM + case c10::ScalarType::Byte: + return CUDA_R_8U; + case c10::ScalarType::Char: + return CUDA_R_8I; + case c10::ScalarType::Int: + return CUDA_R_32I; +#endif + case c10::ScalarType::Half: + return CUDA_R_16F; + case c10::ScalarType::Float: + return CUDA_R_32F; + case c10::ScalarType::Double: + return CUDA_R_64F; + case c10::ScalarType::ComplexHalf: + return CUDA_C_16F; + case c10::ScalarType::ComplexFloat: + return CUDA_C_32F; + case c10::ScalarType::ComplexDouble: + return CUDA_C_64F; +#if !defined(USE_ROCM) + case c10::ScalarType::Short: + return CUDA_R_16I; + case c10::ScalarType::Long: + return CUDA_R_64I; + case c10::ScalarType::BFloat16: + return CUDA_R_16BF; +#if defined(CUDA_VERSION) && CUDA_VERSION >= 11080 + case c10::ScalarType::Float8_e4m3fn: + return 
CUDA_R_8F_E4M3; + case c10::ScalarType::Float8_e5m2: + return CUDA_R_8F_E5M2; +#endif +#else // USE_ROCM + case c10::ScalarType::BFloat16: + return CUDA_R_16BF; +#if defined(HIP_NEW_TYPE_ENUMS) + case c10::ScalarType::Float8_e4m3fnuz: + return HIP_R_8F_E4M3_FNUZ; + case c10::ScalarType::Float8_e5m2fnuz: + return HIP_R_8F_E5M2_FNUZ; +#else + case c10::ScalarType::Float8_e4m3fnuz: + return static_cast(1000); + case c10::ScalarType::Float8_e5m2fnuz: + return static_cast(1001); +#endif +#endif + default: + TORCH_INTERNAL_ASSERT(false, "Cannot convert ScalarType ", scalar_type, " to cudaDataType.") + } +} + +} // namespace at::cuda diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADevice.h b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADevice.h new file mode 100644 index 0000000000000000000000000000000000000000..ba9a5eb849a091be6e86658a8c7af87a0a3fbb8f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADevice.h @@ -0,0 +1,23 @@ +#pragma once + +#include + +#include +#include + +namespace at::cuda { + +inline Device getDeviceFromPtr(void* ptr) { + cudaPointerAttributes attr{}; + + AT_CUDA_CHECK(cudaPointerGetAttributes(&attr, ptr)); + +#if !defined(USE_ROCM) + TORCH_CHECK(attr.type != cudaMemoryTypeUnregistered, + "The specified pointer resides on host memory and is not registered with any CUDA device."); +#endif + + return {c10::DeviceType::CUDA, static_cast(attr.device)}; +} + +} // namespace at::cuda diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAEvent.h b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAEvent.h new file mode 100644 index 0000000000000000000000000000000000000000..e4b34b46d38d970fbce15125ef00894e7603cf74 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAEvent.h @@ -0,0 +1,208 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +namespace at::cuda { + +/* +* CUDAEvents are movable not copyable wrappers around CUDA's events. +* +* CUDAEvents are constructed lazily when first recorded unless it is +* reconstructed from a cudaIpcEventHandle_t. The event has a device, and this +* device is acquired from the first recording stream. However, if reconstructed +* from a handle, the device should be explicitly specified; or if ipc_handle() is +* called before the event is ever recorded, it will use the current device. +* Later streams that record the event must match this device. +*/ +struct TORCH_CUDA_CPP_API CUDAEvent { + // Constructors + // Default value for `flags` is specified below - it's cudaEventDisableTiming + CUDAEvent() noexcept = default; + CUDAEvent(unsigned int flags) noexcept : flags_{flags} {} + + CUDAEvent( + DeviceIndex device_index, const cudaIpcEventHandle_t* handle) { + device_index_ = device_index; + CUDAGuard guard(device_index_); + + AT_CUDA_CHECK(cudaIpcOpenEventHandle(&event_, *handle)); + is_created_ = true; + } + + // Note: event destruction done on creating device to avoid creating a + // CUDA context on other devices. + ~CUDAEvent() { + try { + if (is_created_) { + CUDAGuard guard(device_index_); + const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); + if (C10_UNLIKELY(interp)) { + (*interp)->trace_gpu_event_deletion(reinterpret_cast(event_)); + } + AT_CUDA_CHECK(cudaEventDestroy(event_)); + } + } catch (...) 
{ /* No throw */ } + } + + CUDAEvent(const CUDAEvent&) = delete; + CUDAEvent& operator=(const CUDAEvent&) = delete; + + CUDAEvent(CUDAEvent&& other) noexcept { moveHelper(std::move(other)); } + CUDAEvent& operator=(CUDAEvent&& other) noexcept { + if (this != &other) { + moveHelper(std::move(other)); + } + return *this; + } + + operator cudaEvent_t() const { return event(); } + + // Less than operator (to allow use in sets) + friend bool operator<(const CUDAEvent& left, const CUDAEvent& right) { + return left.event_ < right.event_; + } + + optional device() const { + if (is_created_) { + return at::Device(at::kCUDA, device_index_); + } else { + return {}; + } + } + + bool isCreated() const { return is_created_; } + DeviceIndex device_index() const {return device_index_;} + cudaEvent_t event() const { return event_; } + + // Note: cudaEventQuery can be safely called from any device + bool query() const { + if (!is_created_) { + return true; + } + + cudaError_t err = cudaEventQuery(event_); + if (err == cudaSuccess) { + return true; + } else if (err != cudaErrorNotReady) { + C10_CUDA_CHECK(err); + } else { + // ignore and clear the error if not ready + (void)cudaGetLastError(); + } + + return false; + } + + void record() { record(getCurrentCUDAStream()); } + + void recordOnce(const CUDAStream& stream) { + if (!was_recorded_) record(stream); + } + + // Note: cudaEventRecord must be called on the same device as the event. + void record(const CUDAStream& stream) { + if (!is_created_) { + createEvent(stream.device_index()); + } + + TORCH_CHECK(device_index_ == stream.device_index(), "Event device ", device_index_, + " does not match recording stream's device ", stream.device_index(), "."); + CUDAGuard guard(device_index_); + AT_CUDA_CHECK(cudaEventRecord(event_, stream)); + const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); + if (C10_UNLIKELY(interp)) { + (*interp)->trace_gpu_event_record( + reinterpret_cast(event_), + reinterpret_cast(stream.stream()) + ); + } + was_recorded_ = true; + } + + // Note: cudaStreamWaitEvent must be called on the same device as the stream. + // The event has no actual GPU resources associated with it. 
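+  // Illustrative sketch (not from the original header) of the usual
+  // record/block pattern for ordering work across streams; the stream
+  // variables below are hypothetical:
+  //
+  //   at::cuda::CUDAStream producer = at::cuda::getStreamFromPool();
+  //   at::cuda::CUDAStream consumer = at::cuda::getStreamFromPool();
+  //   at::cuda::CUDAEvent done;
+  //   done.record(producer);   // capture the producer's progress
+  //   done.block(consumer);    // consumer waits on the GPU; the host is not blocked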
+ void block(const CUDAStream& stream) { + if (is_created_) { + CUDAGuard guard(stream.device_index()); + AT_CUDA_CHECK(cudaStreamWaitEvent(stream, event_, 0)); + const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); + if (C10_UNLIKELY(interp)) { + (*interp)->trace_gpu_event_wait( + reinterpret_cast(event_), + reinterpret_cast(stream.stream()) + ); + } + } + } + + // Note: cudaEventElapsedTime can be safely called from any device + float elapsed_time(const CUDAEvent& other) const { + TORCH_CHECK(is_created_ && other.isCreated(), + "Both events must be recorded before calculating elapsed time."); + float time_ms = 0; + // raise cudaErrorNotReady if either event is recorded but not yet completed + AT_CUDA_CHECK(cudaEventElapsedTime(&time_ms, event_, other.event_)); + return time_ms; + } + + // Note: cudaEventSynchronize can be safely called from any device + void synchronize() const { + if (is_created_) { + const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); + if (C10_UNLIKELY(interp)) { + (*interp)->trace_gpu_event_synchronization(reinterpret_cast(event_)); + } + AT_CUDA_CHECK(cudaEventSynchronize(event_)); + } + } + + // Note: cudaIpcGetEventHandle must be called on the same device as the event + void ipc_handle(cudaIpcEventHandle_t * handle) { + if (!is_created_) { + // this CUDAEvent object was initially constructed from flags but event_ + // is not created yet. + createEvent(getCurrentCUDAStream().device_index()); + } + CUDAGuard guard(device_index_); + AT_CUDA_CHECK(cudaIpcGetEventHandle(handle, event_)); + } + +private: + unsigned int flags_ = cudaEventDisableTiming; + bool is_created_ = false; + bool was_recorded_ = false; + DeviceIndex device_index_ = -1; + cudaEvent_t event_{}; + + void createEvent(DeviceIndex device_index) { + device_index_ = device_index; + CUDAGuard guard(device_index_); + AT_CUDA_CHECK(cudaEventCreateWithFlags(&event_, flags_)); + const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); + if (C10_UNLIKELY(interp)) { + (*interp)->trace_gpu_event_creation(reinterpret_cast(event_)); + } + is_created_ = true; + } + + void moveHelper(CUDAEvent&& other) { + std::swap(flags_, other.flags_); + std::swap(is_created_, other.is_created_); + std::swap(was_recorded_, other.was_recorded_); + std::swap(device_index_, other.device_index_); + std::swap(event_, other.event_); + } +}; + +} // namespace at::cuda diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGeneratorImpl.h b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGeneratorImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..2fe8a6f6c8f4fe9ac6a8bc8f78c145ae48e15c9c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGeneratorImpl.h @@ -0,0 +1,138 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace at { +/** + * Note [CUDA Graph-safe RNG states] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * Strategy: + * ~~~~~~~~~ + * (It helps to look at + * cuda/detail/PhiloxCudaStateRaw.cuh and + * cuda/detail/UnpackRaw.cuh + * while you read this.) + * + * A CUDA graph containing multiple RNG ops behaves like a + * single giant kernel from the perspective of ops external + * to the graph. During graph capture, logic in CUDAGeneratorImpl + * records the total of all offset increments that occur in the + * graphed region, and records the final total as the offset for + * the entire graph. 
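+ * (For example, if the captured region contains three RNG kernels that each
+ * request an offset increment of 4, the offset recorded for the whole graph
+ * is 12.)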
+ * + * When the graph reruns, the logic that reruns it + * increments this device's CUDA generator's offset + * by that total. + * + * Meanwhile, within the graph, at capture time, instead of + * populating PhiloxCudaStates with the uint64_t offset pulled + * directly from the global state, PhiloxCudaState uses a pointer + * to a one-element stream-local int64_t device tensor + * holding an initial offset value, and a uint64_t holding an + * intra-graph offset. (The intra-graph offset starts from zero + * when capture begins.) In each consumer kernel, + * at::cuda::philox::unpack computes the offset to use for this kernel + * as intra-graph offset + *initial offset. + * + * When the graph reruns, the logic that reruns it first + * fill_s the initial offset tensor with this device's + * CUDA generator's current offset. + * + * The control flow above ensures graphed execution is bitwise + * identical to eager execution as long as RNG ops are enqueued + * from a single thread, even if RNG ops and graphs containing + * RNG ops are enqueued and run simultaneously on multiple streams. + * + * Usage: + * ~~~~~~ + * PhiloxCudaState in this file, and unpack() in + * cuda/CUDAGraphsUtils.cuh allow non-divergent use of + * CUDAGeneratorImpl whether graph capture is underway or not. + * + * Each PhiloxCudaState instance should be used for one and only one + * consumer kernel. + * + * Example (see e.g. native/cuda/Dropout.cu): + * + * #include + * #include + * + * __global__ void kernel(..., PhiloxCudaState philox_args) { + * auto seeds = at::cuda::philox::unpack(philox_args); + * IndexType idx = blockIdx.x * blockDim.x + threadIdx.x; + * curandStatePhilox4_32_10_t state; + * curand_init(std::get<0>(seeds), // seed + * idx, // per-thread subsequence + * std::get<1>(seeds), // offset in subsequence + * &state); + * ... + * } + * + * host_caller(...) { + * PhiloxCudaState rng_engine_inputs; + * { + * // See Note [Acquire lock when using random generators] + * std::lock_guard lock(gen->mutex_); + * + * // gen could be HostState or DevState here! No divergent code needed! + * rng_engine_inputs = gen->philox_cuda_state(offset_increment); + * } + * kernel<<<...>>>(..., rng_engine_inputs); + * } + * + */ + +struct TORCH_CUDA_CPP_API CUDAGeneratorImpl : public c10::GeneratorImpl { + // Constructors + CUDAGeneratorImpl(DeviceIndex device_index = -1); + ~CUDAGeneratorImpl() override = default; + + // CUDAGeneratorImpl methods + std::shared_ptr clone() const; + void set_current_seed(uint64_t seed) override; + void set_offset(uint64_t offset) override; + uint64_t get_offset() const override; + uint64_t current_seed() const override; + uint64_t seed() override; + void set_state(const c10::TensorImpl& new_state) override; + c10::intrusive_ptr get_state() const override; + void set_philox_offset_per_thread(uint64_t offset); + uint64_t philox_offset_per_thread() const; + void capture_prologue(int64_t* seed_extragraph, int64_t* offset_extragraph); + uint64_t capture_epilogue(); + PhiloxCudaState philox_cuda_state(uint64_t increment); + + bool reset_rnn_state() { + return !no_reset_rnn_state_.test_and_set(); + } + + // Temporarily accommodates call sites that use philox_engine_inputs. + // Allows incremental refactor of call sites to use philox_cuda_state. 
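+  // The returned pair is (seed, offset). New code should prefer
+  // philox_cuda_state, which (per the note above) also works while a CUDA
+  // graph capture is underway.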
+ std::pair philox_engine_inputs(uint64_t increment); + + static c10::DeviceType device_type(); + +private: + CUDAGeneratorImpl* clone_impl() const override; + uint64_t seed_ = default_rng_seed_val; + uint64_t philox_offset_per_thread_ = 0; + int64_t* seed_extragraph_{}; + int64_t* offset_extragraph_{}; + uint32_t offset_intragraph_ = 0; + bool graph_expects_this_gen_ = false; + std::atomic_flag no_reset_rnn_state_; +}; + +namespace cuda::detail { + +TORCH_CUDA_CPP_API const Generator& getDefaultCUDAGenerator( + DeviceIndex device_index = -1); +TORCH_CUDA_CPP_API Generator createCUDAGenerator(DeviceIndex device_index = -1); + +} // namespace cuda::detail +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraph.h b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraph.h new file mode 100644 index 0000000000000000000000000000000000000000..b395de9a252a7e1656340b572cc110113686b207 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraph.h @@ -0,0 +1,92 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace at { + +struct CUDAGeneratorImpl; + +namespace cuda { + +// Standalone way to get a unique mempool id usable as a pool=... argument +// to CUDAGraph::capture_begin +TORCH_CUDA_CPP_API MempoolId_t graph_pool_handle(); + +struct TORCH_CUDA_CPP_API CUDAGraph { + CUDAGraph(); + ~CUDAGraph(); + + static void inc_pending_event_queries(); + static void dec_pending_event_queries(); + static int num_pending_event_queries(); + void capture_begin(MempoolId_t pool={0, 0}, cudaStreamCaptureMode capture_mode = cudaStreamCaptureModeGlobal); + void capture_end(); + void replay(); + void reset(); + MempoolId_t pool(); + void enable_debug_mode(); + void debug_dump(const std::string& debug_path); + + protected: +#if !defined(USE_ROCM) || ROCM_VERSION >= 50300 + cudaGraph_t graph_ = NULL; + cudaGraphExec_t graph_exec_ = NULL; +#endif + + static std::atomic pending_event_queries; + + // internal states so reset() can do its best cleaning up + // Set to true in capture_end if cudaStreamEndCapture succeeded + // Set back to false soon after, when graph_ is consumed by cudaGraphInstantiate + // to create graph_exec_, then graph_ is deleted + bool has_graph_ = false; + // Set to true in capture_end if cudaGraphInstantiate succeeded + bool has_graph_exec_ = false; + + // uuid of this instance's current capture, used to + // specify the pool. + CaptureId_t id_; + + // the ID assigned by cuda during graph capture, + // used to identify when a stream is participating in capture + CaptureId_t capture_id_ = -1; + + // uuid used to request a particular private mempool from CUDACachingAllocator. + // By default, this will be set to {id_, 0}. + // + // If capture_begin is called with "pool=other_graph.pool()", this graph's mempool_id_ + // will be set to the other graph's mempool_id_, and therefore share a mempool with the + // other graph. + // + // If capture_begin is called with "pool=handle" where "handle" came from graph_pool_handle(), + // it will share a mempool with any other captures that used "pool=handle". + // + // Sharing a mempool across graphs saves memory, and it's safe if you + // know you'll replay those graphs in the same order you captured them. + MempoolId_t mempool_id_; + + // Stream on which capture began + at::cuda::CUDAStream capture_stream_; + + // Default generator on device where capture began + at::CUDAGeneratorImpl* capture_gen_; + + // Device where capture occurred. 
Right now, for simplicity, we require all ops + // in a capture to run on the same device, but this is a limitation of CUDAGraph, + // not CUDA itself. We can straightforwardly modify CUDAGraph to support multi-device + // captures if needed. + int capture_dev_; + + // RNG state trackers + at::Tensor seed_extragraph_; + at::Tensor offset_extragraph_; + uint64_t wholegraph_increment_; +}; + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraphsUtils.cuh b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraphsUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..f3e27a7b159ac6029fc85fd91a048202dfb352e1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraphsUtils.cuh @@ -0,0 +1,57 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +// c10/cuda/CUDAGraphsC10Utils.h has utils used by both c10 and aten. +// This file adds utils used by aten only. + +namespace at::cuda { + +using CaptureId_t = c10::cuda::CaptureId_t; +using CaptureStatus = c10::cuda::CaptureStatus; + +// Use this version where you don't want to create a CUDA context if none exists. +inline CaptureStatus currentStreamCaptureStatus() { +#if !defined(USE_ROCM) || ROCM_VERSION >= 50300 + // don't create a context if we don't have to + if (c10::cuda::hasPrimaryContext(c10::cuda::current_device())) { + return c10::cuda::currentStreamCaptureStatusMayInitCtx(); + } else { + return CaptureStatus::None; + } +#else + return CaptureStatus::None; +#endif +} + +inline void assertNotCapturing(std::string attempt) { + auto status = currentStreamCaptureStatus(); + TORCH_CHECK(status == CaptureStatus::None, + attempt, + " during CUDA graph capture. If you need this call to be captured, " + "please file an issue. " + "Current cudaStreamCaptureStatus: ", + status); +} + +inline void errorIfCapturingCudnnBenchmark(std::string version_specific) { + auto status = currentStreamCaptureStatus(); + TORCH_CHECK(status == CaptureStatus::None, + "Current cudaStreamCaptureStatus: ", + status, + "\nCapturing ", + version_specific, + "is prohibited. Possible causes of this error:\n" + "1. No warmup iterations occurred before capture.\n" + "2. 
The convolutions you're trying to capture use dynamic shapes, " + "in which case capturing them is generally prohibited."); +} + +} // namespace at::cuda diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseDescriptors.h b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseDescriptors.h new file mode 100644 index 0000000000000000000000000000000000000000..9e3d50f34e77b3e59feb7dd6499703459785f1c3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseDescriptors.h @@ -0,0 +1,290 @@ +#pragma once + +#include +#include +#include + +#include + +#if defined(USE_ROCM) +#include +#endif + +namespace at::cuda::sparse { + +template +struct CuSparseDescriptorDeleter { + void operator()(T* x) { + if (x != nullptr) { + TORCH_CUDASPARSE_CHECK(destructor(x)); + } + } +}; + +template +class CuSparseDescriptor { + public: + T* descriptor() const { + return descriptor_.get(); + } + T* descriptor() { + return descriptor_.get(); + } + + protected: + std::unique_ptr> descriptor_; +}; + +#if AT_USE_CUSPARSE_CONST_DESCRIPTORS() || AT_USE_HIPSPARSE_CONST_DESCRIPTORS() +template +struct ConstCuSparseDescriptorDeleter { + void operator()(T* x) { + if (x != nullptr) { + TORCH_CUDASPARSE_CHECK(destructor(x)); + } + } +}; + +template +class ConstCuSparseDescriptor { + public: + T* descriptor() const { + return descriptor_.get(); + } + T* descriptor() { + return descriptor_.get(); + } + + protected: + std::unique_ptr> descriptor_; +}; +#endif // AT_USE_CUSPARSE_CONST_DESCRIPTORS || AT_USE_HIPSPARSE_CONST_DESCRIPTORS + +#if defined(USE_ROCM) +using cusparseMatDescr = std::remove_pointer::type; +using cusparseDnMatDescr = std::remove_pointer::type; +using cusparseDnVecDescr = std::remove_pointer::type; +using cusparseSpMatDescr = std::remove_pointer::type; +using cusparseSpMatDescr = std::remove_pointer::type; +using cusparseSpGEMMDescr = std::remove_pointer::type; +#if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() +using bsrsv2Info = std::remove_pointer::type; +using bsrsm2Info = std::remove_pointer::type; +#endif +#endif + +// NOTE: This is only needed for CUDA 11 and earlier, since CUDA 12 introduced +// API for const descriptors +cusparseStatus_t destroyConstDnMat(const cusparseDnMatDescr* dnMatDescr); + +class TORCH_CUDA_CPP_API CuSparseMatDescriptor + : public CuSparseDescriptor { + public: + CuSparseMatDescriptor() { + cusparseMatDescr_t raw_descriptor; + TORCH_CUDASPARSE_CHECK(cusparseCreateMatDescr(&raw_descriptor)); + descriptor_.reset(raw_descriptor); + } + + CuSparseMatDescriptor(bool upper, bool unit) { + cusparseFillMode_t fill_mode = + upper ? CUSPARSE_FILL_MODE_UPPER : CUSPARSE_FILL_MODE_LOWER; + cusparseDiagType_t diag_type = + unit ? 
CUSPARSE_DIAG_TYPE_UNIT : CUSPARSE_DIAG_TYPE_NON_UNIT; + cusparseMatDescr_t raw_descriptor; + TORCH_CUDASPARSE_CHECK(cusparseCreateMatDescr(&raw_descriptor)); + TORCH_CUDASPARSE_CHECK(cusparseSetMatFillMode(raw_descriptor, fill_mode)); + TORCH_CUDASPARSE_CHECK(cusparseSetMatDiagType(raw_descriptor, diag_type)); + descriptor_.reset(raw_descriptor); + } +}; + +#if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() + +class TORCH_CUDA_CPP_API CuSparseBsrsv2Info + : public CuSparseDescriptor { + public: + CuSparseBsrsv2Info() { + bsrsv2Info_t raw_descriptor; + TORCH_CUDASPARSE_CHECK(cusparseCreateBsrsv2Info(&raw_descriptor)); + descriptor_.reset(raw_descriptor); + } +}; + +class TORCH_CUDA_CPP_API CuSparseBsrsm2Info + : public CuSparseDescriptor { + public: + CuSparseBsrsm2Info() { + bsrsm2Info_t raw_descriptor; + TORCH_CUDASPARSE_CHECK(cusparseCreateBsrsm2Info(&raw_descriptor)); + descriptor_.reset(raw_descriptor); + } +}; + +#endif // AT_USE_HIPSPARSE_TRIANGULAR_SOLVE + +#if AT_USE_CUSPARSE_GENERIC_API() || AT_USE_HIPSPARSE_GENERIC_API() + +cusparseIndexType_t getCuSparseIndexType(const c10::ScalarType& scalar_type); + +#if AT_USE_CUSPARSE_NON_CONST_DESCRIPTORS() || AT_USE_HIPSPARSE_NON_CONST_DESCRIPTORS() +class TORCH_CUDA_CPP_API CuSparseDnMatDescriptor + : public CuSparseDescriptor { + public: + explicit CuSparseDnMatDescriptor(const Tensor& input, int64_t batch_offset = -1); +}; + +class TORCH_CUDA_CPP_API CuSparseConstDnMatDescriptor + : public CuSparseDescriptor { + public: + explicit CuSparseConstDnMatDescriptor(const Tensor& input, int64_t batch_offset = -1); + cusparseDnMatDescr* unsafe_mutable_descriptor() const { + return const_cast(descriptor()); + } + cusparseDnMatDescr* unsafe_mutable_descriptor() { + return const_cast(descriptor()); + } +}; + +class TORCH_CUDA_CPP_API CuSparseDnVecDescriptor + : public CuSparseDescriptor { + public: + explicit CuSparseDnVecDescriptor(const Tensor& input); +}; + +class TORCH_CUDA_CPP_API CuSparseSpMatDescriptor + : public CuSparseDescriptor {}; + +#elif AT_USE_CUSPARSE_CONST_DESCRIPTORS() || AT_USE_HIPSPARSE_CONST_DESCRIPTORS() + class TORCH_CUDA_CPP_API CuSparseDnMatDescriptor + : public ConstCuSparseDescriptor< + cusparseDnMatDescr, + &cusparseDestroyDnMat> { + public: + explicit CuSparseDnMatDescriptor( + const Tensor& input, + int64_t batch_offset = -1); + }; + + class TORCH_CUDA_CPP_API CuSparseConstDnMatDescriptor + : public ConstCuSparseDescriptor< + const cusparseDnMatDescr, + &destroyConstDnMat> { + public: + explicit CuSparseConstDnMatDescriptor( + const Tensor& input, + int64_t batch_offset = -1); + cusparseDnMatDescr* unsafe_mutable_descriptor() const { + return const_cast(descriptor()); + } + cusparseDnMatDescr* unsafe_mutable_descriptor() { + return const_cast(descriptor()); + } + }; + + class TORCH_CUDA_CPP_API CuSparseDnVecDescriptor + : public ConstCuSparseDescriptor< + cusparseDnVecDescr, + &cusparseDestroyDnVec> { + public: + explicit CuSparseDnVecDescriptor(const Tensor& input); + }; + + class TORCH_CUDA_CPP_API CuSparseSpMatDescriptor + : public ConstCuSparseDescriptor< + cusparseSpMatDescr, + &cusparseDestroySpMat> {}; +#endif // AT_USE_CUSPARSE_CONST_DESCRIPTORS() || AT_USE_HIPSPARSE_CONST_DESCRIPTORS() + +class TORCH_CUDA_CPP_API CuSparseSpMatCsrDescriptor + : public CuSparseSpMatDescriptor { + public: + explicit CuSparseSpMatCsrDescriptor(const Tensor& input, int64_t batch_offset = -1); + + std::tuple get_size() { + int64_t rows, cols, nnz; + TORCH_CUDASPARSE_CHECK(cusparseSpMatGetSize( + this->descriptor(), + &rows, + &cols, + 
&nnz)); + return std::make_tuple(rows, cols, nnz); + } + + void set_tensor(const Tensor& input) { + auto crow_indices = input.crow_indices(); + auto col_indices = input.col_indices(); + auto values = input.values(); + + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(crow_indices.is_contiguous()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(col_indices.is_contiguous()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.is_contiguous()); + TORCH_CUDASPARSE_CHECK(cusparseCsrSetPointers( + this->descriptor(), + crow_indices.data_ptr(), + col_indices.data_ptr(), + values.data_ptr())); + } + +#if AT_USE_CUSPARSE_GENERIC_SPSV() + void set_mat_fill_mode(bool upper) { + cusparseFillMode_t fill_mode = + upper ? CUSPARSE_FILL_MODE_UPPER : CUSPARSE_FILL_MODE_LOWER; + TORCH_CUDASPARSE_CHECK(cusparseSpMatSetAttribute( + this->descriptor(), + CUSPARSE_SPMAT_FILL_MODE, + &fill_mode, + sizeof(fill_mode))); + } + + void set_mat_diag_type(bool unit) { + cusparseDiagType_t diag_type = + unit ? CUSPARSE_DIAG_TYPE_UNIT : CUSPARSE_DIAG_TYPE_NON_UNIT; + TORCH_CUDASPARSE_CHECK(cusparseSpMatSetAttribute( + this->descriptor(), + CUSPARSE_SPMAT_DIAG_TYPE, + &diag_type, + sizeof(diag_type))); + } +#endif +}; + +#if AT_USE_CUSPARSE_GENERIC_SPSV() +class TORCH_CUDA_CPP_API CuSparseSpSVDescriptor + : public CuSparseDescriptor { + public: + CuSparseSpSVDescriptor() { + cusparseSpSVDescr_t raw_descriptor; + TORCH_CUDASPARSE_CHECK(cusparseSpSV_createDescr(&raw_descriptor)); + descriptor_.reset(raw_descriptor); + } +}; +#endif + +#if AT_USE_CUSPARSE_GENERIC_SPSM() +class TORCH_CUDA_CPP_API CuSparseSpSMDescriptor + : public CuSparseDescriptor { + public: + CuSparseSpSMDescriptor() { + cusparseSpSMDescr_t raw_descriptor; + TORCH_CUDASPARSE_CHECK(cusparseSpSM_createDescr(&raw_descriptor)); + descriptor_.reset(raw_descriptor); + } +}; +#endif + +#if (defined(USE_ROCM) && ROCM_VERSION >= 50200) || !defined(USE_ROCM) +class TORCH_CUDA_CPP_API CuSparseSpGEMMDescriptor + : public CuSparseDescriptor { + public: + CuSparseSpGEMMDescriptor() { + cusparseSpGEMMDescr_t raw_descriptor; + TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_createDescr(&raw_descriptor)); + descriptor_.reset(raw_descriptor); + } +}; +#endif + +#endif // AT_USE_CUSPARSE_GENERIC_API() || AT_USE_HIPSPARSE_GENERIC_API() + +} // namespace at::cuda::sparse diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CachingHostAllocator.h b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CachingHostAllocator.h new file mode 100644 index 0000000000000000000000000000000000000000..65ad7f7d16e24bbc46af7cacc1c8233436109be2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/CachingHostAllocator.h @@ -0,0 +1,37 @@ +#pragma once + +#include +#include + +namespace at::cuda { + +// +// A caching allocator for CUDA host allocations (pinned memory). +// +// This provides a drop-in replacement for THCudaHostAllocator, which re-uses +// freed pinned (page-locked) memory allocations. This avoids device +// synchronizations due to cudaFreeHost calls. +// +// To ensure correct behavior, THCCachingHostAllocator_recordEvent must be +// called anytime a pointer from this allocator is used in a cudaMemcpyAsync +// call between host and device, and passed the corresponding context from the +// allocation. This is currently invoked by at::native::copy_kernel_cuda. +// +// Note that this allocator does not split larger allocations into smaller +// blocks, unlike the caching device allocator. 
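+// Illustrative sketch (not from the original header) of the intended pattern;
+// `dst`, `nbytes` and `stream` are hypothetical:
+//
+//   at::DataPtr pinned = at::cuda::HostAlloc(nbytes);
+//   AT_CUDA_CHECK(cudaMemcpyAsync(dst, pinned.get(), nbytes,
+//                                 cudaMemcpyHostToDevice, stream));
+//   // Keep the block out of the free pool until the copy has completed:
+//   at::cuda::CachingHostAllocator_recordEvent(pinned.get(), pinned.get_context(), stream);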
+// +TORCH_CUDA_CPP_API c10::Allocator* getCachingHostAllocator(); + +// Records an event in the specified stream. The allocation corresponding to the +// input `ptr`/`ctx` will not be re-used until the event has occurred. +TORCH_CUDA_CPP_API bool +CachingHostAllocator_recordEvent(void* ptr, void* ctx, c10::cuda::CUDAStream stream); + +// Releases cached pinned memory allocations via cudaHostFree +TORCH_CUDA_CPP_API void CachingHostAllocator_emptyCache(); + +inline TORCH_CUDA_CPP_API at::DataPtr HostAlloc(size_t size) { + return getCachingHostAllocator()->allocate(size); +} + +} // namespace at::cuda diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/Exceptions.h b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/Exceptions.h new file mode 100644 index 0000000000000000000000000000000000000000..c647bc2531b4bb624430ea454805197f68bfca0d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/Exceptions.h @@ -0,0 +1,174 @@ +#pragma once + +#include +#include +#include + +#ifdef CUDART_VERSION +#include +#endif + +#include +#include +#include + + +namespace c10 { + +class CuDNNError : public c10::Error { + using Error::Error; +}; + +} // namespace c10 + +#define AT_CUDNN_FRONTEND_CHECK(EXPR, ...) \ + do { \ + auto error_object = EXPR; \ + if (!error_object.is_good()) { \ + TORCH_CHECK_WITH(CuDNNError, false, \ + "cuDNN Frontend error: ", error_object.get_message()); \ + } \ + } while (0) \ + +#define AT_CUDNN_CHECK_WITH_SHAPES(EXPR, ...) AT_CUDNN_CHECK(EXPR, "\n", ##__VA_ARGS__) + +// See Note [CHECK macro] +#define AT_CUDNN_CHECK(EXPR, ...) \ + do { \ + cudnnStatus_t status = EXPR; \ + if (status != CUDNN_STATUS_SUCCESS) { \ + if (status == CUDNN_STATUS_NOT_SUPPORTED) { \ + TORCH_CHECK_WITH(CuDNNError, false, \ + "cuDNN error: ", \ + cudnnGetErrorString(status), \ + ". This error may appear if you passed in a non-contiguous input.", ##__VA_ARGS__); \ + } else { \ + TORCH_CHECK_WITH(CuDNNError, false, \ + "cuDNN error: ", cudnnGetErrorString(status), ##__VA_ARGS__); \ + } \ + } \ + } while (0) + +namespace at::cuda::blas { +C10_EXPORT const char* _cublasGetErrorEnum(cublasStatus_t error); +} // namespace at::cuda::blas + +#define TORCH_CUDABLAS_CHECK(EXPR) \ + do { \ + cublasStatus_t __err = EXPR; \ + TORCH_CHECK(__err == CUBLAS_STATUS_SUCCESS, \ + "CUDA error: ", \ + at::cuda::blas::_cublasGetErrorEnum(__err), \ + " when calling `" #EXPR "`"); \ + } while (0) + +const char *cusparseGetErrorString(cusparseStatus_t status); + +#define TORCH_CUDASPARSE_CHECK(EXPR) \ + do { \ + cusparseStatus_t __err = EXPR; \ + TORCH_CHECK(__err == CUSPARSE_STATUS_SUCCESS, \ + "CUDA error: ", \ + cusparseGetErrorString(__err), \ + " when calling `" #EXPR "`"); \ + } while (0) + +// cusolver related headers are only supported on cuda now +#ifdef CUDART_VERSION + +namespace at::cuda::solver { +C10_EXPORT const char* cusolverGetErrorMessage(cusolverStatus_t status); + +constexpr const char* _cusolver_backend_suggestion = \ + "If you keep seeing this error, you may use " \ + "`torch.backends.cuda.preferred_linalg_library()` to try " \ + "linear algebra operators with other supported backends. " \ + "See https://pytorch.org/docs/stable/backends.html#torch.backends.cuda.preferred_linalg_library"; + +} // namespace at::cuda::solver + +// When cuda < 11.5, cusolver raises CUSOLVER_STATUS_EXECUTION_FAILED when input contains nan. +// When cuda >= 11.5, cusolver normally finishes execution and sets info array indicating convergence issue. 
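+// Illustrative sketch (not from the original header) of how these CHECK
+// macros are meant to wrap library calls:
+//
+//   auto handle = at::cuda::getCurrentCUDASolverDnHandle();
+//   TORCH_CUSOLVER_CHECK(cusolverDnSetStream(handle, at::cuda::getCurrentCUDAStream()));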
+#define TORCH_CUSOLVER_CHECK(EXPR) \ + do { \ + cusolverStatus_t __err = EXPR; \ + if ((CUDA_VERSION < 11500 && \ + __err == CUSOLVER_STATUS_EXECUTION_FAILED) || \ + (CUDA_VERSION >= 11500 && \ + __err == CUSOLVER_STATUS_INVALID_VALUE)) { \ + TORCH_CHECK_LINALG( \ + false, \ + "cusolver error: ", \ + at::cuda::solver::cusolverGetErrorMessage(__err), \ + ", when calling `" #EXPR "`", \ + ". This error may appear if the input matrix contains NaN. ", \ + at::cuda::solver::_cusolver_backend_suggestion); \ + } else { \ + TORCH_CHECK( \ + __err == CUSOLVER_STATUS_SUCCESS, \ + "cusolver error: ", \ + at::cuda::solver::cusolverGetErrorMessage(__err), \ + ", when calling `" #EXPR "`. ", \ + at::cuda::solver::_cusolver_backend_suggestion); \ + } \ + } while (0) + +#else +#define TORCH_CUSOLVER_CHECK(EXPR) EXPR +#endif + +#define AT_CUDA_CHECK(EXPR) C10_CUDA_CHECK(EXPR) + +// For CUDA Driver API +// +// This is here instead of in c10 because NVRTC is loaded dynamically via a stub +// in ATen, and we need to use its nvrtcGetErrorString. +// See NOTE [ USE OF NVRTC AND DRIVER API ]. +#if !defined(USE_ROCM) + +#define AT_CUDA_DRIVER_CHECK(EXPR) \ + do { \ + CUresult __err = EXPR; \ + if (__err != CUDA_SUCCESS) { \ + const char* err_str; \ + CUresult get_error_str_err C10_UNUSED = at::globalContext().getNVRTC().cuGetErrorString(__err, &err_str); \ + if (get_error_str_err != CUDA_SUCCESS) { \ + AT_ERROR("CUDA driver error: unknown error"); \ + } else { \ + AT_ERROR("CUDA driver error: ", err_str); \ + } \ + } \ + } while (0) + +#else + +#define AT_CUDA_DRIVER_CHECK(EXPR) \ + do { \ + CUresult __err = EXPR; \ + if (__err != CUDA_SUCCESS) { \ + AT_ERROR("CUDA driver error: ", static_cast(__err)); \ + } \ + } while (0) + +#endif + +// For CUDA NVRTC +// +// Note: As of CUDA 10, nvrtc error code 7, NVRTC_ERROR_BUILTIN_OPERATION_FAILURE, +// incorrectly produces the error string "NVRTC unknown error." +// The following maps it correctly. +// +// This is here instead of in c10 because NVRTC is loaded dynamically via a stub +// in ATen, and we need to use its nvrtcGetErrorString. +// See NOTE [ USE OF NVRTC AND DRIVER API ]. +#define AT_CUDA_NVRTC_CHECK(EXPR) \ + do { \ + nvrtcResult __err = EXPR; \ + if (__err != NVRTC_SUCCESS) { \ + if (static_cast(__err) != 7) { \ + AT_ERROR("CUDA NVRTC error: ", at::globalContext().getNVRTC().nvrtcGetErrorString(__err)); \ + } else { \ + AT_ERROR("CUDA NVRTC error: NVRTC_ERROR_BUILTIN_OPERATION_FAILURE"); \ + } \ + } \ + } while (0) diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/NumericLimits.cuh b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/NumericLimits.cuh new file mode 100644 index 0000000000000000000000000000000000000000..7081e94837caa7d5050128e0bfe19aa67f93cd39 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/NumericLimits.cuh @@ -0,0 +1,121 @@ +#pragma once + +#include +#include +#include +#include + +// NumericLimits.cuh is a holder for numeric limits definitions of commonly used +// types. This header is very specific to ROCm HIP and may be removed in the future. +// This header is derived from the legacy THCNumerics.cuh. + +// The lower_bound and upper_bound constants are same as lowest and max for +// integral types, but are -inf and +inf for floating point types. They are +// useful in implementing min, max, etc. + +namespace at { + +template +struct numeric_limits { +}; + +// WARNING: the following at::numeric_limits definitions are there only to support +// HIP compilation for the moment. 
Use std::numeric_limits if you are not +// compiling for ROCm. +// from @colesbury: "The functions on numeric_limits aren't marked with +// __device__ which is why they don't work with ROCm. CUDA allows them +// because they're constexpr." + +namespace { + // ROCm doesn't like INFINITY too. + constexpr double inf = INFINITY; +} + +template <> +struct numeric_limits { + static inline __host__ __device__ bool lowest() { return false; } + static inline __host__ __device__ bool max() { return true; } + static inline __host__ __device__ bool lower_bound() { return false; } + static inline __host__ __device__ bool upper_bound() { return true; } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ uint8_t lowest() { return 0; } + static inline __host__ __device__ uint8_t max() { return UINT8_MAX; } + static inline __host__ __device__ uint8_t lower_bound() { return 0; } + static inline __host__ __device__ uint8_t upper_bound() { return UINT8_MAX; } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ int8_t lowest() { return INT8_MIN; } + static inline __host__ __device__ int8_t max() { return INT8_MAX; } + static inline __host__ __device__ int8_t lower_bound() { return INT8_MIN; } + static inline __host__ __device__ int8_t upper_bound() { return INT8_MAX; } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ int16_t lowest() { return INT16_MIN; } + static inline __host__ __device__ int16_t max() { return INT16_MAX; } + static inline __host__ __device__ int16_t lower_bound() { return INT16_MIN; } + static inline __host__ __device__ int16_t upper_bound() { return INT16_MAX; } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ int32_t lowest() { return INT32_MIN; } + static inline __host__ __device__ int32_t max() { return INT32_MAX; } + static inline __host__ __device__ int32_t lower_bound() { return INT32_MIN; } + static inline __host__ __device__ int32_t upper_bound() { return INT32_MAX; } +}; + +template <> +struct numeric_limits { +#ifdef _MSC_VER + static inline __host__ __device__ int64_t lowest() { return _I64_MIN; } + static inline __host__ __device__ int64_t max() { return _I64_MAX; } + static inline __host__ __device__ int64_t lower_bound() { return _I64_MIN; } + static inline __host__ __device__ int64_t upper_bound() { return _I64_MAX; } +#else + static inline __host__ __device__ int64_t lowest() { return INT64_MIN; } + static inline __host__ __device__ int64_t max() { return INT64_MAX; } + static inline __host__ __device__ int64_t lower_bound() { return INT64_MIN; } + static inline __host__ __device__ int64_t upper_bound() { return INT64_MAX; } +#endif +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ at::Half lowest() { return at::Half(0xFBFF, at::Half::from_bits()); } + static inline __host__ __device__ at::Half max() { return at::Half(0x7BFF, at::Half::from_bits()); } + static inline __host__ __device__ at::Half lower_bound() { return at::Half(0xFC00, at::Half::from_bits()); } + static inline __host__ __device__ at::Half upper_bound() { return at::Half(0x7C00, at::Half::from_bits()); } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ at::BFloat16 lowest() { return at::BFloat16(0xFF7F, at::BFloat16::from_bits()); } + static inline __host__ __device__ at::BFloat16 max() { return at::BFloat16(0x7F7F, at::BFloat16::from_bits()); } + static inline __host__ __device__ at::BFloat16 lower_bound() { return 
at::BFloat16(0xFF80, at::BFloat16::from_bits()); } + static inline __host__ __device__ at::BFloat16 upper_bound() { return at::BFloat16(0x7F80, at::BFloat16::from_bits()); } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ float lowest() { return -FLT_MAX; } + static inline __host__ __device__ float max() { return FLT_MAX; } + static inline __host__ __device__ float lower_bound() { return -static_cast(inf); } + static inline __host__ __device__ float upper_bound() { return static_cast(inf); } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ double lowest() { return -DBL_MAX; } + static inline __host__ __device__ double max() { return DBL_MAX; } + static inline __host__ __device__ double lower_bound() { return -inf; } + static inline __host__ __device__ double upper_bound() { return inf; } +}; + +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/PeerToPeerAccess.h b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/PeerToPeerAccess.h new file mode 100644 index 0000000000000000000000000000000000000000..1abf1dcfc12447d7226df19701486cd6bc2affee --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/PeerToPeerAccess.h @@ -0,0 +1,11 @@ +#include +#include + +namespace at::cuda { +namespace detail { +void init_p2p_access_cache(int64_t num_devices); +} + +TORCH_CUDA_CPP_API bool get_p2p_access(int source_dev, int dest_dev); + +} // namespace at::cuda diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/PinnedMemoryAllocator.h b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/PinnedMemoryAllocator.h new file mode 100644 index 0000000000000000000000000000000000000000..854f5d8dd1297e3b99fe347358291c9a97c37e1e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/PinnedMemoryAllocator.h @@ -0,0 +1,11 @@ +#pragma once + +#include +#include + +namespace at::cuda { + +inline TORCH_CUDA_CPP_API at::Allocator* getPinnedMemoryAllocator() { + return getCachingHostAllocator(); +} +} // namespace at::cuda diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/Sleep.h b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/Sleep.h new file mode 100644 index 0000000000000000000000000000000000000000..d31bf68ccafba92a242894e8f426329693212f5c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/Sleep.h @@ -0,0 +1,10 @@ +#pragma once +#include +#include + +namespace at::cuda { + +// enqueues a kernel that spins for the specified number of cycles +TORCH_CUDA_CU_API void sleep(int64_t cycles); + +} // namespace at::cuda diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/ThrustAllocator.h b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/ThrustAllocator.h new file mode 100644 index 0000000000000000000000000000000000000000..85783c303534ec9f6190c2d6bb44a8faafd680f7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/ThrustAllocator.h @@ -0,0 +1,23 @@ +#pragma once + +#include +#include + +namespace at::cuda { + +/// Allocator for Thrust to re-route its internal device allocations +/// to the THC allocator +class ThrustAllocator { +public: + typedef char value_type; + + char* allocate(std::ptrdiff_t size) { + return static_cast(c10::cuda::CUDACachingAllocator::raw_alloc(size)); + } + + void deallocate(char* p, size_t size) { + c10::cuda::CUDACachingAllocator::raw_delete(p); + } +}; + +} // namespace at::cuda diff --git 
a/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.cuh b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.cuh new file mode 100644 index 0000000000000000000000000000000000000000..062c365a4e1a9af6160cb01d487d6c7a37483286 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.cuh @@ -0,0 +1,413 @@ +#pragma once +#include + +#include +#include +#include +#include + +#include + +#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE() + +#include + +#else + +// include cub in a safe manner, see: +// https://github.com/pytorch/pytorch/pull/55292 +#undef CUB_NS_POSTFIX //undef to avoid redefinition warnings +#undef CUB_NS_PREFIX +#undef CUB_NS_QUALIFIER +#define CUB_NS_PREFIX namespace at_cuda_detail { +#define CUB_NS_POSTFIX } +#define CUB_NS_QUALIFIER ::at_cuda_detail::cub +#include +#undef CUB_NS_POSTFIX +#undef CUB_NS_PREFIX +#undef CUB_NS_QUALIFIER + +#endif + +#include +#include +#include + +// handle the temporary storage and 'twice' calls for cub API +#define CUB_WRAPPER(func, ...) do { \ + size_t temp_storage_bytes = 0; \ + func(nullptr, temp_storage_bytes, __VA_ARGS__); \ + auto& caching_allocator = *::c10::cuda::CUDACachingAllocator::get(); \ + auto temp_storage = caching_allocator.allocate(temp_storage_bytes); \ + func(temp_storage.get(), temp_storage_bytes, __VA_ARGS__); \ + AT_CUDA_CHECK(cudaGetLastError()); \ +} while (false) + +#ifdef USE_ROCM +#define NO_ROCM(x) +#define ROCM_HIPCUB(x) ::hipcub +#else +#define NO_ROCM(x) x +#define ROCM_HIPCUB(x) x +#endif + +#if (!defined(USE_ROCM) && !CUB_SUPPORTS_NV_BFLOAT16()) || \ + (defined(USE_ROCM) && ROCM_VERSION >= 40500) + +#if !defined(USE_ROCM) +namespace at_cuda_detail { +#endif + +// backport https://github.com/NVIDIA/cub/pull/306 for c10::BFloat16 + +template <> +struct ROCM_HIPCUB(cub)::FpLimits +{ + static __host__ __device__ __forceinline__ c10::BFloat16 Max() { + unsigned short max_word = 0x7F7F; + return reinterpret_cast(max_word); + } + + static __host__ __device__ __forceinline__ c10::BFloat16 Lowest() { + unsigned short lowest_word = 0xFF7F; + return reinterpret_cast(lowest_word); + } +}; + +template <> +struct ROCM_HIPCUB(cub)::NumericTraits: + ROCM_HIPCUB(cub)::BaseTraits {}; + +#if !defined(USE_ROCM) +} // namespace at_cuda_detail +#endif + +#endif + +#if !defined(USE_ROCM) +namespace at::native { +namespace cub = ::at_cuda_detail::cub; +} // namespace at::native +#endif + +namespace at::cuda::cub { + +namespace detail { + +template +struct cuda_type { + using type = T; +}; +template<> +struct cuda_type { + using type = __half; +}; + +#if !defined(USE_ROCM) && CUB_SUPPORTS_NV_BFLOAT16() + +template<> +struct cuda_type { + using type = __nv_bfloat16; +}; + +#elif (defined(USE_ROCM) && ROCM_VERSION >= 40500) + +template<> +struct cuda_type { + using type = hip_bfloat16; +}; + +#endif + +} // namespace detail + +template +inline void segmented_sort_pairs( + const key_t *keys_in, key_t *keys_out, + const value_t *values_in, value_t *values_out, + int64_t num_elements, int64_t num_segments, + OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, + bool descending=false, int64_t begin_bit=0, int64_t end_bit=sizeof(key_t)*8 +) { + TORCH_CHECK(num_elements <= std::numeric_limits::max(), + "cub sort does not support sorting more than INT_MAX elements"); + TORCH_CHECK(num_segments <= std::numeric_limits::max(), + "cub sort does not support sorting more than INT_MAX elements"); + using key_t_ = typename detail::cuda_type::type; + + auto allocator = c10::cuda::CUDACachingAllocator::get(); + 
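+  // If the caller passed keys_out == nullptr, allocate a scratch buffer from
+  // the caching device allocator so cub still has somewhere to write the
+  // sorted keys; the DataPtr below keeps that buffer alive until this call
+  // returns.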
c10::DataPtr keys_out_owner; + + if (keys_out == nullptr) { + keys_out_owner = allocator->allocate(num_elements * sizeof(key_t)); + keys_out = reinterpret_cast(keys_out_owner.get()); + } + + const key_t_ *keys_in_ = reinterpret_cast(keys_in); + key_t_ *keys_out_ = reinterpret_cast(keys_out); + + if (descending) { + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSegmentedRadixSort::SortPairsDescending, + keys_in_, keys_out_, values_in, values_out, + num_elements, num_segments, begin_offsets, end_offsets, + begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); + } else { + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSegmentedRadixSort::SortPairs, + keys_in_, keys_out_, values_in, values_out, + num_elements, num_segments, begin_offsets, end_offsets, + begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); + } +} + +#if CUB_SUPPORTS_UNIQUE_BY_KEY() +template +inline void unique_by_key( + KeysInputIteratorT keys_in, ValuesInputIteratorT values_in, + KeysOutputIteratorT keys_out, ValuesOutputIteratorT values_out, + NumSelectedIteratorT num_selected, int64_t num_input_items) +{ + // TODO: use thrust::discard_iterator to handle null keys_out when https://github.com/NVIDIA/cub/issues/406 is fixed. + constexpr bool null_keys_out = std::is_same::value; + using KeyT = typename std::iterator_traits::value_type; + using RealKeysOutputIteratorT = typename std::conditional::type; + RealKeysOutputIteratorT keys_out_; + auto allocator = c10::cuda::CUDACachingAllocator::get(); + c10::DataPtr keys_out_owner; + if constexpr (null_keys_out) { + keys_out_owner = allocator->allocate(num_input_items * sizeof(KeyT)); + keys_out_ = static_cast(keys_out_owner.get()); + } else { + keys_out_ = keys_out; + } + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSelect::UniqueByKey, + keys_in, values_in, keys_out_, values_out, num_selected, num_input_items, c10::cuda::getCurrentCUDAStream()); +} +#endif + +namespace impl { + +template +C10_LAUNCH_BOUNDS_1(1) +__global__ void transform_vals(InputIteratorT1 a, InputIteratorT2 b, OutputIteratorT out, ScanOpT scan_op){ + // NOTE: out here not the final scan output, but an intermediate of the accumulation type. 
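+  // Launched <<<1, 1>>>: a single thread folds the carry from the previous
+  // chunk (*a) into the next chunk's first element (*b) and stores the result
+  // in a device-side scratch value, so the chunked scans below never
+  // round-trip through the host.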
+ using acc_t = typename std::iterator_traits::value_type; + *out = scan_op(static_cast(*a), static_cast(*b)); +} + +#if !CUB_SUPPORTS_FUTURE_VALUE() +template +struct chained_iterator { + using iterator_category = std::random_access_iterator_tag; + using difference_type = std::ptrdiff_t; + using value_type = ValueT; + using pointer = ValueT*; + using reference = ValueT&; + + InputIteratorT iter; + ValueT *first; + difference_type offset = 0; + + __device__ ValueT operator[](difference_type i) { + i += offset; + if (i == 0) { + return *first; + } else { + return ValueT(iter[i - 1]); + } + } + __device__ chained_iterator operator+(difference_type i) { + return chained_iterator{iter, first, i}; + } + __device__ ValueT operator*() { + return (*this)[0]; + } +}; +#endif + +// even though cub is supposed to support tensors with int_max elements, in reality it doesn't, +// so split at int_max/2 +constexpr int max_cub_size = std::numeric_limits::max() / 2 + 1; // 2**30 +} + +// non synchronizing cub call +// even though cub is supposed to support tensors with int_max elements, in reality it doesn't, +// so split at int_max/2 +template +inline void inclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT scan_op, int64_t num_items) { +#if defined(USE_ROCM) && (ROCM_VERSION >= 50000) + //For ROCm, use hipCUB chained iterators + CUB_WRAPPER(NO_ROCM(detail)::hipcub::DeviceScan::InclusiveScan, + input, + output, + scan_op, + num_items, + at::cuda::getCurrentCUDAStream()); + C10_HIP_KERNEL_LAUNCH_CHECK(); +#else + // non synchronizing cub call + // even though cub is supposed to support tensors with int_max elements, in reality it doesn't, + // so split at int_max/2 + int size_cub = std::min(num_items, max_cub_size); + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, + input, + output, + scan_op, + size_cub, + at::cuda::getCurrentCUDAStream()); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + using input_t = typename std::iterator_traits::value_type; + for (int64_t i = max_cub_size; i < num_items; i += max_cub_size) { + auto allocator = c10::cuda::CUDACachingAllocator::get(); + c10::DataPtr first_elem = allocator->allocate(sizeof(input_t)); + auto first_elem_ptr = reinterpret_cast(first_elem.get()); + + size_cub = std::min(num_items - i, max_cub_size); + impl::transform_vals<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>( + output + i - 1, + input + i, + first_elem_ptr, + scan_op); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +#if !CUB_SUPPORTS_FUTURE_VALUE() + using ArgIndexInputIterator = NO_ROCM(at_cuda_detail)::cub::ArgIndexInputIterator; + using tuple = typename ArgIndexInputIterator::value_type; + auto input_iter_transform = [=] __device__ (const tuple &x)->input_t { + if (x.key == 0) { + return *first_elem_ptr; + } else { + return x.value; + } + }; + auto input_ = NO_ROCM(at_cuda_detail)::cub::TransformInputIterator( + ArgIndexInputIterator(input + i), input_iter_transform); + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, + input_, + output + i, + scan_op, + size_cub, + at::cuda::getCurrentCUDAStream()); +#else + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan, + input + i + 1, + output + i, + scan_op, + ::at_cuda_detail::cub::FutureValue(first_elem_ptr), + size_cub, + at::cuda::getCurrentCUDAStream()); +#endif + } +#endif +} + +template +inline void exclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT scan_op, InitValueT init_value, int64_t num_items) { +#if defined(USE_ROCM) && (ROCM_VERSION >= 50000) + //For ROCm, use hipCUB 
chained iterators + CUB_WRAPPER(NO_ROCM(detail)::hipcub::DeviceScan::ExclusiveScan, + input, + output, + scan_op, + init_value, + num_items, + at::cuda::getCurrentCUDAStream()); + C10_HIP_KERNEL_LAUNCH_CHECK(); +#else + // non synchronizing cub call + // even though cub is supposed to support tensors with int_max elements, in reality it doesn't, + // so split at int_max/2 + int size_cub = std::min(num_items, max_cub_size); + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan, + input, + output, + scan_op, + init_value, + size_cub, + at::cuda::getCurrentCUDAStream()); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + for (int64_t i = max_cub_size; i < num_items; i += max_cub_size) { + auto allocator = c10::cuda::CUDACachingAllocator::get(); + c10::DataPtr first_elem = allocator->allocate(sizeof(InitValueT)); + auto first_elem_ptr = reinterpret_cast(first_elem.get()); + + size_cub = std::min(num_items - i, max_cub_size); + impl::transform_vals<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>( + output + i - 1, + input + i - 1, + first_elem_ptr, + scan_op); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +#if !CUB_SUPPORTS_FUTURE_VALUE() + auto input_ = impl::chained_iterator{ + input + i, first_elem_ptr}; + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, + input_, + output + i, + scan_op, + size_cub, + at::cuda::getCurrentCUDAStream()); +#else + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan, + input + i, + output + i, + scan_op, + ::at_cuda_detail::cub::FutureValue(first_elem_ptr), + size_cub, + at::cuda::getCurrentCUDAStream()); +#endif + } +#endif +} + +#if CUB_SUPPORTS_SCAN_BY_KEY() + +template +inline void inclusive_sum_by_key(KeysInputIteratorT keys, ValuesInputIteratorT input, ValuesOutputIteratorT output, int64_t num_items) { + TORCH_CHECK(num_items <= std::numeric_limits::max(), + "cub InclusiveSumByKey does not support more than INT_MAX elements"); + CUB_WRAPPER(at_cuda_detail::cub::DeviceScan::InclusiveSumByKey, + keys, input, output, num_items, at_cuda_detail::cub::Equality(), at::cuda::getCurrentCUDAStream()); +} + +template +inline void inclusive_scan_by_key(KeysInputIteratorT keys, ValuesInputIteratorT input, ValuesOutputIteratorT output, ScanOpT scan_op, int64_t num_items) { + TORCH_CHECK(num_items <= std::numeric_limits::max(), + "cub InclusiveSumByKey does not support more than INT_MAX elements"); + CUB_WRAPPER(at_cuda_detail::cub::DeviceScan::InclusiveScanByKey, + keys, input, output, scan_op, num_items, at_cuda_detail::cub::Equality(), at::cuda::getCurrentCUDAStream()); +} + +#endif + +template +void unique(InputIteratorT input, OutputIteratorT output, + NumSelectedIteratorT num_selected_out, int64_t num_items) { + TORCH_CHECK(num_items <= std::numeric_limits::max(), + "cub unique does not support more than INT_MAX elements"); + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSelect::Unique, + input, output, num_selected_out, num_items, at::cuda::getCurrentCUDAStream()); +} + +template +void run_length_encode(InputIteratorT input, OutputIteratorT output, CountsOutputIteratorT counts_out, + LengthOutputIteratorT length_out, int64_t num_items) { + TORCH_CHECK(num_items <= std::numeric_limits::max(), + "cub run_length_encode does not support more than INT_MAX elements"); + CUB_WRAPPER( + NO_ROCM(at_cuda_detail)::cub::DeviceRunLengthEncode::Encode, + input, output, counts_out, length_out, num_items, + at::cuda::getCurrentCUDAStream()); +} + +template +void reduce(InputIteratorT input, OutputIteratorT output, int64_t num_items, ReductionOpT op, T 
init) { + TORCH_CHECK(num_items <= std::numeric_limits::max(), + "cub reduce does not support more than INT_MAX elements"); + CUB_WRAPPER( + NO_ROCM(at_cuda_detail)::cub::DeviceReduce::Reduce, + input, output, num_items, op, init, + at::cuda::getCurrentCUDAStream()); + +} + +} // namespace at::cuda::cub diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/cub_definitions.cuh b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/cub_definitions.cuh new file mode 100644 index 0000000000000000000000000000000000000000..55b2f21dd1b48c77ccb96ce7de82f9a28ba3725a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/cub_definitions.cuh @@ -0,0 +1,53 @@ +#pragma once + +#if !defined(USE_ROCM) +#include // for CUDA_VERSION +#endif + +#if !defined(USE_ROCM) +#include +#else +#define CUB_VERSION 0 +#endif + +// cub sort support for __nv_bfloat16 is added to cub 1.13 in: +// https://github.com/NVIDIA/cub/pull/306 +#if CUB_VERSION >= 101300 +#define CUB_SUPPORTS_NV_BFLOAT16() true +#else +#define CUB_SUPPORTS_NV_BFLOAT16() false +#endif + +// cub support for CUB_WRAPPED_NAMESPACE is added to cub 1.13.1 in: +// https://github.com/NVIDIA/cub/pull/326 +// CUB_WRAPPED_NAMESPACE is defined globally in cmake/Dependencies.cmake +// starting from CUDA 11.5 +#if defined(CUB_WRAPPED_NAMESPACE) || defined(THRUST_CUB_WRAPPED_NAMESPACE) +#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() true +#else +#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false +#endif + +// cub support for UniqueByKey is added to cub 1.16 in: +// https://github.com/NVIDIA/cub/pull/405 +#if CUB_VERSION >= 101600 +#define CUB_SUPPORTS_UNIQUE_BY_KEY() true +#else +#define CUB_SUPPORTS_UNIQUE_BY_KEY() false +#endif + +// cub support for scan by key is added to cub 1.15 +// in https://github.com/NVIDIA/cub/pull/376 +#if CUB_VERSION >= 101500 +#define CUB_SUPPORTS_SCAN_BY_KEY() 1 +#else +#define CUB_SUPPORTS_SCAN_BY_KEY() 0 +#endif + +// cub support for cub::FutureValue is added to cub 1.15 in: +// https://github.com/NVIDIA/cub/pull/305 +#if CUB_VERSION >= 101500 +#define CUB_SUPPORTS_FUTURE_VALUE() true +#else +#define CUB_SUPPORTS_FUTURE_VALUE() false +#endif diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator_impl.h b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..eef073fbd86b72cfd4283e73e4b5a711669e9c3d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator_impl.h @@ -0,0 +1,249 @@ +#pragma once +#include + +#if AT_USE_JITERATOR() + +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace at::native { + + +#define AT_FOR_8_CASES(_) \ + _(1) \ + _(2) \ + _(3) \ + _(4) \ + _(5) \ + _(6) \ + _(7) \ + _(8) + +#define AT_FOR_8_CASES_WITH_COMMA(_) \ + _(1) , \ + _(2) , \ + _(3) , \ + _(4) , \ + _(5) , \ + _(6) , \ + _(7) , \ + _(8) + +c10::SmallVector get_extra_args_typenames(const c10::SmallVector& extra_args) { + c10::SmallVector args_typenames(extra_args.size()); + for (const auto i : c10::irange(extra_args.size())) { + args_typenames[i] = at::cuda::jit::typeName(extra_args[i].type()); + } + return args_typenames; +} + +int can_vectorize_up_to(at::ScalarType type, char* pointer) { + switch(type) { +#define DEFINE_CASE(ctype, scalartype) \ + case ScalarType::scalartype : return memory::can_vectorize_up_to(pointer); + + AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_CASE) +#undef DEFINE_CASE + + default: 
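+      // Scalar types without a case generated by
+      // AT_FORALL_SCALAR_TYPES_WITH_COMPLEX above land here.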
TORCH_INTERNAL_ASSERT(false, "Unrecognized ScalarType: ", type); + } +} + +// jitted version of the above +// See Note [Jiterator], this relies on the assumptions enumerated there +int jitted_can_vectorize_up_to(const TensorIteratorBase& iter) { + const at::ScalarType common_dtype = iter.common_dtype(); + const at::ScalarType result_dtype = common_dtype; + + // Deals with output + int result = can_vectorize_up_to(result_dtype, static_cast(iter.data_ptr(0))); + + // Incorporates input(s) + for (auto i = 1; i < iter.ntensors(); ++i) { + result = std::min(result, can_vectorize_up_to(common_dtype, static_cast(iter.data_ptr(i)))); + } + + return result; +} + +template +static std::unique_ptr> make_unique_offset_calculator( + const TensorIteratorBase& iter) { + // array size can not be 0, this happens when N == 0 + constexpr int array_size = std::max(N, 1); + TORCH_INTERNAL_ASSERT(N == (IS_INPUT ? iter.ninputs() : iter.noutputs())); + + std::array strides; + int64_t element_sizes[array_size]; + for (int i = 0; i < N; i++) { + int index = IS_INPUT ? i + iter.noutputs() : i; + strides[i] = iter.strides(index).data(); + element_sizes[i] = iter.element_size(index); + } + return std::make_unique>(iter.ndim(), iter.shape().data(), strides.data(), element_sizes); +} + +template +struct OffsetCalculatorVariant { +#define DEFINE_CASE(index) std::unique_ptr> + using OffsetCalculatorTypes = std::variant< + AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE) + >; +#undef DEFINE_CASE + + OffsetCalculatorVariant(const TensorIteratorBase& iter) { + int num = IS_INPUT ? iter.ninputs() : iter.noutputs(); + + switch(num) { +#define DEFINE_CASE(index) \ + case index : v = make_unique_offset_calculator(iter); break; + + AT_FOR_8_CASES(DEFINE_CASE) +#undef DEFINE_CASE + default: + TORCH_CHECK(false, "OffsetCalculatorVariant is not implemented for num_tensor = ", num); + } + } + + void* data_ptr() { + return std::visit([](auto & v){ return static_cast(v.get()); }, v); + } + + private: + OffsetCalculatorTypes v; +}; + +struct ArrayVariant { +// works for up to 8 input + 8 outputs +#define DEFINE_CASE(index) at::detail::Array, at::detail::Array + using ArrayTypes = std::variant< + AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE) + >; +#undef DEFINE_CASE + + ArrayVariant(const TensorIteratorBase& iter) { + int ntensors = iter.ntensors(); + switch(ntensors) { +#define DEFINE_CASE(index) \ + case index: array = at::detail::Array{}; break; \ + case index+8: array = at::detail::Array{}; break; + + AT_FOR_8_CASES(DEFINE_CASE) +#undef DEFINE_CASE + + default: + TORCH_CHECK(false, "ArrayVariant is not implemented for ntensors = ", ntensors); + } + + std::visit([&](auto& a) { + for (auto i = 0; i < ntensors; ++i) { + a[i] = (char*)iter.data_ptr(i); + } + }, array); + } + + void* data_ptr() { + return std::visit([](auto & a){ return static_cast(&a); }, array); + } + +private: + ArrayTypes array; +}; + +struct TrivialOffsetCalculatorVariant { +#define DEFINE_CASE(index) TrivialOffsetCalculator + using TrivialOffsetCalculatorTypes = std::variant< + AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE) + >; +#undef DEFINE_CASE + + TrivialOffsetCalculatorVariant(int num) { + switch(num) { +#define DEFINE_CASE(index) \ + case index: v = TrivialOffsetCalculator(); break; + + AT_FOR_8_CASES(DEFINE_CASE) +#undef DEFINE_CASE + + default: + TORCH_CHECK(false, "TrivialOffsetCalculatorVariant is not implemented for num_tensors = ", num); + } + } + + void* data_ptr() { + return std::visit([](auto & v){ return static_cast(&v); }, v); + } + +private: + TrivialOffsetCalculatorTypes 
v; +}; + +struct LoadWithCastVariant { +#define DEFINE_CASE(index) std::unique_ptr> + using LoadWithCastPtr = std::variant< + AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE) + >; +#undef DEFINE_CASE + + LoadWithCastVariant(const TensorIteratorBase& iter) { + int arity = iter.ninputs(); + switch(arity) { +#define DEFINE_CASE(index) \ + case index: v = std::make_unique>(iter); break; + + AT_FOR_8_CASES(DEFINE_CASE) +#undef DEFINE_CASE + + default: + TORCH_CHECK(false, "LoadWithCastVariant is not implemented for ninputs = ", arity); + } + } + + void* data_ptr() { + return std::visit([](auto & v){ return static_cast(v.get()); }, v); + } + +private: + LoadWithCastPtr v; +}; + +struct StoreWithCastVariant { +#define DEFINE_CASE(index) std::unique_ptr> + using StoreWithCastPtr = std::variant< + AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE) + >; +#undef DEFINE_CASE + + StoreWithCastVariant(const TensorIteratorBase& iter) { + int num = iter.noutputs(); + switch(num) { +#define DEFINE_CASE(index) \ + case index: v = std::make_unique>(iter); break; + + AT_FOR_8_CASES(DEFINE_CASE) +#undef DEFINE_CASE + + default: + TORCH_CHECK(false, "StoreWithCastVariant is not implemented for noutputs = ", num); + } + } + + void* data_ptr() { + return std::visit([](auto & v){ return static_cast(v.get()); }, v); + } + +private: + StoreWithCastPtr v; +}; + +} // namespace at::native + + +#endif // AT_USE_JITERATOR() diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/llvm_jit_strings.h b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/llvm_jit_strings.h new file mode 100644 index 0000000000000000000000000000000000000000..aba40d4f42eac74ee9435d904ac3fb82d1e988c4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/cuda/llvm_jit_strings.h @@ -0,0 +1,14 @@ +#pragma once + +#include +#include + +namespace at::cuda { + +TORCH_CUDA_CPP_API const std::string &get_traits_string(); +TORCH_CUDA_CPP_API const std::string &get_cmath_string(); +TORCH_CUDA_CPP_API const std::string &get_complex_body_string(); +TORCH_CUDA_CPP_API const std::string &get_complex_half_body_string(); +TORCH_CUDA_CPP_API const std::string &get_complex_math_string(); + +} // namespace at::cuda diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/mps/EmptyTensor.h b/venv/lib/python3.10/site-packages/torch/include/ATen/mps/EmptyTensor.h new file mode 100644 index 0000000000000000000000000000000000000000..88a29547406cd144a2df80e1505e3fd433d0ee59 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/mps/EmptyTensor.h @@ -0,0 +1,29 @@ +// Copyright © 2022 Apple Inc. 
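Editorial note on the jiterator helpers above: OffsetCalculatorVariant, ArrayVariant, LoadWithCastVariant and StoreWithCastVariant all follow the same pattern of wrapping one template instantiation per supported tensor count in a std::variant, selecting the instantiation at runtime with a switch (expanded via the AT_FOR_8_CASES macros), and handing a type-erased pointer to the JIT launcher through std::visit. The following is a minimal stand-alone C++ sketch of that pattern only; the Calc/CalcVariant names and the reduced 1..4 arity range are illustrative and do not appear in the header.

#include <cstdio>
#include <memory>
#include <stdexcept>
#include <variant>

// Toy stand-in for the per-arity template family (e.g. OffsetCalculator<N>).
template <int N>
struct Calc {
  int arity = N;                                   // pretend per-arity state
  int compute(int linear) const { return linear * N; }
};

struct CalcVariant {
  // One std::variant alternative per supported arity (the real header spans
  // 1..8 via AT_FOR_8_CASES; four cases keep the sketch short).
  using Types = std::variant<std::unique_ptr<Calc<1>>, std::unique_ptr<Calc<2>>,
                             std::unique_ptr<Calc<3>>, std::unique_ptr<Calc<4>>>;

  explicit CalcVariant(int num) {
    // Runtime tensor count picks the compile-time instantiation, mirroring the
    // switch(num) { case index: ... } expansion in the header.
    switch (num) {
      case 1: v = std::make_unique<Calc<1>>(); break;
      case 2: v = std::make_unique<Calc<2>>(); break;
      case 3: v = std::make_unique<Calc<3>>(); break;
      case 4: v = std::make_unique<Calc<4>>(); break;
      default: throw std::runtime_error("unsupported arity");
    }
  }

  // Type-erased pointer, as passed to the jitted kernel launch.
  void* data_ptr() {
    return std::visit([](auto& p) -> void* { return static_cast<void*>(p.get()); }, v);
  }

 private:
  Types v;
};

int main() {
  CalcVariant calc(3);
  auto* raw = static_cast<Calc<3>*>(calc.data_ptr());  // caller knows the arity
  std::printf("offset = %d\n", raw->compute(5));       // prints 15
  return 0;
}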
+ +#pragma once +#include + +namespace at::detail { + +C10_EXPORT TensorBase empty_mps( + IntArrayRef size, + c10::optional dtype_opt, + c10::optional layout_opt, + c10::optional device_opt, + c10::optional pin_memory_opt, + c10::optional memory_format_opt); +C10_EXPORT TensorBase empty_mps( + IntArrayRef size, const TensorOptions &options); + +C10_EXPORT TensorBase empty_strided_mps( + IntArrayRef size, + IntArrayRef stride, + ScalarType dtype, + c10::optional device_opt); + +C10_EXPORT TensorBase empty_strided_mps( + IntArrayRef size, + IntArrayRef stride, + const TensorOptions &options); + +} // namespace at::detail diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/mps/IndexKernels.h b/venv/lib/python3.10/site-packages/torch/include/ATen/mps/IndexKernels.h new file mode 100644 index 0000000000000000000000000000000000000000..2b3cfae0c3eeaa4754c1fa97f2ee9c02efe6b2d4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/mps/IndexKernels.h @@ -0,0 +1,630 @@ +#pragma once + +namespace at::mps { + +static const char * indexing_metal_shaders = R"INDEX_METAL( +#include +#include + +using namespace metal; + +#if __METAL_VERSION__ < 300 +struct IndexAB { + // Allow up to 16 indices + metal::array indexArray [[ id(0) ]]; +}; +#else +struct IndexAB { + constant int64_t* indexArray; +}; + +#endif + +template +kernel void index_select( +#if __METAL_VERSION__ >= 300 + constant IndexAB * indexAB [[buffer(0)]], +#else + constant IndexAB & indexAB [[buffer(0)]], +#endif + constant void * indexSizes [[buffer(1)]], + constant void * indexStrides [[buffer(2)]], + constant OffsetsT * offsets [[buffer(3)]], + constant void * inputData [[buffer(4)]], + device void * outputData [[buffer(5)]], + constant uint32_t & num_indices [[buffer(6)]], + uint thread_index [[thread_position_in_grid]]) { + constant int64_t * index_sizes = (constant int64_t *)indexSizes; + constant int64_t * index_strides = (constant int64_t *)indexStrides; + int64_t offset = 0; + for (uint32_t i = 0; i < num_indices; i++) { +#if __METAL_VERSION__ >= 300 + constant int64_t* indexArray = indexAB[i].indexArray; +#else + constant int64_t* indexArray = (constant int64_t*)indexAB.indexArray[i]; +#endif + int64_t index = indexArray[offsets[thread_index].z / sizeof(int64_t)]; + if (index < 0) { + index += index_sizes[i]; + } + offset += index * index_strides[i]; + } + device T * out = (device T*)((device char*)outputData + offsets[thread_index].x); + constant T * in = (constant T*)((constant char*)inputData + offsets[thread_index].y + offset); + *out = *in; +} + +template +void index_put_impl( +#if __METAL_VERSION__ >= 300 + constant IndexAB * indexAB, +#else + constant IndexAB & indexAB, +#endif + constant int64_t * index_sizes, + constant int64_t * index_strides, + constant OffsetsT * offsets, + constant void * inputData, + device void * outputData, + constant uint32_t & num_indices, + uint thread_index) { + int64_t offset = 0; + for (uint32_t i = 0; i < num_indices; i++) { +#if __METAL_VERSION__ >= 300 + constant int64_t* indexArray = indexAB[i].indexArray; +#else + constant int64_t* indexArray = (constant int64_t*)indexAB.indexArray[i]; +#endif + int64_t index = indexArray[offsets[thread_index].z / sizeof(int64_t)]; + + if (index < 0) { + index += index_sizes[i]; + } + offset += index * index_strides[i]; + } + device T * out = (device T*)((device char*)outputData + offsets[thread_index].x + offset); + constant T * in = (constant T*)((constant char*)inputData + offsets[thread_index].y); + *out = *in; +} + 
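Editorial note: the offset arithmetic shared by index_select and index_put_impl above reads, for each advanced index tensor, the int64 index stored at offsets[thread_index].z / sizeof(int64_t), wraps negative indices by adding the corresponding index size, and accumulates index * stride into a byte offset that is then added to the input pointer (select) or output pointer (put). Below is a minimal host-side C++ sketch of the same arithmetic for reference; the function name gather_byte_offset and the container choices are illustrative and not part of the shader.

#include <cstdint>
#include <cstdio>
#include <vector>

// Mirrors the loop in index_select / index_put_impl: each advanced index i
// contributes index * stride bytes, with negative indices wrapped into range.
int64_t gather_byte_offset(const std::vector<const int64_t*>& index_arrays,
                           const std::vector<int64_t>& index_sizes,
                           const std::vector<int64_t>& index_strides,
                           int64_t element_offset) {  // offsets[tid].z / sizeof(int64_t)
  int64_t offset = 0;
  for (size_t i = 0; i < index_arrays.size(); ++i) {
    int64_t index = index_arrays[i][element_offset];
    if (index < 0) {
      index += index_sizes[i];  // Python-style negative indexing
    }
    offset += index * index_strides[i];
  }
  return offset;  // added to the input (select) or output (put) byte offset
}

int main() {
  // One index tensor of size 4 with an 8-byte (int64) stride.
  int64_t idx[] = {2, -1, 0, 3};
  std::vector<const int64_t*> arrays = {idx};
  // element_offset 1 reads idx[1] = -1, which wraps to 3, giving 3 * 8 = 24.
  std::printf("%lld\n", (long long)gather_byte_offset(arrays, {4}, {8}, 1));
  return 0;
}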
+template +kernel void index_put_serial( +#if __METAL_VERSION__ >= 300 + constant IndexAB * indexAB [[buffer(0)]], +#else + constant IndexAB & indexAB [[buffer(0)]], +#endif + constant void * indexSizes [[buffer(1)]], + constant void * indexStrides [[buffer(2)]], + constant OffsetsT * offsets [[buffer(3)]], + constant void * inputData [[buffer(4)]], + device void * outputData [[buffer(5)]], + constant uint32_t & num_indices [[buffer(6)]], + constant uint * numIters [[buffer(7)]], + uint thread_index [[thread_position_in_grid]]) { + + constant int64_t * index_sizes = (constant int64_t *)indexSizes; + constant int64_t * index_strides = (constant int64_t *)indexStrides; + + for (uint iter_i = 0; iter_i < *numIters; iter_i++) { + index_put_impl(indexAB, index_sizes, index_strides, offsets, inputData, outputData, num_indices, iter_i); + } +} + +template +kernel void index_put( +#if __METAL_VERSION__ >= 300 + constant IndexAB * indexAB [[buffer(0)]], +#else + constant IndexAB & indexAB [[buffer(0)]], +#endif + constant void * indexSizes [[buffer(1)]], + constant void * indexStrides [[buffer(2)]], + constant OffsetsT * offsets [[buffer(3)]], + constant void * inputData [[buffer(4)]], + device void * outputData [[buffer(5)]], + constant uint32_t & num_indices [[buffer(6)]], + uint thread_index [[thread_position_in_grid]]) { + + constant int64_t * index_sizes = (constant int64_t *)indexSizes; + constant int64_t * index_strides = (constant int64_t *)indexStrides; + index_put_impl(indexAB, index_sizes, index_strides, offsets, inputData, outputData, num_indices, thread_index); +} + +#if __METAL_VERSION__ < 300 +#define REGISTER_INDEX_OP(DTYPE_SIZE, IDX_SIZE, DTYPE, INDEX_OP_TYPE, IDX_DTYPE) \ +template \ +[[host_name("index_" #INDEX_OP_TYPE "_" #DTYPE_SIZE "_" #IDX_SIZE)]] \ +kernel void index_ ## INDEX_OP_TYPE( \ + constant IndexAB & indexAB [[buffer(0)]], \ + constant void * indexSizes [[buffer(1)]], \ + constant void * indexStrides [[buffer(2)]], \ + constant IDX_DTYPE * offsets [[buffer(3)]], \ + constant void * inputData [[buffer(4)]], \ + device void * outputData [[buffer(5)]], \ + constant uint32_t & num_indices [[buffer(6)]], \ + uint thread_index [[thread_position_in_grid]]); +#else +#define REGISTER_INDEX_OP(DTYPE_SIZE, IDX_SIZE, DTYPE, INDEX_OP_TYPE, IDX_DTYPE) \ +template \ +[[host_name("index_" #INDEX_OP_TYPE "_" #DTYPE_SIZE "_" #IDX_SIZE)]] \ +kernel void index_ ## INDEX_OP_TYPE( \ + constant IndexAB * indexAB [[buffer(0)]], \ + constant void * indexSizes [[buffer(1)]], \ + constant void * indexStrides [[buffer(2)]], \ + constant IDX_DTYPE * offsets [[buffer(3)]], \ + constant void * inputData [[buffer(4)]], \ + device void * outputData [[buffer(5)]], \ + constant uint32_t & num_indices [[buffer(6)]], \ + uint thread_index [[thread_position_in_grid]]); +#endif + +#define REGISTER_INDEX_OP_ALL_DTYPES(INDEX_OP_TYPE) \ + REGISTER_INDEX_OP(8bit, idx32, char, INDEX_OP_TYPE, uint3); \ + REGISTER_INDEX_OP(8bit, idx64, char, INDEX_OP_TYPE, ulong3); \ + REGISTER_INDEX_OP(16bit, idx32, short, INDEX_OP_TYPE, uint3); \ + REGISTER_INDEX_OP(16bit, idx64, short, INDEX_OP_TYPE, ulong3); \ + REGISTER_INDEX_OP(32bit, idx32, int, INDEX_OP_TYPE, uint3); \ + REGISTER_INDEX_OP(32bit, idx64, int, INDEX_OP_TYPE, ulong3); \ + REGISTER_INDEX_OP(64bit, idx32, long, INDEX_OP_TYPE, uint3); \ + REGISTER_INDEX_OP(64bit, idx64, long, INDEX_OP_TYPE, ulong3); + +REGISTER_INDEX_OP_ALL_DTYPES(select); +REGISTER_INDEX_OP_ALL_DTYPES(put); + +#if __METAL_VERSION__ < 300 +#define REGISTER_SINGLE_THREADED_INDEX_OP(DTYPE_SIZE, 
IDX_SIZE, DTYPE, INDEX_OP_TYPE, IDX_DTYPE) \ +template \ +[[host_name("index_" #INDEX_OP_TYPE "_" #DTYPE_SIZE "_" #IDX_SIZE)]] \ +kernel void index_ ## INDEX_OP_TYPE( \ + constant IndexAB & indexAB [[buffer(0)]], \ + constant void * indexSizes [[buffer(1)]], \ + constant void * indexStrides [[buffer(2)]], \ + constant IDX_DTYPE * offsets [[buffer(3)]], \ + constant void * inputData [[buffer(4)]], \ + device void * outputData [[buffer(5)]], \ + constant uint32_t & num_indices [[buffer(6)]], \ + constant uint * numIters [[buffer(7)]], \ + uint thread_index [[thread_position_in_grid]]); +#else +#define REGISTER_SINGLE_THREADED_INDEX_OP(DTYPE_SIZE, IDX_SIZE, DTYPE, INDEX_OP_TYPE, IDX_DTYPE) \ +template \ +[[host_name("index_" #INDEX_OP_TYPE "_" #DTYPE_SIZE "_" #IDX_SIZE)]] \ +kernel void index_ ## INDEX_OP_TYPE( \ + constant IndexAB * indexAB [[buffer(0)]], \ + constant void * indexSizes [[buffer(1)]], \ + constant void * indexStrides [[buffer(2)]], \ + constant IDX_DTYPE * offsets [[buffer(3)]], \ + constant void * inputData [[buffer(4)]], \ + device void * outputData [[buffer(5)]], \ + constant uint32_t & num_indices [[buffer(6)]], \ + constant uint * numIters [[buffer(7)]], \ + uint thread_index [[thread_position_in_grid]]); +#endif + +#define REGISTER_SINGLE_THREADED_INDEX_OP_ALL_DTYPES(INDEX_OP_TYPE) \ + REGISTER_SINGLE_THREADED_INDEX_OP(8bit, idx32, char, INDEX_OP_TYPE, uint3); \ + REGISTER_SINGLE_THREADED_INDEX_OP(8bit, idx64, char, INDEX_OP_TYPE, ulong3); \ + REGISTER_SINGLE_THREADED_INDEX_OP(16bit, idx32, short, INDEX_OP_TYPE, uint3); \ + REGISTER_SINGLE_THREADED_INDEX_OP(16bit, idx64, short, INDEX_OP_TYPE, ulong3); \ + REGISTER_SINGLE_THREADED_INDEX_OP(32bit, idx32, int, INDEX_OP_TYPE, uint3); \ + REGISTER_SINGLE_THREADED_INDEX_OP(32bit, idx64, int, INDEX_OP_TYPE, ulong3); \ + REGISTER_SINGLE_THREADED_INDEX_OP(64bit, idx32, long, INDEX_OP_TYPE, uint3); \ + REGISTER_SINGLE_THREADED_INDEX_OP(64bit, idx64, long, INDEX_OP_TYPE, ulong3); + +REGISTER_SINGLE_THREADED_INDEX_OP_ALL_DTYPES(put_serial); + +template +kernel void kernel_index_offsets(constant StridesT * strides [[buffer(0)]], + device DataT * data_offsets [[buffer(1)]], + constant uint * iter_shape [[buffer(2)]], + constant uint & num_dimensions [[buffer(3)]], + uint thread_index [[thread_position_in_grid]]) { + data_offsets[thread_index] = 0; + uint32_t idx = thread_index; + for (uint32_t dim = 0; dim < num_dimensions; dim++) { + uint32_t remainder = idx % iter_shape[dim]; + idx /= iter_shape[dim]; + + data_offsets[thread_index] += remainder * DataT(strides[dim]); + } +} + +template +[[host_name("kernel_index_offsets_32")]] +kernel void kernel_index_offsets( + constant packed_uint3 * strides [[buffer(0)]], + device uint3 * data_offsets [[buffer(1)]], + constant uint * iter_shape [[buffer(2)]], + constant uint & num_dimensions [[buffer(3)]], + uint thread_index [[thread_position_in_grid]]); + +template +[[host_name("kernel_index_offsets_64")]] +kernel void kernel_index_offsets( + constant packed_uint3 * strides [[buffer(0)]], + device ulong3 * data_offsets [[buffer(1)]], + constant uint * iter_shape [[buffer(2)]], + constant uint & num_dimensions [[buffer(3)]], + uint thread_index [[thread_position_in_grid]]); + +template +kernel void index_put_accumulate_native_dtypes( +#if __METAL_VERSION__ >= 300 + constant IndexAB * indexAB [[buffer(0)]], +#else + constant IndexAB & indexAB [[buffer(0)]], +#endif + constant void * indexSizes [[buffer(1)]], + constant void * indexStrides [[buffer(2)]], + constant OffsetsT * offsets 
[[buffer(3)]], + constant void * inputData [[buffer(4)]], + device void * outputData [[buffer(5)]], + constant uint32_t & num_indices [[buffer(6)]], + uint thread_index [[thread_position_in_grid]]) { + constant int64_t * index_sizes = (constant int64_t *)indexSizes; + constant int64_t * index_strides = (constant int64_t *)indexStrides; + int64_t offset = 0; + for (uint32_t i = 0; i < num_indices; i++) { +#if __METAL_VERSION__ >= 300 + constant int64_t* indexArray = indexAB[i].indexArray; +#else + constant int64_t* indexArray = (constant int64_t*)indexAB.indexArray[i]; +#endif + int64_t index = indexArray[offsets[thread_index].z / sizeof(int64_t)]; + if (index < 0) { + index += index_sizes[i]; + } + offset += index * index_strides[i]; + } + device T * out = (device T*)((device char*)outputData + offsets[thread_index].x + offset); + constant E * in = (constant E*)((constant char*)inputData + offsets[thread_index].y); + atomic_fetch_add_explicit(out, *in, memory_order_relaxed); +} + +template +__attribute__((__always_inline__)) void atomic_fetch_add_relaxed(device void * addr, T value) { + device atomic_uint* uintAddr = (device atomic_uint*)addr; + uint expected = atomic_load_explicit(uintAddr, memory_order_relaxed); + T updated = as_type(expected) + value; + while (!atomic_compare_exchange_weak_explicit(uintAddr, &expected, as_type(updated), memory_order_relaxed, memory_order_relaxed)) { + updated = as_type(expected) + value; + } +} + +template +kernel void atomic_index_put_accumulate( +#if __METAL_VERSION__ >= 300 + constant IndexAB * indexAB [[buffer(0)]], +#else + constant IndexAB & indexAB [[buffer(0)]], +#endif + constant void * indexSizes [[buffer(1)]], + constant void * indexStrides [[buffer(2)]], + constant OffsetsT * offsets [[buffer(3)]], + constant void * inputData [[buffer(4)]], + device void * outputData [[buffer(5)]], + constant uint32_t & num_indices [[buffer(6)]], + uint thread_index [[thread_position_in_grid]]) { + constant int64_t * index_sizes = (constant int64_t *)indexSizes; + constant int64_t * index_strides = (constant int64_t *)indexStrides; + int64_t offset = 0; + for (uint32_t i = 0; i < num_indices; i++) { +#if __METAL_VERSION__ >= 300 + constant int64_t* indexArray = indexAB[i].indexArray; +#else + constant int64_t* indexArray = (constant int64_t*)indexAB.indexArray[i]; +#endif + int64_t index = indexArray[offsets[thread_index].z / sizeof(int64_t)]; + if (index < 0) { + index += index_sizes[i]; + } + offset += index * index_strides[i]; + } + device void * out = (device void*)((device char*)outputData + offsets[thread_index].x + offset); + constant T * in = (constant T*)((constant char*)inputData + offsets[thread_index].y); + atomic_fetch_add_relaxed(out, *in); +} + +template +[[host_name("index_put_accumulate_32bit_float_idx32")]] +kernel void atomic_index_put_accumulate( +#if __METAL_VERSION__ >= 300 + constant IndexAB * indexAB [[buffer(0)]], +#else + constant IndexAB & indexAB [[buffer(0)]], +#endif + constant void * indexSizes [[buffer(1)]], + constant void * indexStrides [[buffer(2)]], + constant uint3 * offsets [[buffer(3)]], + constant void * inputData [[buffer(4)]], + device void * outputData [[buffer(5)]], + constant uint32_t & num_indices [[buffer(6)]], + uint thread_index [[thread_position_in_grid]]); + +template +[[host_name("index_put_accumulate_32bit_float_idx64")]] +kernel void atomic_index_put_accumulate( +#if __METAL_VERSION__ >= 300 + constant IndexAB * indexAB [[buffer(0)]], +#else + constant IndexAB & indexAB [[buffer(0)]], +#endif + constant 
void * indexSizes [[buffer(1)]], + constant void * indexStrides [[buffer(2)]], + constant ulong3 * offsets [[buffer(3)]], + constant void * inputData [[buffer(4)]], + device void * outputData [[buffer(5)]], + constant uint32_t & num_indices [[buffer(6)]], + uint thread_index [[thread_position_in_grid]]); + +template +[[host_name("index_put_accumulate_32bit_int_idx32")]] +kernel void index_put_accumulate_native_dtypes( +#if __METAL_VERSION__ >= 300 + constant IndexAB * indexAB [[buffer(0)]], +#else + constant IndexAB & indexAB [[buffer(0)]], +#endif + constant void * indexSizes [[buffer(1)]], + constant void * indexStrides [[buffer(2)]], + constant uint3 * offsets [[buffer(3)]], + constant void * inputData [[buffer(4)]], + device void * outputData [[buffer(5)]], + constant uint32_t & num_indices [[buffer(6)]], + uint thread_index [[thread_position_in_grid]]); + +template +[[host_name("index_put_accumulate_32bit_int_idx64")]] +kernel void index_put_accumulate_native_dtypes( +#if __METAL_VERSION__ >= 300 + constant IndexAB * indexAB [[buffer(0)]], +#else + constant IndexAB & indexAB [[buffer(0)]], +#endif + constant void * indexSizes [[buffer(1)]], + constant void * indexStrides [[buffer(2)]], + constant ulong3 * offsets [[buffer(3)]], + constant void * inputData [[buffer(4)]], + device void * outputData [[buffer(5)]], + constant uint32_t & num_indices [[buffer(6)]], + uint thread_index [[thread_position_in_grid]]); +)INDEX_METAL"; + +static const char *SCATTER_OPS_TEMPLATE = R"METAL_SCATTER( +struct __attribute__ ((packed)) packed_uint5{{ + uint32_t x; uint32_t y; uint32_t z; uint32_t w; uint32_t u; +}}; + +template +Y cast(const X x); + +template<> +{1} cast<{1}, {0}>(const {0} x) {{ + return {2}; +}} + +kernel void scatter_kernel_5(uint linear_index [[thread_position_in_grid]], + constant void * src_ [[buffer(0)]], + device void * dst_ [[buffer(1)]], + constant packed_uint5 & size [[buffer(2)]], + constant packed_uint5 & stride [[buffer(3)]], + constant uint32_t & numel [[buffer(4)]]) {{ + if (linear_index >= numel) return; + + constant {0} * src = (constant {0} *)src_; + device {1} * dst = (device {1} *)dst_; + + packed_uint5 local_index; + local_index.x = linear_index / (size.u * size.w * size.z * size.y) % size.x; + local_index.y = linear_index / (size.u * size.w * size.z) % size.y; + local_index.z = linear_index / (size.u * size.w) % size.z; + local_index.w = linear_index / size.u % size.w; + local_index.u = linear_index % size.u; + + packed_uint5 strided_index; + strided_index.x = local_index.x * stride.x; + strided_index.y = local_index.y * stride.y; + strided_index.z = local_index.z * stride.z; + strided_index.w = local_index.w * stride.w; + strided_index.u = local_index.u * stride.u; + + dst[strided_index.x + strided_index.y + strided_index.z + strided_index.w + strided_index.u] = cast<{1}>(src[linear_index]); +}} + +kernel void scatter_kernel_4(uint linear_index [[thread_position_in_grid]], + constant void * src_ [[buffer(0)]], + device void * dst_ [[buffer(1)]], + constant packed_uint4 & size [[buffer(2)]], + constant packed_uint4 & stride [[buffer(3)]], + constant uint32_t & numel [[buffer(4)]]) {{ + if (linear_index >= numel) return; + + constant {0} * src = (constant {0} *)src_; + device {1} * dst = (device {1} *)dst_; + + packed_uint4 local_index; + local_index.x = linear_index / (size[3] * size[2] * size[1]) % size[0]; + local_index.y = linear_index / (size[3] * size[2]) % size[1]; + local_index.z = linear_index / size[3] % size[2]; + local_index.w = linear_index % size[3]; 
+ + const packed_uint4 strided_index = local_index * stride; + dst[strided_index.x + strided_index.y + strided_index.z + strided_index.w] = cast<{1}>(src[linear_index]); +}} + +kernel void scatter_kernel_3(uint linear_index [[thread_position_in_grid]], + constant void * src_ [[buffer(0)]], + device void * dst_ [[buffer(1)]], + constant packed_uint3 & size [[buffer(2)]], + constant packed_uint3 & stride [[buffer(3)]], + constant uint32_t & numel [[buffer(4)]]) {{ + if (linear_index >= numel) return; + + constant {0} * src = (constant {0} *)src_; + device {1} * dst = (device {1} *)dst_; + + packed_uint3 local_index; + local_index.x = linear_index / (size[2] * size[1]) % size[0]; + local_index.y = linear_index / size[2] % size[1]; + local_index.z = linear_index % size[2]; + + const packed_uint3 strided_index = local_index * stride; + dst[strided_index.x + strided_index.y + strided_index.z] = cast<{1}>(src[linear_index]); +}} + +kernel void scatter_kernel_2(uint linear_index [[thread_position_in_grid]], + constant void * src_ [[buffer(0)]], + device void * dst_ [[buffer(1)]], + constant packed_uint2 & size [[buffer(2)]], + constant packed_uint2 & stride [[buffer(3)]], + constant uint32_t & numel [[buffer(4)]]) {{ + if (linear_index >= numel) return; + + constant {0} * src = (constant {0} *)src_; + device {1} * dst = (device {1} *)dst_; + + packed_uint2 local_index; + local_index.x = linear_index / size[1] % size[0]; + local_index.y = linear_index % size[1]; + + const packed_uint2 strided_index = local_index * stride; + dst[strided_index.x + strided_index.y] = cast<{1}>(src[linear_index]); +}} + +kernel void scatter_kernel_1(uint linear_index [[thread_position_in_grid]], + constant void * src_ [[buffer(0)]], + device void * dst_ [[buffer(1)]], + constant int & size [[buffer(2)]], + constant int & stride [[buffer(3)]], + constant uint32_t & numel [[buffer(4)]]) {{ + if (linear_index >= numel) return; + + constant {0} * src = (constant {0} *)src_; + device {1} * dst = (device {1} *)dst_; + + const int local_index = linear_index % size; + const int strided_index = local_index * stride; + dst[strided_index] = cast<{1}>(src[linear_index]); +}} +)METAL_SCATTER"; + +static const char *GATHER_OPS_TEMPLATE = R"METAL_GATHER( +struct __attribute__ ((packed)) packed_uint5{{ + uint32_t x; uint32_t y; uint32_t z; uint32_t w; uint32_t u; +}}; + +template +Y cast(const X x); + +template<> +{1} cast<{1}, {0}>(const {0} x) {{ + return {2}; +}} + +kernel void gather_kernel_5(uint linear_index [[thread_position_in_grid]], + constant void * src_ [[buffer(0)]], + device void * dst_ [[buffer(1)]], + constant packed_uint5 & size [[buffer(2)]], + constant packed_uint5 & stride [[buffer(3)]], + constant uint32_t & numel [[buffer(4)]]) {{ + if (linear_index >= numel) return; + + constant {0} * src = (constant {0} *)src_; + device {1} * dst = (device {1} *)dst_; + + + packed_uint5 local_index; + local_index.x = linear_index / (size.u * size.w * size.z * size.y) % size.x; + local_index.y = linear_index / (size.u * size.w * size.z) % size.y; + local_index.z = linear_index / (size.u * size.w) % size.z; + local_index.w = linear_index / size.u % size.w; + local_index.u = linear_index % size.u; + + packed_uint5 strided_index; + strided_index.x = local_index.x * stride.x; + strided_index.y = local_index.y * stride.y; + strided_index.z = local_index.z * stride.z; + strided_index.w = local_index.w * stride.w; + strided_index.u = local_index.u * stride.u; + + dst[linear_index] = cast<{1}>(src[strided_index.x + strided_index.y + 
strided_index.z + strided_index.w + strided_index.u]); +}} + +kernel void gather_kernel_4(uint linear_index [[thread_position_in_grid]], + constant void * src_ [[buffer(0)]], + device void * dst_ [[buffer(1)]], + constant packed_uint4 & size [[buffer(2)]], + constant packed_uint4 & stride [[buffer(3)]], + constant uint32_t & numel [[buffer(4)]]) {{ + if (linear_index >= numel) return; + + constant {0} * src = (constant {0} *)src_; + device {1} * dst = (device {1} *)dst_; + + packed_uint4 local_index; + local_index.x = linear_index / (size[3] * size[2] * size[1]) % size[0]; + local_index.y = linear_index / (size[3] * size[2]) % size[1]; + local_index.z = linear_index / size[3] % size[2]; + local_index.w = linear_index % size[3]; + + const packed_uint4 strided_index = local_index * stride; + dst[linear_index] = cast<{1}>(src[strided_index.x + strided_index.y + strided_index.z + strided_index.w]); +}} + +kernel void gather_kernel_3(uint linear_index [[thread_position_in_grid]], + constant void * src_ [[buffer(0)]], + device void * dst_ [[buffer(1)]], + constant packed_uint3 & size [[buffer(2)]], + constant packed_uint3 & stride [[buffer(3)]], + constant uint32_t & numel [[buffer(4)]]) {{ + if (linear_index >= numel) return; + + constant {0} * src = (constant {0} *)src_; + device {1} * dst = (device {1} *)dst_; + + packed_uint3 local_index; + local_index.x = linear_index / (size[2] * size[1]) % size[0]; + local_index.y = linear_index / size[2] % size[1]; + local_index.z = linear_index % size[2]; + + const packed_uint3 strided_index = local_index * stride; + dst[linear_index] = cast<{1}>(src[strided_index.x + strided_index.y + strided_index.z]); +}} + +kernel void gather_kernel_2(uint linear_index [[thread_position_in_grid]], + constant void * src_ [[buffer(0)]], + device void * dst_ [[buffer(1)]], + constant packed_uint2 & size [[buffer(2)]], + constant packed_uint2 & stride [[buffer(3)]], + constant uint32_t & numel [[buffer(4)]]) {{ + if (linear_index >= numel) return; + + constant {0} * src = (constant {0} *)src_; + device {1} * dst = (device {1} *)dst_; + + packed_uint2 local_index; + local_index.x = linear_index / size[1] % size[0]; + local_index.y = linear_index % size[1]; + + const packed_uint2 strided_index = local_index * stride; + dst[linear_index] = cast<{1}>(src[strided_index.x + strided_index.y]); +}} + +kernel void gather_kernel_1(uint linear_index [[thread_position_in_grid]], + constant void * src_ [[buffer(0)]], + device void * dst_ [[buffer(1)]], + constant int & size [[buffer(2)]], + constant int & stride [[buffer(3)]], + constant uint32_t & numel [[buffer(4)]]) {{ + if (linear_index >= numel) return; + + constant {0} * src = (constant {0} *)src_; + device {1} * dst = (device {1} *)dst_; + + const int local_index = linear_index % size; + const int strided_index = local_index * stride; + dst[linear_index] = cast<{1}>(src[strided_index]); +}} +)METAL_GATHER"; +} // namespace at::mps diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSAllocator.h b/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSAllocator.h new file mode 100644 index 0000000000000000000000000000000000000000..bdf19e8d7362272c50d36ff36fcc7c5918a98afb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSAllocator.h @@ -0,0 +1,401 @@ +// Copyright © 2022 Apple Inc. + +#pragma once + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +// this implementation is based on CUDACachingAllocator. 
+// It utilizes Metal Heaps to improve the performance with buffer allocation. +// Do not include this header. Use MPSAllocatorInterface.h instead. +// TODO: Unify the logic with CUDACachingAllocator and remove redundant code. +namespace at::mps::HeapAllocator { + +static const size_t kMaxSmallAlloc = MB(1); // largest "small" allocation is 1 MiB +static const size_t kMinLargeAlloc = MB(10); // allocations between 1 and 10 MiB may use kLargeHeap +static const size_t kRoundLarge = MB(2); // round up large allocations to 2 MiB +static const size_t kSmallHeap = MB(8); // "small" allocations are packed in 8 MiB heaps +static const size_t kLargeHeap = MB(32); // "large" allocations may be packed in 32 MiB heaps +static const size_t kXLargeHeapD = MB(128); // "extra large" allocations on Discrete devices may be packed in 128 MiB heaps +static const size_t kXLargeHeapU = MB(1024); // "extra large" allocations on Unified devices may be packed in 1 GiB heaps +static const size_t kMaxScalarAlloc = (sizeof(int64_t)); // largest "scalar" allocation + +// buffer pools could be customized with a combination of usage flags +enum UsageFlags : uint32_t { + PRIVATE = 0, + SMALL = (1 << 0), // small heaps have sizes of kSmallHeap, and large ones kLargeHeap + SHARED = (1 << 1), // shared pools allocated on devices with unified memory; otherwise, private between host/device + MANAGED = (1 << 2), // managed storage mode + HAZARD = (1 << 3), // enables Automatic Hazard Tracking for the resources allocated on the pool + SCALAR = (1 << 4), // used to import CPU scalar values to GPU and use them in MPS Stream +}; +// debug verbosity flags +enum DebugVerbosity : uint32_t { + SILENT = 0, + PROFILING = (1 << 0), // print generic profiling data for total system memory usage + ALLOCATIONS = (1 << 1), // print buffer allocations + RECYCLES = (1 << 2), // print buffer recycling + RELEASES = (1 << 3), // print buffer releases + LARGE_ONLY = (1 << 4), // only log large buffer pool transactions +}; + +struct HeapBlock; + +struct BufferBlock { + id buffer; + void* cpu_ptr = nullptr; // stores the pointer to CPU mapping of a Shared MTLBuffer + size_t size; // size after alignment + size_t requested_size; // requested size (before alignment) + // buffer shape is used for retrieving base of views in cached graphs + std::vector shape; + bool in_use = false; + HeapBlock* heap; + id_t buf_id; + // counter to candidate least recently used buffers for garbage collection + uint32_t gc_count = 0; + uint32_t use_count = 0; + // counter to assign unique ids to buffer blocks + static uint64_t buffer_counter; + // Metal events used to sync GPU/CPU operations on the shared-storage buffers + MPSEventPtr event; + + BufferBlock(size_t Size, size_t RequestedSize = 0, const id Buffer = nullptr, + HeapBlock* Heap = nullptr) : + buffer(Buffer), size(Size), requested_size(RequestedSize), + heap(Heap), buf_id(Buffer ? ++buffer_counter : 0) { } + + static bool Comparator(const BufferBlock* a, const BufferBlock* b) { + return (a->size != b->size) ? 
a->size < b->size : (uintptr_t)a->buffer < (uintptr_t)b->buffer; + } + static size_t alignUp(size_t Size, size_t Alignment) { + assert(((Alignment - 1) & Alignment) == 0); + return ((Size + Alignment - 1) & ~(Alignment - 1)); + } + uint32_t retainCount() const { return [buffer retainCount]; } +}; +typedef bool (*BufferComparison)(const BufferBlock*, const BufferBlock*); + +struct BufferPool; +struct AllocParams { + AllocParams(size_t Alloc_Size, size_t Requested_Size, BufferPool* Pool) : + search_key(Alloc_Size), pool(Pool), requested_size(Requested_Size) { } + size_t size() const { return search_key.size; } + + BufferBlock search_key; + BufferPool* pool; + BufferBlock* buffer_block = nullptr; + size_t requested_size; + // true if we exceed the low watermark limit. In this case + // we apply strategies to relieve the pressure before allocation. + bool has_memory_pressure = false; + // true if we're allocating on a unified memory device + bool has_unified_memory = true; +}; + +struct HeapBlock { + id heap; + struct { size_t total, available; } size; + BufferPool* pool; + unsigned int n_buffers = 0; + id_t heap_id; + // indicates if we split this heap to sub-allocate 'several' buffers (otherwise single buffer) + bool is_split; + // counter to assign unique ids to heap blocks + static uint64_t heap_counter; + + HeapBlock(size_t Size, const id Heap = nullptr, BufferPool *Pool = nullptr) : + heap(Heap), size({.total = Size, .available = Size}), pool(Pool), + heap_id(Heap ? ++heap_counter : 0), is_split(true) { } + + static MTLResourceOptions getOptions(uint32_t usage) { + // TODO: check the caching performance of write-combined mode + MTLResourceOptions options = MTLResourceCPUCacheModeDefaultCache; + + if (usage & UsageFlags::MANAGED) + options |= MTLResourceStorageModeManaged; + else if (usage & UsageFlags::SHARED) + options |= MTLResourceStorageModeShared; + else + options |= MTLResourceStorageModePrivate; + + options |= (usage & UsageFlags::HAZARD) ? MTLResourceHazardTrackingModeTracked : MTLResourceHazardTrackingModeUntracked; + + return options; + } + + static HeapBlock* createHeapBlock(AllocParams& params, id device, uint32_t usage) { + HeapBlock *heapBlock = nullptr; + bool is_split = true; + const size_t size = params.size(); + MTLHeapDescriptor *d = [MTLHeapDescriptor new]; + if (d) { + const size_t kXLargeHeap = params.has_unified_memory ? kXLargeHeapU : kXLargeHeapD; + if (size <= kMaxSmallAlloc) { + d.size = kSmallHeap; + } else if (size < kMinLargeAlloc) { + d.size = kLargeHeap; + } else if (size < kXLargeHeap / 2 && !params.has_memory_pressure) { + d.size = kXLargeHeap; + } else { + d.size = kRoundLarge * ((size + kRoundLarge - 1) / kRoundLarge); + is_split = false; + } + d.storageMode = (usage & UsageFlags::SHARED) ? MTLStorageModeShared : MTLStorageModePrivate; + d.cpuCacheMode = MTLCPUCacheModeDefaultCache; + // this automatically handles Metal buffer access synchronizations at the + // cost of slightly lower performance. + d.hazardTrackingMode = (usage & UsageFlags::HAZARD) ? 
MTLHazardTrackingModeTracked : MTLHazardTrackingModeUntracked; + d.resourceOptions = getOptions(usage); + d.type = MTLHeapTypeAutomatic; + id heap = [device newHeapWithDescriptor: d]; + if (heap) { + [heap setPurgeableState:MTLPurgeableStateNonVolatile]; + const size_t heap_size = heapAvailableSize(heap); + heapBlock = new HeapBlock(heap_size, heap, params.pool); + if (heapBlock) { + heapBlock->is_split = is_split; + } + } + [d release]; + } + return heapBlock; + } + static bool Comparator(const HeapBlock* a, const HeapBlock* b) { + return (a->size.available != b->size.available) ? a->size.available < b->size.available : + (uintptr_t)a->heap < (uintptr_t)b->heap; + } + static NSUInteger heapAvailableSize(id heap, size_t Alignment = vm_page_size) { + return [heap maxAvailableSizeWithAlignment:Alignment]; + } + NSUInteger Size() { + return [heap size]; + } + id newMTLBuffer(size_t length, uint32_t usage) { + id buf = [heap newBufferWithLength:length options:getOptions(usage)]; + if (buf) { + updateAvailableSize(); + n_buffers++; + } + return buf; + } + // returns the retainCount before releasing the buffer + uint32_t releaseMTLBuffer(id& buffer) { + const uint32_t retainCount = [buffer retainCount]; + [buffer release]; + buffer = nil; + updateAvailableSize(); + n_buffers--; + return retainCount; + } + // returns the retainCount before releasing the heap + uint32_t releaseMTLHeap() { + const uint32_t retainCount = [heap retainCount]; + TORCH_INTERNAL_ASSERT(!n_buffers); // assert if heap isn't empty + [heap setPurgeableState:MTLPurgeableStateEmpty]; + [heap release]; + heap = nil; + size.available = 0; + return retainCount; + } + uint32_t retainCount() const { return [heap retainCount]; } + void updateAvailableSize() { size.available = heapAvailableSize(heap); } +}; +typedef bool (*HeapComparison)(const HeapBlock*, const HeapBlock*); + +struct BufferPool { + enum class Kind { + PRIVATE_SMALL, + PRIVATE_LARGE, + SHARED_SMALL, + SHARED_LARGE, + SCALAR, + }; + + BufferPool(const id Device, uint32_t Usage) : + device(Device), usage(Usage), + heaps(HeapBlock::Comparator), available_buffers(BufferBlock::Comparator) { } + + const id device; + // usage flags to customize the pool for various purposes (see UsageFlags enum) + const uint32_t usage; + // total number of buffers in the pool + uint32_t n_buffers = 0; + // total allocations size on this pool + size_t allocated_size = 0; + // total memory available in the pool + size_t available_size = 0; + // list of heaps ordered by their "available" (not total) memory size + std::set heaps; + // list of only "available" buffers in the pool (i.e., buffers not in-use) + std::set available_buffers; + // list of buffers that are in a state of "limbo" where they've already been freed + // from PyTorch-side, but were not returned to pool due to still being + // in-use by command buffers with retainCount > 1. In this state, the buffer is + // neither ready to be recycled, nor could be returned to pool as available. + // These buffers will be returned to pool once the command buffer's + // completionHandler callbacks are called. 
+ std::unordered_set buffers_pending_free; + // list of heaps pending size update + std::unordered_set heaps_pending_update; +}; + +class MPSHeapAllocatorImpl { +public: + explicit MPSHeapAllocatorImpl() : + m_device(at::mps::MPSDevice::getInstance()->device()), + m_max_buffer_size([m_device maxBufferLength]), + m_stream(getDefaultMPSStream()), + m_event_pool(getMPSEventPool()) { + init_allocator(); + } + ~MPSHeapAllocatorImpl() { + emptyCache(); + } + // interface exposed to at::Allocator + id malloc(size_t size, uint32_t usage); + // frees a buffer and returns it into buffer pool + void free(void* ptr); + // releases all the cached buffers and their associated heaps + void emptyCache(); + // free inactive buffers that are pending to be freed + void freeInactiveBuffers(); + // returns true if buffer was allocated from the shared pool + bool isSharedBuffer(const void* ptr); + // get the requested unaligned size of an MTLBuffer + ssize_t getUnalignedBufferSize(const void* ptr); + // set the shape of a base tensor from a view tensor + void setBufferShape(const void* ptr, const IntArrayRef& shape); + // retrieve the shape of a base tensor from a view tensor + IntArrayRef getBufferShape(const void* ptr); + // get the unique ID of the buffer + id_t getBufferId(const void* ptr); + // allocate a buffer from a specialized pool to import CPU scalars into GPU + id allocScalarBufferWithValue(void* value, size_t size); + // returns a CPU-mapping of the input buffer and its retainCount, + // if only it has Shared storage-mode and allocated on MPSAllocator + std::pair getSharedBufferPtr(const void* buffer); + // records events for a list of MTLBuffers (list is used to lock the mutex once) + // returns true if records any event (given if passed buffers exist and are shared-storage) + bool recordEvents(c10::ArrayRef buffers); + // waits for the event to signal the completion of GPU execution + // on the passed shared buffers (list is used to lock the mutex once) + // returns true if actually waited on any event + bool waitForEvents(c10::ArrayRef buffers); + // this indicates how far (in Megabytes) the current total allocations are from the + // low watermark limit which is used to detect if we're under memory pressure + // This returns zero if we've reached the low watermark limit + ssize_t getLowWatermarkValue(); + // (see m_low_watermark_ratio for description) + void setLowWatermarkRatio(double ratio); + // (see m_high_watermark_ratio for description) + void setHighWatermarkRatio(double ratio); + // (see m_low_watermark_limit for description) + size_t getLowWatermarkLimit() const { return m_low_watermark_limit; } + // (see m_max_total_allowed_size for description) + size_t getHighWatermarkLimit() const { return m_max_total_allowed_size; } + // (see m_total_allocated_memory for description) + size_t getTotalAllocatedMemory() const { return m_total_allocated_memory; } + // (see m_current_allocated_memory for description) + size_t getCurrentAllocatedMemory() const { return m_current_allocated_memory; } + // total GPU memory allocated in the process by Metal driver; including + // implicit allocations from MPS/MPSGraph frameworks and MPSHeapAllocatorImpl. 
+ size_t getDriverAllocatedMemory() const { return current_allocated_size(); } + // (see enum DebugVerbosity for description) + uint32_t getDebugVerbosity() const { return m_debug_verbosity; } + // returns the device that we allocate from + inline id Device() const { return m_device; } + + // TODO: make a common function to do size unit conversions in PyTorch. + inline std::string format_size(uint64_t size) const; + +private: + // (see m_high_watermark_ratio for description) + constexpr static double default_high_watermark_ratio = 1.7; + // we set the allowed upper bound to twice the size of recommendedMaxWorkingSetSize. + constexpr static double default_high_watermark_upper_bound = 2.0; + // (see m_low_watermark_ratio for description) + // on unified memory, we could allocate beyond the recommendedMaxWorkingSetSize + constexpr static double default_low_watermark_ratio_unified = 1.4; + constexpr static double default_low_watermark_ratio_discrete = 1.0; + + const id m_device; + std::recursive_mutex m_mutex; + // allocated buffers by device pointer + ska::flat_hash_map m_allocated_buffers; + // using a container for pools to simplify iterating them + ska::flat_hash_map> m_pools; + // total memory allocated by HeapAllocator (including blocks in pools) + size_t m_total_allocated_memory = 0; + // currently active memory allocations in use (i.e., blocks not in pools) + size_t m_current_allocated_memory = 0; + // max buffer size allowed by Metal + size_t m_max_buffer_size = 0; + // maximum total size allowed to be allocated + size_t m_max_total_allowed_size = 0; + // high watermark ratio is a hard limit for the total allowed allocations + // 0. : disables high watermark limit (may cause system failure if system-wide OOM occurs) + // 1. : recommended maximum allocation size (i.e., device.recommendedMaxWorkingSetSize) + // >1.: allows limits beyond the device.recommendedMaxWorkingSetSize + // e.g., value 0.95 means we allocate up to 95% of recommended maximum + // allocation size; beyond that, the allocations would fail with OOM error. + double m_high_watermark_ratio; + // low watermark ratio is a soft limit to attempt limiting memory allocations up to the lower watermark + // level by garbage collection or committing command buffers more frequently (a.k.a, adaptive commit). + // Value between 0 to m_high_watermark_ratio (setting 0.0 disables adaptive commit and garbage collection) + // e.g., value 0.9 means we 'attempt' to limit allocations up to 90% of recommended maximum + // allocation size. 
+ double m_low_watermark_ratio; + // low watermark size limit (in Bytes) at the time we initialize the allocator + size_t m_low_watermark_limit; + // use "PYTORCH_DEBUG_MPS_ALLOCATOR" env-var to set debug verbosity + uint32_t m_debug_verbosity; + // default MPS stream + MPSStream* m_stream; + // we hold a reference to MPSEventPool so it could get destroyed after MPSAllocator + std::shared_ptr m_event_pool; + + void init_allocator(); + void init_buffer_pools(); + HeapBlock* get_free_heap(AllocParams& params); + bool get_free_buffer(AllocParams& params); + BufferBlock* get_allocated_buffer_block(const void* ptr); + BufferBlock* alloc_buffer_block(size_t size, uint32_t usage); + bool alloc_buffer(AllocParams& params); + void free_buffer(BufferBlock* buffer_block); + // returns true if the container heap is also released + bool release_buffer(BufferBlock* buffer_block, bool remove_empty_heap = true); + void release_buffers(BufferPool& pool); + bool release_available_cached_buffers(AllocParams& params); + bool release_cached_buffers(); + // free unused cached blocks to reclaim GPU memory if memory pressure is high + void garbage_collect_cached_buffers(AllocParams& params); + // returns the suitable buffer pool type for the usage or + // requested/allocated sizes + BufferPool& get_pool(size_t requested_size, size_t aligned_size, uint32_t usage); + // returns the aligned allocation size that is optimized + // for the buffers to get reused frequently + size_t get_allocation_size(size_t size, uint32_t usage) const; + // maximum size of device memory available for allocation in current process + // Note: the recommendedMaxWorkingSetSize is typically 75% of the total system memory. + size_t max_device_size() const { return [m_device recommendedMaxWorkingSetSize]; } + // there are implicit allocations from MPS backend, so we need to query the 'device' for + // total allocated size instead of manually tracking in MPSAllocator + size_t current_allocated_size() const { return [m_device currentAllocatedSize]; } + + bool trigger_memory_callbacks(BufferBlock* buffer_block, IMpsAllocatorCallback::EventType event) const { + for (const auto& name : MPSAllocatorCallbacksRegistry()->Keys()) { + MPSAllocatorCallbacksRegistry()->Create(name)->executeMPSAllocatorCallback(buffer_block ? buffer_block->buffer : nullptr, event); + } + return true; + } +}; + +} // namespace at::mps::HeapAllocator diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSAllocatorInterface.h b/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSAllocatorInterface.h new file mode 100644 index 0000000000000000000000000000000000000000..e30a02c3fb213400eb587654b411fca2ee2b06c4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSAllocatorInterface.h @@ -0,0 +1,61 @@ +// Copyright © 2023 Apple Inc. + +#pragma once + +#include +#include +#include + +#define MB(x) (x * 1048576UL) + +namespace at::mps { + +// this is a public interface to access MPSAllocator. +// Do not declare methods that would depend on MPS or Metal frameworks. +class IMPSAllocator : public c10::Allocator { +public: + // see the comments in MPSAllocator.h for the description of these methods. 
+ virtual void emptyCache() const = 0; + virtual void freeInactiveBuffers() const = 0; + virtual ssize_t getUnalignedBufferSize(const void* ptr) const = 0; + virtual IntArrayRef getBufferShape(const void* ptr) const = 0; + virtual id_t getBufferId(const void* ptr) const = 0; + virtual void setBufferShape(const void* ptr, const IntArrayRef& shape) const = 0; + virtual bool isSharedBuffer(const void* ptr) const = 0; + virtual bool isSharedStorageSupported() const = 0; + virtual c10::DataPtr allocScalarBufferWithValue(void* value, size_t size) const = 0; + virtual std::string formatSize(size_t size) const = 0; + virtual void setLowWatermarkRatio(double ratio) const = 0; + virtual void setHighWatermarkRatio(double ratio) const = 0; + virtual ssize_t getLowWatermarkValue() const = 0; + virtual size_t getLowWatermarkLimit() const = 0; + virtual size_t getHighWatermarkLimit() const = 0; + virtual size_t getTotalAllocatedMemory() const = 0; + virtual size_t getCurrentAllocatedMemory() const = 0; + virtual size_t getDriverAllocatedMemory() const = 0; + virtual std::pair getSharedBufferPtr(const void* ptr) const = 0; + virtual bool recordEvents(c10::ArrayRef buffers) const = 0; + virtual bool waitForEvents(c10::ArrayRef buffers) const = 0; +}; + +class IMpsAllocatorCallback { + public: + enum class EventType { + ALLOCATED, // buffer got allocated to be used immediately + RECYCLED, // buffer pulled from free list to be reused + FREED, // buffer put to free list for future recycling + RELEASED, // buffer memory released + ALLOCATION_FAILED // buffer allocation failed + }; + virtual ~IMpsAllocatorCallback() = default; + virtual void executeMPSAllocatorCallback(void* ptr, EventType event) = 0; +}; + +// MPS allocator will execute every registered callback when a block of memory is freed. +C10_DECLARE_REGISTRY(MPSAllocatorCallbacksRegistry, IMpsAllocatorCallback); +#define REGISTER_MPS_ALLOCATOR_CALLBACK(name, ...) \ + C10_REGISTER_CLASS(MPSAllocatorCallbacksRegistry, name, __VA_ARGS__); + +IMPSAllocator* getIMPSAllocator(bool sharedAllocator = false); + +} // namespace at::mps diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSDevice.h b/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSDevice.h new file mode 100644 index 0000000000000000000000000000000000000000..40ab07077293d16baea2ff294ed15387f4bffc91 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSDevice.h @@ -0,0 +1,85 @@ +// Copyright © 2022 Apple Inc. + +#pragma once +#include +#include +#include + + +#ifdef __OBJC__ +#include +#include +#include +typedef id MTLDevice_t; +typedef id MTLLibrary_t; +typedef id MTLComputePipelineState_t; +typedef id MTLLibrary_t; +#else +typedef void* MTLDevice; +typedef void* MTLDevice_t; +typedef void* MTLLibrary_t; +typedef void* MTLComputePipelineState_t; +typedef void* MTLLibrary_t; +#endif + +using namespace std; + +namespace at::mps { + +// Helper enum to check if a MPSGraph op is supported in a given macOS version +enum class MacOSVersion : uint32_t { + MACOS_VER_13_0_PLUS = 0, + MACOS_VER_13_1_PLUS, + MACOS_VER_13_2_PLUS, + MACOS_VER_13_3_PLUS, + MACOS_VER_14_0_PLUS, +}; + +//----------------------------------------------------------------- +// MPSDevice +// +// MPSDevice is a singleton class that returns the default device +//----------------------------------------------------------------- + +class TORCH_API MPSDevice { + public: + /** + * MPSDevice should not be cloneable. 
+ */ + MPSDevice(MPSDevice& other) = delete; + /** + * MPSDevice should not be assignable. + */ + void operator=(const MPSDevice&) = delete; + /** + * Gets single instance of the Device. + */ + static MPSDevice* getInstance(); + /** + * Returns the single device. + */ + MTLDevice_t device() { + return _mtl_device; + } + /** + * Returns whether running on Ventura or newer + */ + bool isMacOS13Plus(MacOSVersion version) const; + + MTLComputePipelineState_t metalIndexingPSO(const std::string &kernel); + MTLLibrary_t getMetalIndexingLibrary(); + + ~MPSDevice(); + + private: + static MPSDevice* _device; + MTLDevice_t _mtl_device; + MTLLibrary_t _mtl_indexing_library; + MPSDevice(); +}; + +TORCH_API bool is_available(); +TORCH_API bool is_macos_13_or_newer(MacOSVersion version = MacOSVersion::MACOS_VER_13_0_PLUS); +TORCH_API at::Allocator* GetMPSAllocator(bool useSharedAllocator = false); + +} // namespace at::mps diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSEvent.h b/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSEvent.h new file mode 100644 index 0000000000000000000000000000000000000000..880ff1c75d12e17ecf719f3de875c2217f852e51 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSEvent.h @@ -0,0 +1,100 @@ +// Copyright © 2023 Apple Inc. + +#pragma once + +#include +#include +#include + +namespace at::mps { + +// NOTE: don't create instances of this class directly. +// Use MPSEventPool to acquire instances of MPSEvent. +class MPSEvent { +public: + explicit MPSEvent(id_t ID, MPSStream* stream, bool enable_timing); + ~MPSEvent(); + + // records an event on the stream + void record(bool needsLock, bool syncEvent = false); + // makes all future work submitted to the stream wait for this event. + bool wait(bool needsLock, bool syncEvent = false); + // schedules a notifyListener callback for the event. + bool notify(bool needsLock, MTLSharedEventNotificationBlock block); + // checks if events are already signaled. + bool query() const; + // blocks the CPU thread until all the GPU work that were scheduled + // prior to recording this event are completed. 
+ bool synchronize(); + // resets this event with new parameters in case it gets reused from the event pool + void reset(MPSStream* stream, bool enable_timing); + // returns the unique ID of the event instance + id_t getID() const { return m_id; } + // returns the completion timestamp of the event + uint64_t getCompletionTime() const { return m_completion_time; } + // if already recorded, waits for cpu_sync_cv to be signaled + void waitForCpuSync(); + +private: + id_t m_id; + // enables measuring the completion time of the notifyListener of this event + bool m_enable_timing; + uint64_t m_signalCounter = 0; + MPSStream* m_stream = nullptr; + MTLSharedEvent_t m_event = nullptr; + MTLSharedEventListener* m_listener = nullptr; + // used to sync the events created on this Stream with CPU + std::mutex m_cpu_sync_mutex{}; + std::condition_variable m_cpu_sync_cv{}; + // CondVar predicate to sync the events created on this Stream with CPU + bool m_cpu_sync_completed = false; + // used to compute elapsed time + uint64_t m_completion_time = 0; + + void recordLocked(bool syncEvent); + bool waitLocked(bool syncEvent); + bool notifyLocked(MTLSharedEventNotificationBlock block); + void notifyCpuSync(); + static uint64_t getTime() { + return clock_gettime_nsec_np(CLOCK_MONOTONIC_RAW); + } +}; + +typedef std::unique_ptr> MPSEventPtr; + +class MPSEventPool { +public: + explicit MPSEventPool(MPSStream* default_stream); + ~MPSEventPool(); + + MPSEventPtr acquireEvent(bool enable_timing, MPSStream* stream); + void emptyCache(); + + // these are mainly used for MPSHooks and torch.mps.Event() bindings + id_t acquireEvent(bool enable_timing); + void releaseEvent(id_t event_id); + void recordEvent(id_t event_id, bool syncEvent); + void waitForEvent(id_t event_id, bool syncEvent); + void synchronizeEvent(id_t event_id); + bool queryEvent(id_t event_id); + // returns elapsed time between two recorded events in milliseconds + double elapsedTime(id_t start_event_id, id_t end_event_id); + +private: + MPSStream* m_default_stream = nullptr; + std::recursive_mutex m_mutex; + std::stack> m_pool{}; + // dictionary to associate event IDs with event objects + // used to retain in-use events out of the pool + // for torch.mps.Event() bindings. + std::unordered_map m_in_use_events{}; + uint64_t m_event_counter = 0; + std::function m_default_deleter; + + MPSEvent* getInUseEvent(id_t event_id, bool locked = true); +}; + +// shared_ptr is used to get MPSEventPool destroyed after dependent instances +std::shared_ptr getMPSEventPool(); + +} // namespace at::mps diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSGeneratorImpl.h b/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSGeneratorImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..31cfaba98004e83c0aba84ead7f66b4e62552771 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSGeneratorImpl.h @@ -0,0 +1,52 @@ +// Copyright © 2022 Apple Inc. 
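Editorial note on MPSEvent / MPSEventPool above: acquireEvent returns an MPSEventPtr (a unique_ptr with a custom deleter, per the m_default_deleter member), and the m_pool stack suggests that a released event is recycled back into the pool rather than destroyed. The following is a small stand-alone C++ sketch of that pooled-ownership shape under simplified assumptions: a toy Event type, no timing or stream handling, and illustrative names throughout.

#include <cstdio>
#include <functional>
#include <memory>
#include <mutex>
#include <stack>

// Toy event type standing in for MPSEvent; only the ownership shape matters.
struct Event {
  int id = 0;
  void reset(int new_id) { id = new_id; }
};

class EventPool {
 public:
  using EventPtr = std::unique_ptr<Event, std::function<void(Event*)>>;

  // Recycle a pooled event or allocate a fresh one; the custom deleter returns
  // it to the pool instead of destroying it. The pool must outlive the pointer.
  EventPtr acquire(int id) {
    std::lock_guard<std::mutex> lock(mutex_);
    Event* e;
    if (!pool_.empty()) {
      e = pool_.top().release();
      pool_.pop();
    } else {
      e = new Event{};
    }
    e->reset(id);
    return EventPtr(e, [this](Event* ev) { release(ev); });
  }

 private:
  void release(Event* e) {
    std::lock_guard<std::mutex> lock(mutex_);
    pool_.push(std::unique_ptr<Event>(e));
  }

  std::mutex mutex_;
  std::stack<std::unique_ptr<Event>> pool_;
};

int main() {
  EventPool pool;
  { auto e = pool.acquire(1); std::printf("using event %d\n", e->id); }  // returned to pool here
  auto e2 = pool.acquire(2);  // recycles the same allocation
  std::printf("using event %d\n", e2->id);
  return 0;
}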
+ +#pragma once + +#include +#include +#include +#include + +namespace at { +namespace mps::detail { + +static const uint32_t PHILOX_STATE_N = 7; +struct rng_data_pod { + std::array state{1}; + uint64_t seed = default_rng_seed_val; +}; + +TORCH_API const Generator& getDefaultMPSGenerator(); +TORCH_API Generator createMPSGenerator(uint64_t seed_val = default_rng_seed_val); + +} // namespace mps::detail + +struct TORCH_API MPSGeneratorImpl : public c10::GeneratorImpl { + // Constructors + MPSGeneratorImpl(uint64_t seed_in = default_rng_seed_val); + ~MPSGeneratorImpl() override = default; + + // MPSGeneratorImpl methods + std::shared_ptr clone() const; + void set_current_seed(uint64_t seed) override; + void set_offset(uint64_t offset) override; + uint64_t get_offset() const override; + uint64_t current_seed() const override; + uint64_t seed() override; + void set_state(const c10::TensorImpl& new_state) override; + c10::intrusive_ptr get_state() const override; + void update_philox_counters(); + + void set_engine(at::Philox4_32 engine) { engine_ = engine; }; + at::Philox4_32 engine() { return engine_; }; + uint32_t* state_data() { return data_.state.data(); } + static DeviceType device_type() { return DeviceType::MPS; }; + +private: + mps::detail::rng_data_pod data_; + at::Philox4_32 engine_; + + MPSGeneratorImpl* clone_impl() const override; +}; + +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSGuardImpl.h b/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSGuardImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..fe43fcf40fd34a38c9633b0c7a01e5668bfc9aba --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSGuardImpl.h @@ -0,0 +1,174 @@ +// Copyright © 2022 Apple Inc. 
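Editorial note on MPSGeneratorImpl above: the generator keeps a small Philox-style state (a seed plus a short counter array) and exposes set_offset/get_offset and update_philox_counters; the usual purpose of such offsets in counter-based generators is to let successive kernel launches consume disjoint slices of the random stream. The sketch below shows only that generic seed-and-offset bookkeeping, not the actual MPS implementation; CounterRngState and reserve are illustrative names.

#include <cstdint>
#include <cstdio>
#include <utility>

// Generic counter-based RNG bookkeeping: kernels capture a (seed, offset) pair
// and the host advances the offset so the next launch starts where the
// previous one stopped.
class CounterRngState {
 public:
  explicit CounterRngState(uint64_t seed) : seed_(seed), offset_(0) {}

  // Reserve `increment` counter values for one launch and return the
  // (seed, starting offset) the kernel should use.
  std::pair<uint64_t, uint64_t> reserve(uint64_t increment) {
    uint64_t start = offset_;
    offset_ += increment;
    return {seed_, start};
  }

  void set_seed(uint64_t seed) { seed_ = seed; offset_ = 0; }

 private:
  uint64_t seed_;
  uint64_t offset_;
};

int main() {
  CounterRngState rng(42);
  auto a = rng.reserve(4);  // e.g. a kernel drawing 4 values per thread
  auto b = rng.reserve(4);  // the next launch gets a disjoint counter range
  std::printf("seed=%llu offsets: %llu then %llu\n",
              (unsigned long long)a.first, (unsigned long long)a.second,
              (unsigned long long)b.second);
  return 0;
}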
+ +#pragma once +#include +#include +#include +#include +#include +#include + +#ifdef __OBJC__ +#include +#include +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at::mps { + +typedef MPSEvent* mpsEvent_t; + +// TODO: Move the MPSGuardImpl to inherit from NoOpDeviceGuardImpl +// https://github.com/pytorch/pytorch/issues/77170 +struct TORCH_API MPSGuardImpl final : public c10::impl::DeviceGuardImplInterface { + static constexpr c10::DeviceType static_type = c10::DeviceType::MPS; + + // constructor + MPSGuardImpl() {} + explicit MPSGuardImpl(c10::DeviceType t) { + TORCH_INTERNAL_ASSERT(t == c10::DeviceType::MPS); + } + + // returns the type + c10::DeviceType type() const override { + return c10::DeviceType::MPS; + } + + Device exchangeDevice(Device d) const override { + return Device(c10::DeviceType::MPS, 0); + } + + Device getDevice() const override { + return Device(c10::DeviceType::MPS, 0); + } + + c10::optional uncheckedGetDevice() const noexcept { + return Device(c10::DeviceType::MPS, 0); + } + + void setDevice(Device d) const override { + TORCH_INTERNAL_ASSERT(d.is_mps()); + } + + void uncheckedSetDevice(Device d) const noexcept override { + // TODO: Currently setting only device 0 + } + + Stream getStream(Device d) const noexcept override { + return Stream(Stream::DEFAULT, Device(c10::DeviceType::MPS, 0)); + } + + Stream getDefaultStream(Device d) const override { + return Stream(Stream::DEFAULT, Device(c10::DeviceType::MPS, 0)); + } + + // NB: These do NOT set the current device + Stream exchangeStream(Stream s) const noexcept override { + return Stream(Stream::DEFAULT, Device(c10::DeviceType::MPS, 0)); + } + DeviceIndex deviceCount() const noexcept override { + if (at::hasMPS()) { + //TODO: extend it for multi-device case + return 1; + } else { + return 0; + } + } + + // Event-related functions + void createEvent( + mpsEvent_t* event, + const EventFlag flag) const; + + void destroyEvent( + void* event, + const DeviceIndex device_index) const noexcept override; + + void record( + void** event, + const Stream& stream, + const DeviceIndex device_index, + const EventFlag flag) const override; + + void block( + void* event, + const Stream& stream) const override; + + bool queryEvent(void* event) const override; + +}; + +/// A variant of OptionalDeviceGuard that is specialized for MPS. +struct OptionalMPSGuard { + explicit OptionalMPSGuard() : guard_() {} + + explicit OptionalMPSGuard(c10::optional device_opt) + : guard_(device_opt) {} + + /// Set the current MPS device to the passed device index, if it is not + /// nullopt + explicit OptionalMPSGuard(c10::optional device_index_opt) + : guard_(device_index_opt) {} + + // Copy is not allowed + OptionalMPSGuard(const OptionalMPSGuard&) = delete; + OptionalMPSGuard& operator=(const OptionalMPSGuard&) = delete; + OptionalMPSGuard(OptionalMPSGuard&& other) = delete; + OptionalMPSGuard& operator=(OptionalMPSGuard&& other) = delete; + + /// Sets the MPS device to the given device, initializing the guard if it + /// is not already initialized. Errors if the given device is not a MPS + /// device. + void set_device(Device device) { + guard_.set_device(device); + } + + /// Sets the MPS device to the given device, initializing the guard if it is + /// not already initialized. Errors if the given device is not a MPS device. 
+ void reset_device(Device device) { + guard_.reset_device(device); + } + + /// Sets the MPS device to the given device index, initializing the guard if + /// it is not already initialized. + void set_index(DeviceIndex device_index) { + guard_.set_index(device_index); + } + + /// Returns the device that was set immediately prior to initialization of the + /// guard, or nullopt if the guard is uninitialized. + c10::optional original_device() const { + return guard_.original_device(); + } + + /// Returns the most recent device that was set using this device guard, + /// either from construction, or via set_device, if the guard is initialized, + /// or nullopt if the guard is uninitialized. + c10::optional current_device() const { + return guard_.current_device(); + } + + /// Restore the original MPS device, resetting this guard to uninitialized + /// state. + void reset() { + guard_.reset(); + } + + private: + c10::impl::InlineOptionalDeviceGuard guard_; +}; + + +C10_REGISTER_GUARD_IMPL(MPS, MPSGuardImpl); + +} // namespace at::mps diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSHooks.h b/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSHooks.h new file mode 100644 index 0000000000000000000000000000000000000000..667430eaf8114f992ce5bc1dc6139065a60c838c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSHooks.h @@ -0,0 +1,57 @@ +// Copyright © 2022 Apple Inc. + +#pragma once + +#include +#include +#include +#include + +namespace at::mps { + +// The real implementation of MPSHooksInterface +struct MPSHooks : public at::MPSHooksInterface { + MPSHooks(at::MPSHooksArgs) {} + void initMPS() const override; + + // MPSDevice interface + bool hasMPS() const override; + bool isOnMacOSorNewer(unsigned major, unsigned minor) const override; + + // MPSGeneratorImpl interface + const Generator& getDefaultMPSGenerator() const override; + + // MPSStream interface + void deviceSynchronize() const override; + void commitStream() const override; + void* getCommandBuffer() const override; + void* getDispatchQueue() const override; + + // MPSAllocator interface + Allocator* getMPSDeviceAllocator() const override; + void emptyCache() const override; + size_t getCurrentAllocatedMemory() const override; + size_t getDriverAllocatedMemory() const override; + void setMemoryFraction(double ratio) const override; + + // MPSProfiler interface + void profilerStartTrace(const std::string& mode, bool waitUntilCompleted) const override; + void profilerStopTrace() const override; + + // MPSEvent interface + uint32_t acquireEvent(bool enable_timing) const override; + void releaseEvent(uint32_t event_id) const override; + void recordEvent(uint32_t event_id) const override; + void waitForEvent(uint32_t event_id) const override; + void synchronizeEvent(uint32_t event_id) const override; + bool queryEvent(uint32_t event_id) const override; + double elapsedTimeOfEvents(uint32_t start_event_id, uint32_t end_event_id) const override; + + // Compatibility with Accelerator API + bool hasPrimaryContext(DeviceIndex device_index) const override { + // When MPS is available, it is always in use for the one device. 
+ return true; + } +}; + +} // namespace at::mps diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSProfiler.h b/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSProfiler.h new file mode 100644 index 0000000000000000000000000000000000000000..994c50ad9e61c6b0634d80ec3921824f662de59c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSProfiler.h @@ -0,0 +1,393 @@ +// Copyright © 2022 Apple Inc. + +#pragma once + +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace at::mps { + +namespace Profiler { + +struct BaseInfo { + // profiling info types + enum class Type { + GRAPH, + KERNEL, + COPY, + CPU_FALLBACK, + }; + + BaseInfo(Type infoType, uint64_t Id, const uintptr_t Handle) : + type(infoType), profileId(Id), handle(Handle) { } + virtual ~BaseInfo() = default; + + // type of profiling info + Type type; + // unique profile ID for execution instances of operations or copies + uint64_t profileId; + // ID generated by os_signpost + // since it's possible to use event and interval-based signposts at the + // same time, we need separate IDs for each. + os_signpost_id_t eventSignpostId = 0, intervalSignpostId = 0; + // accumulated GPU time in ms (obtained from CompletionHandler's "GPUEndTime - GPUStartTime") + std::atomic totalGpuTime{0.0}; + // accumulated Scheduling time in ms (obtained from CompletionHandler's "KernelEndTime - KernelStartTime") + std::atomic totalSchedulingTime{0.0}; + // indicates if the operation or copy execution has completed + std::atomic_bool completed{false}; + // handle used to identify the profile info's instance (usually the pointer) + const uintptr_t handle; + + virtual const std::string toString(double gpuTime = 0, double schedulingTime = 0) const; + // builds a string for a tensor (format: Device:ScalarType[tensor.sizes()]) + static std::string buildTensorString(const Tensor& tensor, bool includeBufferId = false) { + if (tensor.defined()) { + std::stringstream tensorStr; + auto deviceType = tensor.device().type(); + tensorStr << c10::DeviceTypeName(deviceType); + // see comments for INCLUDE_BUFFER_ID + if (includeBufferId && deviceType == at::kMPS) { + id buffer = __builtin_bit_cast(id, tensor.storage().data()); + tensorStr << "(buf#" << (getIMPSAllocator()->getBufferId(buffer)) + << ":" << buffer.retainCount << ")"; + } + tensorStr << ":" + << tensor.scalar_type() << tensor.sizes(); + return tensorStr.str(); + } else { + return "undefined"; + } + } + static uint64_t getTime() { + return clock_gettime_nsec_np(CLOCK_MONOTONIC_RAW); + } +}; + +struct OperationInfo : BaseInfo { + OperationInfo(const void* Handle, bool IsGraph, uint64_t Id, const std::string& StrKey) : + BaseInfo(IsGraph ? 
Type::GRAPH : Type::KERNEL, Id, uintptr_t(Handle)), strKey(StrKey) { } + + uint64_t runCount = 0; + std::string strKey; + + const std::string toString(double gpuTime = 0, double schedulingTime = 0) const override; + + // builds a string for a kernel + static std::string buildKernelString(const std::string& kernelName, + const TensorList& tensors, + bool includeBufferId = false) { + std::stringstream kernelStr; + kernelStr << kernelName; + for (const Tensor& tensor: tensors) { + kernelStr << ":" << BaseInfo::buildTensorString(tensor, includeBufferId); + } + return kernelStr.str(); + } +}; + +struct CpuFbInfo : BaseInfo { + CpuFbInfo(uint64_t Id, const std::string& OpName) : + BaseInfo(Type::CPU_FALLBACK, Id, 0), opName(OpName) { } + + uint64_t runCount = 0; + // the current and total overhead of copies in bytes required to convert the Op's + // input tensors from MPS to CPU and then output from CPU back to MPS + size_t currentCopyOverhead = 0; + size_t totalCopyOverhead = 0; + std::string opName; + std::string strKey; + uint64_t startTime = 0; + + const std::string toString(double gpuTime = 0, double schedulingTime = 0) const override; + + void updateCopyOverhead(const TensorList& tensors) { + currentCopyOverhead = 0; + for (const Tensor& tensor: tensors) { + if (tensor.defined()) { + currentCopyOverhead += tensor.nbytes(); + } + } + totalCopyOverhead += currentCopyOverhead; + } +}; + +struct CopyInfo : BaseInfo { + enum class Kind { + MPS_TO_MPS, + MPS_TO_CPU, + CPU_TO_MPS, + }; + + CopyInfo(const void* Handle, size_t Length, uint64_t Id, bool IsNonBlocking, bool UsesBlitter) : + BaseInfo(Type::COPY, Id, uintptr_t(Handle)), kind(Kind::MPS_TO_MPS), + length(Length), isNonBlocking(IsNonBlocking), usesBlitter(UsesBlitter) { } + + Kind kind; + size_t length; + bool isNonBlocking; + bool usesBlitter; + std::string srcStrKey; + std::string dstStrKey; + // for copies that don't use blitters, we measure CPU time + uint64_t startTime = 0; + + const std::string toString(double gpuTime = 0, double schedulingTime = 0) const override; + + static std::string buildTensorString(const void* buffer, const OptionalTensorRef tensor, bool includeBufferId = false); + + static bool isStorageOnMPS(const void* buffer, const OptionalTensorRef tensor) { + if (tensor.has_value()) { + return tensor->device().type() == at::kMPS; + } + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(buffer); + // getUnalignedBufferSize() returns -1 if input buffer is not on MPS device + return getIMPSAllocator()->getUnalignedBufferSize(buffer) >= 0; + } + + static Kind getCopyKind(const void* srcBuffer, const void* dstBuffer, + const OptionalTensorRef srcTensor, const OptionalTensorRef dstTensor) { + const bool isSrcOnMPS = isStorageOnMPS(srcBuffer, srcTensor); + const bool isDstOnMPS = isStorageOnMPS(dstBuffer, dstTensor); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(isSrcOnMPS || isDstOnMPS); + if (isSrcOnMPS && !isDstOnMPS) { + return Kind::MPS_TO_CPU; + } else if (!isSrcOnMPS && isDstOnMPS) { + return Kind::CPU_TO_MPS; + } + return Kind::MPS_TO_MPS; + } +}; + +struct CopyStat : CopyInfo { + explicit CopyStat(std::string CopyKindStr) : + CopyInfo(nullptr, 0, 0, false, false), kindStr(std::move(CopyKindStr)) {} + // total number of copies + size_t totalCount = 0; + // number of Scalar copies (i.e., less than sizeof(int64)) + size_t scalarsCount = 0; + // number of blocking copies (i.e., require syncing to GPU) + size_t blockingCount = 0; + // number of copies that used memcpy(), instead of Metal Blit Encoder + size_t memcpyCount = 0; + // accumulated GPU time 
in ms for the scalar copies + std::atomic scalarsGpuTime{0.0}; + // copy kind in string type + std::string kindStr; +}; + +class MPSProfiler { +public: + // lower 16 bits used for profiler options + enum ProfileOptions : uint32_t { + OPTIONS_NONE = 0, + // ALL_* means, all signpost types (RUN_OPERATION|BLIT_COPY|CPU_FALLBACK, etc.) + // (used for convenience to not compute bit flags by OR-ing manually) + // trace all signpost types using events + ALL_SIGNPOST_EVENTS = (1 << 0), + // trace all signpost types using intervals + ALL_SIGNPOST_INTERVALS = (1 << 1), + // always wait for command buffer to finish executing after each commit + WAIT_UNTIL_COMPLETED = (1 << 2), + // for interval-based signposts, include the scheduling portion of + // Graph/Kernel/Copy executions as well. + // if flag is disable, only "GPU run time" is included in interval, + // and not schedule time. + INCLUDE_SCHEDULE_INTERVAL = (1 << 3), + + // use these if you need to trace signposts types individually (rarely required) + // trace signpost using intervals + USE_INTERVALS = (1 << 4), + // trace signpost by emitting events + USE_EVENTS = (1 << 5), + // used for sanity check (Change this when new option added) + OPTIONS_COUNT = (USE_EVENTS << 1) - 1, + }; + + // when adding new types, #define the type string in MPSProfiler.mm as well. + // upper 16 bits used for event types + enum SignpostTypes : uint32_t { + SIGNPOST_NONE = 0, + // trace signposts for PyTorch operation executions + RUN_OPERATION = (1 << 16), + // trace signposts for blitter copies + BLIT_COPY = (1 << 17), + // trace signposts for ops that fall back on CPU + CPU_FALLBACK = (1 << 18), + // used for sanity check (Change this when new type added) + SIGNPOST_COUNT = (CPU_FALLBACK << 1) - 1, + }; + + enum LogOptions : uint32_t { + LOG_NONE = 0, + + // Info logging options during execution + // ------------------------------------- + // prints operation info (id/key/run_count) during execution + OPERATION_INFO = (1 << 0), + // prints copy info (src/dst tensors/buffers, size, etc.) during execution + COPY_INFO = (1 << 1), + // prints CPU Fallback info (id/runCount/opName/copyOverhead) during execution + CPU_FALLBACK_INFO = (1 << 2), + + // Profiling Statistics logging options when process terminates + // ------------------------------------------------------------ + // prints all stats (OPERATION_STATS, COPY_STATS, CPU_FALLBACK_STATS) before process terminates + // this is convenient to not combine following stats bit flags manually + ALL_STATS = (1 << 3), + // prints operation stats (GPU times, run count, etc.) before process terminates + OPERATION_STATS = (1 << 4), + // prints copies stats (GPU times, copy kinds, sizes, etc.) before process terminates + COPY_STATS = (1 << 5), + // prints CPU Fallback stats (CPU times, run times, size of MPS<->CPU copies + // for tensors, etc.) before process terminates + CPU_FALLBACK_STATS = (1 << 6), + + // Metadata format options when logging the info + // --------------------------------------------- + // if enabled, includes GPU run time in metadata (i.e., GPUEndTime-GPUStartTime + // from Metal Command Buffers) (e.g., [GPU=0.324 ms]) + INCLUDE_GPU_TIME = (1 << 7), + // if enabled, includes GPU scheduling time in metadata separately + // (i.e., KernelEndTime-KernelStartTime from Metal Command Buffers) + // e.g., [GPU=0.324 ms, KRNL=0.036 ms] + INCLUDE_KERNEL_TIME = (1 << 8), + // if enabled, includes the unique buffer ID in metadata for the storage + // of a tensor that was allocated on MPSAllocator. 
This is useful (along with + // the EV "PYTORCH_DEBUG_MPS_ALLOCATOR") to identify buffers that are involved + // with various operations. + INCLUDE_BUFFER_ID = (1 << 9), + + // used for sanity check (Change this when new option added) + LOG_COUNT = (INCLUDE_BUFFER_ID << 1) - 1, + }; + + explicit MPSProfiler(); + ~MPSProfiler(); + + // the handle is either "MPSGraph*" or "id" for Metal Kernels + // the beginProfile*() functions return a profileId which is unique per graph/kernel/copy + uint64_t beginProfileKernel(const void* handle, const std::string& strKey, bool isGraph); + uint64_t beginProfileKernel(const void* handle, const std::string& kernelName, const TensorList& tensors); + uint64_t beginProfileCopy(const void* srcBuffer, const void* dstBuffer, + const OptionalTensorRef srcTensor, + const OptionalTensorRef dstTensor, + size_t length, bool isNonBlocking, bool usesBlitter = true); + uint64_t beginProfileCPUFallback(const std::string& opName, const TensorList& tensors); + void beginProfileGPUInterval(const void* handle); + + void endProfileCopy(uint64_t profileId, SyncType syncType); + void endProfileKernel(const void* handle, SyncType syncType = SyncType::NONE); + void endProfileCPUFallback(const std::string& opName); + + // these are used to hook into Python bindings for torch.mps.profiler module. + // this enables generating OS Signpost traces from MPSProfiler on-demand + // during runtime (instead of environment variables). + // The "mode" could be either "interval", "event", or both "interval,event" + // for interval-based and/or event-based signpost tracing. + void StartTrace(const string& mode, bool waitUntilCompleted); + void StopTrace(); + + // convenience functions to indicate whether signpost tracing or + // logging are enabled for the SignpostTypes + bool isOperationProfilingEnabled() const { + return (m_signpost_types & SignpostTypes::RUN_OPERATION) || + (m_log_options & (LogOptions::OPERATION_INFO | LogOptions::OPERATION_STATS)); + } + bool isCopyProfilingEnabled() const { + return (m_signpost_types & SignpostTypes::BLIT_COPY) || + (m_log_options & (LogOptions::COPY_INFO | LogOptions::COPY_STATS)); + } + bool isCPUFallbackProfilingEnabled() const { + return (m_signpost_types & SignpostTypes::CPU_FALLBACK) || + (m_log_options & (LogOptions::CPU_FALLBACK_INFO | LogOptions::CPU_FALLBACK_STATS)); + } + bool isSignpostTracingEnabled() const { + return (m_signpost_types != SignpostTypes::SIGNPOST_NONE); + } + + private: + // indicates what type of signpost types are enabled and traced by MPS profiler. + uint32_t m_signpost_types = 0; + uint32_t m_profile_options = 0; + uint32_t m_log_options = 0; + uint64_t m_kernel_counter = 0; + uint64_t m_graph_counter = 0; + uint64_t m_cpu_fb_counter = 0; + uint64_t m_copy_counter = 0; + // technically, it's possible to trace both events and intervals at the same time + // so we use separate os_log categories for them + os_log_t m_os_log_events; + os_log_t m_os_log_intervals; + // stats logging could run either from destructor or signal handler + // so this is used to check if logging has already started. + std::atomic_bool hasLoggedStats{false}; + // indicates there are pending completionHandler callbacks that haven't been called yet. 
+ std::atomic_bool hasPendingCompletionHandlers{false}; + // used to capture sigint signal to log profiling stats + static struct sigaction currentSigint, previousSigint; + + // We use the following lists for two reasons: + // 1- for interval-based signposts the "begin" point won't be in same function + // as the "end" point where we need to be able to retrieve signpost's info + // 2- if Operations info need to be logged when process ends using LogOptions::OPERATION_INFO. + + // the pointer key for this map is either "MPSGraph*" or "id" for Metal Kernels + // this list is retained and could be logged along with aggregate profiling numbers when the process ends. + std::unordered_map> m_op_info_list{}; + // the string key for this map is the op name that we fall back to execute on CPU + // this list is retained and could be logged along with aggregate profiling numbers when the process ends. + std::unordered_map> m_cpu_fb_info_list{}; + // this list contains the info for copies, and its key is the unique profileId + // which is generated from m_copy_counter + // The copyInfo list is not retained. + std::unordered_map> m_copy_info_list{}; + // a short list that contains copy stats + std::unordered_map> m_copy_stat_list{}; + + void initialize(); + void beginProfileExecution(BaseInfo& info, bool cpuExecution = false); + void endProfileExecution(BaseInfo& info, os_signpost_id_t event_signpost_id, + os_signpost_id_t interval_signpost_id, + double gpuTime, double schedulingTime); + void addProfilerScheduledHandler(BaseInfo& info); + void addProfilerCompletedHandler(BaseInfo& info, SyncType syncType); + void emitSignpostEvent(SignpostTypes signpost_type, os_signpost_id_t signpost_id, + const std::string& msg) const; + void beginSignpostInterval(SignpostTypes signpost_type, os_signpost_id_t signpost_id, + const std::string& msg) const; + void endSignpostInterval(SignpostTypes signpost_type, os_signpost_id_t signpost_id) const; + + void updateCopyStats(const CopyInfo& copyInfo, double gpuTime, double schedulingTime); + // returns true if logging the profiling info "during the execution" is enabled + bool isProfileInfoLoggingEnabled(BaseInfo::Type infoType, bool isExecutionEnded); + // logs all the profiling stats that are enabled + void logProfilingStats(); + // logs kernel profiling stats when the process ends. + void logOperationsProfilingStats(std::FILE* f) const; + // logs CPU Fallback profiling stats when the process ends. + void logCPUFallbackProfilingStats(std::FILE* f) const; + // logs copy profiling stats when the process ends. + void logCopyProfilingStats(std::FILE* f) const; + + os_signpost_id_t generateSignpostId(os_signpost_type_t signpostType, const void* ptr = nullptr); + static SignpostTypes getSignpostType(BaseInfo::Type infoType); + static void handleIntSignal(int signal); +}; + +} // namespace Profiler + +Profiler::MPSProfiler& getMPSProfiler(); + +} // namespace at::mps diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSStream.h b/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSStream.h new file mode 100644 index 0000000000000000000000000000000000000000..fbaa055109042da726dd7d54a34d0d2ed4015458 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/mps/MPSStream.h @@ -0,0 +1,133 @@ +// Copyright © 2022 Apple Inc. 
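For the profiler interface that closes just above, a minimal sketch of on-demand os_signpost tracing from C++, mirroring what the torch.mps.profiler Python bindings drive; the mode string and the run_workload callback are illustrative assumptions, not prescribed by the header:

    #include <ATen/mps/MPSProfiler.h>
    #include <functional>

    void trace_mps_workload(const std::function<void()>& run_workload) {
      auto& profiler = at::mps::getMPSProfiler();
      // "interval,event" enables both interval- and event-based signposts
      // (see ProfileOptions above); waitUntilCompleted commits synchronously.
      profiler.StartTrace(/*mode=*/"interval,event", /*waitUntilCompleted=*/true);
      run_workload();   // the MPS operations to capture in Instruments
      profiler.StopTrace();
    }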
+ +#pragma once + +#include +#include + +#include +#include +#include +#include + +#ifdef __OBJC__ +#include +#include +#include +#include +typedef id MTLCommandQueue_t; +typedef id MTLCommandBuffer_t; +typedef id MTLComputeCommandEncoder_t; +typedef id MTLSharedEvent_t; +typedef id MTLDevice_t; +#else +typedef void* MTLCommandQueue_t; +typedef void* MTLCommandQueue; +typedef void* MTLCommandBuffer_t; +typedef void* MTLCommandBuffer; +typedef void* MTLComputeCommandEncoder_t; +typedef void* MTLSharedEvent_t; +typedef void* dispatch_queue_t; +typedef void* MTLDevice_t; +#define nil NULL; +#endif + + +namespace at::mps { + +//----------------------------------------------------------------- +// MPSStream +//----------------------------------------------------------------- + +enum class SyncType { + NONE, // no commit to command buffer + COMMIT, // commit and flush the command buffer + COMMIT_AND_WAIT, // flush and wait for command buffer execution to finish + COMMIT_AND_CONTINUE,// commit and continue with a new underlying command buffer + COMMIT_ADAPTIVE, // commit adaptively based on available memory +}; + +class TORCH_API MPSStream +{ +public: + enum Unchecked { UNCHECKED }; + + /// Construct a MPSStream from a Stream. This construction is checked, + /// and will raise an error if the Stream is not, in fact, a MPS stream. + explicit MPSStream(Stream stream); + + ~MPSStream(); + MTLCommandQueue_t commandQueue() const { return _commandQueue; }; + dispatch_queue_t queue() const { return _serialQueue; } + + MPSCommandBuffer* commandBuffer(); + MTLComputeCommandEncoder_t commandEncoder(); + void endKernelCoalescing(); + void synchronize(SyncType syncType); + void fill(id buffer, uint8_t value, size_t length, size_t offset, SyncType syncType = SyncType::NONE); + void copy(id srcBuffer, id dstBuffer, + size_t length, size_t srcOffset, size_t dstOffset, + uint64_t profileId, SyncType syncType = SyncType::NONE); + void copy_and_sync(id srcBuffer, id dstBuffer, + size_t length, size_t srcOffset, size_t dstOffset, + bool non_blocking, uint64_t profileId); + void executeMPSGraph(MPSGraph* mpsGraph, NSDictionary* feeds, NSDictionary* results, SyncType syncType = SyncType::NONE); + void addCompletedHandler(MTLCommandBufferHandler block); + + /// Get the MPS device index that this stream is associated with. + c10::DeviceIndex device_index() const { return _stream.device_index(); } + + MTLCommandQueue_t stream() const { return _commandQueue; }; + + MTLDevice_t device() const { return [_commandQueue device];} + + /// Explicit conversion to Stream. 
+ Stream unwrap() const { return _stream; } + +private: + Stream _stream; + MTLCommandQueue_t _commandQueue = nil; + MPSCommandBuffer* _commandBuffer = nil; + MPSCommandBuffer* _prevCommandBuffer = nil; + MTLComputeCommandEncoder_t _commandEncoder = nil; + MPSGraphExecutionDescriptor *_executionDescriptor = nil; + MPSGraphCompilationDescriptor *_compilationDescriptor = nil; + dispatch_queue_t _serialQueue = nullptr; + // CommitAndContinue is enabled by default + bool _enableCommitAndContinue = true; + + // use synchronize() to access any of these commit functions outside MPSStream + void commit(); + void commitAndWait(); + void commitAndContinue(); + void flush(); +}; + +/** + * Get the current MPS stream + */ +TORCH_API MPSStream* getCurrentMPSStream(); + +/** + * Get the default MPS stream + */ +TORCH_API MPSStream* getDefaultMPSStream(); + +//----------------------------------------------------------------- +// MPSStreamImpl +//----------------------------------------------------------------- + +class TORCH_API MPSStreamImpl +{ + public: + /** + * Gets single instance of the MPSStream. + */ + static MPSStream* getInstance(); + + private: + static MPSStream* _stream; + MPSStreamImpl(); +}; + +} // namespace at::mps diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_physical.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_physical.h new file mode 100644 index 0000000000000000000000000000000000000000..964cd88ee08fe799050009c5bc2bdc5115a73bf6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_physical.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_conj_physical(Tensor self) -> Tensor +inline at::Tensor _conj_physical(const at::Tensor & self) { + return at::_ops::_conj_physical::call(self); +} + +// aten::_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _conj_physical_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::_conj_physical_out::call(self, out); +} +// aten::_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _conj_physical_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::_conj_physical_out::call(self, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_r2c_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_r2c_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6038230d73ce50265f1b16c36576095804735542 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_r2c_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
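A sketch of how the MPSStream interface above is typically driven from host code, assuming getCurrentMPSStream() returns a valid stream on an MPS build; the SyncType chosen here is only an example:

    #include <ATen/mps/MPSStream.h>

    void flush_pending_mps_work() {
      at::mps::MPSStream* stream = at::mps::getCurrentMPSStream();
      // COMMIT_AND_WAIT flushes the current command buffer and blocks until the
      // GPU has finished executing it (see the SyncType enum above).
      stream->synchronize(at::mps::SyncType::COMMIT_AND_WAIT);
    }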
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor _fft_r2c(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided); +TORCH_API at::Tensor & _fft_r2c_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided); +TORCH_API at::Tensor & _fft_r2c_outf(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cos_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cos_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7e04887a892eaaeafabb15f8e6fea068d87beb03 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cos_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::vector _foreach_cos(at::TensorList self); +TORCH_API void _foreach_cos_(at::TensorList self); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_erfc_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_erfc_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..5d8db13a8ccddbfe2cc7ea57945ca1f107d290fd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_erfc_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
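The generated Function.h headers such as _conj_physical above all follow one convention: a functional form, an _out overload taking the output tensor first, and an _outf overload taking it last. A short sketch, with arbitrary tensor values:

    #include <ATen/ATen.h>

    void conj_physical_demo() {
      at::Tensor x = at::randn({2, 2}, at::kComplexFloat);
      at::Tensor y = at::_conj_physical(x);   // functional form, returns a new tensor
      at::Tensor out = at::empty_like(x);
      at::_conj_physical_out(out, x);         // out variant: output argument first
      at::_conj_physical_outf(x, out);        // outf variant: output argument last
      (void)y;
    }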
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _foreach_erfc { + using schema = ::std::vector (at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_erfc") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_erfc(Tensor[] self) -> Tensor[]") + static ::std::vector call(at::TensorList self); + static ::std::vector redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self); +}; + +struct TORCH_API _foreach_erfc_ { + using schema = void (at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_erfc_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_erfc_(Tensor(a!)[] self) -> ()") + static void call(at::TensorList self); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self); +}; + +struct TORCH_API _foreach_erfc_out { + using schema = void (at::TensorList, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_erfc") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()") + static void call(at::TensorList self, at::TensorList out); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_sym_constrain_range_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_sym_constrain_range_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b83a897bbb9d257b6dba451911dac4a2c3097a87 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_sym_constrain_range_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _functional_sym_constrain_range { + using schema = at::Tensor (const at::Scalar &, c10::optional, c10::optional, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_functional_sym_constrain_range") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_functional_sym_constrain_range(Scalar size, int? min, int? 
max, Tensor dep_token) -> Tensor") + static at::Tensor call(const at::Scalar & size, c10::optional min, c10::optional max, const at::Tensor & dep_token); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & size, c10::optional min, c10::optional max, const at::Tensor & dep_token); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_reshape.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_reshape.h new file mode 100644 index 0000000000000000000000000000000000000000..1dc24869a94ca278ff357898fdf33f35fb64cdc5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_reshape.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_mkldnn_reshape(Tensor self, int[] shape) -> Tensor +inline at::Tensor _mkldnn_reshape(const at::Tensor & self, at::IntArrayRef shape) { + return at::_ops::_mkldnn_reshape::call(self, shape); +} + +// aten::_mkldnn_reshape.out(Tensor self, int[] shape, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _mkldnn_reshape_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef shape) { + return at::_ops::_mkldnn_reshape_out::call(self, shape, out); +} +// aten::_mkldnn_reshape.out(Tensor self, int[] shape, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _mkldnn_reshape_outf(const at::Tensor & self, at::IntArrayRef shape, at::Tensor & out) { + return at::_ops::_mkldnn_reshape_out::call(self, shape, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_bsc_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_bsc_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..082e81fa50147767e86ad8a247f5209dd1bb1bb6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_bsc_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _to_sparse_bsc { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_to_sparse_bsc") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_to_sparse_bsc(Tensor self, int[2] blocksize, int? 
dense_dim=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional dense_dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, c10::optional dense_dim); +}; + +struct TORCH_API _to_sparse_bsc_out { + using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_to_sparse_bsc") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_to_sparse_bsc.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional dense_dim, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, c10::optional dense_dim, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c7f0d66ef91591d7a9866a3e9376b9640ac72299 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
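The Operator.h headers above expose one struct per schema overload, each with a static call() (the unboxed entry point behind the at:: wrappers) and a redispatch() taking an explicit DispatchKeySet. A hedged sketch calling one of them directly, using _foreach_erfc from earlier; in normal code the at:: wrappers are preferred:

    #include <ATen/ATen.h>
    #include <ATen/ops/_foreach_erfc_ops.h>
    #include <vector>

    void foreach_erfc_demo(std::vector<at::Tensor>& tensors) {
      // Functional overload: "_foreach_erfc(Tensor[] self) -> Tensor[]".
      std::vector<at::Tensor> result = at::_ops::_foreach_erfc::call(tensors);
      // In-place overload: "_foreach_erfc_(Tensor(a!)[] self) -> ()".
      at::_ops::_foreach_erfc_::call(tensors);
      (void)result;
    }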
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor _upsample_nearest_exact1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales=c10::nullopt); +TORCH_API at::Tensor _upsample_nearest_exact1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales=c10::nullopt); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_csc_tensor_args.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_csc_tensor_args.h new file mode 100644 index 0000000000000000000000000000000000000000..34db094ca6ef02628a1b518b66b2bf302ace05a1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_csc_tensor_args.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_validate_sparse_csc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> () +inline void _validate_sparse_csc_tensor_args(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) { + return at::_ops::_validate_sparse_csc_tensor_args::call(ccol_indices, row_indices, values, size); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool2d_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool2d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..eac5ea58c518f4dbf8ef07e41129edc396cc4953 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool2d_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API adaptive_avg_pool2d_out { + using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_avg_pool2d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out); +}; + +struct TORCH_API adaptive_avg_pool2d { + using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_avg_pool2d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef output_size); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm.h new file mode 100644 index 0000000000000000000000000000000000000000..1a2393390a8b83144ae12112c8cf26cd173df56f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & addbmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addbmm_out::call(self, batch1, batch2, beta, alpha, out); +} +// aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & addbmm_outf(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::addbmm_out::call(self, batch1, batch2, beta, alpha, out); +} + +// aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor +inline at::Tensor addbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addbmm::call(self, batch1, batch2, beta, alpha); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_depthwise3d_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_depthwise3d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b5bcda0bd3e975d4785b134a61eefa50c282286d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_depthwise3d_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor conv_depthwise3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation); +TORCH_API at::Tensor conv_depthwise3d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/copy_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/copy_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8822660993a20651df2328825c579cb84e54e992 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/copy_compositeexplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & copy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, bool non_blocking=false); +TORCH_API at::Tensor & copy_outf(const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out); +TORCH_API at::Tensor & copy_(at::Tensor & self, const at::Tensor & src, bool non_blocking=false); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cumsum_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cumsum_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..36e356bfb0924747deeb2ff34e128cbdb2f9fff9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cumsum_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API cumsum { + using schema = at::Tensor (const at::Tensor &, int64_t, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cumsum") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cumsum(Tensor self, int dim, *, ScalarType? 
dtype=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t dim, c10::optional dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional dtype); +}; + +struct TORCH_API cumsum_ { + using schema = at::Tensor & (at::Tensor &, int64_t, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cumsum_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, int64_t dim, c10::optional dtype); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, c10::optional dtype); +}; + +struct TORCH_API cumsum_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cumsum") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, int64_t dim, c10::optional dtype, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional dtype, at::Tensor & out); +}; + +struct TORCH_API cumsum_dimname { + using schema = at::Tensor (const at::Tensor &, at::Dimname, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cumsum") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dimname") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::Dimname dim, c10::optional dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional dtype); +}; + +struct TORCH_API cumsum__dimname { + using schema = at::Tensor & (at::Tensor &, at::Dimname, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cumsum_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dimname") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, at::Dimname dim, c10::optional dtype); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, c10::optional dtype); +}; + +struct TORCH_API cumsum_dimname_out { + using schema = at::Tensor & (const at::Tensor &, at::Dimname, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cumsum") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dimname_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? 
dtype=None, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Dimname dim, c10::optional dtype, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional dtype, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/div_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/div_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7e96e21644b3e68a7c9db9e3075961bfc131c4eb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/div_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor div(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & div_(at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor div(const at::Tensor & self, const at::Tensor & other, c10::optional rounding_mode); +TORCH_API at::Tensor & div_(at::Tensor & self, const at::Tensor & other, c10::optional rounding_mode); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_sparse_backward_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_sparse_backward_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ce2ea61127e9199526ff5c41eb2a8a81e3b3a743 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_sparse_backward_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
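Every cumsum overload above carries an optional ScalarType so the accumulation dtype can be widened explicitly; a short sketch of the functional and out variants, with arbitrary values:

    #include <ATen/ATen.h>

    void cumsum_demo() {
      at::Tensor x = at::arange(6, at::kFloat).reshape({2, 3});
      // Functional overload with an explicit accumulation dtype.
      at::Tensor a = at::cumsum(x, /*dim=*/1, /*dtype=*/at::kDouble);
      // Out variant writes into a preallocated tensor of matching dtype.
      at::Tensor out = at::empty({2, 3}, at::kFloat);
      at::cumsum_out(out, x, /*dim=*/1);
      (void)a;
    }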
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor embedding_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/glu_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/glu_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..aefe9658557bff07f2d23b3d04eb4b93cefe33a5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/glu_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor glu(const at::Tensor & self, int64_t dim=-1); +TORCH_API at::Tensor & glu_out(at::Tensor & out, const at::Tensor & self, int64_t dim=-1); +TORCH_API at::Tensor & glu_outf(const at::Tensor & self, int64_t dim, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_2d_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_2d_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..a8927a8374252c8c4a556722bc837160ee860615 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_2d_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor) +inline ::std::tuple grid_sampler_2d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask) { + return at::_ops::grid_sampler_2d_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask); +} + +// aten::grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple grid_sampler_2d_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask) { + return at::_ops::grid_sampler_2d_backward_out::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1); +} +// aten::grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple grid_sampler_2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::grid_sampler_2d_backward_out::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..819e120a26567a7bc6bb961cadca5b7e1efb3816 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor hardshrink_backward(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); +TORCH_API at::Tensor & hardshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); +TORCH_API at::Tensor & hardshrink_backward_outf(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..320b0b3887a4698ce6e5eb634f4f5a0cc9c78130 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
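grid_sampler_2d_backward above returns a (grad_input, grad_grid) pair, with output_mask selecting which gradient is actually materialized; a hedged sketch of the functional form, assuming the usual NCHW input / NHW2 grid convention and the bilinear (0) / zeros (0) mode codes:

    #include <ATen/ATen.h>

    void grid_sampler_backward_demo() {
      at::Tensor input       = at::randn({1, 3, 8, 8});
      at::Tensor grid        = at::randn({1, 4, 4, 2});
      at::Tensor grad_output = at::randn({1, 3, 4, 4});
      // output_mask[0] requests grad_input, output_mask[1] requests grad_grid.
      auto grads = at::grid_sampler_2d_backward(
          grad_output, input, grid,
          /*interpolation_mode=*/0, /*padding_mode=*/0, /*align_corners=*/false,
          /*output_mask=*/{{true, true}});
      at::Tensor grad_input = std::get<0>(grads);
      at::Tensor grad_grid  = std::get<1>(grads);
      (void)grad_input; (void)grad_grid;
    }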
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API huber_loss_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, double, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::huber_loss") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out); +}; + +struct TORCH_API huber_loss { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, double); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::huber_loss") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..1c850f68d1656f952cb57efd9d0aa75b83c37f17 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_meta.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_index_add : public at::impl::MetaBase { + + template + struct TORCH_API precompute_out { + + precompute_out set_dim(int64_t value) { + static_assert(DIM == false, "dim already set"); + precompute_out ret; +ret.dim = value; +return ret; + } + + int64_t dim; + }; + using meta_return_ty = precompute_out ; + meta_return_ty meta(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha); +}; + +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a80ef847bc4414776202fc85a10bcc0c2fc0e091 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include 
+#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor index(const at::Tensor & self, const c10::List> & indices); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..246ae1182a9dc163ff8ab1065a515060eefe4aa6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_linalg_lu : public at::impl::MetaBase { + + + void meta(const at::Tensor & A, bool pivot); +}; + +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7ecf57adf7a6ee88ce14dbb2909a5b6a4d2db8a8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log2_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor log2(const at::Tensor & self); +TORCH_API at::Tensor & log2_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & log2_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & log2_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_H_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_H_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..7e20156014067ad445616bd632ffe020e061e67a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_H_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API matrix_H { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::matrix_H") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "matrix_H(Tensor(a) self) -> Tensor(a)") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_adaptive_avg_pool2d_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_adaptive_avg_pool2d_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d1dc8e9589101c2e7f0a7224f872fe508b1023b1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_adaptive_avg_pool2d_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API mkldnn_adaptive_avg_pool2d_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_adaptive_avg_pool2d_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self); +}; + +struct TORCH_API mkldnn_adaptive_avg_pool2d_backward_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_adaptive_avg_pool2d_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_rnn_layer_backward_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_rnn_layer_backward_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..21d5d7e9e9491e79390f7ea48b30a1f894352094 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_rnn_layer_backward_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API ::std::tuple mkldnn_rnn_layer_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5, at::Tensor & out6, const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace); +TORCH_API ::std::tuple mkldnn_rnn_layer_backward_outf(const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5, at::Tensor & out6); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_rnn_layer_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_rnn_layer_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..00ae70a7aae59bc05cccbeffd3f6cbc821b6aac9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_rnn_layer_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API mkldnn_rnn_layer { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, at::IntArrayRef, int64_t, int64_t, int64_t, bool, bool, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_rnn_layer") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_rnn_layer(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) -> (Tensor, Tensor, Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train); +}; + +struct TORCH_API mkldnn_rnn_layer_out { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, at::IntArrayRef, int64_t, int64_t, int64_t, bool, bool, bool, bool, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_rnn_layer") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_rnn_layer.out(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) 
out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))") + static ::std::tuple call(const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss.h new file mode 100644 index 0000000000000000000000000000000000000000..ed4208c200152007a568c45e1028a36553e98a66 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & mse_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) { + return at::_ops::mse_loss_out::call(self, target, reduction, out); +} +// aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & mse_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) { + return at::_ops::mse_loss_out::call(self, target, reduction, out); +} + +// aten::mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor +inline at::Tensor mse_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) { + return at::_ops::mse_loss::call(self, target, reduction); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nansum_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nansum_native.h new file mode 100644 index 0000000000000000000000000000000000000000..4f97d27f3c667d32332fba668737cf224b205ba3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nansum_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor nansum(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional dtype=c10::nullopt); +TORCH_API at::Tensor & nansum_out(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/polar_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/polar_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b2803f6083b6a493c068dfee89e59c2e7d79bafe --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/polar_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API polar { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::polar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "polar(Tensor abs, Tensor angle) -> Tensor") + static at::Tensor call(const at::Tensor & abs, const at::Tensor & angle); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & abs, const at::Tensor & angle); +}; + +struct TORCH_API polar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::polar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "polar.out(Tensor abs, Tensor angle, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & abs, const at::Tensor & angle, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & abs, const at::Tensor & angle, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_tensor.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..3f33f5a750db2ccc73ed231794bb2bb0bb921d98 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_tensor.h @@ -0,0 +1,67 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor +inline at::Tensor quantize_per_tensor(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype) { + return at::_ops::quantize_per_tensor::call(self, scale, zero_point, dtype); +} + +// aten::quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor +inline at::Tensor quantize_per_tensor(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) { + return at::_ops::quantize_per_tensor_tensor_qparams::call(self, scale, zero_point, dtype); +} + +// aten::quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[] +inline ::std::vector quantize_per_tensor(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) { + return at::_ops::quantize_per_tensor_tensors::call(tensors, scales, zero_points, dtype); +} + +// aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & quantize_per_tensor_out(at::Tensor & out, const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype) { + return at::_ops::quantize_per_tensor_out::call(self, scale, zero_point, dtype, out); +} +// aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & quantize_per_tensor_outf(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype, at::Tensor & out) { + return at::_ops::quantize_per_tensor_out::call(self, scale, zero_point, dtype, out); +} + +// aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & quantize_per_tensor_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) { + return at::_ops::quantize_per_tensor_tensor_qparams_out::call(self, scale, zero_point, dtype, out); +} +// aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & quantize_per_tensor_outf(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype, at::Tensor & out) { + return at::_ops::quantize_per_tensor_tensor_qparams_out::call(self, scale, zero_point, dtype, out); +} + +// aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> () +inline void quantize_per_tensor_out(at::TensorList out, at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) { + return at::_ops::quantize_per_tensor_tensors_out::call(tensors, scales, zero_points, dtype, out); +} +// aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> () +inline void quantize_per_tensor_outf(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype, at::TensorList out) { + return at::_ops::quantize_per_tensor_tensors_out::call(tensors, scales, zero_points, dtype, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad2d_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad2d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4465aa774fce171038b5677c963def97ae61a5bf --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad2d_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor replication_pad2d(const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor replication_pad2d_symint(const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & replication_pad2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor & replication_pad2d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out); +TORCH_API at::Tensor & replication_pad2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & replication_pad2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/signbit_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/signbit_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..02a5fa77d6eb669a8a83057ff7c554fdb34d6894 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/signbit_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor signbit(const at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i0_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i0_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..da22683ca6c330bb7a8b9c030671075e5f7071e8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i0_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_special_modified_bessel_i0 : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i1_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i1_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..89a3420e6e8d57a3e2c8c35d810fccb7e63118ee --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i1_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor special_modified_bessel_i1(const at::Tensor & self); +TORCH_API at::Tensor & special_modified_bessel_i1_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & special_modified_bessel_i1_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sub_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sub_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..295e9658d8ffe3b812227696874143f853593190 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sub_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor sub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1); +TORCH_API at::Tensor & sub_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1); +TORCH_API at::Tensor & sub_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out); +TORCH_API at::Tensor & sub_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_mkldnn_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_mkldnn_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7fb62745e4e0b9c6e942bac48a5d172592a30a4f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_mkldnn_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & to_mkldnn_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt); +TORCH_API at::Tensor & to_mkldnn_outf(const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out); + +} // namespace compositeexplicitautograd
} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unique_dim_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unique_dim_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..bd7d7ce7f12b7e0d88ebd15d3c00e5a58f0f4ec9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unique_dim_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API unique_dim { + using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, int64_t, bool, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::unique_dim") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)") + static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts); + static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts); +}; + +struct TORCH_API unique_dim_out { + using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, int64_t, bool, bool, bool, at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::unique_dim") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))") + static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); + static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_complex_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_complex_native.h new file mode 100644 index 0000000000000000000000000000000000000000..389748fc411118879791a56d906eb5f18e6492d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_complex_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor view_as_complex(const at::Tensor & self); +} // namespace native +} // namespace at