diff --git a/ckpts/universal/global_step120/zero/10.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/10.attention.query_key_value.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..a4c44c3a4c3e7f25e227c58e7e9bfb69f5122fb8 --- /dev/null +++ b/ckpts/universal/global_step120/zero/10.attention.query_key_value.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5e305378077691c1e1707ac61e6e781ba77912aebb0b739739f4a27787a6730 +size 50332828 diff --git a/ckpts/universal/global_step120/zero/23.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/23.post_attention_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..92012840eaea0a8f1568ad1420da39da5dffe44f --- /dev/null +++ b/ckpts/universal/global_step120/zero/23.post_attention_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60f882c18d2a4a922d2308c0a7d8da218205c905605fe03eaa8fc9b5c9d9c833 +size 9372 diff --git a/ckpts/universal/global_step120/zero/23.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/23.post_attention_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..58e566fd0299eda510bd0d229627668be701dbc4 --- /dev/null +++ b/ckpts/universal/global_step120/zero/23.post_attention_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:552f7b41d62cf60240a4300545947e080c170e5eb6293ccc3292eba26ac46f77 +size 9387 diff --git a/ckpts/universal/global_step120/zero/23.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step120/zero/23.post_attention_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..43a1159a428f2b66874eb93fc7a5e33c2ce7b77b --- /dev/null +++ b/ckpts/universal/global_step120/zero/23.post_attention_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51af4b0c5807cff211952cd04d0003ed554f95d3e2ee55dd51846d5bc5d699c9 +size 9293 diff --git a/venv/lib/python3.10/site-packages/torch/include/THC/THCAtomics.cuh b/venv/lib/python3.10/site-packages/torch/include/THC/THCAtomics.cuh new file mode 100644 index 0000000000000000000000000000000000000000..e4b1dc5f8da32e759c4df8c080b8e7630ca7c4c8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/THC/THCAtomics.cuh @@ -0,0 +1,3 @@ +#pragma once +// TODO: Remove once torchvision has been updated to use the ATen header +#include diff --git a/venv/lib/python3.10/site-packages/torch/include/THC/THCDeviceUtils.cuh b/venv/lib/python3.10/site-packages/torch/include/THC/THCDeviceUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..03294d0c846ed085e973ba7ae7a54d08dee1963b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/THC/THCDeviceUtils.cuh @@ -0,0 +1,3 @@ +#pragma once +// TODO: Remove this header +#include diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/core/Backend.h b/venv/lib/python3.10/site-packages/torch/include/c10/core/Backend.h new file mode 100644 index 0000000000000000000000000000000000000000..d298f0d697b2c24f18846a24af9df17aa4d4c145 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/core/Backend.h @@ -0,0 +1,388 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace c10 { + +/** + * This legacy enum class defines the set of 
backends supported by old school, + * code generated Type-based ATen. A "backend" in this sense roughly + * corresponds to the cartesian product of (device type, layout), but restricted + * only to combinations which we actually have kernels for. Backend does NOT + * include dtype. + * + * The reason we are sunsetting this enum class is because it doesn't allow for + * open registration; e.g., if you want to add SparseXLA, you'd have to + * edit this enum; you wouldn't be able to do it out of tree. DispatchKey is + * the replacement for Backend which supports open registration. + * + * NB: The concept of 'Backend' here disagrees with the notion of backend + * exposed to users in torch.backends. Backend here is something like "CPU" + * or "SparseCUDA"; backend in torch.backends is something like "MKL" or + * "CUDNN". + */ +enum class Backend { + CPU, + CUDA, + HIP, + VE, + FPGA, + IPU, + XPU, + SparseCPU, + SparseCUDA, + SparseCsrCPU, + SparseCsrCUDA, + SparseHIP, + SparseVE, + SparseXPU, + SparsePrivateUse1, + SparseCsrHIP, + SparseCsrVE, + SparseCsrXPU, + SparseCsrPrivateUse1, + ORT, + XLA, + Vulkan, + Metal, + Meta, + QuantizedCPU, + QuantizedCUDA, + QuantizedXPU, + QuantizedPrivateUse1, + Undefined, + MkldnnCPU, + MPS, + HPU, + Lazy, + MTIA, + PrivateUse1, + NumOptions +}; + +static inline Backend dispatchKeyToBackend(DispatchKey t) { + if (t == DispatchKey::CPU || t == DispatchKey::AutogradCPU) { + return Backend::CPU; + } else if (t == DispatchKey::CUDA || t == DispatchKey::AutogradCUDA) { + return Backend::CUDA; + } else if (t == DispatchKey::HIP) { + return Backend::HIP; + } else if (t == DispatchKey::VE) { + return Backend::VE; + } else if (t == DispatchKey::FPGA) { + return Backend::FPGA; + } else if (t == DispatchKey::ORT) { + return Backend::ORT; + } else if (t == DispatchKey::XLA || t == DispatchKey::AutogradXLA) { + return Backend::XLA; + } else if (t == DispatchKey::Lazy || t == DispatchKey::AutogradLazy) { + return Backend::Lazy; + } else if (t == DispatchKey::MPS || t == DispatchKey::AutogradMPS) { + return Backend::MPS; + } else if (t == DispatchKey::Vulkan) { + return Backend::Vulkan; + } else if (t == DispatchKey::Metal) { + return Backend::Metal; + } else if (t == DispatchKey::Meta) { + return Backend::Meta; + } else if (t == DispatchKey::SparseCPU) { + return Backend::SparseCPU; + } else if (t == DispatchKey::SparseCUDA) { + return Backend::SparseCUDA; + } else if (t == DispatchKey::SparseHIP) { + return Backend::SparseHIP; + } else if (t == DispatchKey::SparseVE) { + return Backend::SparseVE; + } else if (t == DispatchKey::SparsePrivateUse1) { + return Backend::SparsePrivateUse1; + } else if (t == DispatchKey::SparseCsrCPU) { + return Backend::SparseCsrCPU; + } else if (t == DispatchKey::SparseCsrCUDA) { + return Backend::SparseCsrCUDA; + } else if (t == DispatchKey::SparseCsrHIP) { + return Backend::SparseCsrHIP; + } else if (t == DispatchKey::SparseCsrVE) { + return Backend::SparseCsrVE; + } else if (t == DispatchKey::SparseCsrPrivateUse1) { + return Backend::SparseCsrPrivateUse1; + } else if (t == DispatchKey::MkldnnCPU) { + return Backend::MkldnnCPU; + } else if (t == DispatchKey::QuantizedCPU) { + return Backend::QuantizedCPU; + } else if (t == DispatchKey::QuantizedCUDA) { + return Backend::QuantizedCUDA; + } else if (t == DispatchKey::IPU || t == DispatchKey::AutogradIPU) { + return Backend::IPU; + } else if (t == DispatchKey::XPU || t == DispatchKey::AutogradXPU) { + return Backend::XPU; + } else if (t == DispatchKey::SparseXPU) { + return Backend::SparseXPU; 
+ } else if (t == DispatchKey::SparseCsrXPU) { + return Backend::SparseCsrXPU; + } else if (t == DispatchKey::QuantizedXPU) { + return Backend::QuantizedXPU; + } else if (t == DispatchKey::QuantizedPrivateUse1) { + return Backend::QuantizedPrivateUse1; + } else if (t == DispatchKey::HPU || t == DispatchKey::AutogradHPU) { + return Backend::HPU; + } else if (t == DispatchKey::MTIA || t == DispatchKey::AutogradMTIA) { + return Backend::MTIA; + } else if ( + t == DispatchKey::PrivateUse1 || t == DispatchKey::AutogradPrivateUse1) { + return Backend::PrivateUse1; + } else if (t == DispatchKey::Undefined) { + return Backend::Undefined; + } else { + TORCH_CHECK(false, "Unrecognized tensor type ID: ", t); + } +} + +static inline DispatchKey backendToDispatchKey(Backend b) { + switch (b) { + case Backend::CPU: + return DispatchKey::CPU; + case Backend::CUDA: + return DispatchKey::CUDA; + case Backend::HIP: + return DispatchKey::HIP; + case Backend::VE: + return DispatchKey::VE; + case Backend::FPGA: + return DispatchKey::FPGA; + case Backend::ORT: + return DispatchKey::ORT; + case Backend::XLA: + return DispatchKey::XLA; + case Backend::Lazy: + return DispatchKey::Lazy; + case Backend::IPU: + return DispatchKey::IPU; + case Backend::XPU: + return DispatchKey::XPU; + case Backend::SparseXPU: + return DispatchKey::SparseXPU; + case Backend::SparseCsrXPU: + return DispatchKey::SparseCsrXPU; + case Backend::SparseCPU: + return DispatchKey::SparseCPU; + case Backend::SparseCUDA: + return DispatchKey::SparseCUDA; + case Backend::SparseHIP: + return DispatchKey::SparseHIP; + case Backend::SparseVE: + return DispatchKey::SparseVE; + case Backend::SparsePrivateUse1: + return DispatchKey::SparsePrivateUse1; + case Backend::SparseCsrCPU: + return DispatchKey::SparseCsrCPU; + case Backend::SparseCsrCUDA: + return DispatchKey::SparseCsrCUDA; + case Backend::SparseCsrHIP: + return DispatchKey::SparseCsrHIP; + case Backend::SparseCsrVE: + return DispatchKey::SparseCsrVE; + case Backend::SparseCsrPrivateUse1: + return DispatchKey::SparseCsrPrivateUse1; + case Backend::MkldnnCPU: + return DispatchKey::MkldnnCPU; + case Backend::Vulkan: + return DispatchKey::Vulkan; + case Backend::Metal: + return DispatchKey::Metal; + case Backend::Meta: + return DispatchKey::Meta; + case Backend::QuantizedCPU: + return DispatchKey::QuantizedCPU; + case Backend::QuantizedCUDA: + return DispatchKey::QuantizedCUDA; + case Backend::QuantizedPrivateUse1: + return DispatchKey::QuantizedPrivateUse1; + case Backend::Undefined: + return DispatchKey::Undefined; + case Backend::MPS: + return DispatchKey::MPS; + case Backend::HPU: + return DispatchKey::HPU; + case Backend::MTIA: + return DispatchKey::MTIA; + case Backend::PrivateUse1: + return DispatchKey::PrivateUse1; + default: + throw std::runtime_error("Unknown backend"); + } +} + +static inline DeviceType backendToDeviceType(Backend b) { + switch (b) { + case Backend::CPU: + case Backend::MkldnnCPU: + case Backend::SparseCPU: + case Backend::SparseCsrCPU: + case Backend::QuantizedCPU: + return DeviceType::CPU; + case Backend::CUDA: + case Backend::SparseCUDA: + case Backend::QuantizedCUDA: + case Backend::SparseCsrCUDA: + return DeviceType::CUDA; + case Backend::HIP: + return DeviceType::HIP; + case Backend::VE: + return DeviceType::VE; + case Backend::FPGA: + return DeviceType::FPGA; + case Backend::ORT: + return DeviceType::ORT; + case Backend::XLA: + return DeviceType::XLA; + case Backend::Lazy: + return DeviceType::Lazy; + case Backend::SparseHIP: + return DeviceType::HIP; + case 
Backend::SparseVE: + return DeviceType::VE; + case Backend::SparseCsrHIP: + return DeviceType::HIP; + case Backend::SparseCsrVE: + return DeviceType::VE; + case Backend::IPU: + return DeviceType::IPU; + case Backend::XPU: + case Backend::SparseXPU: + case Backend::SparseCsrXPU: + case Backend::QuantizedXPU: + return DeviceType::XPU; + case Backend::Vulkan: + return DeviceType::Vulkan; + case Backend::Metal: + return DeviceType::Metal; + case Backend::Meta: + return DeviceType::Meta; + case Backend::MPS: + return DeviceType::MPS; + case Backend::HPU: + return DeviceType::HPU; + case Backend::MTIA: + return DeviceType::MTIA; + case Backend::PrivateUse1: + case Backend::SparsePrivateUse1: + case Backend::SparseCsrPrivateUse1: + case Backend::QuantizedPrivateUse1: + return DeviceType::PrivateUse1; + case Backend::Undefined: + TORCH_CHECK(false, "Undefined backend is not a valid device type"); + default: + TORCH_CHECK(false, "Unknown backend"); + } +} + +// TODO: This probably shouldn't actually be static inline +static inline const char* toString(Backend b) { + switch (b) { + case Backend::CPU: + return "CPU"; + case Backend::CUDA: + return "CUDA"; + case Backend::HIP: + return "HIP"; + case Backend::VE: + return "VE"; + case Backend::FPGA: + return "FPGA"; + case Backend::XPU: + return "XPU"; + case Backend::IPU: + return "IPU"; + case Backend::ORT: + return "ORT"; + case Backend::XLA: + return "XLA"; + case Backend::Lazy: + return "Lazy"; + case Backend::MPS: + return "MPS"; + case Backend::SparseCPU: + return "SparseCPU"; + case Backend::SparseCUDA: + return "SparseCUDA"; + case Backend::SparseHIP: + return "SparseHIP"; + case Backend::SparseVE: + return "SparseVE"; + case Backend::SparseXPU: + return "SparseXPU"; + case Backend::SparsePrivateUse1: + return "SparsePrivateUse1"; + case Backend::SparseCsrCPU: + return "SparseCsrCPU"; + case Backend::SparseCsrCUDA: + return "SparseCsrCUDA"; + case Backend::SparseCsrHIP: + return "SparseCsrHIP"; + case Backend::SparseCsrVE: + return "SparseCsrVE"; + case Backend::SparseCsrXPU: + return "SparseCsrXPU"; + case Backend::SparseCsrPrivateUse1: + return "SparseCsrPrivateUse1"; + case Backend::MkldnnCPU: + return "MkldnnCPU"; + case Backend::Vulkan: + return "Vulkan"; + case Backend::Metal: + return "Metal"; + case Backend::Meta: + return "Meta"; + case Backend::QuantizedCPU: + return "QuantizedCPU"; + case Backend::QuantizedCUDA: + return "QuantizedCUDA"; + case Backend::QuantizedXPU: + return "QuantizedXPU"; + case Backend::QuantizedPrivateUse1: + return "QuantizedPrivateUse1"; + case Backend::HPU: + return "HPU"; + case Backend::MTIA: + return "MTIA"; + case Backend::PrivateUse1: + return "PrivateUseOne"; + default: + return "UNKNOWN_BACKEND"; + } +} + +static inline bool isSparse(Backend b) { + switch (b) { + case Backend::SparseXPU: + case Backend::SparseCPU: + case Backend::SparseCUDA: + case Backend::SparseHIP: + case Backend::SparseVE: + case Backend::SparsePrivateUse1: + return true; + default: + return false; + } +} + +static inline bool isSparseCsr(Backend b) { + switch (b) { + case Backend::SparseCsrXPU: + case Backend::SparseCsrCPU: + case Backend::SparseCsrCUDA: + case Backend::SparseCsrHIP: + case Backend::SparseCsrVE: + case Backend::SparseCsrPrivateUse1: + return true; + default: + return false; + } +} + +} // namespace c10 diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/core/CPUAllocator.h b/venv/lib/python3.10/site-packages/torch/include/c10/core/CPUAllocator.h new file mode 100644 index 
0000000000000000000000000000000000000000..98debb9db50ddffbca6ff4fc567e895d482d27c9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/core/CPUAllocator.h @@ -0,0 +1,59 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include + +// TODO: rename to c10 +C10_DECLARE_bool(caffe2_report_cpu_memory_usage); + +namespace c10 { + +using MemoryDeleter = void (*)(void*); + +// A helper function that is basically doing nothing. +C10_API void NoDelete(void*); + +// A simple struct that is used to report C10's memory allocation, +// deallocation status and out-of-memory events to the profiler +class C10_API ProfiledCPUMemoryReporter { + public: + ProfiledCPUMemoryReporter() = default; + void New(void* ptr, size_t nbytes); + void OutOfMemory(size_t nbytes); + void Delete(void* ptr); + + private: + std::mutex mutex_; + std::unordered_map size_table_; + size_t allocated_ = 0; + size_t log_cnt_ = 0; +}; + +C10_API ProfiledCPUMemoryReporter& profiledCPUMemoryReporter(); + +// Get the CPU Allocator. +C10_API at::Allocator* GetCPUAllocator(); +// Sets the CPU allocator to the given allocator: the caller gives away the +// ownership of the pointer. +C10_API void SetCPUAllocator(at::Allocator* alloc, uint8_t priority = 0); + +// Get the Default CPU Allocator +C10_API at::Allocator* GetDefaultCPUAllocator(); + +// Get the Default Mobile CPU Allocator +C10_API at::Allocator* GetDefaultMobileCPUAllocator(); + +// The CPUCachingAllocator is experimental and might disappear in the future. +// The only place that uses it is in StaticRuntime. +// Set the CPU Caching Allocator +C10_API void SetCPUCachingAllocator(Allocator* alloc, uint8_t priority = 0); +// Get the CPU Caching Allocator +C10_API Allocator* GetCPUCachingAllocator(); + +} // namespace c10 diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/core/CompileTimeFunctionPointer.h b/venv/lib/python3.10/site-packages/torch/include/c10/core/CompileTimeFunctionPointer.h new file mode 100644 index 0000000000000000000000000000000000000000..a5fbd1f3e1f3849b4585a623c37e2a5cf2a0a924 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/core/CompileTimeFunctionPointer.h @@ -0,0 +1,57 @@ +#pragma once + +#include +#include + +namespace c10 { + +/** + * Represent a function pointer as a C++ type. + * This allows using the function pointer as a type + * in a template and calling it from inside the template + * allows the compiler to inline the call because it + * knows the function pointer at compile time. 
+ * + * Example 1: + * int add(int a, int b) {return a + b;} + * using Add = TORCH_FN_TYPE(add); + * template struct Executor { + * int execute(int a, int b) { + * return Func::func_ptr()(a, b); + * } + * }; + * Executor executor; + * EXPECT_EQ(3, executor.execute(1, 2)); + * + * Example 2: + * int add(int a, int b) {return a + b;} + * template int execute(Func, int a, int b) { + * return Func::func_ptr()(a, b); + * } + * EXPECT_EQ(3, execute(TORCH_FN(add), 1, 2)); + */ +template +struct CompileTimeFunctionPointer final { + static_assert( + guts::is_function_type::value, + "TORCH_FN can only wrap function types."); + using FuncType = FuncType_; + + static constexpr FuncType* func_ptr() { + return func_ptr_; + } +}; + +template +struct is_compile_time_function_pointer : std::false_type {}; +template +struct is_compile_time_function_pointer< + CompileTimeFunctionPointer> : std::true_type {}; + +} // namespace c10 + +#define TORCH_FN_TYPE(func) \ + ::c10::CompileTimeFunctionPointer< \ + std::remove_pointer_t>, \ + func> +#define TORCH_FN(func) TORCH_FN_TYPE(func)() diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/core/Contiguity.h b/venv/lib/python3.10/site-packages/torch/include/c10/core/Contiguity.h new file mode 100644 index 0000000000000000000000000000000000000000..36f41b6251c0cc64f680499c08799b1201456f3e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/core/Contiguity.h @@ -0,0 +1,129 @@ +#pragma once +#include +#include +#include +#include +#include + +#include +#include + +namespace c10 { + +template +bool _compute_contiguous(ArrayRef sizes, ArrayRef strides, T numel) { + bool is_contiguous = true; + if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_eq(numel, 0))) { + return is_contiguous; + } + T z = 1; + // NB: make sure we do signed arithmetic + for (int64_t d = int64_t(sizes.size()) - 1; d >= 0; d--) { + const auto& size_d = sizes[d]; + if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(size_d, 1))) { + if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_eq(strides[d], z))) { + z *= size_d; + } else { + is_contiguous = false; + break; + } + } + } + return is_contiguous; +} + +template +bool _compute_channels_last_contiguous_2d( + ArrayRef sizes, + ArrayRef strides) { + // Please don't combine these code, constant array is used here to let + // compiler fully unroll the loop to get better performance + switch (sizes.size()) { + case 4: { + T expected = 1; + for (auto& d : {1, 3, 2, 0}) { + const auto& size_d = sizes[d]; + if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(size_d, 1))) { + if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(strides[d], expected))) { + return false; + } + expected *= size_d; + } + } + return true; + } + // NOLINTNEXTLINE(bugprone-branch-clone) + case 3: + // TODO dim == 3 case will be enabled once it is fully tested + return false; + default: + return false; + } +} + +template +bool _compute_channels_last_contiguous_3d( + ArrayRef sizes, + ArrayRef strides) { + // Please don't combine these code, constant array is used here to let + // compiler fully unroll the loop to get better performance + switch (sizes.size()) { + case 5: { + T expected = 1; + for (auto& d : {1, 4, 3, 2, 0}) { + const auto& size_d = sizes[d]; + if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(size_d, 1))) { + if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(strides[d], expected))) { + return false; + } + expected *= size_d; + } + } + return true; + } + // NOLINTNEXTLINE(bugprone-branch-clone) + case 4: + // TODO dim == 4 case will be enabled once it is fully tested + return false; + default: + return false; + } +} + +template +bool 
_compute_non_overlapping_and_dense( + ArrayRef sizes, + ArrayRef strides) { + auto dim = sizes.size(); + if (dim == 1) { + return sizes[0] < 2 || strides[0] == 1; + } + SmallVector perm; + perm.resize(dim); + for (const auto i : c10::irange(dim)) { + perm[i] = i; + } + // Sort by strides, leaving 0 and 1 sized dims at the end of the array + std::sort(perm.begin(), perm.end(), [&](int64_t a, int64_t b) { + if (sizes[a] < 2) { + return false; + } else if (sizes[b] < 2) { + return true; + } + return strides[a] < strides[b]; + }); + T require_stride = 1; + for (const auto i : c10::irange(dim)) { + const auto& size_perm_i = sizes[perm[i]]; + if (size_perm_i < 2) { + return true; + } + if (strides[perm[i]] != require_stride) { + return false; + } + require_stride *= size_perm_i; + } + return true; +} + +} // namespace c10 diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/core/DefaultTensorOptions.h b/venv/lib/python3.10/site-packages/torch/include/c10/core/DefaultTensorOptions.h new file mode 100644 index 0000000000000000000000000000000000000000..284af1388ef648df356cf13f2737b784fc269a73 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/core/DefaultTensorOptions.h @@ -0,0 +1,45 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace c10 { + +struct TensorOptions; + +/// Like TensorOptions, but all fields are guaranteed to be filled. +struct DefaultTensorOptions { + DefaultTensorOptions() = default; + + caffe2::TypeMeta dtype() const noexcept { + return dtype_; + } + Device device() const noexcept { + return device_; + } + Layout layout() const noexcept { + return layout_; + } + bool requires_grad() const noexcept { + return requires_grad_; + } + + // Defined in TensorOptions.h + inline DefaultTensorOptions& merge(const TensorOptions& options); + + private: + caffe2::TypeMeta dtype_ = caffe2::TypeMeta::Make(); // 64-bit + Device device_ = at::kCPU; // 32-bit + Layout layout_ = at::kStrided; // 8-bit + bool requires_grad_ = false; // 8-bit +}; + +inline const DefaultTensorOptions& getDefaultTensorOptions() { + static const auto options = DefaultTensorOptions(); + return options; +} + +} // namespace c10 diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/core/DeviceGuard.h b/venv/lib/python3.10/site-packages/torch/include/c10/core/DeviceGuard.h new file mode 100644 index 0000000000000000000000000000000000000000..d22c015cea042eac26abd75684338f6b687e635b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/core/DeviceGuard.h @@ -0,0 +1,199 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace c10 { + +/// RAII guard that sets a certain default device in its constructor, and +/// changes it back to the device that was originally active upon destruction. +/// +/// The device is always reset to the one that was active at the time of +/// construction of the guard. Even if you `set_device` after construction, the +/// destructor will still reset the device to the one that was active at +/// construction time. +/// +/// This device guard does NOT have an uninitialized state; it is guaranteed +/// to reset a device on exit. If you are in a situation where you *might* +/// want to setup a guard (i.e., are looking for the moral equivalent +/// of optional), see OptionalDeviceGuard. +class DeviceGuard { + public: + /// No default constructor; see Note [Omitted default constructor from RAII] + explicit DeviceGuard() = delete; + + /// Set the current device to the passed Device. 
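+ ///
+ /// A minimal usage sketch (the device type and indices below are
+ /// assumptions chosen purely for illustration):
+ ///
+ ///   // suppose CUDA device 0 is current here
+ ///   {
+ ///     c10::DeviceGuard guard(c10::Device(c10::DeviceType::CUDA, 1));
+ ///     // work issued in this scope runs against CUDA device 1
+ ///   }
+ ///   // guard destroyed: CUDA device 0 is the current device again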
+ explicit DeviceGuard(Device device) : guard_(device) {} + + /// This constructor is for testing only. + explicit DeviceGuard( + Device device, + const impl::DeviceGuardImplInterface* impl) + : guard_(device, impl) {} + + /// Copy is disallowed + DeviceGuard(const DeviceGuard&) = delete; + DeviceGuard& operator=(const DeviceGuard&) = delete; + + /// Move is disallowed, as DeviceGuard does not have an uninitialized state, + /// which is required for moves on types with nontrivial destructors. + DeviceGuard(DeviceGuard&& other) = delete; + DeviceGuard& operator=(DeviceGuard&& other) = delete; + + /// Sets the device to the given one. The specified device must be consistent + /// with the device type originally specified during guard construction. + /// + /// TODO: The consistency check here is inconsistent with StreamGuard's + /// behavior with set_stream, where a stream on a different device than + /// the original one isn't an error; we just reset the stream and then + /// switch devices. + void reset_device(at::Device device) { + guard_.reset_device(device); + } + + /// This method is for testing only. + void reset_device( + at::Device device, + const impl::DeviceGuardImplInterface* impl) { + guard_.reset_device(device, impl); + } + + /// Sets the device index to the given one. The device type is inferred + /// from the original device type the guard was constructed with. + void set_index(DeviceIndex index) { + guard_.set_index(index); + } + + /// Returns the device that was set at the time the guard was constructed. + Device original_device() const { + return guard_.original_device(); + } + + /// Returns the most recent device that was set using this device guard, + /// either from construction, or via set_device. + Device current_device() const { + return guard_.current_device(); + } + + private: + impl::InlineDeviceGuard guard_; +}; + +/** + * A OptionalDeviceGuard is an RAII class that sets a device to some value on + * initialization, and resets the device to its original value on destruction. + * Morally, a OptionalDeviceGuard is equivalent to optional, but + * with extra constructors and methods as appropriate. + * + * Besides its obvious use (optionally applying a DeviceGuard), + * OptionalDeviceGuard is often also used for the following idiom: + * + * OptionalDeviceGuard g; + * for (const auto& t : tensors) { + * g.set_device(t.device()); + * do_something_with(t); + * } + * + * This usage is marginally more efficient than constructing a DeviceGuard every + * iteration of the for loop, as it avoids an unnecessary device reset. + * + * Unlike DeviceGuard, a OptionalDeviceGuard may be uninitialized. This occurs + * when you use the nullary constructor, or pass a nullopt to the constructor. + * Uninitialized OptionalDeviceGuards do *nothing*; they do not know what the + * original device was and they do not reset on destruction. This is why + * original_device() and current_device() return optional rather than + * Device (as they do in DeviceGuard), and also is why we didn't just + * provide OptionalDeviceGuard by default and hide DeviceGuard from users. + * + * The semantics of an OptionalDeviceGuard are exactly explained by thinking + * of it as an optional. In particular, an initialized + * OptionalDeviceGuard doesn't restore device to its value at construction; it + * restores device to its value *at initialization*. So if you have the + * program: + * + * setDevice(1); + * OptionalDeviceGuard g; + * setDevice(2); + * g.reset_device(Device(DeviceType::CUDA, 3)); // initializes! 
+ * + * On destruction, g will reset device to 2, rather than 1. + * + * An uninitialized OptionalDeviceGuard is distinct from a (initialized) + * DeviceGuard whose original_device_ and current_device_ match, since the + * DeviceGuard will still reset the device to original_device_. + */ +class OptionalDeviceGuard { + public: + /// Create an uninitialized guard. Set the guard later using reset_device. + explicit OptionalDeviceGuard() = default; + + /// Initialize the guard, setting the current device to the passed Device. + explicit OptionalDeviceGuard(Device device) : guard_(device) {} + + /// Initialize the guard if a Device is passed; otherwise leave the + /// guard uninitialized. + explicit OptionalDeviceGuard(optional device) : guard_(device) {} + + /// Constructor for testing only. + explicit OptionalDeviceGuard( + Device device, + const impl::DeviceGuardImplInterface* impl) + : guard_(device, impl) {} + + /// Copy is disallowed + OptionalDeviceGuard(const OptionalDeviceGuard&) = delete; + OptionalDeviceGuard& operator=(const OptionalDeviceGuard&) = delete; + + /// Move is disallowed + /// See Note [Explicit initialization of optional fields] + /// and // Note [Move construction for RAII guards is tricky] + /// for rationale. + OptionalDeviceGuard(OptionalDeviceGuard&& other) = delete; + OptionalDeviceGuard& operator=(OptionalDeviceGuard&& other) = delete; + + /// Sets the device to the given one. The specified device must be consistent + /// with the device type originally specified during guard construction. + void reset_device(at::Device device) { + guard_.reset_device(device); + } + + /// For testing only + void reset_device( + at::Device device, + const impl::DeviceGuardImplInterface* impl) { + guard_.reset_device(device, impl); + } + + /// Returns the device that was set at the time the guard was constructed. + optional original_device() const { + return guard_.original_device(); + } + + /// Returns the most recent device that was set using this device guard, + /// either from construction, or via reset_device. + optional current_device() const { + return guard_.current_device(); + } + + private: + impl::InlineOptionalDeviceGuard guard_{}; +}; + +// Note [Whither the DeviceGuard boilerplate] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Design note: in principle, we could avoid these wrappers using: +// +// using DeviceGuard = impl::InlineDeviceGuard; +// using OptionalDeviceGuard = +// impl::InlineOptionalDeviceGuard; +// +// But the error messages are worse, and our users can't just look at the +// header file to find out what's going on. Furthermore, for specializations +// like CUDAStreamGuard, it can be profitable to replace some interfaces with +// refined types (e.g., return CUDAStream instead of Stream). So, we eat +// the boilerplate and write out the API explicitly. + +} // namespace c10 diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/core/DeviceType.h b/venv/lib/python3.10/site-packages/torch/include/c10/core/DeviceType.h new file mode 100644 index 0000000000000000000000000000000000000000..701ea3f3bd211e1a4ac2d3be4d5aecca6ff28066 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/core/DeviceType.h @@ -0,0 +1,123 @@ +#pragma once + +// This is directly synchronized with caffe2/proto/caffe2.proto, but +// doesn't require me to figure out how to get Protobuf headers into +// ATen/core (which would require a lot more build system hacking.) +// If you modify me, keep me synchronized with that file. 
+ +#include + +#include +#include +#include +#include +#include + +namespace c10 { + +// These contains all device types that also have a BackendComponent +// and therefore participate in per-backend functionality dispatch keys. +// This is most backends except PrivateUse2 and PrivateUse3 +#define C10_FORALL_BACKEND_DEVICE_TYPES(_, extra) \ + _(CPU, extra) \ + _(CUDA, extra) \ + _(HIP, extra) \ + _(XLA, extra) \ + _(MPS, extra) \ + _(IPU, extra) \ + _(XPU, extra) \ + _(HPU, extra) \ + _(VE, extra) \ + _(Lazy, extra) \ + _(Meta, extra) \ + _(MTIA, extra) \ + _(PrivateUse1, extra) + +enum class DeviceType : int8_t { + CPU = 0, + CUDA = 1, // CUDA. + MKLDNN = 2, // Reserved for explicit MKLDNN + OPENGL = 3, // OpenGL + OPENCL = 4, // OpenCL + IDEEP = 5, // IDEEP. + HIP = 6, // AMD HIP + FPGA = 7, // FPGA + ORT = 8, // ONNX Runtime / Microsoft + XLA = 9, // XLA / TPU + Vulkan = 10, // Vulkan + Metal = 11, // Metal + XPU = 12, // XPU + MPS = 13, // MPS + Meta = 14, // Meta (tensors with no data) + HPU = 15, // HPU / HABANA + VE = 16, // SX-Aurora / NEC + Lazy = 17, // Lazy Tensors + IPU = 18, // Graphcore IPU + MTIA = 19, // Meta training and inference devices + PrivateUse1 = 20, // PrivateUse1 device + // NB: If you add more devices: + // - Change the implementations of DeviceTypeName and isValidDeviceType + // in DeviceType.cpp + // - Change the number below + COMPILE_TIME_MAX_DEVICE_TYPES = 21, +}; + +constexpr DeviceType kCPU = DeviceType::CPU; +constexpr DeviceType kCUDA = DeviceType::CUDA; +constexpr DeviceType kHIP = DeviceType::HIP; +constexpr DeviceType kFPGA = DeviceType::FPGA; +constexpr DeviceType kORT = DeviceType::ORT; +constexpr DeviceType kXLA = DeviceType::XLA; +constexpr DeviceType kMPS = DeviceType::MPS; +constexpr DeviceType kMeta = DeviceType::Meta; +constexpr DeviceType kVulkan = DeviceType::Vulkan; +constexpr DeviceType kMetal = DeviceType::Metal; +constexpr DeviceType kXPU = DeviceType::XPU; +constexpr DeviceType kHPU = DeviceType::HPU; +constexpr DeviceType kVE = DeviceType::VE; +constexpr DeviceType kLazy = DeviceType::Lazy; +constexpr DeviceType kIPU = DeviceType::IPU; +constexpr DeviceType kMTIA = DeviceType::MTIA; +constexpr DeviceType kPrivateUse1 = DeviceType::PrivateUse1; + +// define explicit int constant +constexpr int COMPILE_TIME_MAX_DEVICE_TYPES = + static_cast(DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES); + +static_assert( + COMPILE_TIME_MAX_DEVICE_TYPES <= 21, + "Hey! You seem to be adding a lot of new DeviceTypes. The intent was " + "for this constant to reflect the actual number of DeviceTypes we support " + "in PyTorch; it's important that this number is not too large as we " + "use this to allocate stack arrays in some places in our code. If you " + "are indeed just adding the 20th device type, feel free to change " + "the check to 32; but if you are adding some sort of extensible device " + "types registration, please be aware that you are affecting code that " + "this number is small. 
Try auditing uses of this constant."); + +C10_API std::string DeviceTypeName(DeviceType d, bool lower_case = false); + +C10_API bool isValidDeviceType(DeviceType d); + +C10_API std::ostream& operator<<(std::ostream& stream, DeviceType type); + +C10_API void register_privateuse1_backend(const std::string& backend_name); +C10_API std::string get_privateuse1_backend(bool lower_case = true); + +C10_API bool is_privateuse1_backend_registered(); + +} // namespace c10 + +namespace std { +template <> +struct hash { + std::size_t operator()(c10::DeviceType k) const { + return std::hash()(static_cast(k)); + } +}; +} // namespace std + +namespace torch { +// NOLINTNEXTLINE(misc-unused-using-decls) +using c10::DeviceType; +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/core/GeneratorImpl.h b/venv/lib/python3.10/site-packages/torch/include/c10/core/GeneratorImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..e378981c1f443a2adcfae1797e0adf17b13370b2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/core/GeneratorImpl.h @@ -0,0 +1,107 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include +#include + +/** + * Note [Generator] + * ~~~~~~~~~~~~~~~~ + * A Pseudo Random Number Generator (PRNG) is an engine that uses an algorithm + * to generate a seemingly random sequence of numbers, that may be later be used + * in creating a random distribution. Such an engine almost always maintains a + * state and requires a seed to start off the creation of random numbers. Often + * times, users have found it beneficial to be able to explicitly create, + * retain, and destroy PRNG states and also be able to have control over the + * seed value. + * + * A Generator in ATen gives users the ability to read, write and modify a PRNG + * engine. For instance, it does so by letting users seed a PRNG engine, fork + * the state of the engine, etc. + * + * By default, there is one generator per device, and a device's generator is + * lazily created. A user can use the torch.Generator() api to create their own + * generator. Currently torch.Generator() can only create a CPUGeneratorImpl. + */ + +/** + * Note [Acquire lock when using random generators] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * Generator and its derived classes are NOT thread-safe. Please note that most + * of the places where we have inserted locking for generators are historically + * based, and we haven't actually checked that everything is truly thread safe + * (and it probably isn't). Please use the public mutex_ when using any methods + * from these classes, except for the read-only methods. You can learn about the + * usage by looking into the unittests (aten/src/ATen/cpu_generator_test.cpp) + * and other places where we have used lock_guard. + * + * TODO: Look into changing the threading semantics of Generators in ATen (e.g., + * making them non-thread safe and instead making the generator state + * splittable, to accommodate forks into other threads). 
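+ *
+ * A minimal sketch of the locking pattern described above (the generator
+ * pointer is hypothetical; any GeneratorImpl subclass is used the same way):
+ *
+ *   c10::GeneratorImpl* gen = ...; // some backend's generator
+ *   {
+ *     std::lock_guard<std::mutex> lock(gen->mutex_);
+ *     gen->set_current_seed(42); // mutating call, made while holding the lock
+ *   }
+ *   uint64_t seed = gen->current_seed(); // read-only, no lock required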
+ */ + +namespace c10 { + +// The default seed is selected to be a large number +// with good distribution of 0s and 1s in bit representation +constexpr uint64_t default_rng_seed_val = 67280421310721; + +struct C10_API GeneratorImpl : public c10::intrusive_ptr_target { + // Constructors + GeneratorImpl(Device device_in, DispatchKeySet key_set); + + // Delete all copy and move assignment in favor of clone() + // method + GeneratorImpl(const GeneratorImpl& other) = delete; + GeneratorImpl(GeneratorImpl&& other) = delete; + GeneratorImpl& operator=(const GeneratorImpl& other) = delete; + + ~GeneratorImpl() override = default; + c10::intrusive_ptr clone() const; + + // Common methods for all generators + virtual void set_current_seed(uint64_t seed) = 0; + virtual void set_offset(uint64_t offset) = 0; + virtual uint64_t get_offset() const = 0; + virtual uint64_t current_seed() const = 0; + virtual uint64_t seed() = 0; + virtual void set_state(const c10::TensorImpl& new_state) = 0; + virtual c10::intrusive_ptr get_state() const = 0; + Device device() const; + + // See Note [Acquire lock when using random generators] + std::mutex mutex_; + + DispatchKeySet key_set() const { + return key_set_; + } + + inline void set_pyobj(PyObject* pyobj) noexcept { + pyobj_ = pyobj; + } + + inline PyObject* pyobj() const noexcept { + return pyobj_; + } + + protected: + Device device_; + DispatchKeySet key_set_; + PyObject* pyobj_ = nullptr; + + virtual GeneratorImpl* clone_impl() const = 0; +}; + +namespace detail { + +C10_API uint64_t getNonDeterministicRandom(bool is_cuda = false); + +} // namespace detail + +} // namespace c10 diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/core/SymNodeImpl.h b/venv/lib/python3.10/site-packages/torch/include/c10/core/SymNodeImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..0413b9ff28482441ab13f624f87adf6bd9c01c21 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/core/SymNodeImpl.h @@ -0,0 +1,218 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace c10 { + +class SymNodeImpl; +using SymNode = c10::intrusive_ptr; + +// When you add a method, you also need to edit +// torch/csrc/jit/python/init.cpp +// torch/csrc/utils/python_symnode.h +// c10/core/ConstantSymNodeImpl.h +class C10_API SymNodeImpl : public c10::intrusive_ptr_target { + public: + ~SymNodeImpl() override = default; + + template + c10::intrusive_ptr dyn_cast() const { + return c10::intrusive_ptr::reclaim_copy(dynamic_cast(this)); + } + + // these could be pure virtual when we implement LTC versions + virtual bool is_int() { + TORCH_CHECK(false, "NYI"); + }; + virtual bool is_bool() { + TORCH_CHECK(false, "NYI"); + }; + virtual bool is_float() { + TORCH_CHECK(false, "NYI"); + }; + virtual bool is_nested_int() const { + return false; + }; + virtual SymNode add(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sub(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode mul(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode truediv(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode pow(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode floordiv(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode mod(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode eq(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode 
ne(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode gt(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode lt(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode le(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode ge(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode ceil() { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode floor() { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode neg() { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_min(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_max(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_or(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_and(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_not() { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_ite(const SymNode& then_val, const SymNode& else_val) { + TORCH_CHECK(false, "NYI"); + }; + // NB: self is ignored here, only the arguments are used + virtual SymNode is_contiguous( + ArrayRef sizes, + ArrayRef strides) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode is_channels_last_contiguous_2d( + ArrayRef sizes, + ArrayRef strides) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode is_channels_last_contiguous_3d( + ArrayRef sizes, + ArrayRef strides) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode is_channels_last_strides_2d( + ArrayRef sizes, + ArrayRef strides) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode is_channels_last_strides_3d( + ArrayRef sizes, + ArrayRef strides) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode is_non_overlapping_and_dense( + ArrayRef sizes, + ArrayRef strides) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode clone() { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_float() { + TORCH_CHECK(false, "NYI"); + } + virtual SymNode wrap_int(int64_t num) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode wrap_float(double num) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode wrap_bool(bool num) { + TORCH_CHECK(false, "NYI"); + }; + virtual int64_t guard_int(const char* file, int64_t line) { + TORCH_CHECK(false, "NYI"); + }; + virtual bool guard_bool(const char* file, int64_t line) { + TORCH_CHECK(false, "NYI"); + }; + virtual double guard_float(const char* file, int64_t line) { + TORCH_CHECK(false, "NYI"); + }; + virtual bool guard_size_oblivious(const char* file, int64_t line) { + // No improvement for unbacked SymBools by default, replace this + // with a better implementation! + return guard_bool(file, line); + } + virtual bool expect_true(const char* file, int64_t line) { + // No improvement for unbacked SymBools by default, replace this + // with a better implementation! + return guard_bool(file, line); + }; + virtual bool expect_size(const char* file, int64_t line) { + // No improvement for unbacked SymInts by default, replace this + // with a better implementation! 
+ return ge(wrap_int(0))->guard_bool(file, line); + }; + virtual int64_t int_() { + TORCH_CHECK(false, "NYI"); + }; + virtual bool bool_() { + TORCH_CHECK(false, "NYI"); + }; + virtual bool has_hint() { + TORCH_CHECK(false, "NYI"); + }; + virtual std::string str() { + TORCH_CHECK(false, "NYI"); + }; + virtual c10::optional nested_int() { + return c10::nullopt; + } + virtual c10::optional nested_int_coeff() { + return c10::nullopt; + } + virtual c10::optional constant_int() { + return c10::nullopt; + } + virtual c10::optional constant_bool() { + return c10::nullopt; + } + virtual c10::optional maybe_as_int() { + return c10::nullopt; + } + virtual bool is_constant() { + return false; + } + virtual bool is_symbolic() { + return true; + } + std::ostream& operator<<(std::ostream& os) { + os << str(); + return os; + } +}; + +} // namespace c10 diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/core/UndefinedTensorImpl.h b/venv/lib/python3.10/site-packages/torch/include/c10/core/UndefinedTensorImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..73bcd698d44d4a5cb224ce9513a6e5434497e6d3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/core/UndefinedTensorImpl.h @@ -0,0 +1,42 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace c10 { + +struct C10_API UndefinedTensorImpl final : public TensorImpl { + public: + // Without this, we get: + // error: identifier "at::UndefinedTensorImpl::_singleton" is undefined in + // device code + // (ostensibly because the constexpr tricks MSVC into trying to compile this + // function for device as well). +#ifdef _WIN32 + static inline TensorImpl* singleton() { +#else + static constexpr inline TensorImpl* singleton() { +#endif + return &_singleton; + } +#ifdef DEBUG + bool has_storage() const override; +#endif + void set_storage_offset(int64_t offset) override; + + protected: + bool is_contiguous_custom(MemoryFormat format) const override; + IntArrayRef strides_custom() const override; + SymIntArrayRef sym_strides_custom() const override; + + private: + UndefinedTensorImpl(); + static UndefinedTensorImpl _singleton; + const char* tensorimpl_type_name() const override; +}; + +} // namespace c10 diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/COW.h b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/COW.h new file mode 100644 index 0000000000000000000000000000000000000000..1cf81eda1ca6f1aab39b994132347fce1e432494 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/COW.h @@ -0,0 +1,32 @@ +#pragma once + +#include +#include + +namespace c10 { +struct StorageImpl; +class DataPtr; +}; // namespace c10 + +namespace c10::impl::cow { + +// Creates a Copy-on-write (COW) clone of the given storage. This will also +// convert the given storage into a COW storage if it is not COW already. +// +// Converting the storage into a COW storage will not be successful if the +// storage's DataPtr has some context (`DataPtr::get_context()`) which is not +// equal to the data pointer (`DataPtr::get()`). In this case, a nullptr is +// returned. +C10_API c10::intrusive_ptr lazy_clone_storage( + StorageImpl& storage); + +// Check if a storage has a simple DataPtr with no abnormal context +C10_API bool has_simple_data_ptr(const c10::StorageImpl& storage); + +// Check if a DataPtr is COW +C10_API bool is_cow_data_ptr(const c10::DataPtr& data_ptr); + +// Eagerly copies a COW storage's data, turning it into a non-COW storage. 
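+//
+// A hedged sketch of how the functions declared in this header are expected
+// to fit together (the storage reference is hypothetical):
+//
+//   c10::StorageImpl& src = ...;
+//   auto cow = c10::impl::cow::lazy_clone_storage(src); // may return nullptr
+//   if (cow) {
+//     // src and *cow now lazily share the same bytes; before writing through
+//     // the clone, a writer first materializes its own copy:
+//     c10::impl::cow::materialize_cow_storage(*cow);
+//   }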
+C10_API void materialize_cow_storage(StorageImpl& storage); + +} // namespace c10::impl::cow diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/COWDeleter.h b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/COWDeleter.h new file mode 100644 index 0000000000000000000000000000000000000000..e26625a8c726b8e14fd519e4a5cac80514667a96 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/COWDeleter.h @@ -0,0 +1,66 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include + +namespace c10::impl::cow { + +// A COWDeleterContext object is used as the `ctx` argument for DataPtr +// to implement a Copy-on-write (COW) DataPtr. +class C10_API COWDeleterContext { + public: + // Creates an instance, holding the pair of data and original + // deleter. + // + // Note that the deleter will only be called in our destructor if + // the last reference to this goes away without getting + // materialized. + explicit COWDeleterContext(std::unique_ptr data); + + // Increments the current refcount. + void increment_refcount(); + + // See README.md in this directory to understand the locking + // strategy. + + // Represents a reference to the context. + // + // This is returned by decrement_refcount to allow the caller to + // copy the data under the shared lock. + using NotLastReference = std::shared_lock; + + // Represents the last reference to the context. + // + // This will be returned by decrement_refcount when it is the last + // reference remaining and after any pending copies have completed. + using LastReference = std::unique_ptr; + + // Decrements the refcount, returning a handle indicating what to + // do with it. + std::variant decrement_refcount(); + + private: + // The destructor is hidden, this should only ever be used within + // UniqueVoidPtr using cow::delete_context as the deleter. + ~COWDeleterContext(); + + std::shared_mutex mutex_; + std::unique_ptr data_; + std::atomic refcount_ = 1; +}; + +// `cow_deleter` is used as the `ctx_deleter` for DataPtr to implement a COW +// DataPtr. +// +// Warning: This should only be called on a pointer to a COWDeleterContext that +// was allocated on the heap with `new`, because when the refcount reaches 0, +// the context is deleted with `delete`. +C10_API void cow_deleter(void* ctx); + +} // namespace c10::impl::cow diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/DeviceGuardImplInterface.h b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/DeviceGuardImplInterface.h new file mode 100644 index 0000000000000000000000000000000000000000..4e74b856614f642fcc8f0bb30c29aba89c2dcf65 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/DeviceGuardImplInterface.h @@ -0,0 +1,337 @@ +#pragma once + +#include +#include +#include +#include + +// Just for C10_ANONYMOUS_VARIABLE +#include + +#include + +namespace c10 { + +// Forward declaration +class DataPtr; + +/** + * Flags defining the behavior of events. + * + * PYTORCH_DEFAULT and BACKEND_DEFAULT are valid for all backends. The + * BACKEND_DEFAULT is what a particular backend would select if no + * flags were given. PYTORCH_DEFAULT is the PyTorch's framework default + * choice for events on that backend, which may not be the same. For example, + * when PyTorch creates a CUDA event it sets the flag + * CUDA_EVENT_DISABLING_TIMING by default to improve performance. 
+ * + * The mapping of PYTORCH_DEFAULT and BACKEND_DEFAULT is done by each + * backend implementation. Backend-specific flags, like CUDA_EVENT_DEFAULT, + * should map one-to-one with actual event flags for those backends. + */ +enum class EventFlag { + PYTORCH_DEFAULT, + BACKEND_DEFAULT, + // CUDA flags + CUDA_EVENT_DEFAULT, + CUDA_EVENT_DISABLE_TIMING, // PyTorch-default for CUDA + // HIP flags + HIP_EVENT_DEFAULT, + HIP_EVENT_DISABLE_TIMING, // PyTorch-default for HIP + // FOR TESTING ONLY + INVALID +}; + +namespace impl { + +/** + * DeviceGuardImplInterface represents the virtual interface which provides + * functionality to provide an RAII class for device and stream switching, + * via DeviceGuard. Every distinct device type, e.g., CUDA and HIP, is + * expected to implement and register an implementation of this interface. + * All classes which inherit from DeviceGuardImplInterface should be declared + * 'final'. + * + * This class exists because we provide a unified interface for performing + * device guards via DeviceGuard, but we cannot assume that we have actually + * compiled against the, e.g., CUDA library, which actually implements + * this guard functionality. In this case, a dynamic dispatch is required + * to cross the library boundary. + * + * If possible, you should directly use implementations of this interface; + * those uses will be devirtualized. + */ +struct C10_API DeviceGuardImplInterface { + DeviceGuardImplInterface() = default; + DeviceGuardImplInterface(const DeviceGuardImplInterface&) = default; + DeviceGuardImplInterface& operator=(const DeviceGuardImplInterface&) = + default; + DeviceGuardImplInterface(DeviceGuardImplInterface&&) noexcept = default; + DeviceGuardImplInterface& operator=(DeviceGuardImplInterface&&) noexcept = + default; + + /** + * Return the type of device managed by this guard implementation. + */ + virtual DeviceType type() const = 0; + + /** + * Set the current device to Device, and return the previous Device. + */ + virtual Device exchangeDevice(Device) const = 0; + // NB: Implementations of exchangeDevice can be a bit boilerplatey. You might + // consider replacing exchangeDevice with a non-virtual function with a baked + // in implementation; however, note that this will triple the number of + // virtual calls (when you implement exchangeDevice in a final subclass, + // the compiler gets to devirtualize everything; it won't do that if you don't + // define it in the subclass!) A common way to solve this problem is to use + // some sort of CRTP; however, we can template DeviceGuardImplInterface since + // we really *do* need it to be virtual. A little boilerplate seems easiest + // to explain. (Another way around this problem is to provide inline + // functions that provide the default implementations, but this seems a little + // hard to explain. In any case, we're only going to have on order of ten + // implementations of this anyway.) + + /** + * Get the current device. + */ + virtual Device getDevice() const = 0; + + /** + * Set the current device to Device. + */ + virtual void setDevice(Device) const = 0; + + /** + * Set the current device to Device, without checking for errors + * (so, e.g., this can be called from a destructor). + */ + virtual void uncheckedSetDevice(Device) const noexcept = 0; + + /** + * Get the current stream for a given device. + */ + virtual Stream getStream(Device) const noexcept = 0; + + /** + * Get the default stream for a given device. 
+ */ + virtual Stream getDefaultStream(Device) const { + TORCH_CHECK(false, "Backend doesn't support acquiring a default stream.") + } + + /** + * Get a stream from the global pool for a given device. + */ + virtual Stream getStreamFromGlobalPool(Device, bool isHighPriority = false) + const { + (void)isHighPriority; // Suppress unused variable warning + TORCH_CHECK(false, "Backend doesn't support acquiring a stream from pool.") + } + + /** + * Set a stream to be the thread local current stream for its device. + * Return the previous stream for that device. You are NOT required + * to set the current device to match the device of this stream. + */ + virtual Stream exchangeStream(Stream) const noexcept = 0; + + /** + * Destroys the given event. + */ + virtual void destroyEvent(void* /*event*/, const DeviceIndex /*device_index*/) + const noexcept {} + + /** + * Increments the event's version and enqueues a job with this version + * in the stream's work queue. When the stream process that job + * it notifies all streams waiting on / blocked by that version of the + * event to continue and marks that version as recorded. + * */ + virtual void record( + void** /*event*/, + const Stream& /*stream*/, + const DeviceIndex /*device_index*/, + const c10::EventFlag /*flag*/) const { + TORCH_CHECK(false, "Backend doesn't support events."); + } + + /** + * Does nothing if the event has not been scheduled to be recorded. + * If the event was previously enqueued to be recorded, a command + * to wait for the version of the event that exists at the time of this call + * is inserted in the stream's work queue. + * When the stream reaches this command it will stop processing + * additional commands until that version of the event is marked as recorded. + */ + virtual void block(void* /*event*/, const Stream& /*stream*/) const { + TORCH_CHECK(false, "Backend doesn't support events."); + } + + /** + * Returns true if (and only if) + * (1) the event has never been scheduled to be recorded + * (2) the current version is marked as recorded. + * Returns false otherwise. + */ + virtual bool queryEvent(void* /*event*/) const { + TORCH_CHECK(false, "Backend doesn't support events."); + } + + /** + * Get the number of devices. WARNING: This is REQUIRED to not raise + * an exception. If there is some sort of problem, e.g., driver error, + * you should report that there are zero available devices. + */ + virtual DeviceIndex deviceCount() const noexcept = 0; + + /** + * Return true if all the work previously enqueued on the stream for + * asynchronous execution has completed running on the device. + */ + virtual bool queryStream(const Stream& /*stream*/) const { + TORCH_CHECK(false, "Backend doesn't support querying streams."); + } + + /** + * Wait (by blocking the calling thread) until all the work previously + * enqueued on the stream has completed running on the device. + */ + virtual void synchronizeStream(const Stream& /*stream*/) const { + TORCH_CHECK(false, "Backend doesn't support synchronizing streams."); + } + + /** + * Ensure the caching allocator (if any) is aware that the given DataPtr is + * being used on the given stream, and that it should thus avoid recycling the + * DataPtr until all work on that stream is done. + */ + virtual void recordDataPtrOnStream(const c10::DataPtr&, const Stream&) const { + } + + /** + * Intended use of this class is to leak the DeviceGuardImpl at program end. + * So you better not call the destructor, buster! 
+ */ + virtual ~DeviceGuardImplInterface() = default; +}; + +// A no-op device guard impl that doesn't do anything interesting. Useful +// for devices that don't actually have a concept of device index. Prominent +// examples are CPU and Meta. +template +struct NoOpDeviceGuardImpl final : public DeviceGuardImplInterface { + NoOpDeviceGuardImpl() = default; + DeviceType type() const override { + return D; + } + Device exchangeDevice(Device) const override { + return Device(D, -1); // no-op + } + Device getDevice() const override { + return Device(D, -1); + } + void setDevice(Device) const override { + // no-op + } + void uncheckedSetDevice(Device) const noexcept override { + // no-op + } + Stream getStream(Device) const noexcept override { + // no-op + return Stream(Stream::DEFAULT, Device(D, -1)); + } + // NB: These do NOT set the current device + Stream exchangeStream(Stream) const noexcept override { + // no-op + return Stream(Stream::DEFAULT, Device(D, -1)); + } + DeviceIndex deviceCount() const noexcept override { + return 1; + } + + // Event-related functions + void record( + void** /*event*/, + const Stream& /*stream*/, + const DeviceIndex /*device_index*/, + const EventFlag /*flag*/) const override { + TORCH_CHECK(false, D, " backend doesn't support events."); + } + void block(void* /*event*/, const Stream& /*stream*/) const override { + TORCH_CHECK(false, D, " backend doesn't support events.") + } + bool queryEvent(void* /*event*/) const override { + TORCH_CHECK(false, D, " backend doesn't support events.") + } + void destroyEvent(void* /*event*/, const DeviceIndex /*device_index*/) + const noexcept override {} + + // Stream-related functions + bool queryStream(const Stream& /*stream*/) const override { + return true; + } + void synchronizeStream(const Stream& /*stream*/) const override { + // Don't wait for anything. + } +}; + +// The registry is NON-owning. Each stored pointer is std::atomic so +// that under all interleavings of registry calls the structure is +// race-free. This doesn't cost us anything on reads in X86. (An +// unsynchronized implementation probably is OK too, but I didn't want +// to prove that we never read from device_guard_impl_registry at the +// same time some registration is occurring. Shiver.) +// +// I'd like this registry to be valid even at program destruction time +// (in case someone uses a DeviceGuard in a destructor to do some cleanup +// in the CUDA API.) Since there are no direct accesses of the underlying +// owning objects which I can use to enforce initialization order (unlike +// in a Meyer singleton), it implies that you must *leak* objects when +// putting them in the registry. This is done by deleting the destructor +// on DeviceGuardImplInterface. +// NOLINTNEXTLINE(*c-arrays*) +extern C10_API std::atomic + device_guard_impl_registry[static_cast( + DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES)]; + +// I can't conveniently use c10/util/Registry.h for the following reason: +// c10/util/Registry.h gives me a slow way of Create'ing a object of some +// interface from the registry, but no way of quickly accessing an already +// created object. I'll be banging on getDeviceGuardImpl every time we do a +// DeviceGuard, so I really don't want to be doing an unordered_map lookup. +// Better if the registration mechanism directly drops its implementation +// into device_guard_impl_registry. 
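+//
+// A minimal usage sketch (illustrative only: it assumes a backend keyed by
+// DeviceType::PrivateUse1, and relies on the registrar macro and lookup
+// function declared just below):
+//
+//   // In the backend's translation unit, run once at static-init time:
+//   C10_REGISTER_GUARD_IMPL(
+//       PrivateUse1, NoOpDeviceGuardImpl<DeviceType::PrivateUse1>);
+//
+//   // At a call site, fetch the leaked singleton and drive it directly:
+//   const DeviceGuardImplInterface* impl =
+//       getDeviceGuardImpl(DeviceType::PrivateUse1);
+//   Device prev = impl->exchangeDevice(Device(DeviceType::PrivateUse1, 0));
+//   // ... work on device 0 ...
+//   impl->uncheckedSetDevice(prev);  // restore, much as DeviceGuard would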
+ +class C10_API DeviceGuardImplRegistrar { + public: + DeviceGuardImplRegistrar(DeviceType, const DeviceGuardImplInterface*); +}; + +#define C10_REGISTER_GUARD_IMPL(DevType, DeviceGuardImpl) \ + static ::c10::impl::DeviceGuardImplRegistrar C10_ANONYMOUS_VARIABLE( \ + g_##DeviceType)(::c10::DeviceType::DevType, new DeviceGuardImpl()); + +inline const DeviceGuardImplInterface* getDeviceGuardImpl(DeviceType type) { + // Two adjacent int16_t fields DeviceType and DeviceIndex has field access + // miscompiled on NVCC. To workaround this issue, we apply a mask to the + // DeviceType. First check if the DeviceType is 16-bit. + // FB employees can see + // https://fb.workplace.com/groups/llvm.gcc/permalink/4053565044692080/ + // for more details + static_assert(sizeof(DeviceType) == 1, "DeviceType is not 8-bit"); + auto p = device_guard_impl_registry[static_cast(type) & 0xFF].load(); + + // This seems to be the first place where you make use of a device + // when you pass devices to factory functions. Give a nicer error + // message in this case. + TORCH_CHECK(p, "PyTorch is not linked with support for ", type, " devices"); + return p; +} + +inline bool hasDeviceGuardImpl(DeviceType type) { + return device_guard_impl_registry[static_cast(type)].load(); +} + +} // namespace impl +} // namespace c10 diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/FakeGuardImpl.h b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/FakeGuardImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..c8bfe91619edca3e362ba79bd1075325738d3cc4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/FakeGuardImpl.h @@ -0,0 +1,102 @@ +#pragma once + +#include + +#include + +namespace c10::impl { + +// FakeGuardImpl is hardcoded to have eight devices. Not for +// any good reason, just to simplify code. +constexpr DeviceIndex kFakeGuardImplMaxDevices = 8; + +/** + * A fake implementation of DeviceGuardImplInterface suitable for testing. + * The current device is modeled as a mutable field in the guard implementation + * class. See DeviceGuard_test.cpp for an example use. 
+ */ +template +struct FakeGuardImpl final : public DeviceGuardImplInterface { + static constexpr DeviceType static_type = T; + // Runtime device type is not used + FakeGuardImpl(DeviceType) {} + FakeGuardImpl() = default; + DeviceType type() const override { + return T; + } + Device exchangeDevice(Device d) const override { + AT_ASSERT(d.type() == type()); + AT_ASSERT(d.index() < kFakeGuardImplMaxDevices); + Device old_device = getDevice(); + if (old_device.index() != d.index()) { + current_device_ = d.index(); + } + return old_device; + } + Device getDevice() const override { + return Device(type(), current_device_); + } + void setDevice(Device d) const override { + AT_ASSERT(d.type() == type()); + AT_ASSERT(d.index() >= 0); + AT_ASSERT(d.index() < kFakeGuardImplMaxDevices); + current_device_ = d.index(); + } + void uncheckedSetDevice(Device d) const noexcept override { + current_device_ = d.index(); + } + Stream getStream(Device d) const noexcept override { + return Stream(Stream::UNSAFE, d, current_streams_[d.index()]); + } + Stream exchangeStream(Stream s) const noexcept override { + auto old_id = current_streams_[s.device_index()]; + current_streams_[s.device_index()] = s.id(); + return Stream(Stream::UNSAFE, s.device(), old_id); + } + DeviceIndex deviceCount() const noexcept override { + return kFakeGuardImplMaxDevices; + } + + // Event-related functions + void record( + void** event, + const Stream& stream, + const DeviceIndex device_index, + const EventFlag flag) const override {} + void block(void* event, const Stream& stream) const override {} + bool queryEvent(void* event) const override { + return true; + } + void destroyEvent(void* event, const DeviceIndex device_index) + const noexcept override {} + + // Convenience methods for testing + static DeviceIndex getDeviceIndex() { + return current_device_; + } + static void setDeviceIndex(DeviceIndex i) { + AT_ASSERT(i >= 0); + AT_ASSERT(i < kFakeGuardImplMaxDevices); + current_device_ = i; + } + static StreamId getCurrentStreamIdFor(DeviceIndex i) { + return current_streams_.at(i); + } + static void resetStreams() { + current_streams_.fill(0); + } + + private: + thread_local static DeviceIndex current_device_; + thread_local static std::array + current_streams_; +}; + +template +thread_local DeviceIndex FakeGuardImpl::current_device_ = 0; + +template +thread_local std::array + FakeGuardImpl::current_streams_ = {0, 0, 0, 0, 0, 0, 0, 0}; + +} // namespace c10::impl diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/GPUTrace.h b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/GPUTrace.h new file mode 100644 index 0000000000000000000000000000000000000000..3acb875b54a32c7032d5945afac7e1137fc489e8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/GPUTrace.h @@ -0,0 +1,28 @@ +#pragma once + +#include + +namespace c10::impl { + +struct C10_API GPUTrace { + // On the x86 architecture the atomic operations are lock-less. + static std::atomic gpuTraceState; + + // When PyTorch migrates to C++20, this should be changed to an atomic flag. + // Currently, the access to this variable is not synchronized, on the basis + // that it will only be flipped once and by the first interpreter that + // accesses it. + static bool haveState; + + // This function will only register the first interpreter that tries to invoke + // it. For all of the next ones it will be a no-op. 
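+  //
+  // An illustrative sketch of the intended call pattern (the caller names
+  // are hypothetical; only set_trace/get_trace below belong to this API):
+  //
+  //   // During a Python interpreter's initialization (first caller wins):
+  //   c10::impl::GPUTrace::set_trace(my_interpreter);
+  //
+  //   // Later, GPU tracing hooks consult the winner, if any:
+  //   const c10::impl::PyInterpreter* tracer = c10::impl::GPUTrace::get_trace();
+  //   if (tracer) { /* forward trace callbacks through *tracer */ }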
+ static void set_trace(const PyInterpreter*); + + static const PyInterpreter* get_trace() { + if (!haveState) + return nullptr; + return gpuTraceState.load(std::memory_order_acquire); + } +}; + +} // namespace c10::impl diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/HermeticPyObjectTLS.h b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/HermeticPyObjectTLS.h new file mode 100644 index 0000000000000000000000000000000000000000..741132b9f967c19a29e2d130890e0f5ef99289a2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/HermeticPyObjectTLS.h @@ -0,0 +1,59 @@ +#pragma once + +#include +#include + +namespace c10::impl { + +// This TLS controls whether or not we permanently associate PyObject +// with Tensor the first time it is allocated. When hermetic PyObject +// TLS is enabled (state is true), we DO NOT save PyObjects to Tensor, +// meaning you get a distinct PyObject whenever you execute the code in +// question. +struct C10_API HermeticPyObjectTLS { + static void set_state(bool state); + static bool get_state() { + // Hypothetical fastpath if torchdeploy/multipy isn't used. Per + // https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p2055r0.pdf + // this qualifies relaxed access because it is a single-location data + // structure (only the boolean here). + // + // Forgetting about data races for a moment, is there a logical race? + // + // - Boolean only ever transitions from false to true. So the + // critical situation is when one interpreter is already running + // when a second interpreter switches haveState from false to true. + // + // - The first interpreter is indifferent whether or not it sees + // hasState true/false; obviously false works (this is what the + // interpreter was previously using; more directly, the interpreter + // calls into itself as the handler, so being hermetic is not + // required), and true simply means serviced python operator calls will + // be hermetic; in these cases it is expected to be functionally + // equivalent. + // + // - The second interpreter MUST see hasState true (as its requests will + // be forwarded to the first interpreter), but it is assumed that there + // is a synchronization between the interpreter initialization, and + // when we actually perform operations, so it is guaranteed to see + // hasState true. + // + // QED. + // + // This fastpath is currently disabled so that we can more easily test that + // hermetic mode works correctly even on stock build of PyTorch. + if (false && !haveState_.load(std::memory_order_relaxed)) + return false; + return get_tls_state(); + } + // Call this from the multipy/torchdeploy top level + static void init_state(); + + private: + // This only flipped once from false to true during torchdeploy/multipy + // initialization, and never again. + static std::atomic haveState_; + static bool get_tls_state(); +}; + +} // namespace c10::impl diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineDeviceGuard.h b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineDeviceGuard.h new file mode 100644 index 0000000000000000000000000000000000000000..3e9f91eff6170025b54763f985e25c44adacf474 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineDeviceGuard.h @@ -0,0 +1,428 @@ +#pragma once + +// This file provides implementations of InlineDeviceGuard and +// InlineOptionalDeviceGuard. 
+ +#include +#include +#include +#include +#include +#include +#include +#include + +namespace c10::impl { + +/** + * A DeviceGuard is an RAII class that sets a device to some value + * on construction, and resets the device to its original value on + * destruction. + * + * InlineDeviceGuard is a helper class for implementing DeviceGuards. + * It is templated over a DeviceGuardImpl (anything that implements + * DeviceGuardImplInterface). There are two primary ways to instantiate + * InlineDeviceGuard: + * + * - With a concrete implementation of DeviceGuardImpl, e.g., CUDAGuardImpl. + * This is the best way to use InlineDeviceGuard, as all calls are + * devirtualized, giving you code as efficient as straight line + * calls to cudaGetDevice/cudaSetDevice. + * + * - With VirtualGuardImpl, which does a virtual dispatch to a DeviceGuardImpl + * retrieved from a DeviceType registry. We have explicitly instantiated + * InlineDeviceGuard this way as c10::DeviceGuard. + * + * If you are in a hurry, you can use InlineDeviceGuard directly: + * + * using CUDAGuard = impl::InlineDeviceGuard; + * + * However, you can provide a better user experience if you explicitly write a + * wrapper class that itself contains the template instantiation: + * + * class CUDAGuard { + * public: + * // ... the API ... + * private: + * impl::InlineDeviceGuard guard_; + * } + * + * The wrapper class provides a good place to write documentation, and helps + * avoid weird template instantiation errors when a user incorrectly uses the + * class. + * + * If you need to test this class, consider instantiating it with FakeGuardImpl. + */ +template +class InlineDeviceGuard { + public: + // Note [Omitted default constructor from RAII] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // In principle, we could add a default constructor to + // DeviceGuard which reads the current device and promises to + // restore to that device on exit. However, most cases where you + // would have written this, you probably meant to actually just + // use OptionalDeviceGuard (since you don't actually need the + // restore to happen if you don't ever actually set the device). + // We remove the constructor here to encourage you to think about + // what you actually want to happen. + explicit InlineDeviceGuard() = delete; + + /// Set the current device to the passed Device. + explicit InlineDeviceGuard(Device device) + : impl_(device.type()), + original_device_( + device.index() == -1 ? impl_.getDevice() + : impl_.exchangeDevice(device)), + current_device_(device.index() == -1 ? original_device_ : device) {} + + /// Set the current device index to the passed DeviceIndex. (The + /// device type is inferred from the template parameter T). + template < + typename U = T, + typename = + typename std::enable_if_t>> + explicit InlineDeviceGuard(DeviceIndex device_index) + : InlineDeviceGuard(Device(U::static_type, device_index)) {} + + /// Construct an InlineDeviceGuard using VirtualGuardImpl with an explicit + /// DeviceGuardImplInterface pointer. + template < + typename U = T, + typename = typename std::enable_if_t>> + explicit InlineDeviceGuard( + Device device, + const DeviceGuardImplInterface* impl) + : impl_( + VirtualGuardImpl(impl ? impl : getDeviceGuardImpl(device.type()))), + original_device_( + device.index() == -1 ? impl_.getDevice() + : impl_.exchangeDevice(device)), + current_device_(device.index() == -1 ? 
original_device_ : device) {} + + /// Copy is disallowed + InlineDeviceGuard(const InlineDeviceGuard&) = delete; + InlineDeviceGuard& operator=(const InlineDeviceGuard&) = delete; + + /// Move is disallowed, as DeviceGuard does not have an uninitialized state, + /// which is required for moves on types with nontrivial destructors. + InlineDeviceGuard(InlineDeviceGuard&& other) = delete; + InlineDeviceGuard& operator=(InlineDeviceGuard&& other) = delete; + + ~InlineDeviceGuard() { + impl_.uncheckedSetDevice(original_device_); + } + + /// Sets the device to the given one. + template < + typename U = T, + typename std::enable_if_t, int> = 0> + void set_device(at::Device device) { + AT_ASSERT( + (U::static_type == DeviceType::HIP && device.is_cuda()) || + device.type() == U::static_type); + auto index = device.index(); + if (index == -1) + return; + impl_.setDevice(device); + current_device_ = device; + } + + /// Resets the currently set device to its original device, and then sets the + /// current device to the passed device. This is effectively equivalent to + /// set_device when a guard supports only a single device type. + template + typename std::enable_if_t> reset_device( + at::Device device) { + set_device(device); + } + + /// Resets the currently set device to its original device, and then sets the + /// current device to the passed device (for a possibly different device + /// type). + /// + /// This method is named reset_device to highlight the fact that previous + /// device settings from this guard are NOT preserved, even if the device + /// has a different device type. For example: + /// + /// // CUDA device is 0 + /// DeviceGuard g(Device(kCUDA, 1)); + /// g.reset_device(Device(kHIP, 2)); + /// // CUDA device is 0 (!!) + /// + /// NOTE: this implementation may skip some device setting if it can prove + /// that it is unnecessary. + /// + /// Optional argument is for testing only. + template + typename std::enable_if_t> reset_device( + at::Device device, + const impl::DeviceGuardImplInterface* impl = nullptr) { + auto index = device.index(); + if (index == -1) + return; + if (device.type() == original_device_.type()) { + AT_ASSERT(impl == nullptr || impl->type() == device.type()); + impl_.setDevice(device); + current_device_ = device; + } else { + // Destruct and reconstruct the DeviceGuard in place + impl_.setDevice(original_device_); + impl_ = !impl ? VirtualGuardImpl(device.type()) : VirtualGuardImpl(impl); + original_device_ = impl_.exchangeDevice(device); + current_device_ = device; + } + } + + /// Sets the device index to the given one. The device type is inferred + /// from the original device type. + void set_index(DeviceIndex index) { + reset_device(Device(original_device_.type(), index)); + } + + /// Returns the device that was set at the time the most recent + /// reset_device(), or otherwise the device at construction time. + Device original_device() const { + return original_device_; + } + + /// Returns the most recent device that was set using this device guard, + /// either from construction, or via set_device/reset_device/set_index. + Device current_device() const { + return current_device_; + } + + protected: + T impl_; + + private: + Device original_device_; + Device current_device_; +}; + +/** + * A OptionalDeviceGuard is an RAII class that sets a device to some value on + * initialization, and resets the device to its original value on destruction. + * + * InlineOptionalDeviceGuard is a helper class for implementing + * OptionalDeviceGuards. 
See guidance in InlineDeviceGuard on how to + * use this. See OptionalDeviceGuard for user-oriented usage notes. + */ +template +class InlineOptionalDeviceGuard { + public: + // Note [Explicit initialization of optional fields] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // Explicit initialization of optional fields + // required to workaround an nvcc bug; see + // https://github.com/pytorch/pytorch/issues/12117 + + /// Creates an uninitialized OptionalDeviceGuard. + explicit InlineOptionalDeviceGuard() + : guard_() // See Note [Explicit initialization of optional fields] + {} + + /// Set the current device to the passed Device, if it is not nullopt. + explicit InlineOptionalDeviceGuard(optional device_opt) + : guard_() { // See Note [Explicit initialization of optional fields] + if (device_opt.has_value()) { + guard_.emplace(device_opt.value()); + } + } + + /// Set the current device to the passed DeviceIndex, if it is not nullopt. + template < + typename U = T, + typename = + typename std::enable_if_t>> + explicit InlineOptionalDeviceGuard(optional device_index_opt) + : guard_() { // See Note [Explicit initialization of optional fields] + if (device_index_opt.has_value()) { + guard_.emplace(device_index_opt.value()); + } + } + + /// All constructors of DeviceGuard are valid for OptionalDeviceGuard + /// and result in initialized OptionalDeviceGuard. + template + explicit InlineOptionalDeviceGuard(Args&&... args) + : guard_(std::in_place, std::forward(args)...) {} + + // TODO: Consider reading Tensor and TensorList constructors here, when + // Tensor moves to c10. (These are only valid on OptionalDeviceGuard, + // because a Tensor may be undefined, in which case we need an uninitialized + // tensor guard.) + + // Note [Move construction for RAII guards is tricky] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // In principle, move construction is useful for terminating + // the lifetime of a `OptionalDeviceGuard` early; for example: + // + // // current device is d0 + // OptionalDeviceGuard g1(d1); + // // current device is d1 + // { + // OptionalDeviceGuard g2(std::move(g1)); + // } + // // current device is d0!! + // + // However, it's difficult to implement the move constructor + // in a way that works in all situations. For example, consider + // the following example: + // + // OptionalDeviceGuard g1(d1); + // { + // OptionalDeviceGuard g2(d2); + // { + // OptionalDeviceGuard g3(std::move(g1)); // !!! + // } + // } + // + // What should the current device be while g3 in scope... and what + // should it be after it goes out of scope? What about g2? + // There don't seem to be satisfactory answers for these questions. + // + // It's in principle possible to raise an error when this occurs + // by doing some extra thread-local bookkeeping. But why bother? + // Just don't provide the constructor. + InlineOptionalDeviceGuard(InlineOptionalDeviceGuard&& other) = delete; + + // Note [Move assignment for RAII guards is tricky] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // Move assignment is deleted, because you need to know which guard was + // defined "first", as that guard's original_device_ wins--with the current + // representation, we have no way of telling which is the case. (Move + // construction does not have this problem, as one guard is always + // uninitialized.) 
+ // + // We can make this clear by way of a pair of examples: + // + // Example 1: + // + // // initial device is n0 + // { + // CUDAGuard g1(n1); + // { + // CUDAGuard g2(n2); + // // current device should be n2 + // g1 = std::move(g2); + // // current device should still be n2 + // } + // // current device should still be n2 + // } + // // current device should be n0 + // + // Example 2 (flip the order of the two guards): + // + // // initial device is n0 + // { + // CUDAGuard g2(n2); + // { + // CUDAGuard g1(n1); + // // current device should be n1 + // g1 = std::move(g2); + // // current device should be n2 + // } + // // current device should be n0 (since g2 has been vacated) + // } + // + // In both examples, we need g1 to restore to n0 after move assignment. + // However, in example 1, this is determined by the restore value of g1 + // (prior to the move). In example 2, however, it is determined by the the + // restore value of g2(!!). We don't know which one should win, without having + // a way of telling which guard was allocated first. + // + // We could solve this with an extra thread-local variable. But no one is + // actually using move-assignment. So just get rid of it. + InlineOptionalDeviceGuard& operator=(InlineOptionalDeviceGuard&& other) = + delete; + + /// Sets the device to the given one. Initializes OptionalDeviceGuard if it + /// is not already initialized. + template < + typename U = T, + typename = + typename std::enable_if_t>> + void set_device(at::Device device) { + if (!guard_.has_value()) { + guard_.emplace(device); + } else { + guard_->set_device(device); + } + } + + /// Resets the currently set device to its original device, and then sets the + /// current device to the passed device (for a possibly different device + /// type). Initializes OptionalDeviceGuard if it is not already initialized. + /// + /// See notes on why this is called reset_device on InlineDeviceGuard. + /// + /// Optional argument is for testing only. + template < + typename U = T, + typename = typename std::enable_if_t>> + void reset_device( + at::Device device, + const DeviceGuardImplInterface* impl = nullptr) { + if (!guard_.has_value()) { + guard_.emplace(device, impl); + } else { + guard_->reset_device(device, impl); + } + } + + /// Resets the currently set device to its original device, and then sets the + /// current device to the passed device. Initializes the guard if it is + /// not already initialized. This is effectively equivalent to set_device + /// when a guard supports only a single device type. + template < + typename U = T, + typename = + typename std::enable_if_t>> + void reset_device(at::Device device) { + if (!guard_.has_value()) { + guard_.emplace(device); + } else { + guard_->reset_device(device); + } + } + + /// Sets the device index to the given one. The device type is statically + /// known. + template < + typename U = T, + typename = + typename std::enable_if_t>> + void set_index(DeviceIndex index) { + if (!guard_.has_value()) { + guard_.emplace(index); + } else { + guard_->set_index(index); + } + } + + /// Returns the device that was set immediately prior to initialization of + /// the, guard, or nullopt if the guard is uninitialized. + optional original_device() const { + return guard_.has_value() ? make_optional(guard_->original_device()) + : nullopt; + } + + /// Returns the most recent device that was set using this device guard, + /// either from construction, or via set_device, if the guard is initialized, + /// or nullopt if the guard is uninitialized. 
+ optional current_device() const { + return guard_.has_value() ? make_optional(guard_->current_device()) + : nullopt; + } + + /// Restore the original device, resetting this guard to uninitialized state. + void reset() { + guard_.reset(); + } + + private: + optional> guard_; +}; + +} // namespace c10::impl diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineEvent.h b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineEvent.h new file mode 100644 index 0000000000000000000000000000000000000000..ef1e2c6d6fc4cc417b3dfe609a4977e9e4cdc24e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineEvent.h @@ -0,0 +1,113 @@ +#pragma once + +#include +#include +#include +#include + +namespace c10::impl { + +template +struct InlineEvent final { + InlineEvent() = delete; + InlineEvent( + const DeviceType _device_type, + const EventFlag _flag = EventFlag::PYTORCH_DEFAULT) + : backend_{_device_type}, device_type_{_device_type}, flag_{_flag} {} + + // Copy constructor and copy assignment operator (deleted) + InlineEvent(const InlineEvent&) = delete; + InlineEvent& operator=(const InlineEvent&) = delete; + + // Move constructor and move assignment operator + InlineEvent(InlineEvent&& other) noexcept + : event_(other.event_), + backend_(std::move(other.backend_)), + device_type_(other.device_type_), + device_index_(other.device_index_), + flag_(other.flag_), + was_marked_for_recording_(other.was_marked_for_recording_) { + other.event_ = nullptr; + } + InlineEvent& operator=(InlineEvent&& other) noexcept { + swap(other); + return *this; + } + + void swap(InlineEvent& other) noexcept { + std::swap(event_, other.event_); + std::swap(backend_, other.backend_); + std::swap(device_type_, other.device_type_); + std::swap(device_index_, other.device_index_); + std::swap(flag_, other.flag_); + std::swap(was_marked_for_recording_, other.was_marked_for_recording_); + } + + ~InlineEvent() noexcept { + if (event_) + backend_.destroyEvent(event_, device_index_); + } + + DeviceType device_type() const noexcept { + return device_type_; + } + DeviceIndex device_index() const noexcept { + return device_index_; + } + EventFlag flag() const noexcept { + return flag_; + } + bool was_marked_for_recording() const noexcept { + return was_marked_for_recording_; + } + + void recordOnce(const Stream& stream) { + if (!was_marked_for_recording_) + record(stream); + } + + void record(const Stream& stream) { + TORCH_CHECK( + stream.device_type() == device_type_, + "Event device type ", + DeviceTypeName(device_type_), + " does not match recording stream's device type ", + DeviceTypeName(stream.device_type()), + "."); + + backend_.record(&event_, stream, device_index_, flag_); + was_marked_for_recording_ = true; + device_index_ = stream.device_index(); + } + + void block(const Stream& stream) const { + if (!was_marked_for_recording_) + return; + + TORCH_CHECK( + stream.device_type() == device_type_, + "Event device type ", + DeviceTypeName(device_type_), + " does not match blocking stream's device type ", + DeviceTypeName(stream.device_type()), + "."); + + backend_.block(event_, stream); + } + + bool query() const { + if (!was_marked_for_recording_) + return true; + return backend_.queryEvent(event_); + } + + private: + void* event_ = nullptr; + T backend_; + DeviceType device_type_; + DeviceIndex device_index_ = -1; + EventFlag flag_ = EventFlag::PYTORCH_DEFAULT; + bool was_marked_for_recording_ = false; +}; + +} // namespace c10::impl diff --git 
a/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/LocalDispatchKeySet.h b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/LocalDispatchKeySet.h new file mode 100644 index 0000000000000000000000000000000000000000..ef4acbb551115bad3e9a3b9274611e116e9a342f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/LocalDispatchKeySet.h @@ -0,0 +1,162 @@ +#pragma once + +#include +#include + +// TLS management for DispatchKeySet (the "local" DispatchKeySet(s)) +// +// This manages two thread-local DispatchKeySets: +// +// - The included type set, which adds a tensor type for consideration +// in dispatch. (For example, you might add Profiling to +// the included type set to turn on profiling on all tensor operations.) +// +// - The excluded type set, which disqualifies a tensor type from dispatch. +// (For example, after redispatching on variable, we disqualify +// Autograd so we don't attempt to handle variable again.) +// (Exclusion wins over inclusion.) +// +// NB: Originally, I implemented the excluded type set as storing the inverted +// set, but TLS is defined to be zero-initialized, so this doesn't actually work +// (if it's inverted, you want the set to be -1 initialized). + +namespace c10::impl { + +// POD version of LocalDispatchKeySet. Declared here just so that +// we can put it in the guards. +// This struct encapsulates special handling for TLS initialization +// in set_included()/included() API so that they reflect the truth. +// If you want to create PODLocalDispatchKeySet with non-zero state, +// use set_included() instead of default constructor. +struct C10_API PODLocalDispatchKeySet { + uint64_t included_; + uint64_t excluded_; + + // See Note [TLS Initialization] + DispatchKeySet included() const { + return DispatchKeySet(DispatchKeySet::RAW, included_) ^ + c10::default_included_set; + } + DispatchKeySet excluded() const { + return DispatchKeySet(DispatchKeySet::RAW, excluded_) ^ + c10::default_excluded_set; + } + + void set_included(DispatchKeySet x) { + included_ = (x ^ c10::default_included_set).raw_repr(); + } + void set_excluded(DispatchKeySet x) { + excluded_ = (x ^ c10::default_excluded_set).raw_repr(); + } +}; +static_assert( + std::is_trivial_v, + "PODLocalDispatchKeySet must be a POD type."); + +struct C10_API LocalDispatchKeySet { + /* implicit */ LocalDispatchKeySet(PODLocalDispatchKeySet x) + : included_(x.included()), excluded_(x.excluded()) {} + DispatchKeySet included_; + DispatchKeySet excluded_; +}; + +// thread_local variables cannot be C10_API on Windows. +// Inlining this seems to break AutoDispatchBelowAutograd on Android. +#if defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE) +C10_API LocalDispatchKeySet tls_local_dispatch_key_set(); +#else // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE) +extern C10_API thread_local PODLocalDispatchKeySet raw_local_dispatch_key_set; + +inline C10_API LocalDispatchKeySet tls_local_dispatch_key_set() { + // Don't let people fiddle with the thread_local directly just + // because they include this header. + return raw_local_dispatch_key_set; +} +#endif // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE) + +// Internal, use ThreadLocalStateGuard +C10_API void _force_tls_local_dispatch_key_set(LocalDispatchKeySet key_set); + +// RAII API for manipulating the thread-local dispatch state. 
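+//
+// An illustrative sketch of the RAII usage (the enclosing function is
+// hypothetical; the guard classes themselves are declared below):
+//
+//   void call_kernel_without_reentering_python() {
+//     c10::impl::ExcludeDispatchKeyGuard no_python(c10::DispatchKey::Python);
+//     // Dispatches in this scope skip the Python key; the previous TLS
+//     // state is restored when `no_python` is destroyed.
+//   }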
+ +class C10_API IncludeDispatchKeyGuard { + public: + IncludeDispatchKeyGuard(DispatchKeySet); + IncludeDispatchKeyGuard(DispatchKey k) + : IncludeDispatchKeyGuard(DispatchKeySet(k)) {} + IncludeDispatchKeyGuard(const IncludeDispatchKeyGuard&) = delete; + IncludeDispatchKeyGuard operator=(const IncludeDispatchKeyGuard&) = delete; + IncludeDispatchKeyGuard(IncludeDispatchKeyGuard&&) = delete; + IncludeDispatchKeyGuard operator=(IncludeDispatchKeyGuard&&) = delete; + ~IncludeDispatchKeyGuard(); + + private: + // A little micro-optimization to save us from tls_get_addr call + // on destruction + PODLocalDispatchKeySet* tls_; + DispatchKeySet include_; +}; + +class C10_API ExcludeDispatchKeyGuard { + public: + ExcludeDispatchKeyGuard(DispatchKeySet); + ExcludeDispatchKeyGuard(DispatchKey k) + : ExcludeDispatchKeyGuard(DispatchKeySet(k)) {} + ExcludeDispatchKeyGuard(const ExcludeDispatchKeyGuard&) = delete; + ExcludeDispatchKeyGuard operator=(const ExcludeDispatchKeyGuard&) = delete; + ExcludeDispatchKeyGuard(ExcludeDispatchKeyGuard&&) = delete; + ExcludeDispatchKeyGuard operator=(ExcludeDispatchKeyGuard&&) = delete; + ~ExcludeDispatchKeyGuard(); + + private: + // A little micro-optimization to save us from tls_get_addr call + // on destruction + PODLocalDispatchKeySet* tls_; + DispatchKeySet exclude_; +}; + +struct C10_API ForceDispatchKeyGuard { + public: + ForceDispatchKeyGuard(c10::impl::LocalDispatchKeySet key_set) + : saved_keyset_(c10::impl::tls_local_dispatch_key_set()) { + c10::impl::_force_tls_local_dispatch_key_set(key_set); + } + ForceDispatchKeyGuard( + c10::DispatchKeySet include, + c10::DispatchKeySet exclude) + : saved_keyset_(c10::impl::tls_local_dispatch_key_set()) { + auto updated_set = saved_keyset_; + updated_set.included_ = include; + updated_set.excluded_ = exclude; + c10::impl::_force_tls_local_dispatch_key_set(updated_set); + } + ~ForceDispatchKeyGuard() { + c10::impl::_force_tls_local_dispatch_key_set(saved_keyset_); + } + + private: + c10::impl::LocalDispatchKeySet saved_keyset_; +}; + +// Non-RAII API for manipulating the thread-local dispatch state. +// Please prefer the RAII API. The non-RAII API may be useful when +// the included/excluded state of a given DispatchKey must span +// many calls from the Python to the C++, so you cannot conveniently +// use an RAII guard. +// +// Example use case: a Python context manager that includes a certain +// DispatchKey, to ensure ops running under the context manager dispatch +// through that DispatchKey's registered overrides. +// +// The non-RAII API is less efficient than the RAII guards because both the +// getter and setter will do a tls_getaddr lookup (the RAII struct only needs +// one!) 
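+//
+// An illustrative sketch of the non-RAII pattern described above (the
+// enter/exit helpers are hypothetical stand-ins for a Python context
+// manager's __enter__/__exit__; only the tls_* functions below are real):
+//
+//   void context_enter() {
+//     tls_set_dispatch_key_included(DispatchKey::Python, true);
+//   }
+//   void context_exit(bool was_included_before) {
+//     tls_set_dispatch_key_included(DispatchKey::Python, was_included_before);
+//   }
+//
+// __enter__ would typically stash tls_is_dispatch_key_included(...) so that
+// __exit__ restores the prior state instead of assuming it was false.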
+ +C10_API bool tls_is_dispatch_key_excluded(DispatchKey x); +C10_API void tls_set_dispatch_key_excluded(DispatchKey x, bool desired_state); +C10_API bool tls_is_dispatch_key_included(DispatchKey x); +C10_API void tls_set_dispatch_key_included(DispatchKey x, bool desired_state); +C10_API bool tls_is_dispatch_keyset_excluded(DispatchKeySet ks); +C10_API bool tls_is_dispatch_keyset_included(DispatchKeySet ks); + +} // namespace c10::impl diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/PyInterpreter.h b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/PyInterpreter.h new file mode 100644 index 0000000000000000000000000000000000000000..e1aaa23ea05e87430d0569e3bb556a344f9ef408 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/PyInterpreter.h @@ -0,0 +1,239 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Forward declarations + +namespace c10 { +struct IValue; +class OperatorHandle; +struct TensorImpl; +} // namespace c10 + +namespace torch::jit { +using Stack = std::vector; +} + +// Actual implementation + +namespace c10::impl { + +struct C10_API PyInterpreter; + +// Note [Python interpreter tag] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Traditionally, PyTorch is layered such that our Python library +// (libtorch_python) references our pure C++ library (libtorch) as the +// natural order of things. However, sometimes this natural order is +// subverted: C++ objects refer to Python objects (for example, we +// store a PyObject* pointer on TensorImpl so that converting from a +// C++ Tensor to a Python Tensor is just a memory dereference). +// +// These unusual orderings must be treated with care. To start, you need to +// virtualize the destructor so that the PyObject can be decref'ed on +// destruction (because the C++ object itself doesn't know anything about +// Python--remember, layering!). This process itself is fraught, since +// acquiring the GIL could lead to deadlocks if someone is blocking on you +// while holding the GIL. Furthermore, if the C++ objects outlive the +// interpreter (which can happen if you stash them in a static global +// variable defined in libtorch), you may attempt to decref the object when +// the Python interpreter has already been shutdown. +// +// BUT WAIT, IT GETS WORSE. With torchdeploy, there may be multiple Python +// interpreters in a single process. If a C++ object is accessible from +// multiple interpreters, we must take care not to accidentally pass a +// PyObject from one interpreter with another interpreter. +// +// To prevent these mixups, we introduce a PyInterpreter "tag" (object with +// a vtable), which specifies a specific Python interpreter. +// +// - Any given object can be associated with AT MOST one Python interpreter. +// We represent the interpreter tag as a memory address to an instance of +// a virtual class that is allocated once per interpreter (this is so that +// we can request the interpreter to perform operations for us, if +// necessary). +// +// - It can be recorded with a PyObject (PyInterpreterObject) so that +// we know what interpreter the object is associated with, and we can +// raise an error if you try to use the PyObject from the wrong +// interpreter context. +// +// - It contains a vtable that can be used to perform various Python +// operations from ordinary C++ code that ordinarily wouldn't be accessible +// from libtorch. 
+// +// A simple use case is when a C++ object must be associated with a PyObject. +// However, for TensorImpl, we lazily allocate a PyObject the first time the +// object passes into Python. The invariants for this situation are more +// subtle: +// +// - A given TensorImpl's interpreter tag can only go from uninitialized to +// tagged; once tagged, this is a quiescent state (once tagged to an +// interpreter, ALWAYS tagged to that interpreter) +// +// - A thread may mutate the PyObject field of a TensorImpl if and only if it +// holds the GIL for the interpreter tagged on the TensorImpl. (If the +// TensorImpl is not tagged, it must first atomically claim its tag before it +// can validly write) +// +// WARNING: This class has to be written very carefully, because it may be +// possible for a Tensor to have a reference an interpreter corresponding to +// a shared library that has ALREADY BEEN UNLOADED. This makes blindly calling +// virtual methods very dangerous, because the vtable may be garbage at that +// point (on a good day, you might get "pure virtual method called"). +// +// The idea to solve this problem is we always leak PyInterpreters (so they +// always stay live even after dlclose), and make sure we can disarm their +// virtual methods by indirecting through a separate PyInterpreterVTable +// object. This can be replaced with a no-op vtable from libc10.so, which +// is guaranteed to stick around until the bitter end. +// +// NB: The downside with representing PyInterpreter tags as full objects is that +// it takes an extra word on TensorImpl. If tags were instead just integer +// indices, on 64-bit architectures we could pack the tag and PyObject together +// into a single atomic word. On 32-bit architectures we could simply say that +// only one Python interpreter is supported (erroring if a nontrivial +// interpreter tag is attempted to be set). +// +// The difficulty with this scheme is we need to maintain an out-of-line table +// to get at the PyInterpreters so that we can do virtual method calls on them, +// and registration/deregistration to this table must be done in a thread safe +// manner. This can be easily done if the number of possible PyInterpreters is +// small enough (e.g., 8-bit integer) by simply preallocating an array of +// sufficient size to hold all possible interpreters. Surely 128 threads is +// more than enough for anyone! +// +// I didn't decide to do this technique at the moment, because the extra word +// added by the PyInterpreter tag takes us to 24 words, which means that we +// still fit inside three eight word cache lines. If you need to penny pinch +// another word consider doing this! + +struct C10_API PyInterpreterVTable { + virtual ~PyInterpreterVTable() = default; + + // Report the name of this interpreter + virtual std::string name() const = 0; + + // Run Py_DECREF on a PyObject. 
We DO NOT assume the GIL is held on call + // See NOTE [PyInterpreter::decref takes a `has_pyobj_slot` arg] + virtual void decref(PyObject* pyobj, bool has_pyobj_slot) const = 0; + + // Perform a detach by deferring to the __torch_dispatch__ implementation of + // detach, which will also arrange for the PyObject to get copied in this + // situation + virtual c10::intrusive_ptr detach( + const TensorImpl* self) const = 0; + + // Invoke the Python boxed fallback dispatch to go back into Python + virtual void dispatch(const c10::OperatorHandle& op, torch::jit::Stack* stack) + const = 0; + + virtual void reportErrorCallback(PyObject* callback, DispatchKey key) + const = 0; + + // This is only invoked in the multipy/torchdeploy situation from + // pythonOpRegistrationTrampoline; this lets us get to the Python + // interpreter to actually find the appropriate Python op registration + // entry to call. + virtual void python_op_registration_trampoline( + const c10::OperatorHandle& op, + c10::DispatchKey, + torch::jit::Stack* stack) const = 0; + + virtual void throw_abstract_impl_not_imported_error( + std::string opname, + const char* pymodule, + const char* context) const = 0; + + // Invoke the Python dispatcher to handle this call + virtual void python_dispatcher( + const c10::OperatorHandle& op, + c10::DispatchKeySet, + torch::jit::Stack* stack) const = 0; + + virtual bool is_contiguous(const TensorImpl* self, at::MemoryFormat) + const = 0; + virtual bool is_strides_like(const TensorImpl* self, at::MemoryFormat) + const = 0; + virtual bool is_non_overlapping_and_dense(const TensorImpl* self) const = 0; + virtual c10::Device device(const TensorImpl* self) const = 0; + virtual int64_t dim(const TensorImpl* self) const = 0; + virtual c10::IntArrayRef strides(const TensorImpl* self) const = 0; + virtual c10::IntArrayRef sizes(const TensorImpl* self) const = 0; + virtual c10::SymIntArrayRef sym_sizes(const TensorImpl* self) const = 0; + virtual c10::Layout layout(const TensorImpl* self) const = 0; + virtual int64_t numel(const TensorImpl* self) const = 0; + virtual c10::SymInt sym_numel(const TensorImpl* self) const = 0; + virtual c10::SymIntArrayRef sym_strides(const TensorImpl* self) const = 0; + virtual c10::SymInt sym_storage_offset(const TensorImpl* self) const = 0; + + virtual void trace_gpu_event_creation(uintptr_t event) const = 0; + virtual void trace_gpu_event_deletion(uintptr_t event) const = 0; + virtual void trace_gpu_event_record(uintptr_t event, uintptr_t stream) + const = 0; + virtual void trace_gpu_event_wait(uintptr_t event, uintptr_t stream) + const = 0; + virtual void trace_gpu_memory_allocation(uintptr_t ptr) const = 0; + virtual void trace_gpu_memory_deallocation(uintptr_t ptr) const = 0; + virtual void trace_gpu_stream_creation(uintptr_t stream) const = 0; + virtual void trace_gpu_device_synchronization() const = 0; + virtual void trace_gpu_stream_synchronization(uintptr_t stream) const = 0; + virtual void trace_gpu_event_synchronization(uintptr_t event) const = 0; + + virtual void reset_backward_hooks(const TensorImpl* self) const = 0; +}; + +struct C10_API PyInterpreter { + const PyInterpreterVTable* vtable_; + + PyInterpreter(const PyInterpreterVTable* vtable) : vtable_(vtable){}; + + const PyInterpreterVTable& operator*() const noexcept { + return *vtable_; + } + const PyInterpreterVTable* operator->() const noexcept { + return vtable_; + } + + // Disarm this PyInterpreter, making all of its methods noops. 
+ // The vtable pointer is not an atomic at the moment, which means + // a disarm() invocation that is concurrent with active destructors + // is not thread safe and will trigger TSAN. My hope is that this + // situations doesn't ever actually happen; tensor destruction should + // quiesce when a dlclose happens, and any long lived tensors whose + // destructors would be disarmed here only begin the destruction process + // on process shutdown (long after the dlclose has occurred). + void disarm() noexcept; +}; + +// PyInterpreterStatus describes what the state of its interpreter tag +// is, relative to the thread currently holding the GIL. +enum class PyInterpreterStatus { + // We just allocated the Tensor, it hasn't escaped to other threads, + // we know that it definitely hasn't been tagged to be associated + // with an interpreter. + DEFINITELY_UNINITIALIZED, + // We queried the interpreter field and it looked uninitialized. But + // another thread may have raced with us to tag it with some other + // interpreter id. So we will have to do a CEX to make sure we can + // actually nab it. + MAYBE_UNINITIALIZED, + // We queried the interpreter field and it was tagged to belong to us. + // This means we have sole write access (as we hold the GIL for this + // interpreter) + TAGGED_BY_US, + // Someone else tagged this. We can't use this TensorImpl from Python. + TAGGED_BY_OTHER, +}; + +} // namespace c10::impl diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/PythonDispatcherTLS.h b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/PythonDispatcherTLS.h new file mode 100644 index 0000000000000000000000000000000000000000..9016c3e11e1579feb48058a928c3dc033d7d0d24 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/PythonDispatcherTLS.h @@ -0,0 +1,24 @@ +#pragma once + +#include +#include + +namespace c10::impl { + +struct C10_API PythonDispatcherTLS { + static void set_state(PyInterpreter* state); + static PyInterpreter* get_state(); + static void reset_state(); +}; + +struct C10_API DisablePythonDispatcher { + DisablePythonDispatcher() : old_(PythonDispatcherTLS::get_state()) { + PythonDispatcherTLS::set_state({}); + } + ~DisablePythonDispatcher() { + PythonDispatcherTLS::set_state(old_); + } + PyInterpreter* old_; +}; + +} // namespace c10::impl diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/SizesAndStrides.h b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/SizesAndStrides.h new file mode 100644 index 0000000000000000000000000000000000000000..827da598d8e5a77384c5f24af6e6dd3d08cbb2fd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/SizesAndStrides.h @@ -0,0 +1,315 @@ +#pragma once + +#include +#include + +#include +#include +#include + +#define C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE 5 + +namespace c10::impl { + +// Packed container for TensorImpl sizes and strides. +// This design improves on the previous approach of using a pair of +// c10::SmallVector by specializing for the operations we +// actually use and enforcing that the number of sizes is the same as +// the number of strides. The memory layout is as follows: +// +// 1 size_t for the size +// 5 eightbytes of inline sizes and 5 eightbytes of inline strides, OR pointer +// to out-of-line array +class C10_API SizesAndStrides { + public: + // TODO: different iterator types for sizes & strides to prevent + // mixing the two accidentally. 
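+  //
+  // An illustrative sketch of the intended use (numeric values are
+  // arbitrary; sizes and strides always share one length):
+  //
+  //   SizesAndStrides ss;            // default: 1-d, size [0], stride [1]
+  //   ss.set_sizes({2, 3, 4});       // grows to 3 dims; new strides start at 0
+  //   ss.set_strides({12, 4, 1});    // length must match size() exactly
+  //   ss.size_at(1);                 // -> 3
+  //   ss.stride_at(2);               // -> 1
+  //
+  //   // Up to C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE (5) dims live inline;
+  //   // resizing past that moves both arrays into one heap allocation.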
+ using sizes_iterator = int64_t*; + using sizes_const_iterator = const int64_t*; + using strides_iterator = int64_t*; + using strides_const_iterator = const int64_t*; + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + SizesAndStrides() { + size_at_unchecked(0) = 0; + stride_at_unchecked(0) = 1; + } + + ~SizesAndStrides() { + if (C10_UNLIKELY(!isInline())) { + // NOLINTNEXTLINE(cppcoreguidelines-no-malloc) + free(outOfLineStorage_); + } + } + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + SizesAndStrides(const SizesAndStrides& rhs) : size_(rhs.size_) { + if (C10_LIKELY(rhs.isInline())) { + copyDataInline(rhs); + } else { + allocateOutOfLineStorage(size_); + copyDataOutline(rhs); + } + } + + SizesAndStrides& operator=(const SizesAndStrides& rhs) { + if (this == &rhs) { + return *this; + } + if (C10_LIKELY(rhs.isInline())) { + if (C10_UNLIKELY(!isInline())) { + // NOLINTNEXTLINE(cppcoreguidelines-no-malloc) + free(outOfLineStorage_); + } + copyDataInline(rhs); + } else { + if (isInline()) { + allocateOutOfLineStorage(rhs.size_); + } else { + resizeOutOfLineStorage(rhs.size_); + } + copyDataOutline(rhs); + } + size_ = rhs.size_; + return *this; + } + + // Move from rhs. rhs.size() == 0 afterwards. + SizesAndStrides(SizesAndStrides&& rhs) noexcept : size_(rhs.size_) { + if (C10_LIKELY(isInline())) { + memcpy(inlineStorage_, rhs.inlineStorage_, sizeof(inlineStorage_)); + } else { + outOfLineStorage_ = rhs.outOfLineStorage_; + rhs.outOfLineStorage_ = nullptr; + } + + rhs.size_ = 0; + } + + // Move from rhs. rhs.size() == 0 afterwards. + SizesAndStrides& operator=(SizesAndStrides&& rhs) noexcept { + if (this == &rhs) { + return *this; + } + if (C10_LIKELY(rhs.isInline())) { + if (C10_UNLIKELY(!isInline())) { + // NOLINTNEXTLINE(cppcoreguidelines-no-malloc) + free(outOfLineStorage_); + } + copyDataInline(rhs); + } else { + // They're outline. We're going to steal their vector. 
+ if (!isInline()) { + // NOLINTNEXTLINE(cppcoreguidelines-no-malloc) + free(outOfLineStorage_); + } + outOfLineStorage_ = rhs.outOfLineStorage_; + rhs.outOfLineStorage_ = nullptr; + } + size_ = rhs.size_; + rhs.size_ = 0; + + return *this; + } + + size_t size() const noexcept { + return size_; + } + + const int64_t* sizes_data() const noexcept { + if (C10_LIKELY(isInline())) { + return &inlineStorage_[0]; + } else { + return &outOfLineStorage_[0]; + } + } + + int64_t* sizes_data() noexcept { + if (C10_LIKELY(isInline())) { + return &inlineStorage_[0]; + } else { + return &outOfLineStorage_[0]; + } + } + + sizes_const_iterator sizes_begin() const noexcept { + return sizes_data(); + } + + sizes_iterator sizes_begin() noexcept { + return sizes_data(); + } + + sizes_const_iterator sizes_end() const noexcept { + return sizes_begin() + size(); + } + + sizes_iterator sizes_end() noexcept { + return sizes_begin() + size(); + } + + IntArrayRef sizes_arrayref() const noexcept { + return IntArrayRef{sizes_data(), size()}; + } + + void set_sizes(IntArrayRef newSizes) { + resize(newSizes.size()); + std::copy(newSizes.begin(), newSizes.end(), sizes_begin()); + } + + void set_strides(IntArrayRef strides) { + TORCH_INTERNAL_ASSERT(strides.size() == size()); + std::copy(strides.begin(), strides.end(), strides_begin()); + } + + const int64_t* strides_data() const noexcept { + if (C10_LIKELY(isInline())) { + return &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE]; + } else { + return &outOfLineStorage_[size()]; + } + } + + int64_t* strides_data() noexcept { + if (C10_LIKELY(isInline())) { + return &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE]; + } else { + return &outOfLineStorage_[size()]; + } + } + + strides_const_iterator strides_begin() const noexcept { + if (C10_LIKELY(isInline())) { + return &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE]; + } else { + return &outOfLineStorage_[size()]; + } + } + + strides_iterator strides_begin() noexcept { + if (C10_LIKELY(isInline())) { + return &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE]; + } else { + return &outOfLineStorage_[size()]; + } + } + + strides_const_iterator strides_end() const noexcept { + return strides_begin() + size(); + } + + strides_iterator strides_end() noexcept { + return strides_begin() + size(); + } + + IntArrayRef strides_arrayref() const noexcept { + return IntArrayRef{strides_data(), size()}; + } + + // Size accessors. + int64_t size_at(size_t idx) const noexcept { + assert(idx < size()); + return sizes_data()[idx]; + } + + int64_t& size_at(size_t idx) noexcept { + assert(idx < size()); + return sizes_data()[idx]; + } + + int64_t size_at_unchecked(size_t idx) const noexcept { + return sizes_data()[idx]; + } + + int64_t& size_at_unchecked(size_t idx) noexcept { + return sizes_data()[idx]; + } + + // Size accessors. 
+ int64_t stride_at(size_t idx) const noexcept { + assert(idx < size()); + return strides_data()[idx]; + } + + int64_t& stride_at(size_t idx) noexcept { + assert(idx < size()); + return strides_data()[idx]; + } + + int64_t stride_at_unchecked(size_t idx) const noexcept { + return strides_data()[idx]; + } + + int64_t& stride_at_unchecked(size_t idx) noexcept { + return strides_data()[idx]; + } + + void resize(size_t newSize) { + const auto oldSize = size(); + if (newSize == oldSize) { + return; + } + if (C10_LIKELY( + newSize <= C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE && isInline())) { + if (oldSize < newSize) { + const auto bytesToZero = + (newSize - oldSize) * sizeof(inlineStorage_[0]); + memset(&inlineStorage_[oldSize], 0, bytesToZero); + memset( + &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE + oldSize], + 0, + bytesToZero); + } + size_ = newSize; + } else { + resizeSlowPath(newSize, oldSize); + } + } + + void resizeSlowPath(size_t newSize, size_t oldSize); + + private: + bool isInline() const noexcept { + return size_ <= C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE; + } + + void copyDataInline(const SizesAndStrides& rhs) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.isInline()); + memcpy(inlineStorage_, rhs.inlineStorage_, sizeof(inlineStorage_)); + } + + static size_t storageBytes(size_t size) noexcept { + return size * 2 * sizeof(int64_t); + } + + void allocateOutOfLineStorage(size_t size) { + // NOLINTNEXTLINE(cppcoreguidelines-no-malloc) + outOfLineStorage_ = static_cast(malloc(storageBytes(size))); + TORCH_CHECK( + outOfLineStorage_, + "Could not allocate memory for Tensor SizesAndStrides!"); + } + + void resizeOutOfLineStorage(size_t newSize) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!isInline()); + outOfLineStorage_ = static_cast( + // NOLINTNEXTLINE(cppcoreguidelines-no-malloc) + realloc(outOfLineStorage_, storageBytes(newSize))); + TORCH_CHECK( + outOfLineStorage_, + "Could not allocate memory for Tensor SizesAndStrides!"); + } + + void copyDataOutline(const SizesAndStrides& rhs) noexcept { + memcpy(outOfLineStorage_, rhs.outOfLineStorage_, storageBytes(rhs.size_)); + } + + size_t size_{1}; + union { + int64_t* outOfLineStorage_; + // NOLINTNEXTLINE(*c-array*) + int64_t inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE * 2]{}; + }; +}; + +} // namespace c10::impl diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/TorchDispatchModeTLS.h b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/TorchDispatchModeTLS.h new file mode 100644 index 0000000000000000000000000000000000000000..50a92459e61e9fc0386abdef2e75f7473c640d91 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/TorchDispatchModeTLS.h @@ -0,0 +1,63 @@ +#pragma once + +#include +#include + +namespace c10::impl { + +enum class TorchDispatchModeKey : int8_t { + FAKE, + PROXY, + FUNCTIONAL, + NUM_MODE_KEYS +}; + +struct C10_API TorchDispatchModeTLS { + // This API is NOT invariant safe. + // It must not take in an infra mode that uses TorchDispatchModeKey + // If you're pushing an infra mode onto the stack, we expect + // you to use set_mode + static void push_non_infra_mode_onto_stack( + std::shared_ptr mode); + // Pops the top mode of the stack, + // giving precedence to user modes before attempting to pop + // any infra modes + static const std::shared_ptr pop_stack(); + // Returns the highest-priority infra mode on the stack, + // along with its mode key. 
+ static const std::tuple, TorchDispatchModeKey> + pop_highest_infra_mode(); + + static const std::shared_ptr& get_stack_at(int64_t idx); + static int64_t stack_len(); + + static const c10::optional> get_mode( + TorchDispatchModeKey mode_key); + static const c10::optional> unset_mode( + TorchDispatchModeKey mode_key); + static void set_mode( + const std::shared_ptr& mode, + TorchDispatchModeKey mode_key); + + static const TorchDispatchModeTLS& get_state(); + static void set_state(TorchDispatchModeTLS state); + + static bool any_modes_set(bool skip_infra_modes = false); + + private: + std::vector> stack_; + // Users are allowed to push multiple ProxyTorchDispatchMode objects onto the + // stack + // However, we only allow a single FakeTensorMode onto the stack at a time + // (Pushing additional FakeTensorModes onto the stack is a no-op) + std::array< + c10::optional>, + static_cast(TorchDispatchModeKey::NUM_MODE_KEYS)> + infra_modes_; +}; + +C10_API bool dispatch_mode_enabled(); + +C10_API std::string to_string(TorchDispatchModeKey mode_key); + +} // namespace c10::impl diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/VirtualGuardImpl.h b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/VirtualGuardImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..ce32411d3b095fbfccae686c3c12f5ef08ac2d85 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/VirtualGuardImpl.h @@ -0,0 +1,91 @@ +#pragma once + +#include + +namespace c10::impl { + +/** + * An implementation of DeviceGuardImplInterface which delegates + * to virtual dispatch on the DeviceGuardImpl registry. + */ +class VirtualGuardImpl final : public DeviceGuardImplInterface { + public: + VirtualGuardImpl(DeviceType device_type) + : impl_(getDeviceGuardImpl(device_type)) {} + // This constructor exists purely for testing + VirtualGuardImpl(const DeviceGuardImplInterface* impl) : impl_(impl) {} + + // Copying and moving is OK! 
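+  // (The wrapper only holds a non-owning pointer obtained from the
+  // DeviceGuardImpl registry.) Editor's hedged sketch of typical use, assuming
+  // a backend implementation for `device_type` has been registered:
+  //
+  //   c10::impl::VirtualGuardImpl impl(device_type);
+  //   c10::Device prev = impl.exchangeDevice(c10::Device(device_type, 1));
+  //   ...                       // run work on device 1
+  //   impl.setDevice(prev);     // restore the previous device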
+ VirtualGuardImpl(const VirtualGuardImpl&) = default; + VirtualGuardImpl& operator=(const VirtualGuardImpl&) = default; + VirtualGuardImpl(VirtualGuardImpl&&) noexcept = default; + VirtualGuardImpl& operator=(VirtualGuardImpl&&) noexcept = default; + + DeviceType type() const override { + return impl_->type(); + } + Device exchangeDevice(Device d) const override { + return impl_->exchangeDevice(d); + } + Device getDevice() const override { + return impl_->getDevice(); + } + void setDevice(Device d) const override { + impl_->setDevice(d); + } + void uncheckedSetDevice(Device d) const noexcept override { + impl_->uncheckedSetDevice(d); + } + Stream getStream(Device d) const noexcept override { + return impl_->getStream(d); + } + Stream getDefaultStream(Device d) const override { + return impl_->getDefaultStream(d); + } + Stream getStreamFromGlobalPool(Device d, bool isHighPriority = false) + const override { + return impl_->getStreamFromGlobalPool(d, isHighPriority); + } + Stream exchangeStream(Stream s) const noexcept override { + return impl_->exchangeStream(s); + } + DeviceIndex deviceCount() const noexcept override { + return impl_->deviceCount(); + } + + // Event functions + void record( + void** event, + const Stream& stream, + const DeviceIndex device_index, + const EventFlag flag) const override { + impl_->record(event, stream, device_index, flag); + } + void block(void* event, const Stream& stream) const override { + impl_->block(event, stream); + } + bool queryEvent(void* event) const override { + return impl_->queryEvent(event); + } + void destroyEvent(void* event, const DeviceIndex device_index) + const noexcept override { + impl_->destroyEvent(event, device_index); + } + + bool queryStream(const Stream& stream) const override { + return impl_->queryStream(stream); + } + void synchronizeStream(const Stream& stream) const override { + impl_->synchronizeStream(stream); + } + + void recordDataPtrOnStream(const c10::DataPtr& data_ptr, const Stream& stream) + const override { + impl_->recordDataPtrOnStream(data_ptr, stream); + } + + private: + const DeviceGuardImplInterface* impl_ = nullptr; +}; + +} // namespace c10::impl diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/alloc_cpu.h b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/alloc_cpu.h new file mode 100644 index 0000000000000000000000000000000000000000..ee32a0f463068dc91bffb7ee2c8f736893b87b81 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/core/impl/alloc_cpu.h @@ -0,0 +1,12 @@ +#pragma once + +#include + +#include + +namespace c10 { + +C10_API void* alloc_cpu(size_t nbytes); +C10_API void free_cpu(void* data); + +} // namespace c10 diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUCachingAllocator.h b/venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUCachingAllocator.h new file mode 100644 index 0000000000000000000000000000000000000000..683654263a473da07f95bd130063e1f9b2982b68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUCachingAllocator.h @@ -0,0 +1,20 @@ +#pragma once + +#include +#include + +namespace c10::xpu::XPUCachingAllocator { + +C10_XPU_API Allocator* get(); + +C10_XPU_API void init(DeviceIndex device_count); + +C10_XPU_API void emptyCache(); + +C10_XPU_API void* raw_alloc(size_t size); + +C10_XPU_API void raw_delete(void* ptr); + +C10_XPU_API void recordStream(const DataPtr& dataPtr, XPUStream stream); + +} // namespace c10::xpu::XPUCachingAllocator diff --git 
a/venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUDeviceProp.h b/venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUDeviceProp.h new file mode 100644 index 0000000000000000000000000000000000000000..2627ce8620d5036337ec2f8936cb5804cce462d2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUDeviceProp.h @@ -0,0 +1,155 @@ +#pragma once + +#include +#include + +namespace c10::xpu { + +#define AT_FORALL_XPU_DEVICE_PROPERTIES(_) \ + /* the device name of this SYCL device. */ \ + _(name) \ + \ + /* the device type associated with the device. */ \ + _(device_type) \ + \ + /* the vendor of this SYCL device. */ \ + _(vendor) \ + \ + /* a backend-defined driver version as a std::string. */ \ + _(driver_version) \ + \ + /* the SYCL version as a std::string in the form . */ \ + _(version) \ + \ + /* true if the SYCL device is available. Otherwise, return false. */ \ + _(is_available) \ + \ + /* the maximum size in bytes of the arguments that can be passed to a \ + * kernel. */ \ + _(max_parameter_size) \ + \ + /* the number of parallel compute units available to the device. */ \ + _(max_compute_units) \ + \ + /* the maximum dimensions that specify the global and local work-item IDs \ + * used by the data parallel execution model. */ \ + _(max_work_item_dimensions) \ + \ + /* the maximum number of workitems that are permitted in a work-group \ + * executing a kernel on a single compute unit. */ \ + _(max_work_group_size) \ + \ + /* the maximum number of subgroups in a work-group for any kernel executed \ + * on the device. */ \ + _(max_num_sub_groups) \ + \ + /* a std::vector of size_t containing the set of sub-group sizes supported \ + * by the device. */ \ + _(sub_group_sizes) \ + \ + /* the maximum configured clock frequency of this SYCL device in MHz. */ \ + _(max_clock_frequency) \ + \ + /* the default compute device address space size specified as an unsigned \ + * integer value in bits. Must return either 32 or 64. */ \ + _(address_bits) \ + \ + /* the maximum size of memory object allocation in bytes. */ \ + _(max_mem_alloc_size) \ + \ + /* the minimum value in bits of the largest supported SYCL built-in data \ + * type if this SYCL device is not of device type \ + * sycl::info::device_type::custom. */ \ + _(mem_base_addr_align) \ + \ + /* a std::vector of info::fp_config describing the half/single/double \ + * precision floating-point capability of this SYCL device. */ \ + _(half_fp_config) \ + _(single_fp_config) \ + _(double_fp_config) \ + \ + /* the size of global device memory in bytes. */ \ + _(global_mem_size) \ + \ + /* the type of global memory cache supported. */ \ + _(global_mem_cache_type) \ + \ + /* the size of global memory cache in bytes. */ \ + _(global_mem_cache_size) \ + \ + /* the size of global memory cache line in bytes. */ \ + _(global_mem_cache_line_size) \ + \ + /* the type of local memory supported. */ \ + _(local_mem_type) \ + \ + /* the size of local memory arena in bytes. */ \ + _(local_mem_size) \ + \ + /* the maximum number of sub-devices that can be created when this device is \ + * partitioned. */ \ + _(partition_max_sub_devices) \ + \ + /* the resolution of device timer in nanoseconds. */ \ + _(profiling_timer_resolution) \ + \ + /* the preferred native vector width size for built-in scalar types that can \ + * be put into vectors. 
*/ \ + _(preferred_vector_width_char) \ + _(preferred_vector_width_short) \ + _(preferred_vector_width_int) \ + _(preferred_vector_width_long) \ + _(preferred_vector_width_float) \ + _(preferred_vector_width_double) \ + _(preferred_vector_width_half) \ + \ + /* the native ISA vector width. The vector width is defined as the number of \ + * scalar elements that can be stored in the vector. */ \ + _(native_vector_width_char) \ + _(native_vector_width_short) \ + _(native_vector_width_int) \ + _(native_vector_width_long) \ + _(native_vector_width_float) \ + _(native_vector_width_double) \ + _(native_vector_width_half) + +#define AT_FORALL_XPU_EXT_DEVICE_PROPERTIES(_) \ + /* the number of EUs associated with the Intel GPU. */ \ + _(gpu_eu_count, 512) \ + \ + /* the number of EUs in a subslice. */ \ + _(gpu_eu_count_per_subslice, 8) \ + \ + /* the simd width of EU of GPU. */ \ + _(gpu_eu_simd_width, 8) \ + \ + /* the number of hardware threads per EU of GPU. */ \ + _(gpu_hw_threads_per_eu, 8) + +#define _DEFINE_SYCL_PROP(ns, property, member) \ + ns::property::return_type member; + +#define DEFINE_DEVICE_PROP(property) \ + _DEFINE_SYCL_PROP(sycl::info::device, property, property) + +#define DEFINE_PLATFORM_PROP(property, member) \ + _DEFINE_SYCL_PROP(sycl::info::platform, property, member) + +#define DEFINE_EXT_DEVICE_PROP(property, ...) \ + _DEFINE_SYCL_PROP(sycl::ext::intel::info::device, property, property) + +struct C10_XPU_API DeviceProp { + AT_FORALL_XPU_DEVICE_PROPERTIES(DEFINE_DEVICE_PROP); + + // the platform name. + DEFINE_PLATFORM_PROP(name, platform_name); + + AT_FORALL_XPU_EXT_DEVICE_PROPERTIES(DEFINE_EXT_DEVICE_PROP) +}; + +#undef _DEFINE_SYCL_PROP +#undef DEFINE_DEVICE_PROP +#undef DEFINE_PLATFORM_PROP +#undef DEFINE_EXT_DEVICE_PROP + +} // namespace c10::xpu diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUException.h b/venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUException.h new file mode 100644 index 0000000000000000000000000000000000000000..9bc64ec3f39b7b6afed238a2b078a2ef4e6952f9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUException.h @@ -0,0 +1,22 @@ +#pragma once + +#include +#include + +namespace c10::xpu { + +static inline sycl::async_handler asyncHandler = [](sycl::exception_list el) { + if (el.size() == 0) { + return; + } + for (const auto& e : el) { + try { + std::rethrow_exception(e); + } catch (sycl::exception& e) { + TORCH_WARN("SYCL Exception: ", e.what()); + } + } + throw; +}; + +} // namespace c10::xpu diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUFunctions.h b/venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..126d1d5fe66bf041c1fac66757ba1a7dba40b129 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUFunctions.h @@ -0,0 +1,35 @@ +#pragma once + +#include +#include +#include + +// The naming convention used here matches the naming convention of torch.xpu + +namespace c10::xpu { + +// Log a warning only once if no devices are detected. +C10_XPU_API DeviceIndex device_count(); + +// Throws an error if no devices are detected. 
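+// Editor's hedged sketch of how these device helpers are typically combined
+// (illustration only; a device_count() of 0 simply means no XPU runtime or no
+// devices were found):
+//
+//   if (c10::xpu::device_count() > 0) {
+//     c10::xpu::set_device(0);
+//     auto cur = c10::xpu::current_device();
+//     sycl::device& dev = c10::xpu::get_raw_device(cur);
+//   }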
+C10_XPU_API DeviceIndex device_count_ensure_non_zero(); + +C10_XPU_API DeviceIndex current_device(); + +C10_XPU_API void set_device(DeviceIndex device); + +C10_XPU_API DeviceIndex exchange_device(DeviceIndex device); + +C10_XPU_API DeviceIndex maybe_exchange_device(DeviceIndex to_device); + +C10_XPU_API sycl::device& get_raw_device(DeviceIndex device); + +C10_XPU_API sycl::context& get_device_context(); + +C10_XPU_API void get_device_properties( + DeviceProp* device_prop, + DeviceIndex device); + +C10_XPU_API DeviceIndex get_device_idx_from_pointer(void* ptr); + +} // namespace c10::xpu diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUMacros.h b/venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUMacros.h new file mode 100644 index 0000000000000000000000000000000000000000..fc6aad92229c2fe22ca52bfb27f458d1e63165d3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUMacros.h @@ -0,0 +1,19 @@ +#pragma once + +// See c10/macros/Export.h for a detailed explanation of what the function +// of these macros are. We need one set of macros for every separate library +// we build. + +#if defined(__GNUC__) +#define C10_XPU_EXPORT __attribute__((__visibility__("default"))) +#else // defined(__GNUC__) +#define C10_XPU_EXPORT +#endif // defined(__GNUC__) +#define C10_XPU_IMPORT C10_XPU_EXPORT + +// This one is being used by libc10_xpu.so +#ifdef C10_XPU_BUILD_MAIN_LIB +#define C10_XPU_API C10_XPU_EXPORT +#else +#define C10_XPU_API C10_XPU_IMPORT +#endif diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUStream.h b/venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUStream.h new file mode 100644 index 0000000000000000000000000000000000000000..6908e6c432056b9f8246f33c2ff41c0720c45f9b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUStream.h @@ -0,0 +1,162 @@ +#pragma once + +#include +#include + +namespace c10::xpu { + +/* + * Note [Stream Management] + * + * An XPUStream is an abstraction of an actual SYCL queue in which SYCL kernel + * can execute. Currently, there are several pools per device to manage SYCL + * queue, and a device's pool is lazily created. + * + * There are two pools per device. The first pool contains "normal priority" + * queues. The second pool is the "high priority" queues. There are 32 queues in + * per pool per device, and when a queue is requested one of these queues is + * returned round-robin. That is, the first queue requested is at index 0, the + * second at index 1... to index 31, then index 0 again. + * + * This means that if 33 queues are requested, the first and last queues + * requested are actually the same queue (under the covers) and kernels enqueued + * on them cannot run concurrently. + * + * It is safe to enqueue a kernel on the same queue from two different + * threads as the SYCL specification described. + */ + +static constexpr int max_compile_time_stream_priorities = 2; + +/* + * This serves as a wrapper around c10::Stream and acts as a representation for + * a SYCL queue. On each device, a SYCL queue pool consists of kStreamsPerPool + * queues, and you can access a particular queue by its index. The index is + * extracted from XPUStream.id(). + */ +class C10_XPU_API XPUStream { + public: + enum Unchecked { UNCHECKED }; + + // Construct a XPUStream from a Stream. This construction is checked, and + // will raise an error if the Stream is not, in fact, a XPU stream. 
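+  //
+  // Editor's aside (hedged illustration of the round-robin pool described in
+  // Note [Stream Management] above; not part of the upstream header):
+  //
+  //   auto a = c10::xpu::getStreamFromPool(/*isHighPriority=*/false, /*device=*/0);
+  //   auto b = c10::xpu::getStreamFromPool(false, 0);  // next queue, round-robin
+  //   sycl::queue& q = a.queue();                      // underlying SYCL queue
+  //   c10::xpu::setCurrentXPUStream(b);                // thread-local current stream
+  //   a.synchronize();                                 // wait for work enqueued on `a`
+  //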
+ explicit XPUStream(Stream stream) : stream_(stream) { + TORCH_CHECK(stream_.device_type() == DeviceType::XPU); + } + + // Construct a XPUStream from a Stream with no error checking. + explicit XPUStream(Unchecked, Stream stream) : stream_(stream) {} + + bool operator==(const XPUStream& other) const noexcept { + return unwrap() == other.unwrap(); + } + + bool operator!=(const XPUStream& other) const noexcept { + return unwrap() != other.unwrap(); + } + + operator sycl::queue&() const { + return queue(); + } + + operator Stream() const { + return unwrap(); + } + + DeviceType device_type() const { + return DeviceType::XPU; + } + + DeviceIndex device_index() const { + return stream_.device_index(); + } + + Device device() const { + return Device(DeviceType::XPU, device_index()); + } + + // Return the stream ID corresponding to this particular stream. StreamId is + /// a int64_t representation generated by its type and index. + StreamId id() const { + return stream_.id(); + } + + bool query() const { + return queue().ext_oneapi_empty(); + } + + void synchronize() const { + queue().wait_and_throw(); + } + + int priority() const; + + // Explicit conversion to sycl::queue&. + sycl::queue& queue() const; + + Stream unwrap() const { + return stream_; + } + + struct c10::StreamData3 pack3() const { + return stream_.pack3(); + } + + static XPUStream unpack3( + StreamId stream_id, + DeviceIndex device_index, + DeviceType device_type) { + return XPUStream(Stream::unpack3(stream_id, device_index, device_type)); + } + + static std::tuple priority_range() { + return std::make_tuple(0, -max_compile_time_stream_priorities + 1); + } + + private: + Stream stream_; +}; + +/** + * Get a stream from the pool in a round-robin fashion. + * + * You can request a stream from the high priority pool by setting + * isHighPriority to true, or a priority value for a specific device by setting + * device. + */ +C10_XPU_API XPUStream +getStreamFromPool(const bool isHighPriority = false, DeviceIndex device = -1); +// The priority number lower, the priority higher. +C10_XPU_API XPUStream +getStreamFromPool(const int priority, DeviceIndex device = -1); + +/** + * Get the current XPU stream, for the passed XPU device, or for the current + * device if no device index is passed. + */ +C10_XPU_API XPUStream getCurrentXPUStream(DeviceIndex device = -1); + +/** + * Set the current stream on the device of the passed in stream to be the passed + * in stream. + */ +C10_XPU_API void setCurrentXPUStream(XPUStream stream); + +C10_XPU_API std::ostream& operator<<(std::ostream& stream, const XPUStream& s); + +/** + * Block all reserved SYCL queues in the stream pools on the device, and wait + * for their synchronizations. 
+ */ +C10_XPU_API void syncStreamsOnDevice(DeviceIndex device = -1); + +} // namespace c10::xpu + +namespace std { +template <> +struct hash { + size_t operator()(c10::xpu::XPUStream s) const noexcept { + return std::hash{}(s.unwrap()); + } +}; +} // namespace std diff --git a/venv/lib/python3.10/site-packages/torch/include/c10/xpu/impl/XPUGuardImpl.h b/venv/lib/python3.10/site-packages/torch/include/c10/xpu/impl/XPUGuardImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..6647e25917755f64bf912d126cf45f5922233d23 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/c10/xpu/impl/XPUGuardImpl.h @@ -0,0 +1,125 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include + +namespace c10::xpu::impl { + +struct XPUGuardImpl final : public c10::impl::DeviceGuardImplInterface { + static constexpr DeviceType static_type = kXPU; + + XPUGuardImpl() = default; + + explicit XPUGuardImpl(DeviceType t) { + TORCH_INTERNAL_ASSERT(t == kXPU); + } + + DeviceType type() const override { + return kXPU; + } + + Device exchangeDevice(Device d) const override { + TORCH_INTERNAL_ASSERT(d.is_xpu()); + const auto old_device_index = c10::xpu::exchange_device(d.index()); + return Device(kXPU, old_device_index); + } + + Device getDevice() const override { + const auto device = c10::xpu::current_device(); + return Device(kXPU, device); + } + + void setDevice(Device d) const override { + TORCH_INTERNAL_ASSERT(d.is_xpu()); + c10::xpu::set_device(d.index()); + } + + void uncheckedSetDevice(Device d) const noexcept override { + c10::xpu::set_device(d.index()); + } + + Stream getStream(Device d) const noexcept override { + return getCurrentXPUStream(d.index()).unwrap(); + } + + Stream getStreamFromGlobalPool(Device d, bool isHighPriority = false) + const override { + return getStreamFromPool(isHighPriority, d.index()); + } + + // NB: These do NOT set the current device + Stream exchangeStream(Stream s) const noexcept override { + const XPUStream stream(s); + const auto old_stream = getCurrentXPUStream(s.device().index()); + setCurrentXPUStream(stream); + return old_stream.unwrap(); + } + + DeviceIndex deviceCount() const noexcept override { + return c10::xpu::device_count(); + } + + // Event-related functions + void destroyEvent(void* event, const DeviceIndex device_index) + const noexcept override {} + + void record( + void** event, + const Stream& stream, + const DeviceIndex device_index, + const EventFlag flag) const override { + TORCH_CHECK( + device_index == -1 || device_index == stream.device_index(), + "Event device index ", + device_index, + " does not match recording stream's device index ", + stream.device_index(), + "."); + + auto* xpu_event = reinterpret_cast(*event); + const XPUStream xpu_stream{stream}; + *xpu_event = xpu_stream.queue().ext_oneapi_submit_barrier(); + } + + void block(void* event, const Stream& stream) const override { + if (!event) + return; + auto* xpu_event = reinterpret_cast(event); + std::vector event_list{*xpu_event}; + const XPUStream xpu_stream(stream); + xpu_stream.queue().ext_oneapi_submit_barrier(event_list); + } + + bool queryEvent(void* event) const override { + using namespace sycl::info; + if (!event) + return true; + auto* xpu_event = reinterpret_cast(event); + return xpu_event->get_info() == + event_command_status::complete; + } + + // Stream-related functions + bool queryStream(const Stream& stream) const override { + const XPUStream xpu_stream{stream}; + return xpu_stream.query(); + } + + void synchronizeStream(const 
Stream& stream) const override { + const XPUStream xpu_stream{stream}; + xpu_stream.synchronize(); + } + + void recordDataPtrOnStream(const c10::DataPtr& data_ptr, const Stream& stream) + const override { + const XPUStream xpu_stream{stream}; + XPUCachingAllocator::recordStream(data_ptr, xpu_stream); + } +}; + +} // namespace c10::xpu::impl diff --git a/venv/lib/python3.10/site-packages/torch/include/pybind11/attr.h b/venv/lib/python3.10/site-packages/torch/include/pybind11/attr.h new file mode 100644 index 0000000000000000000000000000000000000000..1044db94d906ac5fcf6faab6ac7668187314598f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/pybind11/attr.h @@ -0,0 +1,690 @@ +/* + pybind11/attr.h: Infrastructure for processing custom + type and function attributes + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#include "detail/common.h" +#include "cast.h" + +#include + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) + +/// \addtogroup annotations +/// @{ + +/// Annotation for methods +struct is_method { + handle class_; + explicit is_method(const handle &c) : class_(c) {} +}; + +/// Annotation for setters +struct is_setter {}; + +/// Annotation for operators +struct is_operator {}; + +/// Annotation for classes that cannot be subclassed +struct is_final {}; + +/// Annotation for parent scope +struct scope { + handle value; + explicit scope(const handle &s) : value(s) {} +}; + +/// Annotation for documentation +struct doc { + const char *value; + explicit doc(const char *value) : value(value) {} +}; + +/// Annotation for function names +struct name { + const char *value; + explicit name(const char *value) : value(value) {} +}; + +/// Annotation indicating that a function is an overload associated with a given "sibling" +struct sibling { + handle value; + explicit sibling(const handle &value) : value(value.ptr()) {} +}; + +/// Annotation indicating that a class derives from another given type +template +struct base { + + PYBIND11_DEPRECATED( + "base() was deprecated in favor of specifying 'T' as a template argument to class_") + base() = default; +}; + +/// Keep patient alive while nurse lives +template +struct keep_alive {}; + +/// Annotation indicating that a class is involved in a multiple inheritance relationship +struct multiple_inheritance {}; + +/// Annotation which enables dynamic attributes, i.e. adds `__dict__` to a class +struct dynamic_attr {}; + +/// Annotation which enables the buffer protocol for a type +struct buffer_protocol {}; + +/// Annotation which requests that a special metaclass is created for a type +struct metaclass { + handle value; + + PYBIND11_DEPRECATED("py::metaclass() is no longer required. It's turned on by default now.") + metaclass() = default; + + /// Override pybind11's default metaclass + explicit metaclass(handle value) : value(value) {} +}; + +/// Specifies a custom callback with signature `void (PyHeapTypeObject*)` that +/// may be used to customize the Python type. +/// +/// The callback is invoked immediately before `PyType_Ready`. +/// +/// Note: This is an advanced interface, and uses of it may require changes to +/// work with later versions of pybind11. You may wish to consult the +/// implementation of `make_new_python_type` in `detail/classes.h` to understand +/// the context in which the callback will be run. 
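+/// A hedged usage sketch (editor's illustration; `OwnsPythonObjects` is a
+/// hypothetical bound type), e.g. to opt the generated Python type into GC
+/// tracking:
+///
+///     py::class_<OwnsPythonObjects>(m, "OwnsPythonObjects",
+///         py::custom_type_setup([](PyHeapTypeObject *heap_type) {
+///             auto *type = &heap_type->ht_type;
+///             type->tp_flags |= Py_TPFLAGS_HAVE_GC;
+///         }));
+///
+/// (A complete GC setup would also install tp_traverse / tp_clear slots.)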
+struct custom_type_setup { + using callback = std::function; + + explicit custom_type_setup(callback value) : value(std::move(value)) {} + + callback value; +}; + +/// Annotation that marks a class as local to the module: +struct module_local { + const bool value; + constexpr explicit module_local(bool v = true) : value(v) {} +}; + +/// Annotation to mark enums as an arithmetic type +struct arithmetic {}; + +/// Mark a function for addition at the beginning of the existing overload chain instead of the end +struct prepend {}; + +/** \rst + A call policy which places one or more guard variables (``Ts...``) around the function call. + + For example, this definition: + + .. code-block:: cpp + + m.def("foo", foo, py::call_guard()); + + is equivalent to the following pseudocode: + + .. code-block:: cpp + + m.def("foo", [](args...) { + T scope_guard; + return foo(args...); // forwarded arguments + }); + \endrst */ +template +struct call_guard; + +template <> +struct call_guard<> { + using type = detail::void_type; +}; + +template +struct call_guard { + static_assert(std::is_default_constructible::value, + "The guard type must be default constructible"); + + using type = T; +}; + +template +struct call_guard { + struct type { + T guard{}; // Compose multiple guard types with left-to-right default-constructor order + typename call_guard::type next{}; + }; +}; + +/// @} annotations + +PYBIND11_NAMESPACE_BEGIN(detail) +/* Forward declarations */ +enum op_id : int; +enum op_type : int; +struct undefined_t; +template +struct op_; +void keep_alive_impl(size_t Nurse, size_t Patient, function_call &call, handle ret); + +/// Internal data structure which holds metadata about a keyword argument +struct argument_record { + const char *name; ///< Argument name + const char *descr; ///< Human-readable version of the argument value + handle value; ///< Associated Python object + bool convert : 1; ///< True if the argument is allowed to convert when loading + bool none : 1; ///< True if None is allowed when loading + + argument_record(const char *name, const char *descr, handle value, bool convert, bool none) + : name(name), descr(descr), value(value), convert(convert), none(none) {} +}; + +/// Internal data structure which holds metadata about a bound function (signature, overloads, +/// etc.) +struct function_record { + function_record() + : is_constructor(false), is_new_style_constructor(false), is_stateless(false), + is_operator(false), is_method(false), is_setter(false), has_args(false), + has_kwargs(false), prepend(false) {} + + /// Function name + char *name = nullptr; /* why no C++ strings? They generate heavier code.. 
*/ + + // User-specified documentation string + char *doc = nullptr; + + /// Human-readable version of the function signature + char *signature = nullptr; + + /// List of registered keyword arguments + std::vector args; + + /// Pointer to lambda function which converts arguments and performs the actual call + handle (*impl)(function_call &) = nullptr; + + /// Storage for the wrapped function pointer and captured data, if any + void *data[3] = {}; + + /// Pointer to custom destructor for 'data' (if needed) + void (*free_data)(function_record *ptr) = nullptr; + + /// Return value policy associated with this function + return_value_policy policy = return_value_policy::automatic; + + /// True if name == '__init__' + bool is_constructor : 1; + + /// True if this is a new-style `__init__` defined in `detail/init.h` + bool is_new_style_constructor : 1; + + /// True if this is a stateless function pointer + bool is_stateless : 1; + + /// True if this is an operator (__add__), etc. + bool is_operator : 1; + + /// True if this is a method + bool is_method : 1; + + /// True if this is a setter + bool is_setter : 1; + + /// True if the function has a '*args' argument + bool has_args : 1; + + /// True if the function has a '**kwargs' argument + bool has_kwargs : 1; + + /// True if this function is to be inserted at the beginning of the overload resolution chain + bool prepend : 1; + + /// Number of arguments (including py::args and/or py::kwargs, if present) + std::uint16_t nargs; + + /// Number of leading positional arguments, which are terminated by a py::args or py::kwargs + /// argument or by a py::kw_only annotation. + std::uint16_t nargs_pos = 0; + + /// Number of leading arguments (counted in `nargs`) that are positional-only + std::uint16_t nargs_pos_only = 0; + + /// Python method object + PyMethodDef *def = nullptr; + + /// Python handle to the parent scope (a class or a module) + handle scope; + + /// Python handle to the sibling function representing an overload chain + handle sibling; + + /// Pointer to next overload + function_record *next = nullptr; +}; + +/// Special data structure which (temporarily) holds metadata about a bound class +struct type_record { + PYBIND11_NOINLINE type_record() + : multiple_inheritance(false), dynamic_attr(false), buffer_protocol(false), + default_holder(true), module_local(false), is_final(false) {} + + /// Handle to the parent scope + handle scope; + + /// Name of the class + const char *name = nullptr; + + // Pointer to RTTI type_info data structure + const std::type_info *type = nullptr; + + /// How large is the underlying C++ type? + size_t type_size = 0; + + /// What is the alignment of the underlying C++ type? + size_t type_align = 0; + + /// How large is the type's holder? + size_t holder_size = 0; + + /// The global operator new can be overridden with a class-specific variant + void *(*operator_new)(size_t) = nullptr; + + /// Function pointer to class_<..>::init_instance + void (*init_instance)(instance *, const void *) = nullptr; + + /// Function pointer to class_<..>::dealloc + void (*dealloc)(detail::value_and_holder &) = nullptr; + + /// List of base classes of the newly created type + list bases; + + /// Optional docstring + const char *doc = nullptr; + + /// Custom metaclass (optional) + handle metaclass; + + /// Custom type setup. + custom_type_setup::callback custom_type_setup_callback; + + /// Multiple inheritance marker + bool multiple_inheritance : 1; + + /// Does the class manage a __dict__? 
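+    /// (Set via py::dynamic_attr(); editor's hedged sketch, with `Pet` a
+    /// hypothetical bound type:
+    ///     py::class_<Pet>(m, "Pet", py::dynamic_attr());
+    /// instances of such a class then accept arbitrary Python attributes.)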
+ bool dynamic_attr : 1; + + /// Does the class implement the buffer protocol? + bool buffer_protocol : 1; + + /// Is the default (unique_ptr) holder type used? + bool default_holder : 1; + + /// Is the class definition local to the module shared object? + bool module_local : 1; + + /// Is the class inheritable from python classes? + bool is_final : 1; + + PYBIND11_NOINLINE void add_base(const std::type_info &base, void *(*caster)(void *) ) { + auto *base_info = detail::get_type_info(base, false); + if (!base_info) { + std::string tname(base.name()); + detail::clean_type_id(tname); + pybind11_fail("generic_type: type \"" + std::string(name) + + "\" referenced unknown base type \"" + tname + "\""); + } + + if (default_holder != base_info->default_holder) { + std::string tname(base.name()); + detail::clean_type_id(tname); + pybind11_fail("generic_type: type \"" + std::string(name) + "\" " + + (default_holder ? "does not have" : "has") + + " a non-default holder type while its base \"" + tname + "\" " + + (base_info->default_holder ? "does not" : "does")); + } + + bases.append((PyObject *) base_info->type); + +#if PY_VERSION_HEX < 0x030B0000 + dynamic_attr |= base_info->type->tp_dictoffset != 0; +#else + dynamic_attr |= (base_info->type->tp_flags & Py_TPFLAGS_MANAGED_DICT) != 0; +#endif + + if (caster) { + base_info->implicit_casts.emplace_back(type, caster); + } + } +}; + +inline function_call::function_call(const function_record &f, handle p) : func(f), parent(p) { + args.reserve(f.nargs); + args_convert.reserve(f.nargs); +} + +/// Tag for a new-style `__init__` defined in `detail/init.h` +struct is_new_style_constructor {}; + +/** + * Partial template specializations to process custom attributes provided to + * cpp_function_ and class_. These are either used to initialize the respective + * fields in the type_record and function_record data structures or executed at + * runtime to deal with custom call policies (e.g. keep_alive). 
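+ *
+ * Editor's hedged illustration: a binding such as
+ *
+ *     m.def("append", &Container::append, py::keep_alive<1, 2>());
+ *
+ * is handled by the keep_alive specialization below; since both indices are
+ * non-zero it runs in the pre-call handler and invokes keep_alive_impl,
+ * keeping argument 2 (the appended element) alive at least as long as
+ * argument 1 (`self`). `Container` is a hypothetical bound type.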
+ */ +template +struct process_attribute; + +template +struct process_attribute_default { + /// Default implementation: do nothing + static void init(const T &, function_record *) {} + static void init(const T &, type_record *) {} + static void precall(function_call &) {} + static void postcall(function_call &, handle) {} +}; + +/// Process an attribute specifying the function's name +template <> +struct process_attribute : process_attribute_default { + static void init(const name &n, function_record *r) { r->name = const_cast(n.value); } +}; + +/// Process an attribute specifying the function's docstring +template <> +struct process_attribute : process_attribute_default { + static void init(const doc &n, function_record *r) { r->doc = const_cast(n.value); } +}; + +/// Process an attribute specifying the function's docstring (provided as a C-style string) +template <> +struct process_attribute : process_attribute_default { + static void init(const char *d, function_record *r) { r->doc = const_cast(d); } + static void init(const char *d, type_record *r) { r->doc = d; } +}; +template <> +struct process_attribute : process_attribute {}; + +/// Process an attribute indicating the function's return value policy +template <> +struct process_attribute : process_attribute_default { + static void init(const return_value_policy &p, function_record *r) { r->policy = p; } +}; + +/// Process an attribute which indicates that this is an overloaded function associated with a +/// given sibling +template <> +struct process_attribute : process_attribute_default { + static void init(const sibling &s, function_record *r) { r->sibling = s.value; } +}; + +/// Process an attribute which indicates that this function is a method +template <> +struct process_attribute : process_attribute_default { + static void init(const is_method &s, function_record *r) { + r->is_method = true; + r->scope = s.class_; + } +}; + +/// Process an attribute which indicates that this function is a setter +template <> +struct process_attribute : process_attribute_default { + static void init(const is_setter &, function_record *r) { r->is_setter = true; } +}; + +/// Process an attribute which indicates the parent scope of a method +template <> +struct process_attribute : process_attribute_default { + static void init(const scope &s, function_record *r) { r->scope = s.value; } +}; + +/// Process an attribute which indicates that this function is an operator +template <> +struct process_attribute : process_attribute_default { + static void init(const is_operator &, function_record *r) { r->is_operator = true; } +}; + +template <> +struct process_attribute + : process_attribute_default { + static void init(const is_new_style_constructor &, function_record *r) { + r->is_new_style_constructor = true; + } +}; + +inline void check_kw_only_arg(const arg &a, function_record *r) { + if (r->args.size() > r->nargs_pos && (!a.name || a.name[0] == '\0')) { + pybind11_fail("arg(): cannot specify an unnamed argument after a kw_only() annotation or " + "args() argument"); + } +} + +inline void append_self_arg_if_needed(function_record *r) { + if (r->is_method && r->args.empty()) { + r->args.emplace_back("self", nullptr, handle(), /*convert=*/true, /*none=*/false); + } +} + +/// Process a keyword argument attribute (*without* a default value) +template <> +struct process_attribute : process_attribute_default { + static void init(const arg &a, function_record *r) { + append_self_arg_if_needed(r); + r->args.emplace_back(a.name, nullptr, handle(), 
!a.flag_noconvert, a.flag_none); + + check_kw_only_arg(a, r); + } +}; + +/// Process a keyword argument attribute (*with* a default value) +template <> +struct process_attribute : process_attribute_default { + static void init(const arg_v &a, function_record *r) { + if (r->is_method && r->args.empty()) { + r->args.emplace_back( + "self", /*descr=*/nullptr, /*parent=*/handle(), /*convert=*/true, /*none=*/false); + } + + if (!a.value) { +#if defined(PYBIND11_DETAILED_ERROR_MESSAGES) + std::string descr("'"); + if (a.name) { + descr += std::string(a.name) + ": "; + } + descr += a.type + "'"; + if (r->is_method) { + if (r->name) { + descr += " in method '" + (std::string) str(r->scope) + "." + + (std::string) r->name + "'"; + } else { + descr += " in method of '" + (std::string) str(r->scope) + "'"; + } + } else if (r->name) { + descr += " in function '" + (std::string) r->name + "'"; + } + pybind11_fail("arg(): could not convert default argument " + descr + + " into a Python object (type not registered yet?)"); +#else + pybind11_fail("arg(): could not convert default argument " + "into a Python object (type not registered yet?). " + "#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for " + "more information."); +#endif + } + r->args.emplace_back(a.name, a.descr, a.value.inc_ref(), !a.flag_noconvert, a.flag_none); + + check_kw_only_arg(a, r); + } +}; + +/// Process a keyword-only-arguments-follow pseudo argument +template <> +struct process_attribute : process_attribute_default { + static void init(const kw_only &, function_record *r) { + append_self_arg_if_needed(r); + if (r->has_args && r->nargs_pos != static_cast(r->args.size())) { + pybind11_fail("Mismatched args() and kw_only(): they must occur at the same relative " + "argument location (or omit kw_only() entirely)"); + } + r->nargs_pos = static_cast(r->args.size()); + } +}; + +/// Process a positional-only-argument maker +template <> +struct process_attribute : process_attribute_default { + static void init(const pos_only &, function_record *r) { + append_self_arg_if_needed(r); + r->nargs_pos_only = static_cast(r->args.size()); + if (r->nargs_pos_only > r->nargs_pos) { + pybind11_fail("pos_only(): cannot follow a py::args() argument"); + } + // It also can't follow a kw_only, but a static_assert in pybind11.h checks that + } +}; + +/// Process a parent class attribute. 
Single inheritance only (class_ itself already guarantees +/// that) +template +struct process_attribute::value>> + : process_attribute_default { + static void init(const handle &h, type_record *r) { r->bases.append(h); } +}; + +/// Process a parent class attribute (deprecated, does not support multiple inheritance) +template +struct process_attribute> : process_attribute_default> { + static void init(const base &, type_record *r) { r->add_base(typeid(T), nullptr); } +}; + +/// Process a multiple inheritance attribute +template <> +struct process_attribute : process_attribute_default { + static void init(const multiple_inheritance &, type_record *r) { + r->multiple_inheritance = true; + } +}; + +template <> +struct process_attribute : process_attribute_default { + static void init(const dynamic_attr &, type_record *r) { r->dynamic_attr = true; } +}; + +template <> +struct process_attribute { + static void init(const custom_type_setup &value, type_record *r) { + r->custom_type_setup_callback = value.value; + } +}; + +template <> +struct process_attribute : process_attribute_default { + static void init(const is_final &, type_record *r) { r->is_final = true; } +}; + +template <> +struct process_attribute : process_attribute_default { + static void init(const buffer_protocol &, type_record *r) { r->buffer_protocol = true; } +}; + +template <> +struct process_attribute : process_attribute_default { + static void init(const metaclass &m, type_record *r) { r->metaclass = m.value; } +}; + +template <> +struct process_attribute : process_attribute_default { + static void init(const module_local &l, type_record *r) { r->module_local = l.value; } +}; + +/// Process a 'prepend' attribute, putting this at the beginning of the overload chain +template <> +struct process_attribute : process_attribute_default { + static void init(const prepend &, function_record *r) { r->prepend = true; } +}; + +/// Process an 'arithmetic' attribute for enums (does nothing here) +template <> +struct process_attribute : process_attribute_default {}; + +template +struct process_attribute> : process_attribute_default> {}; + +/** + * Process a keep_alive call policy -- invokes keep_alive_impl during the + * pre-call handler if both Nurse, Patient != 0 and use the post-call handler + * otherwise + */ +template +struct process_attribute> + : public process_attribute_default> { + template = 0> + static void precall(function_call &call) { + keep_alive_impl(Nurse, Patient, call, handle()); + } + template = 0> + static void postcall(function_call &, handle) {} + template = 0> + static void precall(function_call &) {} + template = 0> + static void postcall(function_call &call, handle ret) { + keep_alive_impl(Nurse, Patient, call, ret); + } +}; + +/// Recursively iterate over variadic template arguments +template +struct process_attributes { + static void init(const Args &...args, function_record *r) { + PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(r); + PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(r); + using expander = int[]; + (void) expander{ + 0, ((void) process_attribute::type>::init(args, r), 0)...}; + } + static void init(const Args &...args, type_record *r) { + PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(r); + PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(r); + using expander = int[]; + (void) expander{0, + (process_attribute::type>::init(args, r), 0)...}; + } + static void precall(function_call &call) { + PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(call); + using expander = int[]; + (void) expander{0, + 
(process_attribute::type>::precall(call), 0)...}; + } + static void postcall(function_call &call, handle fn_ret) { + PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(call, fn_ret); + PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(fn_ret); + using expander = int[]; + (void) expander{ + 0, (process_attribute::type>::postcall(call, fn_ret), 0)...}; + } +}; + +template +using is_call_guard = is_instantiation; + +/// Extract the ``type`` from the first `call_guard` in `Extras...` (or `void_type` if none found) +template +using extract_guard_t = typename exactly_one_t, Extra...>::type; + +/// Check the number of named arguments at compile time +template ::value...), + size_t self = constexpr_sum(std::is_same::value...)> +constexpr bool expected_num_args(size_t nargs, bool has_args, bool has_kwargs) { + PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(nargs, has_args, has_kwargs); + return named == 0 || (self + named + size_t(has_args) + size_t(has_kwargs)) == nargs; +} + +PYBIND11_NAMESPACE_END(detail) +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/venv/lib/python3.10/site-packages/torch/include/pybind11/buffer_info.h b/venv/lib/python3.10/site-packages/torch/include/pybind11/buffer_info.h new file mode 100644 index 0000000000000000000000000000000000000000..b99ee8bef422a6dc65d27e364eded545615e9ccc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/pybind11/buffer_info.h @@ -0,0 +1,208 @@ +/* + pybind11/buffer_info.h: Python buffer object interface + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#include "detail/common.h" + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) + +PYBIND11_NAMESPACE_BEGIN(detail) + +// Default, C-style strides +inline std::vector c_strides(const std::vector &shape, ssize_t itemsize) { + auto ndim = shape.size(); + std::vector strides(ndim, itemsize); + if (ndim > 0) { + for (size_t i = ndim - 1; i > 0; --i) { + strides[i - 1] = strides[i] * shape[i]; + } + } + return strides; +} + +// F-style strides; default when constructing an array_t with `ExtraFlags & f_style` +inline std::vector f_strides(const std::vector &shape, ssize_t itemsize) { + auto ndim = shape.size(); + std::vector strides(ndim, itemsize); + for (size_t i = 1; i < ndim; ++i) { + strides[i] = strides[i - 1] * shape[i - 1]; + } + return strides; +} + +template +struct compare_buffer_info; + +PYBIND11_NAMESPACE_END(detail) + +/// Information record describing a Python buffer object +struct buffer_info { + void *ptr = nullptr; // Pointer to the underlying storage + ssize_t itemsize = 0; // Size of individual items in bytes + ssize_t size = 0; // Total number of entries + std::string format; // For homogeneous buffers, this should be set to + // format_descriptor::format() + ssize_t ndim = 0; // Number of dimensions + std::vector shape; // Shape of the tensor (1 entry per dimension) + std::vector strides; // Number of bytes between adjacent entries + // (for each per dimension) + bool readonly = false; // flag to indicate if the underlying storage may be written to + + buffer_info() = default; + + buffer_info(void *ptr, + ssize_t itemsize, + const std::string &format, + ssize_t ndim, + detail::any_container shape_in, + detail::any_container strides_in, + bool readonly = false) + : ptr(ptr), itemsize(itemsize), size(1), format(format), ndim(ndim), + shape(std::move(shape_in)), strides(std::move(strides_in)), readonly(readonly) { + if (ndim != (ssize_t) 
shape.size() || ndim != (ssize_t) strides.size()) { + pybind11_fail("buffer_info: ndim doesn't match shape and/or strides length"); + } + for (size_t i = 0; i < (size_t) ndim; ++i) { + size *= shape[i]; + } + } + + template + buffer_info(T *ptr, + detail::any_container shape_in, + detail::any_container strides_in, + bool readonly = false) + : buffer_info(private_ctr_tag(), + ptr, + sizeof(T), + format_descriptor::format(), + static_cast(shape_in->size()), + std::move(shape_in), + std::move(strides_in), + readonly) {} + + buffer_info(void *ptr, + ssize_t itemsize, + const std::string &format, + ssize_t size, + bool readonly = false) + : buffer_info(ptr, itemsize, format, 1, {size}, {itemsize}, readonly) {} + + template + buffer_info(T *ptr, ssize_t size, bool readonly = false) + : buffer_info(ptr, sizeof(T), format_descriptor::format(), size, readonly) {} + + template + buffer_info(const T *ptr, ssize_t size, bool readonly = true) + : buffer_info( + const_cast(ptr), sizeof(T), format_descriptor::format(), size, readonly) {} + + explicit buffer_info(Py_buffer *view, bool ownview = true) + : buffer_info( + view->buf, + view->itemsize, + view->format, + view->ndim, + {view->shape, view->shape + view->ndim}, + /* Though buffer::request() requests PyBUF_STRIDES, ctypes objects + * ignore this flag and return a view with NULL strides. + * When strides are NULL, build them manually. */ + view->strides + ? std::vector(view->strides, view->strides + view->ndim) + : detail::c_strides({view->shape, view->shape + view->ndim}, view->itemsize), + (view->readonly != 0)) { + // NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer) + this->m_view = view; + // NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer) + this->ownview = ownview; + } + + buffer_info(const buffer_info &) = delete; + buffer_info &operator=(const buffer_info &) = delete; + + buffer_info(buffer_info &&other) noexcept { (*this) = std::move(other); } + + buffer_info &operator=(buffer_info &&rhs) noexcept { + ptr = rhs.ptr; + itemsize = rhs.itemsize; + size = rhs.size; + format = std::move(rhs.format); + ndim = rhs.ndim; + shape = std::move(rhs.shape); + strides = std::move(rhs.strides); + std::swap(m_view, rhs.m_view); + std::swap(ownview, rhs.ownview); + readonly = rhs.readonly; + return *this; + } + + ~buffer_info() { + if (m_view && ownview) { + PyBuffer_Release(m_view); + delete m_view; + } + } + + Py_buffer *view() const { return m_view; } + Py_buffer *&view() { return m_view; } + + /* True if the buffer item type is equivalent to `T`. */ + // To define "equivalent" by example: + // `buffer_info::item_type_is_equivalent_to(b)` and + // `buffer_info::item_type_is_equivalent_to(b)` may both be true + // on some platforms, but `int` and `unsigned` will never be equivalent. + // For the ground truth, please inspect `detail::compare_buffer_info<>`. 
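+    //
+    // Editor's aside (hedged sketch of the producer side, with `Matrix` a
+    // hypothetical row-major float matrix type): a .def_buffer lambda would
+    // typically return
+    //
+    //   py::buffer_info(m.data(), sizeof(float),
+    //                   py::format_descriptor<float>::format(),
+    //                   /*ndim=*/2, {m.rows(), m.cols()},
+    //                   {sizeof(float) * m.cols(), sizeof(float)});
+    //
+    // i.e. exactly the strides detail::c_strides({rows, cols}, sizeof(float))
+    // would compute.
+    //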
+ template + bool item_type_is_equivalent_to() const { + return detail::compare_buffer_info::compare(*this); + } + +private: + struct private_ctr_tag {}; + + buffer_info(private_ctr_tag, + void *ptr, + ssize_t itemsize, + const std::string &format, + ssize_t ndim, + detail::any_container &&shape_in, + detail::any_container &&strides_in, + bool readonly) + : buffer_info( + ptr, itemsize, format, ndim, std::move(shape_in), std::move(strides_in), readonly) {} + + Py_buffer *m_view = nullptr; + bool ownview = false; +}; + +PYBIND11_NAMESPACE_BEGIN(detail) + +template +struct compare_buffer_info { + static bool compare(const buffer_info &b) { + // NOLINTNEXTLINE(bugprone-sizeof-expression) Needed for `PyObject *` + return b.format == format_descriptor::format() && b.itemsize == (ssize_t) sizeof(T); + } +}; + +template +struct compare_buffer_info::value>> { + static bool compare(const buffer_info &b) { + return (size_t) b.itemsize == sizeof(T) + && (b.format == format_descriptor::value + || ((sizeof(T) == sizeof(long)) + && b.format == (std::is_unsigned::value ? "L" : "l")) + || ((sizeof(T) == sizeof(size_t)) + && b.format == (std::is_unsigned::value ? "N" : "n"))); + } +}; + +PYBIND11_NAMESPACE_END(detail) +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/venv/lib/python3.10/site-packages/torch/include/pybind11/cast.h b/venv/lib/python3.10/site-packages/torch/include/pybind11/cast.h new file mode 100644 index 0000000000000000000000000000000000000000..02d9488daec63655a27e13f3b86c742bfbcbae3c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/pybind11/cast.h @@ -0,0 +1,1837 @@ +/* + pybind11/cast.h: Partial template specializations to cast between + C++ and Python types + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#pragma once + +#include "detail/common.h" +#include "detail/descr.h" +#include "detail/type_caster_base.h" +#include "detail/typeid.h" +#include "pytypes.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) + +PYBIND11_WARNING_DISABLE_MSVC(4127) + +PYBIND11_NAMESPACE_BEGIN(detail) + +template +class type_caster : public type_caster_base {}; +template +using make_caster = type_caster>; + +// Shortcut for calling a caster's `cast_op_type` cast operator for casting a type_caster to a T +template +typename make_caster::template cast_op_type cast_op(make_caster &caster) { + using result_t = typename make_caster::template cast_op_type; // See PR #4893 + return caster.operator result_t(); +} +template +typename make_caster::template cast_op_type::type> +cast_op(make_caster &&caster) { + using result_t = typename make_caster::template cast_op_type< + typename std::add_rvalue_reference::type>; // See PR #4893 + return std::move(caster).operator result_t(); +} + +template +class type_caster> { +private: + using caster_t = make_caster; + caster_t subcaster; + using reference_t = type &; + using subcaster_cast_op_type = typename caster_t::template cast_op_type; + + static_assert( + std::is_same::type &, subcaster_cast_op_type>::value + || std::is_same::value, + "std::reference_wrapper caster requires T to have a caster with an " + "`operator T &()` or `operator const T &()`"); + +public: + bool load(handle src, bool convert) { return subcaster.load(src, convert); } + static constexpr auto name = caster_t::name; + static handle + cast(const std::reference_wrapper &src, return_value_policy policy, handle parent) { + // It is definitely wrong to take ownership of this pointer, so mask that rvp + if (policy == return_value_policy::take_ownership + || policy == return_value_policy::automatic) { + policy = return_value_policy::automatic_reference; + } + return caster_t::cast(&src.get(), policy, parent); + } + template + using cast_op_type = std::reference_wrapper; + explicit operator std::reference_wrapper() { return cast_op(subcaster); } +}; + +#define PYBIND11_TYPE_CASTER(type, py_name) \ +protected: \ + type value; \ + \ +public: \ + static constexpr auto name = py_name; \ + template >::value, \ + int> \ + = 0> \ + static ::pybind11::handle cast( \ + T_ *src, ::pybind11::return_value_policy policy, ::pybind11::handle parent) { \ + if (!src) \ + return ::pybind11::none().release(); \ + if (policy == ::pybind11::return_value_policy::take_ownership) { \ + auto h = cast(std::move(*src), policy, parent); \ + delete src; \ + return h; \ + } \ + return cast(*src, policy, parent); \ + } \ + operator type *() { return &value; } /* NOLINT(bugprone-macro-parentheses) */ \ + operator type &() { return value; } /* NOLINT(bugprone-macro-parentheses) */ \ + operator type &&() && { return std::move(value); } /* NOLINT(bugprone-macro-parentheses) */ \ + template \ + using cast_op_type = ::pybind11::detail::movable_cast_op_type + +template +using is_std_char_type = any_of, /* std::string */ +#if defined(PYBIND11_HAS_U8STRING) + std::is_same, /* std::u8string */ +#endif + std::is_same, /* std::u16string */ + std::is_same, /* std::u32string */ + std::is_same /* std::wstring */ + >; + +template +struct type_caster::value && !is_std_char_type::value>> { + using _py_type_0 = conditional_t; + using _py_type_1 = conditional_t::value, + _py_type_0, + typename std::make_unsigned<_py_type_0>::type>; + using py_type 
= conditional_t::value, double, _py_type_1>; + +public: + bool load(handle src, bool convert) { + py_type py_value; + + if (!src) { + return false; + } + +#if !defined(PYPY_VERSION) + auto index_check = [](PyObject *o) { return PyIndex_Check(o); }; +#else + // In PyPy 7.3.3, `PyIndex_Check` is implemented by calling `__index__`, + // while CPython only considers the existence of `nb_index`/`__index__`. + auto index_check = [](PyObject *o) { return hasattr(o, "__index__"); }; +#endif + + if (std::is_floating_point::value) { + if (convert || PyFloat_Check(src.ptr())) { + py_value = (py_type) PyFloat_AsDouble(src.ptr()); + } else { + return false; + } + } else if (PyFloat_Check(src.ptr()) + || (!convert && !PYBIND11_LONG_CHECK(src.ptr()) && !index_check(src.ptr()))) { + return false; + } else { + handle src_or_index = src; + // PyPy: 7.3.7's 3.8 does not implement PyLong_*'s __index__ calls. +#if PY_VERSION_HEX < 0x03080000 || defined(PYPY_VERSION) + object index; + if (!PYBIND11_LONG_CHECK(src.ptr())) { // So: index_check(src.ptr()) + index = reinterpret_steal(PyNumber_Index(src.ptr())); + if (!index) { + PyErr_Clear(); + if (!convert) + return false; + } else { + src_or_index = index; + } + } +#endif + if (std::is_unsigned::value) { + py_value = as_unsigned(src_or_index.ptr()); + } else { // signed integer: + py_value = sizeof(T) <= sizeof(long) + ? (py_type) PyLong_AsLong(src_or_index.ptr()) + : (py_type) PYBIND11_LONG_AS_LONGLONG(src_or_index.ptr()); + } + } + + // Python API reported an error + bool py_err = py_value == (py_type) -1 && PyErr_Occurred(); + + // Check to see if the conversion is valid (integers should match exactly) + // Signed/unsigned checks happen elsewhere + if (py_err + || (std::is_integral::value && sizeof(py_type) != sizeof(T) + && py_value != (py_type) (T) py_value)) { + PyErr_Clear(); + if (py_err && convert && (PyNumber_Check(src.ptr()) != 0)) { + auto tmp = reinterpret_steal(std::is_floating_point::value + ? 
PyNumber_Float(src.ptr()) + : PyNumber_Long(src.ptr())); + PyErr_Clear(); + return load(tmp, false); + } + return false; + } + + value = (T) py_value; + return true; + } + + template + static typename std::enable_if::value, handle>::type + cast(U src, return_value_policy /* policy */, handle /* parent */) { + return PyFloat_FromDouble((double) src); + } + + template + static typename std::enable_if::value && std::is_signed::value + && (sizeof(U) <= sizeof(long)), + handle>::type + cast(U src, return_value_policy /* policy */, handle /* parent */) { + return PYBIND11_LONG_FROM_SIGNED((long) src); + } + + template + static typename std::enable_if::value && std::is_unsigned::value + && (sizeof(U) <= sizeof(unsigned long)), + handle>::type + cast(U src, return_value_policy /* policy */, handle /* parent */) { + return PYBIND11_LONG_FROM_UNSIGNED((unsigned long) src); + } + + template + static typename std::enable_if::value && std::is_signed::value + && (sizeof(U) > sizeof(long)), + handle>::type + cast(U src, return_value_policy /* policy */, handle /* parent */) { + return PyLong_FromLongLong((long long) src); + } + + template + static typename std::enable_if::value && std::is_unsigned::value + && (sizeof(U) > sizeof(unsigned long)), + handle>::type + cast(U src, return_value_policy /* policy */, handle /* parent */) { + return PyLong_FromUnsignedLongLong((unsigned long long) src); + } + + PYBIND11_TYPE_CASTER(T, const_name::value>("int", "float")); +}; + +template +struct void_caster { +public: + bool load(handle src, bool) { + if (src && src.is_none()) { + return true; + } + return false; + } + static handle cast(T, return_value_policy /* policy */, handle /* parent */) { + return none().release(); + } + PYBIND11_TYPE_CASTER(T, const_name("None")); +}; + +template <> +class type_caster : public void_caster {}; + +template <> +class type_caster : public type_caster { +public: + using type_caster::cast; + + bool load(handle h, bool) { + if (!h) { + return false; + } + if (h.is_none()) { + value = nullptr; + return true; + } + + /* Check if this is a capsule */ + if (isinstance(h)) { + value = reinterpret_borrow(h); + return true; + } + + /* Check if this is a C++ type */ + const auto &bases = all_type_info((PyTypeObject *) type::handle_of(h).ptr()); + if (bases.size() == 1) { // Only allowing loading from a single-value type + value = values_and_holders(reinterpret_cast(h.ptr())).begin()->value_ptr(); + return true; + } + + /* Fail */ + return false; + } + + static handle cast(const void *ptr, return_value_policy /* policy */, handle /* parent */) { + if (ptr) { + return capsule(ptr).release(); + } + return none().release(); + } + + template + using cast_op_type = void *&; + explicit operator void *&() { return value; } + static constexpr auto name = const_name("capsule"); + +private: + void *value = nullptr; +}; + +template <> +class type_caster : public void_caster {}; + +template <> +class type_caster { +public: + bool load(handle src, bool convert) { + if (!src) { + return false; + } + if (src.ptr() == Py_True) { + value = true; + return true; + } + if (src.ptr() == Py_False) { + value = false; + return true; + } + if (convert || is_numpy_bool(src)) { + // (allow non-implicit conversion for numpy booleans), use strncmp + // since NumPy 1.x had an additional trailing underscore. 
+
+// Helper class for UTF-{8,16,32} C++ stl strings:
+template <typename StringType, bool IsView = false>
+struct string_caster {
+    using CharT = typename StringType::value_type;
+
+    // Simplify life by being able to assume standard char sizes (the standard only guarantees
+    // minimums, but Python requires exact sizes)
+    static_assert(!std::is_same<CharT, char>::value || sizeof(CharT) == 1,
+                  "Unsupported char size != 1");
+#if defined(PYBIND11_HAS_U8STRING)
+    static_assert(!std::is_same<CharT, char8_t>::value || sizeof(CharT) == 1,
+                  "Unsupported char8_t size != 1");
+#endif
+    static_assert(!std::is_same<CharT, char16_t>::value || sizeof(CharT) == 2,
+                  "Unsupported char16_t size != 2");
+    static_assert(!std::is_same<CharT, char32_t>::value || sizeof(CharT) == 4,
+                  "Unsupported char32_t size != 4");
+    // wchar_t can be either 16 bits (Windows) or 32 (everywhere else)
+    static_assert(!std::is_same<CharT, wchar_t>::value || sizeof(CharT) == 2 || sizeof(CharT) == 4,
+                  "Unsupported wchar_t size != 2/4");
+    static constexpr size_t UTF_N = 8 * sizeof(CharT);
+
+    bool load(handle src, bool) {
+        handle load_src = src;
+        if (!src) {
+            return false;
+        }
+        if (!PyUnicode_Check(load_src.ptr())) {
+            return load_raw(load_src);
+        }
+
+        // For UTF-8 we avoid the need for a temporary `bytes` object by using
+        // `PyUnicode_AsUTF8AndSize`.
+        if (UTF_N == 8) {
+            Py_ssize_t size = -1;
+            const auto *buffer
+                = reinterpret_cast<const CharT *>(PyUnicode_AsUTF8AndSize(load_src.ptr(), &size));
+            if (!buffer) {
+                PyErr_Clear();
+                return false;
+            }
+            value = StringType(buffer, static_cast<size_t>(size));
+            return true;
+        }
+
+        auto utfNbytes
+            = reinterpret_steal<object>(PyUnicode_AsEncodedString(load_src.ptr(),
+                                                                  UTF_N == 8    ? "utf-8"
+                                                                  : UTF_N == 16 ? "utf-16"
+                                                                                : "utf-32",
+                                                                  nullptr));
+        if (!utfNbytes) {
+            PyErr_Clear();
+            return false;
+        }
+
+        const auto *buffer
+            = reinterpret_cast<const CharT *>(PYBIND11_BYTES_AS_STRING(utfNbytes.ptr()));
+        size_t length = (size_t) PYBIND11_BYTES_SIZE(utfNbytes.ptr()) / sizeof(CharT);
+        // Skip BOM for UTF-16/32
+        if (UTF_N > 8) {
+            buffer++;
+            length--;
+        }
+        value = StringType(buffer, length);
+
+        // If we're loading a string_view we need to keep the encoded Python object alive:
+        if (IsView) {
+            loader_life_support::add_patient(utfNbytes);
+        }
+
+        return true;
+    }
+
+    static handle
+    cast(const StringType &src, return_value_policy /* policy */, handle /* parent */) {
+        const char *buffer = reinterpret_cast<const char *>(src.data());
+        auto nbytes = ssize_t(src.size() * sizeof(CharT));
+        handle s = decode_utfN(buffer, nbytes);
+        if (!s) {
+            throw error_already_set();
+        }
+        return s;
+    }
+
+    PYBIND11_TYPE_CASTER(StringType, const_name(PYBIND11_STRING_NAME));
+
+private:
+    static handle decode_utfN(const char *buffer, ssize_t nbytes) {
+#if !defined(PYPY_VERSION)
+        return UTF_N == 8    ? PyUnicode_DecodeUTF8(buffer, nbytes, nullptr)
+               : UTF_N == 16 ? PyUnicode_DecodeUTF16(buffer, nbytes, nullptr, nullptr)
+                             : PyUnicode_DecodeUTF32(buffer, nbytes, nullptr, nullptr);
+#else
+        // PyPy segfaults when on PyUnicode_DecodeUTF16 (and possibly on PyUnicode_DecodeUTF32 as
+        // well), so bypass the whole thing by just passing the encoding as a string value, which
+        // works properly:
+        return PyUnicode_Decode(buffer,
+                                nbytes,
+                                UTF_N == 8    ? "utf-8"
+                                : UTF_N == 16 ? "utf-16"
+                                              : "utf-32",
+                                nullptr);
+#endif
+    }
+
+    // When loading into a std::string or char*, accept a bytes/bytearray object as-is (i.e.
+    // without any encoding/decoding attempt). For other C++ char sizes this is a no-op.
+    // which supports loading a unicode from a str, doesn't take this path.
+    template <typename C = CharT>
+    bool load_raw(enable_if_t<std::is_same<C, char>::value, handle> src) {
+        if (PYBIND11_BYTES_CHECK(src.ptr())) {
+            // We were passed raw bytes; accept it into a std::string or char*
+            // without any encoding attempt.
+            const char *bytes = PYBIND11_BYTES_AS_STRING(src.ptr());
+            if (!bytes) {
+                pybind11_fail("Unexpected PYBIND11_BYTES_AS_STRING() failure.");
+            }
+            value = StringType(bytes, (size_t) PYBIND11_BYTES_SIZE(src.ptr()));
+            return true;
+        }
+        if (PyByteArray_Check(src.ptr())) {
+            // We were passed a bytearray; accept it into a std::string or char*
+            // without any encoding attempt.
+            const char *bytearray = PyByteArray_AsString(src.ptr());
+            if (!bytearray) {
+                pybind11_fail("Unexpected PyByteArray_AsString() failure.");
+            }
+            value = StringType(bytearray, (size_t) PyByteArray_Size(src.ptr()));
+            return true;
+        }
+
+        return false;
+    }
+
+    template <typename C = CharT>
+    bool load_raw(enable_if_t<!std::is_same<C, char>::value, handle>) {
+        return false;
+    }
+};
+
+template <typename CharT, class Traits, class Allocator>
+struct type_caster<std::basic_string<CharT, Traits, Allocator>,
+                   enable_if_t<is_std_char_type<CharT>::value>>
+    : string_caster<std::basic_string<CharT, Traits, Allocator>> {};
+
+#ifdef PYBIND11_HAS_STRING_VIEW
+template <typename CharT, class Traits>
+struct type_caster<std::basic_string_view<CharT, Traits>,
+                   enable_if_t<is_std_char_type<CharT>::value>>
+    : string_caster<std::basic_string_view<CharT, Traits>, true> {};
+#endif
+
+// Type caster for C-style strings. We basically use a std::string type caster, but also add the
+// ability to use None as a nullptr char* (which the string caster doesn't allow).
+template <typename CharT>
+struct type_caster<CharT, enable_if_t<is_std_char_type<CharT>::value>> {
+    using StringType = std::basic_string<CharT>;
+    using StringCaster = make_caster<StringType>;
+    StringCaster str_caster;
+    bool none = false;
+    CharT one_char = 0;
+
+public:
+    bool load(handle src, bool convert) {
+        if (!src) {
+            return false;
+        }
+        if (src.is_none()) {
+            // Defer accepting None to other overloads (if we aren't in convert mode):
+            if (!convert) {
+                return false;
+            }
+            none = true;
+            return true;
+        }
+        return str_caster.load(src, convert);
+    }
+
+    static handle cast(const CharT *src, return_value_policy policy, handle parent) {
+        if (src == nullptr) {
+            return pybind11::none().release();
+        }
+        return StringCaster::cast(StringType(src), policy, parent);
+    }
+
+    static handle cast(CharT src, return_value_policy policy, handle parent) {
+        if (std::is_same<char, CharT>::value) {
+            handle s = PyUnicode_DecodeLatin1((const char *) &src, 1, nullptr);
+            if (!s) {
+                throw error_already_set();
+            }
+            return s;
+        }
+        return StringCaster::cast(StringType(1, src), policy, parent);
+    }
+
+    explicit operator CharT *() {
+        return none ? nullptr : const_cast<CharT *>(static_cast<StringType &>(str_caster).c_str());
+    }
+    explicit operator CharT &() {
+        if (none) {
+            throw value_error("Cannot convert None to a character");
+        }
+
+        auto &value = static_cast<StringType &>(str_caster);
+        size_t str_len = value.size();
+        if (str_len == 0) {
+            throw value_error("Cannot convert empty string to a character");
+        }
+
+        // If we're in UTF-8 mode, we have two possible failures: one for a unicode character that
+        // is too high, and one for multiple unicode characters (caught later), so we need to
+        // figure out how long the first encoded character is in bytes to distinguish between these
+        // two errors. We also want to allow unicode characters U+0080 through U+00FF, as
+        // those can fit into a single char value.
+        if (StringCaster::UTF_N == 8 && str_len > 1 && str_len <= 4) {
+            auto v0 = static_cast<unsigned char>(value[0]);
+            // low bits only: 0-127
+            // 0b110xxxxx - start of 2-byte sequence
+            // 0b1110xxxx - start of 3-byte sequence
+            // 0b11110xxx - start of 4-byte sequence
+            size_t char0_bytes = (v0 & 0x80) == 0      ? 1
+                                 : (v0 & 0xE0) == 0xC0 ? 2
+                                 : (v0 & 0xF0) == 0xE0 ? 3
+                                                       : 4;
+
+            if (char0_bytes == str_len) {
+                // If we have a 128-255 value, we can decode it into a single char:
+                if (char0_bytes == 2 && (v0 & 0xFC) == 0xC0) { // 0x110000xx 0x10xxxxxx
+                    one_char = static_cast<CharT>(((v0 & 3) << 6)
+                                                  + (static_cast<unsigned char>(value[1]) & 0x3F));
+                    return one_char;
+                }
+                // Otherwise we have a single character, but it's > U+00FF
+                throw value_error("Character code point not in range(0x100)");
+            }
+        }
+
+        // UTF-16 is much easier: we can only have a surrogate pair for values above U+FFFF, thus a
+        // surrogate pair with total length 2 instantly indicates a range error (but not a "your
+        // string was too long" error).
+        else if (StringCaster::UTF_N == 16 && str_len == 2) {
+            one_char = static_cast<CharT>(value[0]);
+            if (one_char >= 0xD800 && one_char < 0xE000) {
+                throw value_error("Character code point not in range(0x10000)");
+            }
+        }
+
+        if (str_len != 1) {
+            throw value_error("Expected a character, but multi-character string found");
+        }
+
+        one_char = value[0];
+        return one_char;
+    }
+
+    static constexpr auto name = const_name(PYBIND11_STRING_NAME);
+    template <typename _T>
+    using cast_op_type = pybind11::detail::cast_op_type<_T>;
+};
+
+// Base implementation for std::tuple and std::pair
+template